-rw-r--r-- Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt | 41
-rw-r--r-- Documentation/scsi/ChangeLog.megaraid_sas | 14
-rw-r--r-- drivers/message/fusion/mptbase.c | 8
-rw-r--r-- drivers/message/fusion/mptctl.c | 7
-rw-r--r-- drivers/message/fusion/mptspi.c | 5
-rw-r--r-- drivers/scsi/Kconfig | 11
-rw-r--r-- drivers/scsi/aacraid/linit.c | 1
-rw-r--r-- drivers/scsi/arcmsr/arcmsr.h | 146
-rw-r--r-- drivers/scsi/arcmsr/arcmsr_attr.c | 120
-rw-r--r-- drivers/scsi/arcmsr/arcmsr_hba.c | 2326
-rw-r--r-- drivers/scsi/be2iscsi/be.h | 2
-rw-r--r-- drivers/scsi/be2iscsi/be_cmds.c | 40
-rw-r--r-- drivers/scsi/be2iscsi/be_cmds.h | 24
-rw-r--r-- drivers/scsi/be2iscsi/be_iscsi.c | 31
-rw-r--r-- drivers/scsi/be2iscsi/be_iscsi.h | 2
-rw-r--r-- drivers/scsi/be2iscsi/be_main.c | 42
-rw-r--r-- drivers/scsi/be2iscsi/be_main.h | 8
-rw-r--r-- drivers/scsi/be2iscsi/be_mgmt.c | 15
-rw-r--r-- drivers/scsi/be2iscsi/be_mgmt.h | 2
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_io.c | 15
-rw-r--r-- drivers/scsi/bnx2i/bnx2i_iscsi.c | 3
-rw-r--r-- drivers/scsi/csiostor/csio_hw.h | 2
-rw-r--r-- drivers/scsi/csiostor/csio_isr.c | 24
-rw-r--r-- drivers/scsi/cxgbi/libcxgbi.c | 2
-rw-r--r-- drivers/scsi/dpt_i2o.c | 1
-rw-r--r-- drivers/scsi/eata.c | 9
-rw-r--r-- drivers/scsi/fcoe/fcoe_transport.c | 39
-rw-r--r-- drivers/scsi/fnic/fnic.h | 2
-rw-r--r-- drivers/scsi/fnic/fnic_fcs.c | 5
-rw-r--r-- drivers/scsi/fnic/fnic_trace.c | 5
-rw-r--r-- drivers/scsi/hpsa.c | 70
-rw-r--r-- drivers/scsi/ipr.c | 10
-rw-r--r-- drivers/scsi/ipr.h | 1
-rw-r--r-- drivers/scsi/iscsi_tcp.c | 10
-rw-r--r-- drivers/scsi/libfc/fc_libfc.c | 4
-rw-r--r-- drivers/scsi/libiscsi.c | 1
-rw-r--r-- drivers/scsi/lpfc/lpfc_attr.c | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_bsg.c | 20
-rw-r--r-- drivers/scsi/lpfc/lpfc_crtn.h | 1
-rw-r--r-- drivers/scsi/lpfc/lpfc_ct.c | 14
-rw-r--r-- drivers/scsi/lpfc/lpfc_debugfs.c | 4
-rw-r--r-- drivers/scsi/lpfc/lpfc_disc.h | 6
-rw-r--r-- drivers/scsi/lpfc/lpfc_els.c | 33
-rw-r--r-- drivers/scsi/lpfc/lpfc_hbadisc.c | 53
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 225
-rw-r--r-- drivers/scsi/lpfc/lpfc_mbox.c | 6
-rw-r--r-- drivers/scsi/lpfc/lpfc_nportdisc.c | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 34
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c | 247
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.h | 1
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli4.h | 20
-rw-r--r-- drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas.h | 161
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_base.c | 1099
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fp.c | 327
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fusion.c | 439
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_fusion.h | 105
-rw-r--r-- drivers/scsi/mpt2sas/Kconfig | 2
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2.h | 12
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h | 29
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2_init.h | 8
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2_ioc.h | 74
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2_raid.h | 8
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2_sas.h | 2
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2_tool.h | 44
-rw-r--r-- drivers/scsi/mpt2sas/mpi/mpi2_type.h | 2
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_base.c | 328
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_base.h | 28
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_config.c | 2
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_ctl.c | 2
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_ctl.h | 2
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_debug.h | 2
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_scsih.c | 197
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_transport.c | 2
-rw-r--r-- drivers/scsi/mpt3sas/Kconfig | 2
-rw-r--r-- drivers/scsi/mpt3sas/mpi/mpi2.h | 8
-rw-r--r-- drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h | 18
-rw-r--r-- drivers/scsi/mpt3sas/mpi/mpi2_init.h | 8
-rw-r--r-- drivers/scsi/mpt3sas/mpi/mpi2_ioc.h | 64
-rw-r--r-- drivers/scsi/mpt3sas/mpi/mpi2_raid.h | 8
-rw-r--r-- drivers/scsi/mpt3sas/mpi/mpi2_sas.h | 8
-rw-r--r-- drivers/scsi/mpt3sas/mpi/mpi2_tool.h | 45
-rw-r--r-- drivers/scsi/mpt3sas/mpi/mpi2_type.h | 2
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.c | 287
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.h | 49
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_config.c | 2
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_ctl.c | 2
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_ctl.h | 2
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_debug.h | 2
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 78
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_transport.c | 2
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c | 2
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h | 2
-rw-r--r-- drivers/scsi/nsp32.c | 2
-rw-r--r-- drivers/scsi/pm8001/pm8001_ctl.c | 4
-rw-r--r-- drivers/scsi/pm8001/pm8001_hwi.c | 8
-rw-r--r-- drivers/scsi/pmcraid.c | 14
-rw-r--r-- drivers/scsi/qla2xxx/qla_attr.c | 16
-rw-r--r-- drivers/scsi/qla2xxx/qla_bsg.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_dbg.c | 34
-rw-r--r-- drivers/scsi/qla2xxx/qla_def.h | 191
-rw-r--r-- drivers/scsi/qla2xxx/qla_fw.h | 8
-rw-r--r-- drivers/scsi/qla2xxx/qla_gbl.h | 9
-rw-r--r-- drivers/scsi/qla2xxx/qla_gs.c | 943
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 41
-rw-r--r-- drivers/scsi/qla2xxx/qla_inline.h | 8
-rw-r--r-- drivers/scsi/qla2xxx/qla_iocb.c | 17
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c | 133
-rw-r--r-- drivers/scsi/qla2xxx/qla_mbx.c | 43
-rw-r--r-- drivers/scsi/qla2xxx/qla_mid.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_mr.c | 16
-rw-r--r-- drivers/scsi/qla2xxx/qla_nx.c | 8
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 132
-rw-r--r-- drivers/scsi/qla2xxx/qla_sup.c | 7
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.c | 693
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.h | 31
-rw-r--r-- drivers/scsi/qla2xxx/qla_tmpl.c | 106
-rw-r--r-- drivers/scsi/qla2xxx/qla_tmpl.h | 8
-rw-r--r-- drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r-- drivers/scsi/qla2xxx/tcm_qla2xxx.c | 45
-rw-r--r-- drivers/scsi/scsi.c | 12
-rw-r--r-- drivers/scsi/scsi_debug.c | 136
-rw-r--r-- drivers/scsi/scsi_error.c | 27
-rw-r--r-- drivers/scsi/scsi_lib.c | 24
-rw-r--r-- drivers/scsi/scsi_scan.c | 26
-rw-r--r-- drivers/scsi/scsi_sysfs.c | 17
-rw-r--r-- drivers/scsi/sd.c | 8
-rw-r--r-- drivers/scsi/sr.c | 2
-rw-r--r-- drivers/scsi/st.c | 2
-rw-r--r-- drivers/scsi/storvsc_drv.c | 12
-rw-r--r-- drivers/scsi/ufs/Kconfig | 2
-rw-r--r-- drivers/scsi/ufs/ufs.h | 132
-rw-r--r-- drivers/scsi/ufs/ufshcd-pci.c | 55
-rw-r--r-- drivers/scsi/ufs/ufshcd-pltfrm.c | 291
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 2514
-rw-r--r-- drivers/scsi/ufs/ufshcd.h | 280
-rw-r--r-- drivers/scsi/ufs/ufshci.h | 9
-rw-r--r-- drivers/scsi/ufs/unipro.h | 56
-rw-r--r-- drivers/usb/storage/usb.c | 8
-rw-r--r-- include/scsi/scsi.h | 1
-rw-r--r-- include/scsi/scsi_device.h | 1
-rw-r--r-- include/scsi/scsi_host.h | 6
142 files changed, 10336 insertions(+), 2992 deletions(-)
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index 20468b2a7516..53579197eca2 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -8,9 +8,50 @@ Required properties:
 - interrupts        : <interrupt mapping for UFS host controller IRQ>
 - reg               : <registers mapping>
 
+Optional properties:
+- vdd-hba-supply        : phandle to UFS host controller supply regulator node
+- vcc-supply            : phandle to VCC supply regulator node
+- vccq-supply           : phandle to VCCQ supply regulator node
+- vccq2-supply          : phandle to VCCQ2 supply regulator node
+- vcc-supply-1p8        : For embedded UFS devices, valid VCC range is 1.7-1.95V
+                          or 2.7-3.6V. This boolean property when set, specifies
+                          to use low voltage range of 1.7-1.95V. Note for external
+                          UFS cards this property is invalid and valid VCC range is
+                          always 2.7-3.6V.
+- vcc-max-microamp      : specifies max. load that can be drawn from vcc supply
+- vccq-max-microamp     : specifies max. load that can be drawn from vccq supply
+- vccq2-max-microamp    : specifies max. load that can be drawn from vccq2 supply
+- <name>-fixed-regulator : boolean property specifying that <name>-supply is a fixed regulator
+
+- clocks                : List of phandle and clock specifier pairs
+- clock-names           : List of clock input name strings sorted in the same
+                          order as the clocks property.
+- freq-table-hz         : Array of <min max> operating frequencies stored in the same
+                          order as the clocks property. If this property is not
+                          defined or a value in the array is "0" then it is assumed
+                          that the frequency is set by the parent clock or a
+                          fixed rate clock source.
+
+Note: If above properties are not defined it can be assumed that the supply
+regulators or clocks are always on.
+
 Example:
 	ufshc@0xfc598000 {
 		compatible = "jedec,ufs-1.1";
 		reg = <0xfc598000 0x800>;
 		interrupts = <0 28 0>;
+
+		vdd-hba-supply = <&xxx_reg0>;
+		vdd-hba-fixed-regulator;
+		vcc-supply = <&xxx_reg1>;
+		vcc-supply-1p8;
+		vccq-supply = <&xxx_reg2>;
+		vccq2-supply = <&xxx_reg3>;
+		vcc-max-microamp = 500000;
+		vccq-max-microamp = 200000;
+		vccq2-max-microamp = 200000;
+
+		clocks = <&core 0>, <&ref 0>, <&iface 0>;
+		clock-names = "core_clk", "ref_clk", "iface_clk";
+		freq-table-hz = <100000000 200000000>, <0 0>, <0 0>;
 	};
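
The freq-table-hz pairs above line up positionally with the clocks list, one <min max> pair per clock. The consuming driver code is not part of this patch excerpt; a minimal sketch of how such a table could be read, assuming the generic of_property_read_u32_array() helper (the function name and storage layout here are hypothetical):

	#include <linux/of.h>

	/* Hypothetical parser: read one <min max> pair per clock from
	 * "freq-table-hz"; a 0 entry means the rate is fixed elsewhere. */
	static int read_freq_table_hz(struct device_node *np, u32 *table,
		int nr_clocks)
	{
		/* two u32 cells (min, max) per entry in the clocks list */
		return of_property_read_u32_array(np, "freq-table-hz",
						  table, nr_clocks * 2);
	}
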
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 91ba58ef02d7..18b570990040 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,17 @@
+Release Date        : Thu. Jun 19, 2014 17:00:00 PST 2014 -
+			(emaild-id:megaraidlinux@lsi.com)
+			Adam Radford
+			Kashyap Desai
+			Sumit Saxena
+			Uday Lingala
+Current Version     : 06.803.02.00-rc1
+Old Version         : 06.803.01.00-rc1
+    1. Fix reset_mutex leak in megasas_reset_fusion().
+    2. Remove unused variables in megasas_instance.
+    3. Fix LD/VF affiliation parsing.
+    4. Add missing initial call to megasas_get_ld_vf_affiliation().
+    5. Version and Changelog update.
+-------------------------------------------------------------------------------
 Release Date        : Mon. Mar 10, 2014 17:00:00 PST 2014 -
 			(emaild-id:megaraidlinux@lsi.com)
 			Adam Radford
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index a896d948b79e..187f83629f7e 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1400,7 +1400,6 @@ mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp)
  *	@vendor: pci vendor id
  *	@device: pci device id
  *	@revision: pci revision id
- *	@prod_name: string returned
  *
  *	Returns product string displayed when driver loads,
  *	in /proc/mpt/summary and /sysfs/class/scsi_host/host<X>/version_product
@@ -3172,12 +3171,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
 		facts->FWImageSize = le32_to_cpu(facts->FWImageSize);
 	}
 
-	sz = facts->FWImageSize;
-	if ( sz & 0x01 )
-		sz += 1;
-	if ( sz & 0x02 )
-		sz += 2;
-	facts->FWImageSize = sz;
+	facts->FWImageSize = ALIGN(facts->FWImageSize, 4);
 
 	if (!facts->RequestFrameSize) {
 		/* Something is wrong! */
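
The ALIGN() substitution here (and the identical one in mptctl.c below) replaces an open-coded round-up to the next 4-byte boundary. A small standalone check of the equivalence, with hypothetical size values:

	#include <stdio.h>

	/* Same formula as the kernel's ALIGN() for power-of-two alignment:
	 * round x up to the next multiple of a. */
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned int sz;

		for (sz = 1020; sz <= 1024; sz++) {	/* hypothetical image sizes */
			/* the removed open-coded form: pad odd sizes by 1,
			 * then 2-byte remainders by 2 */
			unsigned int old = sz;
			if (old & 0x01)
				old += 1;
			if (old & 0x02)
				old += 2;
			printf("%u -> old %u, ALIGN %u\n", sz, old, ALIGN(sz, 4));
		}
		return 0;	/* both columns agree: 1020 1024 1024 1024 1024 */
	}
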
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index b0a892a2bf1b..70bb7530b22c 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1741,12 +1741,7 @@ mptctl_replace_fw (unsigned long arg)
 
 	/* Allocate memory for the new FW image
 	 */
-	newFwSize = karg.newImageSize;
-
-	if (newFwSize & 0x01)
-		newFwSize += 1;
-	if (newFwSize & 0x02)
-		newFwSize += 2;
+	newFwSize = ALIGN(karg.newImageSize, 4);
 
 	mpt_alloc_fw_memory(ioc, newFwSize);
 	if (ioc->cached_fw == NULL)
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 787933d43d32..613231c16194 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -1419,6 +1419,11 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1419 goto out_mptspi_probe; 1419 goto out_mptspi_probe;
1420 } 1420 }
1421 1421
1422 /* VMWare emulation doesn't properly implement WRITE_SAME
1423 */
1424 if (pdev->subsystem_vendor == 0x15AD)
1425 sh->no_write_same = 1;
1426
1422 spin_lock_irqsave(&ioc->FreeQlock, flags); 1427 spin_lock_irqsave(&ioc->FreeQlock, flags);
1423 1428
1424 /* Attach the SCSI Host to the IOC structure 1429 /* Attach the SCSI Host to the IOC structure
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index bd85fb4978e0..e85e64a07d02 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -45,6 +45,17 @@ config SCSI_NETLINK
 	default	n
 	depends on NET
 
+config SCSI_MQ_DEFAULT
+	bool "SCSI: use blk-mq I/O path by default"
+	depends on SCSI
+	---help---
+	  This option enables the new blk-mq based I/O path for SCSI
+	  devices by default.  With the option the scsi_mod.use_blk_mq
+	  module/boot option defaults to Y, without it to N, but it can
+	  still be overriden either way.
+
+	  If unsure say N.
+
 config SCSI_PROC_FS
 	bool "legacy /proc/scsi/ support"
 	depends on SCSI && PROC_FS
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 63f576c9300a..a759cb2d4b15 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1152,6 +1152,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	shost->irq = pdev->irq;
 	shost->unique_id = unique_id;
 	shost->max_cmd_len = 16;
+	shost->use_cmd_list = 1;
 
 	aac = (struct aac_dev *)shost->hostdata;
 	aac->base_start = pci_resource_start(pdev, 0);
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index 77b26f5b9c33..3bcaaac0ae4b 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -45,13 +45,14 @@
 #include <linux/interrupt.h>
 struct device_attribute;
 /*The limit of outstanding scsi command that firmware can handle*/
-#define ARCMSR_MAX_OUTSTANDING_CMD	256
 #ifdef CONFIG_XEN
 	#define ARCMSR_MAX_FREECCB_NUM	160
+#define ARCMSR_MAX_OUTSTANDING_CMD	155
 #else
 	#define ARCMSR_MAX_FREECCB_NUM	320
+#define ARCMSR_MAX_OUTSTANDING_CMD	255
 #endif
-#define ARCMSR_DRIVER_VERSION	"Driver Version 1.20.00.15 2010/08/05"
+#define ARCMSR_DRIVER_VERSION	"v1.30.00.04-20140919"
 #define ARCMSR_SCSI_INITIATOR_ID	255
 #define ARCMSR_MAX_XFER_SECTORS		512
 #define ARCMSR_MAX_XFER_SECTORS_B	4096
@@ -62,11 +63,17 @@ struct device_attribute;
 #define ARCMSR_MAX_QBUFFER		4096
 #define ARCMSR_DEFAULT_SG_ENTRIES	38
 #define ARCMSR_MAX_HBB_POSTQUEUE	264
+#define ARCMSR_MAX_ARC1214_POSTQUEUE	256
+#define ARCMSR_MAX_ARC1214_DONEQUEUE	257
 #define ARCMSR_MAX_XFER_LEN		0x26000 /* 152K */
 #define ARCMSR_CDB_SG_PAGE_LENGTH	256
+#define ARCMST_NUM_MSIX_VECTORS		4
 #ifndef PCI_DEVICE_ID_ARECA_1880
 #define PCI_DEVICE_ID_ARECA_1880	0x1880
 #endif
+#ifndef PCI_DEVICE_ID_ARECA_1214
+	#define PCI_DEVICE_ID_ARECA_1214	0x1214
+#endif
 /*
 **********************************************************************************
 **
@@ -100,10 +107,11 @@ struct CMD_MESSAGE
 ** IOP Message Transfer Data for user space
 *******************************************************************************
 */
+#define ARCMSR_API_DATA_BUFLEN	1032
 struct CMD_MESSAGE_FIELD
 {
 	struct CMD_MESSAGE cmdmessage;
-	uint8_t messagedatabuffer[1032];
+	uint8_t messagedatabuffer[ARCMSR_API_DATA_BUFLEN];
 };
 /* IOP message transfer */
 #define ARCMSR_MESSAGE_FAIL	0x0001
@@ -337,6 +345,56 @@ struct FIRMWARE_INFO
 #define ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK	0x80000000
 /*
 *******************************************************************************
+**                SPEC. for Areca Type D adapter
+*******************************************************************************
+*/
+#define ARCMSR_ARC1214_CHIP_ID				0x00004
+#define ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION		0x00008
+#define ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK		0x00034
+#define ARCMSR_ARC1214_SAMPLE_RESET			0x00100
+#define ARCMSR_ARC1214_RESET_REQUEST			0x00108
+#define ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS		0x00200
+#define ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE		0x0020C
+#define ARCMSR_ARC1214_INBOUND_MESSAGE0			0x00400
+#define ARCMSR_ARC1214_INBOUND_MESSAGE1			0x00404
+#define ARCMSR_ARC1214_OUTBOUND_MESSAGE0		0x00420
+#define ARCMSR_ARC1214_OUTBOUND_MESSAGE1		0x00424
+#define ARCMSR_ARC1214_INBOUND_DOORBELL			0x00460
+#define ARCMSR_ARC1214_OUTBOUND_DOORBELL		0x00480
+#define ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE		0x00484
+#define ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW		0x01000
+#define ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH		0x01004
+#define ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER	0x01018
+#define ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW		0x01060
+#define ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH		0x01064
+#define ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER	0x0106C
+#define ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER	0x01070
+#define ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE		0x01088
+#define ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE	0x0108C
+#define ARCMSR_ARC1214_MESSAGE_WBUFFER			0x02000
+#define ARCMSR_ARC1214_MESSAGE_RBUFFER			0x02100
+#define ARCMSR_ARC1214_MESSAGE_RWBUFFER			0x02200
+/* Host Interrupt Mask */
+#define ARCMSR_ARC1214_ALL_INT_ENABLE			0x00001010
+#define ARCMSR_ARC1214_ALL_INT_DISABLE			0x00000000
+/* Host Interrupt Status */
+#define ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR		0x00001000
+#define ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR		0x00000010
+/* DoorBell*/
+#define ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY		0x00000001
+#define ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ		0x00000002
+/*inbound message 0 ready*/
+#define ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK		0x00000001
+/*outbound DATA WRITE isr door bell clear*/
+#define ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK		0x00000002
+/*outbound message 0 ready*/
+#define ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE		0x02000000
+/*outbound message cmd isr door bell clear*/
+/*ARCMSR_HBAMU_MESSAGE_FIRMWARE_OK*/
+#define ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK		0x80000000
+#define ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR	0x00000001
+/*
+*******************************************************************************
 ** ARECA SCSI COMMAND DESCRIPTOR BLOCK size 0x1F8 (504)
 *******************************************************************************
 */
@@ -357,7 +415,7 @@ struct ARCMSR_CDB
 #define ARCMSR_CDB_FLAG_ORDEREDQ	0x10
 
 	uint8_t		msgPages;
-	uint32_t	Context;
+	uint32_t	msgContext;
 	uint32_t	DataLength;
 	uint8_t		Cdb[16];
 	uint8_t		DeviceStatus;
@@ -494,6 +552,56 @@ struct MessageUnit_C{
 	uint32_t	msgcode_rwbuffer[256];	/*2200 23FF*/
 };
 /*
+*********************************************************************
+**     Messaging Unit (MU) of Type D processor
+*********************************************************************
+*/
+struct InBound_SRB {
+	uint32_t addressLow;	/* pointer to SRB block */
+	uint32_t addressHigh;
+	uint32_t length;	/* in DWORDs */
+	uint32_t reserved0;
+};
+
+struct OutBound_SRB {
+	uint32_t addressLow;	/* pointer to SRB block */
+	uint32_t addressHigh;
+};
+
+struct MessageUnit_D {
+	struct InBound_SRB	post_qbuffer[ARCMSR_MAX_ARC1214_POSTQUEUE];
+	volatile struct OutBound_SRB
+				done_qbuffer[ARCMSR_MAX_ARC1214_DONEQUEUE];
+	u16 postq_index;
+	volatile u16 doneq_index;
+	u32 __iomem *chip_id;			/* 0x00004 */
+	u32 __iomem *cpu_mem_config;		/* 0x00008 */
+	u32 __iomem *i2o_host_interrupt_mask;	/* 0x00034 */
+	u32 __iomem *sample_at_reset;		/* 0x00100 */
+	u32 __iomem *reset_request;		/* 0x00108 */
+	u32 __iomem *host_int_status;		/* 0x00200 */
+	u32 __iomem *pcief0_int_enable;		/* 0x0020C */
+	u32 __iomem *inbound_msgaddr0;		/* 0x00400 */
+	u32 __iomem *inbound_msgaddr1;		/* 0x00404 */
+	u32 __iomem *outbound_msgaddr0;		/* 0x00420 */
+	u32 __iomem *outbound_msgaddr1;		/* 0x00424 */
+	u32 __iomem *inbound_doorbell;		/* 0x00460 */
+	u32 __iomem *outbound_doorbell;		/* 0x00480 */
+	u32 __iomem *outbound_doorbell_enable;	/* 0x00484 */
+	u32 __iomem *inboundlist_base_low;	/* 0x01000 */
+	u32 __iomem *inboundlist_base_high;	/* 0x01004 */
+	u32 __iomem *inboundlist_write_pointer;	/* 0x01018 */
+	u32 __iomem *outboundlist_base_low;	/* 0x01060 */
+	u32 __iomem *outboundlist_base_high;	/* 0x01064 */
+	u32 __iomem *outboundlist_copy_pointer;	/* 0x0106C */
+	u32 __iomem *outboundlist_read_pointer;	/* 0x01070 0x01072 */
+	u32 __iomem *outboundlist_interrupt_cause;	/* 0x1088 */
+	u32 __iomem *outboundlist_interrupt_enable;	/* 0x108C */
+	u32 __iomem *message_wbuffer;		/* 0x2000 */
+	u32 __iomem *message_rbuffer;		/* 0x2100 */
+	u32 __iomem *msgcode_rwbuffer;		/* 0x2200 */
+};
+/*
 *******************************************************************************
 **                  Adapter Control Block
 *******************************************************************************
@@ -505,19 +613,26 @@ struct AdapterControlBlock
 	#define ACB_ADAPTER_TYPE_B	0x00000002	/* hbb M IOP */
 	#define ACB_ADAPTER_TYPE_C	0x00000004	/* hbc P IOP */
 	#define ACB_ADAPTER_TYPE_D	0x00000008	/* hbd A IOP */
+	u32			roundup_ccbsize;
 	struct pci_dev *	pdev;
 	struct Scsi_Host *	host;
 	unsigned long		vir2phy_offset;
+	struct msix_entry	entries[ARCMST_NUM_MSIX_VECTORS];
 	/* Offset is used in making arc cdb physical to virtual calculations */
 	uint32_t		outbound_int_enable;
 	uint32_t		cdb_phyaddr_hi32;
 	uint32_t		reg_mu_acc_handle0;
 	spinlock_t		eh_lock;
 	spinlock_t		ccblist_lock;
+	spinlock_t		postq_lock;
+	spinlock_t		doneq_lock;
+	spinlock_t		rqbuffer_lock;
+	spinlock_t		wqbuffer_lock;
 	union {
 		struct MessageUnit_A __iomem *pmuA;
 		struct MessageUnit_B	*pmuB;
 		struct MessageUnit_C __iomem *pmuC;
+		struct MessageUnit_D	*pmuD;
 	};
 	/* message unit ATU inbound base address0 */
 	void __iomem		*mem_base0;
@@ -544,6 +659,8 @@ struct AdapterControlBlock
 	/* iop init */
 	#define ACB_F_ABORT		0x0200
 	#define ACB_F_FIRMWARE_TRAP	0x0400
+	#define ACB_F_MSI_ENABLED	0x1000
+	#define ACB_F_MSIX_ENABLED	0x2000
 	struct CommandControlBlock *	pccb_pool[ARCMSR_MAX_FREECCB_NUM];
 	/* used for memory free */
 	struct list_head	ccb_free_list;
@@ -557,19 +674,20 @@ struct AdapterControlBlock
 	/* dma_coherent used for memory free */
 	dma_addr_t		dma_coherent_handle;
 	/* dma_coherent_handle used for memory free */
-	dma_addr_t		dma_coherent_handle_hbb_mu;
+	dma_addr_t		dma_coherent_handle2;
+	void			*dma_coherent2;
 	unsigned int		uncache_size;
 	uint8_t			rqbuffer[ARCMSR_MAX_QBUFFER];
 	/* data collection buffer for read from 80331 */
-	int32_t			rqbuf_firstindex;
+	int32_t			rqbuf_getIndex;
 	/* first of read buffer */
-	int32_t			rqbuf_lastindex;
+	int32_t			rqbuf_putIndex;
 	/* last of read buffer */
 	uint8_t			wqbuffer[ARCMSR_MAX_QBUFFER];
 	/* data collection buffer for write to 80331 */
-	int32_t			wqbuf_firstindex;
+	int32_t			wqbuf_getIndex;
 	/* first of write buffer */
-	int32_t			wqbuf_lastindex;
+	int32_t			wqbuf_putIndex;
 	/* last of write buffer */
 	uint8_t			devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN];
 	/* id0 ..... id15, lun0...lun7 */
@@ -594,6 +712,8 @@ struct AdapterControlBlock
 	#define	FW_DEADLOCK	0x0010
 	atomic_t		rq_map_token;
 	atomic_t		ante_token_value;
+	uint32_t	maxOutstanding;
+	int		msix_vector_count;
 };/* HW_DEVICE_EXTENSION */
 /*
 *******************************************************************************
@@ -606,7 +726,7 @@ struct CommandControlBlock{
 	struct list_head	list;		/*x32: 8byte, x64: 16byte*/
 	struct scsi_cmnd	*pcmd;		/*8 bytes pointer of linux scsi command */
 	struct AdapterControlBlock	*acb;	/*x32: 4byte, x64: 8byte*/
-	uint32_t		cdb_phyaddr_pattern;	/*x32: 4byte, x64: 4byte*/
+	uint32_t		cdb_phyaddr;		/*x32: 4byte, x64: 4byte*/
 	uint32_t		arc_cdb_size;	/*x32:4byte,x64:4byte*/
 	uint16_t		ccb_flags;	/*x32: 2byte, x64: 2byte*/
 	#define	CCB_FLAG_READ	0x0000
@@ -684,8 +804,10 @@ struct SENSE_DATA
 #define ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE	0x01
 #define ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE		0x1F
 
-extern void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *);
-extern void arcmsr_iop_message_read(struct AdapterControlBlock *);
+extern void arcmsr_write_ioctldata2iop(struct AdapterControlBlock *);
+extern uint32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *,
+	struct QBUFFER __iomem *);
+extern void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *);
 extern struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *);
 extern struct device_attribute *arcmsr_host_attrs[];
 extern int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *);
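
Unlike the type A/C message units, which overlay a struct directly on the mapped BAR, MessageUnit_D keeps one __iomem pointer per register. The wiring of those pointers happens in arcmsr_hba.c and is not shown in full in this excerpt; a hypothetical helper illustrating the intended pattern (field/offset pairs taken from the defines above):

	/* Hypothetical helper: aim each MessageUnit_D field at base + offset,
	 * using the ARCMSR_ARC1214_* defines above. The real assignments live
	 * in arcmsr_hba.c, outside this excerpt. */
	static void arcmsr_hbaD_assign_regs(struct MessageUnit_D *reg,
		void __iomem *base)
	{
		reg->chip_id = base + ARCMSR_ARC1214_CHIP_ID;
		reg->cpu_mem_config = base + ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION;
		reg->host_int_status = base + ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS;
		reg->inbound_msgaddr0 = base + ARCMSR_ARC1214_INBOUND_MESSAGE0;
		reg->outbound_doorbell = base + ARCMSR_ARC1214_OUTBOUND_DOORBELL;
		reg->inboundlist_write_pointer =
			base + ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER;
		/* ...the remaining fields follow the same pattern... */
	}
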
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index acdae33de521..9c86481f779f 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -50,6 +50,7 @@
 #include <linux/errno.h>
 #include <linux/delay.h>
 #include <linux/pci.h>
+#include <linux/circ_buf.h>
 
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
@@ -68,42 +69,42 @@ static ssize_t arcmsr_sysfs_iop_message_read(struct file *filp,
 	struct device *dev = container_of(kobj,struct device,kobj);
 	struct Scsi_Host *host = class_to_shost(dev);
 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
-	uint8_t *pQbuffer,*ptmpQbuffer;
+	uint8_t *ptmpQbuffer;
 	int32_t allxfer_len = 0;
+	unsigned long flags;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
 
 	/* do message unit read. */
 	ptmpQbuffer = (uint8_t *)buf;
-	while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
-		&& (allxfer_len < 1031)) {
-		pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
-		memcpy(ptmpQbuffer, pQbuffer, 1);
-		acb->rqbuf_firstindex++;
-		acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
-		ptmpQbuffer++;
-		allxfer_len++;
+	spin_lock_irqsave(&acb->rqbuffer_lock, flags);
+	if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) {
+		unsigned int tail = acb->rqbuf_getIndex;
+		unsigned int head = acb->rqbuf_putIndex;
+		unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER);
+
+		allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER);
+		if (allxfer_len > ARCMSR_API_DATA_BUFLEN)
+			allxfer_len = ARCMSR_API_DATA_BUFLEN;
+
+		if (allxfer_len <= cnt_to_end)
+			memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len);
+		else {
+			memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end);
+			memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end);
+		}
+		acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER;
 	}
 	if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
 		struct QBUFFER __iomem *prbuffer;
-		uint8_t __iomem *iop_data;
-		int32_t iop_len;
-
 		acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
 		prbuffer = arcmsr_get_iop_rqbuffer(acb);
-		iop_data = prbuffer->data;
-		iop_len = readl(&prbuffer->data_len);
-		while (iop_len > 0) {
-			acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
-			acb->rqbuf_lastindex++;
-			acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
-			iop_data++;
-			iop_len--;
-		}
-		arcmsr_iop_message_read(acb);
+		if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
+			acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
 	}
-	return (allxfer_len);
+	spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
+	return allxfer_len;
 }
 
 static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp,
@@ -115,43 +116,42 @@ static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp,
 	struct device *dev = container_of(kobj,struct device,kobj);
 	struct Scsi_Host *host = class_to_shost(dev);
 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
-	int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
+	int32_t user_len, cnt2end;
 	uint8_t *pQbuffer, *ptmpuserbuffer;
+	unsigned long flags;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
-	if (count > 1032)
+	if (count > ARCMSR_API_DATA_BUFLEN)
 		return -EINVAL;
 	/* do message unit write. */
 	ptmpuserbuffer = (uint8_t *)buf;
 	user_len = (int32_t)count;
-	wqbuf_lastindex = acb->wqbuf_lastindex;
-	wqbuf_firstindex = acb->wqbuf_firstindex;
-	if (wqbuf_lastindex != wqbuf_firstindex) {
-		arcmsr_post_ioctldata2iop(acb);
+	spin_lock_irqsave(&acb->wqbuffer_lock, flags);
+	if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) {
+		arcmsr_write_ioctldata2iop(acb);
+		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
 		return 0;	/*need retry*/
 	} else {
-		my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
-			&(ARCMSR_MAX_QBUFFER - 1);
-		if (my_empty_len >= user_len) {
-			while (user_len > 0) {
-				pQbuffer =
-				&acb->wqbuffer[acb->wqbuf_lastindex];
-				memcpy(pQbuffer, ptmpuserbuffer, 1);
-				acb->wqbuf_lastindex++;
-				acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
-				ptmpuserbuffer++;
-				user_len--;
-			}
+		pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex];
+		cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex;
+		if (user_len > cnt2end) {
+			memcpy(pQbuffer, ptmpuserbuffer, cnt2end);
+			ptmpuserbuffer += cnt2end;
+			user_len -= cnt2end;
+			acb->wqbuf_putIndex = 0;
+			pQbuffer = acb->wqbuffer;
+		}
+		memcpy(pQbuffer, ptmpuserbuffer, user_len);
+		acb->wqbuf_putIndex += user_len;
+		acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
 		if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
 			acb->acb_flags &=
 				~ACB_F_MESSAGE_WQBUFFER_CLEARED;
-			arcmsr_post_ioctldata2iop(acb);
-		}
-		return count;
-		} else {
-			return 0;	/*need retry*/
+			arcmsr_write_ioctldata2iop(acb);
 		}
+		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
+		return count;
 	}
 }
 
@@ -165,22 +165,24 @@ static ssize_t arcmsr_sysfs_iop_message_clear(struct file *filp,
 	struct Scsi_Host *host = class_to_shost(dev);
 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
 	uint8_t *pQbuffer;
+	unsigned long flags;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
 
-	if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
-		acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
-		arcmsr_iop_message_read(acb);
-	}
+	arcmsr_clear_iop2drv_rqueue_buffer(acb);
 	acb->acb_flags |=
 		(ACB_F_MESSAGE_WQBUFFER_CLEARED
 		| ACB_F_MESSAGE_RQBUFFER_CLEARED
 		| ACB_F_MESSAGE_WQBUFFER_READED);
-	acb->rqbuf_firstindex = 0;
-	acb->rqbuf_lastindex = 0;
-	acb->wqbuf_firstindex = 0;
-	acb->wqbuf_lastindex = 0;
+	spin_lock_irqsave(&acb->rqbuffer_lock, flags);
+	acb->rqbuf_getIndex = 0;
+	acb->rqbuf_putIndex = 0;
+	spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
+	spin_lock_irqsave(&acb->wqbuffer_lock, flags);
+	acb->wqbuf_getIndex = 0;
+	acb->wqbuf_putIndex = 0;
+	spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
 	pQbuffer = acb->rqbuffer;
 	memset(pQbuffer, 0, sizeof (struct QBUFFER));
 	pQbuffer = acb->wqbuffer;
@@ -193,7 +195,7 @@ static struct bin_attribute arcmsr_sysfs_message_read_attr = {
 		.name = "mu_read",
 		.mode = S_IRUSR ,
 	},
-	.size = 1032,
+	.size = ARCMSR_API_DATA_BUFLEN,
 	.read = arcmsr_sysfs_iop_message_read,
 };
 
@@ -202,7 +204,7 @@ static struct bin_attribute arcmsr_sysfs_message_write_attr = {
 		.name = "mu_write",
 		.mode = S_IWUSR,
 	},
-	.size = 1032,
+	.size = ARCMSR_API_DATA_BUFLEN,
 	.write = arcmsr_sysfs_iop_message_write,
 };
 
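
The rewritten sysfs read path above drains the driver's ring with at most two memcpy() calls instead of one byte per loop iteration. A standalone sketch of the same CIRC_CNT/CIRC_CNT_TO_END bookkeeping, using userspace re-implementations of the kernel macros and a hypothetical 16-byte ring so the wrap-around case is easy to trace:

	#include <stdio.h>
	#include <string.h>

	/* Userspace re-implementations of the kernel's <linux/circ_buf.h>
	 * helpers; 'size' must be a power of two. */
	static unsigned int circ_cnt(unsigned int head, unsigned int tail,
		unsigned int size)
	{
		return (head - tail) & (size - 1);
	}

	static unsigned int circ_cnt_to_end(unsigned int head, unsigned int tail,
		unsigned int size)
	{
		unsigned int end = size - tail;	/* room before the wrap point */
		unsigned int n = (head + end) & (size - 1);
		return n < end ? n : end;
	}

	#define RING_SIZE 16	/* hypothetical; the driver uses ARCMSR_MAX_QBUFFER */

	int main(void)
	{
		/* valid data starts at index 10 ("ABCDEF") and wraps to index 0 */
		char ring[RING_SIZE + 1] = "NOPQRSTUVWABCDEF";
		char out[RING_SIZE + 1];
		unsigned int tail = 10, head = 6;
		unsigned int cnt = circ_cnt(head, tail, RING_SIZE);		/* 12 */
		unsigned int to_end = circ_cnt_to_end(head, tail, RING_SIZE);	/* 6 */

		if (cnt <= to_end) {
			memcpy(out, ring + tail, cnt);
		} else {	/* two copies: tail..end, then the wrapped part */
			memcpy(out, ring + tail, to_end);
			memcpy(out + to_end, ring, cnt - to_end);
		}
		out[cnt] = '\0';
		printf("%s\n", out);	/* prints ABCDEFNOPQRS */
		return 0;
	}
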
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index b13764ca23fd..0b44fb5ee485 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2,11 +2,10 @@
 *******************************************************************************
 ** O.S   : Linux
 ** FILE NAME  : arcmsr_hba.c
-** BY    : Nick Cheng
-** Description: SCSI RAID Device Driver for
-**       ARECA RAID Host adapter
+** BY    : Nick Cheng, C.L. Huang
+** Description: SCSI RAID Device Driver for Areca RAID Controller
 *******************************************************************************
-** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
+** Copyright (C) 2002 - 2014, Areca Technology Corporation All rights reserved
 **
 ** Web site: www.areca.com.tw
 ** E-mail: support@areca.com.tw
@@ -59,6 +58,7 @@
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/aer.h>
+#include <linux/circ_buf.h>
 #include <asm/dma.h>
 #include <asm/io.h>
 #include <asm/uaccess.h>
@@ -70,15 +70,15 @@
 #include <scsi/scsi_transport.h>
 #include <scsi/scsicam.h>
 #include "arcmsr.h"
-MODULE_AUTHOR("Nick Cheng <support@areca.com.tw>");
-MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/16xx/1880) SATA/SAS RAID Host Bus Adapter");
+MODULE_AUTHOR("Nick Cheng, C.L. Huang <support@areca.com.tw>");
+MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(ARCMSR_DRIVER_VERSION);
 
 #define	ARCMSR_SLEEPTIME	10
 #define	ARCMSR_RETRYCOUNT	12
 
-wait_queue_head_t wait_q;
+static wait_queue_head_t wait_q;
 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
 	struct scsi_cmnd *cmd);
 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
@@ -89,25 +89,31 @@ static int arcmsr_bios_param(struct scsi_device *sdev,
 static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
 static int arcmsr_probe(struct pci_dev *pdev,
 				const struct pci_device_id *id);
+static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state);
+static int arcmsr_resume(struct pci_dev *pdev);
 static void arcmsr_remove(struct pci_dev *pdev);
 static void arcmsr_shutdown(struct pci_dev *pdev);
 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
+static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
+	u32 intmask_org);
 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
-static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb);
-static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb);
+static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
+static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
 static void arcmsr_request_device_map(unsigned long pacb);
-static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb);
-static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb);
-static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb);
+static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb);
+static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb);
+static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb);
 static void arcmsr_message_isr_bh_fn(struct work_struct *work);
 static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
-static void arcmsr_hbc_message_isr(struct AdapterControlBlock *pACB);
+static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB);
+static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
 static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
 static const char *arcmsr_info(struct Scsi_Host *);
 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
+static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
 static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
 	int queue_depth, int reason)
 {
@@ -122,15 +128,14 @@ static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
 
 static struct scsi_host_template arcmsr_scsi_host_template = {
 	.module			= THIS_MODULE,
-	.name			= "ARCMSR ARECA SATA/SAS RAID Controller"
-				ARCMSR_DRIVER_VERSION,
+	.name			= "Areca SAS/SATA RAID driver",
 	.info			= arcmsr_info,
 	.queuecommand		= arcmsr_queue_command,
 	.eh_abort_handler	= arcmsr_abort,
 	.eh_bus_reset_handler	= arcmsr_bus_reset,
 	.bios_param		= arcmsr_bios_param,
 	.change_queue_depth	= arcmsr_adjust_disk_queue_depth,
-	.can_queue		= ARCMSR_MAX_FREECCB_NUM,
+	.can_queue		= ARCMSR_MAX_OUTSTANDING_CMD,
 	.this_id		= ARCMSR_SCSI_INITIATOR_ID,
 	.sg_tablesize		= ARCMSR_DEFAULT_SG_ENTRIES,
 	.max_sectors		= ARCMSR_MAX_XFER_SECTORS_C,
@@ -139,34 +144,59 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
 	.shost_attrs		= arcmsr_host_attrs,
 	.no_write_same		= 1,
 };
+
 static struct pci_device_id arcmsr_device_id_table[] = {
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880)},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200),
+		.driver_data = ACB_ADAPTER_TYPE_B},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201),
+		.driver_data = ACB_ADAPTER_TYPE_B},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202),
+		.driver_data = ACB_ADAPTER_TYPE_B},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1214),
+		.driver_data = ACB_ADAPTER_TYPE_D},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
+		.driver_data = ACB_ADAPTER_TYPE_C},
 	{0, 0}, /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
+
 static struct pci_driver arcmsr_pci_driver = {
 	.name			= "arcmsr",
 	.id_table		= arcmsr_device_id_table,
 	.probe			= arcmsr_probe,
 	.remove			= arcmsr_remove,
+	.suspend		= arcmsr_suspend,
+	.resume			= arcmsr_resume,
 	.shutdown		= arcmsr_shutdown,
 };
 /*
@@ -174,16 +204,14 @@ static struct pci_driver arcmsr_pci_driver = {
 ****************************************************************************
 */
 
-static void arcmsr_free_hbb_mu(struct AdapterControlBlock *acb)
+static void arcmsr_free_mu(struct AdapterControlBlock *acb)
 {
 	switch (acb->adapter_type) {
-	case ACB_ADAPTER_TYPE_A:
-	case ACB_ADAPTER_TYPE_C:
+	case ACB_ADAPTER_TYPE_B:
+	case ACB_ADAPTER_TYPE_D: {
+		dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
+			acb->dma_coherent2, acb->dma_coherent_handle2);
 		break;
-	case ACB_ADAPTER_TYPE_B:{
-		dma_free_coherent(&acb->pdev->dev,
-			sizeof(struct MessageUnit_B),
-			acb->pmuB, acb->dma_coherent_handle_hbb_mu);
 	}
 	}
 }
@@ -229,6 +257,25 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
 		}
 		break;
 	}
+	case ACB_ADAPTER_TYPE_D: {
+		void __iomem *mem_base0;
+		unsigned long addr, range, flags;
+
+		addr = (unsigned long)pci_resource_start(pdev, 0);
+		range = pci_resource_len(pdev, 0);
+		flags = pci_resource_flags(pdev, 0);
+		if (flags & IORESOURCE_CACHEABLE)
+			mem_base0 = ioremap(addr, range);
+		else
+			mem_base0 = ioremap_nocache(addr, range);
+		if (!mem_base0) {
+			pr_notice("arcmsr%d: memory mapping region fail\n",
+				acb->host->host_no);
+			return false;
+		}
+		acb->mem_base0 = mem_base0;
+		break;
+	}
 	}
 	return true;
 }
@@ -249,6 +296,10 @@ static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
 	case ACB_ADAPTER_TYPE_C:{
 		iounmap(acb->pmuC);
 	}
+		break;
+	case ACB_ADAPTER_TYPE_D:
+		iounmap(acb->mem_base0);
+		break;
 	}
 }
 
@@ -289,27 +340,7 @@ static int arcmsr_bios_param(struct scsi_device *sdev,
 	return 0;
 }
 
-static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb)
-{
-	struct pci_dev *pdev = acb->pdev;
-	u16 dev_id;
-	pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
-	acb->dev_id = dev_id;
-	switch (dev_id) {
-	case 0x1880: {
-		acb->adapter_type = ACB_ADAPTER_TYPE_C;
-		}
-		break;
-	case 0x1201: {
-		acb->adapter_type = ACB_ADAPTER_TYPE_B;
-		}
-		break;
-
-	default: acb->adapter_type = ACB_ADAPTER_TYPE_A;
-	}
-}
-
-static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
+static uint8_t arcmsr_hbaA_wait_msgint_ready(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_A __iomem *reg = acb->pmuA;
 	int i;
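
With the adapter type now carried in the PCI table's driver_data, the removed arcmsr_define_adapter_type() switch over raw device IDs becomes redundant. The probe-side consumer is outside this excerpt; a minimal sketch of the pattern, with a hypothetical helper name:

	/* Minimal sketch: in probe, the matched table entry's driver_data
	 * already carries the adapter type, so no PCI-ID switch is needed.
	 * Function name is hypothetical. */
	static void arcmsr_set_adapter_type(struct AdapterControlBlock *acb,
		const struct pci_device_id *id)
	{
		acb->adapter_type = id->driver_data;	/* TYPE_A, _B, _C or _D */
	}
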
@@ -327,7 +358,7 @@ static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
 	return false;
 }
 
-static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
+static uint8_t arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_B *reg = acb->pmuB;
 	int i;
@@ -347,9 +378,9 @@ static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
 	return false;
 }
 
-static uint8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *pACB)
+static uint8_t arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock *pACB)
 {
-	struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
+	struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
 	int i;
 
 	for (i = 0; i < 2000; i++) {
@@ -365,13 +396,30 @@ static uint8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *pACB)
 	return false;
 }
 
-static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
+static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB)
+{
+	struct MessageUnit_D *reg = pACB->pmuD;
+	int i;
+
+	for (i = 0; i < 2000; i++) {
+		if (readl(reg->outbound_doorbell)
+			& ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
+			writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
+				reg->outbound_doorbell);
+			return true;
+		}
+		msleep(10);
+	} /* max 20 seconds */
+	return false;
+}
+
+static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_A __iomem *reg = acb->pmuA;
 	int retry_count = 30;
 	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
 	do {
-		if (arcmsr_hba_wait_msgint_ready(acb))
+		if (arcmsr_hbaA_wait_msgint_ready(acb))
 			break;
 		else {
 			retry_count--;
@@ -381,13 +429,13 @@ static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
 	} while (retry_count != 0);
 }
 
-static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
+static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_B *reg = acb->pmuB;
 	int retry_count = 30;
 	writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell);
 	do {
-		if (arcmsr_hbb_wait_msgint_ready(acb))
+		if (arcmsr_hbaB_wait_msgint_ready(acb))
 			break;
 		else {
 			retry_count--;
@@ -397,14 +445,14 @@ static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
 	} while (retry_count != 0);
 }
 
-static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *pACB)
+static void arcmsr_hbaC_flush_cache(struct AdapterControlBlock *pACB)
 {
-	struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
+	struct MessageUnit_C __iomem *reg = pACB->pmuC;
 	int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */
 	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
 	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
 	do {
-		if (arcmsr_hbc_wait_msgint_ready(pACB)) {
+		if (arcmsr_hbaC_wait_msgint_ready(pACB)) {
 			break;
 		} else {
 			retry_count--;
@@ -414,22 +462,44 @@ static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *pACB)
414 } while (retry_count != 0); 462 } while (retry_count != 0);
415 return; 463 return;
416} 464}
465
466static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB)
467{
468 int retry_count = 15;
469 struct MessageUnit_D *reg = pACB->pmuD;
470
471 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, reg->inbound_msgaddr0);
472 do {
473 if (arcmsr_hbaD_wait_msgint_ready(pACB))
474 break;
475
476 retry_count--;
477 pr_notice("arcmsr%d: wait 'flush adapter "
478 "cache' timeout, retry count down = %d\n",
479 pACB->host->host_no, retry_count);
480 } while (retry_count != 0);
481}
482
417static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) 483static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
418{ 484{
419 switch (acb->adapter_type) { 485 switch (acb->adapter_type) {
420 486
421 case ACB_ADAPTER_TYPE_A: { 487 case ACB_ADAPTER_TYPE_A: {
422 arcmsr_flush_hba_cache(acb); 488 arcmsr_hbaA_flush_cache(acb);
423 } 489 }
424 break; 490 break;
425 491
426 case ACB_ADAPTER_TYPE_B: { 492 case ACB_ADAPTER_TYPE_B: {
427 arcmsr_flush_hbb_cache(acb); 493 arcmsr_hbaB_flush_cache(acb);
428 } 494 }
429 break; 495 break;
430 case ACB_ADAPTER_TYPE_C: { 496 case ACB_ADAPTER_TYPE_C: {
431 arcmsr_flush_hbc_cache(acb); 497 arcmsr_hbaC_flush_cache(acb);
432 } 498 }
499 break;
500 case ACB_ADAPTER_TYPE_D:
501 arcmsr_hbaD_flush_cache(acb);
502 break;
433 } 503 }
434} 504}
435 505
@@ -473,7 +543,16 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
473 acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle; 543 acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
474 for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++){ 544 for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++){
475 cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb); 545 cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
476 ccb_tmp->cdb_phyaddr_pattern = ((acb->adapter_type == ACB_ADAPTER_TYPE_C) ? cdb_phyaddr : (cdb_phyaddr >> 5)); 546 switch (acb->adapter_type) {
547 case ACB_ADAPTER_TYPE_A:
548 case ACB_ADAPTER_TYPE_B:
549 ccb_tmp->cdb_phyaddr = cdb_phyaddr >> 5;
550 break;
551 case ACB_ADAPTER_TYPE_C:
552 case ACB_ADAPTER_TYPE_D:
553 ccb_tmp->cdb_phyaddr = cdb_phyaddr;
554 break;
555 }
477 acb->pccb_pool[i] = ccb_tmp; 556 acb->pccb_pool[i] = ccb_tmp;
478 ccb_tmp->acb = acb; 557 ccb_tmp->acb = acb;
479 INIT_LIST_HEAD(&ccb_tmp->list); 558 INIT_LIST_HEAD(&ccb_tmp->list);
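
The switch added above spells out the addressing rule the old ternary hid: type A and B message units take a shifted handle, and because every CCB frame is 32-byte aligned the low five bits of its bus address are always zero, so cdb_phyaddr >> 5 loses nothing. Types C and D post the full address. The completion paths later in this patch reverse the shift with flag_ccb << 5. A two-line sketch of the round trip, assuming a 32-byte-aligned frame:

	/* Sketch: type A/B handle round trip for a 32-byte-aligned CCB. */
	u32 handle = (u32)(cdb_phyaddr >> 5);	/* value posted to the IOP */
	cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (handle << 5));
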
@@ -486,121 +565,126 @@ static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
486 565
487static void arcmsr_message_isr_bh_fn(struct work_struct *work) 566static void arcmsr_message_isr_bh_fn(struct work_struct *work)
488{ 567{
489 struct AdapterControlBlock *acb = container_of(work,struct AdapterControlBlock, arcmsr_do_message_isr_bh); 568 struct AdapterControlBlock *acb = container_of(work,
569 struct AdapterControlBlock, arcmsr_do_message_isr_bh);
570 char *acb_dev_map = (char *)acb->device_map;
571 uint32_t __iomem *signature = NULL;
572 char __iomem *devicemap = NULL;
573 int target, lun;
574 struct scsi_device *psdev;
575 char diff, temp;
576
490 switch (acb->adapter_type) { 577 switch (acb->adapter_type) {
491 case ACB_ADAPTER_TYPE_A: { 578 case ACB_ADAPTER_TYPE_A: {
579 struct MessageUnit_A __iomem *reg = acb->pmuA;
492 580
493 struct MessageUnit_A __iomem *reg = acb->pmuA; 581 signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
494 char *acb_dev_map = (char *)acb->device_map; 582 devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
495 uint32_t __iomem *signature = (uint32_t __iomem*) (&reg->message_rwbuffer[0]); 583 break;
496 char __iomem *devicemap = (char __iomem*) (&reg->message_rwbuffer[21]); 584 }
497 int target, lun; 585 case ACB_ADAPTER_TYPE_B: {
498 struct scsi_device *psdev; 586 struct MessageUnit_B *reg = acb->pmuB;
499 char diff;
500
501 atomic_inc(&acb->rq_map_token);
502 if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
503 for(target = 0; target < ARCMSR_MAX_TARGETID -1; target++) {
504 diff = (*acb_dev_map)^readb(devicemap);
505 if (diff != 0) {
506 char temp;
507 *acb_dev_map = readb(devicemap);
508 temp =*acb_dev_map;
509 for(lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
510 if((temp & 0x01)==1 && (diff & 0x01) == 1) {
511 scsi_add_device(acb->host, 0, target, lun);
512 }else if((temp & 0x01) == 0 && (diff & 0x01) == 1) {
513 psdev = scsi_device_lookup(acb->host, 0, target, lun);
514 if (psdev != NULL ) {
515 scsi_remove_device(psdev);
516 scsi_device_put(psdev);
517 }
518 }
519 temp >>= 1;
520 diff >>= 1;
521 }
522 }
523 devicemap++;
524 acb_dev_map++;
525 }
526 }
527 break;
528 }
529 587
530 case ACB_ADAPTER_TYPE_B: { 588 signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
531 struct MessageUnit_B *reg = acb->pmuB; 589 devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
532 char *acb_dev_map = (char *)acb->device_map;
533 uint32_t __iomem *signature = (uint32_t __iomem*)(&reg->message_rwbuffer[0]);
534 char __iomem *devicemap = (char __iomem*)(&reg->message_rwbuffer[21]);
535 int target, lun;
536 struct scsi_device *psdev;
537 char diff;
538
539 atomic_inc(&acb->rq_map_token);
540 if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
541 for(target = 0; target < ARCMSR_MAX_TARGETID -1; target++) {
542 diff = (*acb_dev_map)^readb(devicemap);
543 if (diff != 0) {
544 char temp;
545 *acb_dev_map = readb(devicemap);
546 temp =*acb_dev_map;
547 for(lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
548 if((temp & 0x01)==1 && (diff & 0x01) == 1) {
549 scsi_add_device(acb->host, 0, target, lun);
550 }else if((temp & 0x01) == 0 && (diff & 0x01) == 1) {
551 psdev = scsi_device_lookup(acb->host, 0, target, lun);
552 if (psdev != NULL ) {
553 scsi_remove_device(psdev);
554 scsi_device_put(psdev);
555 }
556 }
557 temp >>= 1;
558 diff >>= 1;
559 }
560 }
561 devicemap++;
562 acb_dev_map++;
563 }
564 }
565 }
566 break; 590 break;
567 case ACB_ADAPTER_TYPE_C: { 591 }
568 struct MessageUnit_C *reg = acb->pmuC; 592 case ACB_ADAPTER_TYPE_C: {
569 char *acb_dev_map = (char *)acb->device_map; 593 struct MessageUnit_C __iomem *reg = acb->pmuC;
570 uint32_t __iomem *signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]); 594
571 char __iomem *devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]); 595 signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
572 int target, lun; 596 devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
573 struct scsi_device *psdev; 597 break;
574 char diff; 598 }
575 599 case ACB_ADAPTER_TYPE_D: {
576 atomic_inc(&acb->rq_map_token); 600 struct MessageUnit_D *reg = acb->pmuD;
577 if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) { 601
578 for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) { 602 signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
579 diff = (*acb_dev_map)^readb(devicemap); 603 devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
580 if (diff != 0) { 604 break;
581 char temp; 605 }
582 *acb_dev_map = readb(devicemap); 606 }
583 temp = *acb_dev_map; 607 atomic_inc(&acb->rq_map_token);
584 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) { 608 if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
585 if ((temp & 0x01) == 1 && (diff & 0x01) == 1) { 609 return;
586 scsi_add_device(acb->host, 0, target, lun); 610 for (target = 0; target < ARCMSR_MAX_TARGETID - 1;
587 } else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) { 611 target++) {
588 psdev = scsi_device_lookup(acb->host, 0, target, lun); 612 temp = readb(devicemap);
589 if (psdev != NULL) { 613 diff = (*acb_dev_map) ^ temp;
590 scsi_remove_device(psdev); 614 if (diff != 0) {
591 scsi_device_put(psdev); 615 *acb_dev_map = temp;
592 } 616 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN;
593 } 617 lun++) {
594 temp >>= 1; 618 if ((diff & 0x01) == 1 &&
595 diff >>= 1; 619 (temp & 0x01) == 1) {
596 } 620 scsi_add_device(acb->host,
621 0, target, lun);
622 } else if ((diff & 0x01) == 1
623 && (temp & 0x01) == 0) {
624 psdev = scsi_device_lookup(acb->host,
625 0, target, lun);
626 if (psdev != NULL) {
627 scsi_remove_device(psdev);
628 scsi_device_put(psdev);
597 } 629 }
598 devicemap++;
599 acb_dev_map++;
600 } 630 }
631 temp >>= 1;
632 diff >>= 1;
601 } 633 }
602 } 634 }
635 devicemap++;
636 acb_dev_map++;
637 }
638}
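
The rewritten bottom half hoists the device-map scan out of the per-adapter cases; only the location of the firmware buffer differs per type. Each byte of device_map is one target's LUN bitmap: XOR against the firmware copy marks the LUNs that changed, and the new value of each bit tells hot-add apart from hot-remove. A standalone sketch of the bit walk (the remove_lun helper is hypothetical; the driver inlines the lookup, remove and put calls):

	/* Sketch: diff two per-target LUN bitmaps and react per LUN. */
	char cur = readb(devicemap);
	char diff = *acb_dev_map ^ cur;

	for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
		if (diff & 1) {
			if (cur & 1)	/* bit rose: LUN attached */
				scsi_add_device(acb->host, 0, target, lun);
			else		/* bit fell: LUN detached */
				remove_lun(acb->host, target, lun);
		}
		cur >>= 1;
		diff >>= 1;
	}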
639
640static int
641arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
642{
643 int i, j, r;
644 struct msix_entry entries[ARCMST_NUM_MSIX_VECTORS];
645
646 for (i = 0; i < ARCMST_NUM_MSIX_VECTORS; i++)
647 entries[i].entry = i;
648 r = pci_enable_msix_range(pdev, entries, 1, ARCMST_NUM_MSIX_VECTORS);
649 if (r < 0)
650 goto msi_int;
651 acb->msix_vector_count = r;
652 for (i = 0; i < r; i++) {
653 if (request_irq(entries[i].vector,
654 arcmsr_do_interrupt, 0, "arcmsr", acb)) {
655 pr_warn("arcmsr%d: request_irq =%d failed!\n",
656 acb->host->host_no, entries[i].vector);
657 for (j = 0 ; j < i ; j++)
658 free_irq(entries[j].vector, acb);
659 pci_disable_msix(pdev);
660 goto msi_int;
661 }
662 acb->entries[i] = entries[i];
663 }
664 acb->acb_flags |= ACB_F_MSIX_ENABLED;
665 pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
666 return SUCCESS;
667msi_int:
668 if (pci_enable_msi_exact(pdev, 1) < 0)
669 goto legacy_int;
670 if (request_irq(pdev->irq, arcmsr_do_interrupt,
671 IRQF_SHARED, "arcmsr", acb)) {
672 pr_warn("arcmsr%d: request_irq =%d failed!\n",
673 acb->host->host_no, pdev->irq);
674 pci_disable_msi(pdev);
675 goto legacy_int;
676 }
677 acb->acb_flags |= ACB_F_MSI_ENABLED;
678 pr_info("arcmsr%d: msi enabled\n", acb->host->host_no);
679 return SUCCESS;
680legacy_int:
681 if (request_irq(pdev->irq, arcmsr_do_interrupt,
682 IRQF_SHARED, "arcmsr", acb)) {
683 pr_warn("arcmsr%d: request_irq = %d failed!\n",
684 acb->host->host_no, pdev->irq);
685 return FAILED;
603 } 686 }
687 return SUCCESS;
604} 688}
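
arcmsr_request_irq, new in this patch, walks a fallback ladder: MSI-X with anywhere from 1 to ARCMST_NUM_MSIX_VECTORS vectors, then a single MSI via pci_enable_msi_exact(), then the shared legacy INTx line, with each failed step releasing what it had acquired before dropping down. The same ladder reduced to a single-vector driver looks like this (my_isr and the "mydrv" name are placeholders):

	/* Sketch: MSI-then-INTx fallback for one vector. */
	static int request_any_irq(struct pci_dev *pdev, void *dev)
	{
		if (pci_enable_msi_exact(pdev, 1) == 0) {
			if (request_irq(pdev->irq, my_isr, 0, "mydrv", dev) == 0)
				return 0;	/* MSI in use */
			pci_disable_msi(pdev);
		}
		/* last resort: shared legacy INTx */
		return request_irq(pdev->irq, my_isr, IRQF_SHARED, "mydrv", dev);
	}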
605 689
606static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id) 690static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -637,7 +721,7 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
637 host->max_lun = ARCMSR_MAX_TARGETLUN; 721 host->max_lun = ARCMSR_MAX_TARGETLUN;
638 host->max_id = ARCMSR_MAX_TARGETID; /*16:8*/ 722 host->max_id = ARCMSR_MAX_TARGETID; /*16:8*/
639 host->max_cmd_len = 16; /* 16-byte CDBs for 64-bit LBA, capacities over 2TB */ 723 host->max_cmd_len = 16; /* 16-byte CDBs for 64-bit LBA, capacities over 2TB */
640 host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */ 724 host->can_queue = ARCMSR_MAX_OUTSTANDING_CMD;
641 host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN; 725 host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
642 host->this_id = ARCMSR_SCSI_INITIATOR_ID; 726 host->this_id = ARCMSR_SCSI_INITIATOR_ID;
643 host->unique_id = (bus << 8) | dev_fun; 727 host->unique_id = (bus << 8) | dev_fun;
@@ -649,12 +733,16 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
649 } 733 }
650 spin_lock_init(&acb->eh_lock); 734 spin_lock_init(&acb->eh_lock);
651 spin_lock_init(&acb->ccblist_lock); 735 spin_lock_init(&acb->ccblist_lock);
736 spin_lock_init(&acb->postq_lock);
737 spin_lock_init(&acb->doneq_lock);
738 spin_lock_init(&acb->rqbuffer_lock);
739 spin_lock_init(&acb->wqbuffer_lock);
652 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | 740 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
653 ACB_F_MESSAGE_RQBUFFER_CLEARED | 741 ACB_F_MESSAGE_RQBUFFER_CLEARED |
654 ACB_F_MESSAGE_WQBUFFER_READED); 742 ACB_F_MESSAGE_WQBUFFER_READED);
655 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; 743 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
656 INIT_LIST_HEAD(&acb->ccb_free_list); 744 INIT_LIST_HEAD(&acb->ccb_free_list);
657 arcmsr_define_adapter_type(acb); 745 acb->adapter_type = id->driver_data;
658 error = arcmsr_remap_pciregion(acb); 746 error = arcmsr_remap_pciregion(acb);
659 if(!error){ 747 if(!error){
660 goto pci_release_regs; 748 goto pci_release_regs;
@@ -667,17 +755,13 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
667 if(error){ 755 if(error){
668 goto free_hbb_mu; 756 goto free_hbb_mu;
669 } 757 }
670 arcmsr_iop_init(acb);
671 error = scsi_add_host(host, &pdev->dev); 758 error = scsi_add_host(host, &pdev->dev);
672 if(error){ 759 if(error){
673 goto RAID_controller_stop; 760 goto free_ccb_pool;
674 } 761 }
675 error = request_irq(pdev->irq, arcmsr_do_interrupt, IRQF_SHARED, "arcmsr", acb); 762 if (arcmsr_request_irq(pdev, acb) == FAILED)
676 if(error){
677 goto scsi_host_remove; 763 goto scsi_host_remove;
678 } 764 arcmsr_iop_init(acb);
679 host->irq = pdev->irq;
680 scsi_scan_host(host);
681 INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn); 765 INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
682 atomic_set(&acb->rq_map_token, 16); 766 atomic_set(&acb->rq_map_token, 16);
683 atomic_set(&acb->ante_token_value, 16); 767 atomic_set(&acb->ante_token_value, 16);
@@ -689,16 +773,20 @@ static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
689 add_timer(&acb->eternal_timer); 773 add_timer(&acb->eternal_timer);
690 if(arcmsr_alloc_sysfs_attr(acb)) 774 if(arcmsr_alloc_sysfs_attr(acb))
691 goto out_free_sysfs; 775 goto out_free_sysfs;
776 scsi_scan_host(host);
692 return 0; 777 return 0;
693out_free_sysfs: 778out_free_sysfs:
694scsi_host_remove: 779 del_timer_sync(&acb->eternal_timer);
695 scsi_remove_host(host); 780 flush_work(&acb->arcmsr_do_message_isr_bh);
696RAID_controller_stop:
697 arcmsr_stop_adapter_bgrb(acb); 781 arcmsr_stop_adapter_bgrb(acb);
698 arcmsr_flush_adapter_cache(acb); 782 arcmsr_flush_adapter_cache(acb);
783 arcmsr_free_irq(pdev, acb);
784scsi_host_remove:
785 scsi_remove_host(host);
786free_ccb_pool:
699 arcmsr_free_ccb_pool(acb); 787 arcmsr_free_ccb_pool(acb);
700free_hbb_mu: 788free_hbb_mu:
701 arcmsr_free_hbb_mu(acb); 789 arcmsr_free_mu(acb);
702unmap_pci_region: 790unmap_pci_region:
703 arcmsr_unmap_pciregion(acb); 791 arcmsr_unmap_pciregion(acb);
704pci_release_regs: 792pci_release_regs:
@@ -710,75 +798,169 @@ pci_disable_dev:
710 return -ENODEV; 798 return -ENODEV;
711} 799}
712 800
713static uint8_t arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) 801static void arcmsr_free_irq(struct pci_dev *pdev,
802 struct AdapterControlBlock *acb)
803{
804 int i;
805
806 if (acb->acb_flags & ACB_F_MSI_ENABLED) {
807 free_irq(pdev->irq, acb);
808 pci_disable_msi(pdev);
809 } else if (acb->acb_flags & ACB_F_MSIX_ENABLED) {
810 for (i = 0; i < acb->msix_vector_count; i++)
811 free_irq(acb->entries[i].vector, acb);
812 pci_disable_msix(pdev);
813 } else
814 free_irq(pdev->irq, acb);
815}
816
817static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
818{
819 uint32_t intmask_org;
820 struct Scsi_Host *host = pci_get_drvdata(pdev);
821 struct AdapterControlBlock *acb =
822 (struct AdapterControlBlock *)host->hostdata;
823
824 intmask_org = arcmsr_disable_outbound_ints(acb);
825 arcmsr_free_irq(pdev, acb);
826 del_timer_sync(&acb->eternal_timer);
827 flush_work(&acb->arcmsr_do_message_isr_bh);
828 arcmsr_stop_adapter_bgrb(acb);
829 arcmsr_flush_adapter_cache(acb);
830 pci_set_drvdata(pdev, host);
831 pci_save_state(pdev);
832 pci_disable_device(pdev);
833 pci_set_power_state(pdev, pci_choose_state(pdev, state));
834 return 0;
835}
836
837static int arcmsr_resume(struct pci_dev *pdev)
838{
839 int error;
840 struct Scsi_Host *host = pci_get_drvdata(pdev);
841 struct AdapterControlBlock *acb =
842 (struct AdapterControlBlock *)host->hostdata;
843
844 pci_set_power_state(pdev, PCI_D0);
845 pci_enable_wake(pdev, PCI_D0, 0);
846 pci_restore_state(pdev);
847 if (pci_enable_device(pdev)) {
848 pr_warn("%s: pci_enable_device error\n", __func__);
849 return -ENODEV;
850 }
851 error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
852 if (error) {
853 error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
854 if (error) {
855 pr_warn("scsi%d: No suitable DMA mask available\n",
856 host->host_no);
857 goto controller_unregister;
858 }
859 }
860 pci_set_master(pdev);
861 if (arcmsr_request_irq(pdev, acb) == FAILED)
862 goto controller_stop;
863 arcmsr_iop_init(acb);
864 INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
865 atomic_set(&acb->rq_map_token, 16);
866 atomic_set(&acb->ante_token_value, 16);
867 acb->fw_flag = FW_NORMAL;
868 init_timer(&acb->eternal_timer);
869 acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
870 acb->eternal_timer.data = (unsigned long) acb;
871 acb->eternal_timer.function = &arcmsr_request_device_map;
872 add_timer(&acb->eternal_timer);
873 return 0;
874controller_stop:
875 arcmsr_stop_adapter_bgrb(acb);
876 arcmsr_flush_adapter_cache(acb);
877controller_unregister:
878 scsi_remove_host(host);
879 arcmsr_free_ccb_pool(acb);
880 arcmsr_unmap_pciregion(acb);
881 pci_release_regions(pdev);
882 scsi_host_put(host);
883 pci_disable_device(pdev);
884 return -ENODEV;
885}
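
The resume path repeats probe's DMA-mask negotiation before re-arming the controller: try the 64-bit mask first and fall back to 32-bit, failing the resume only if neither sticks. The idiom in isolation:

	/* Sketch: prefer 64-bit DMA, fall back to 32-bit. */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
		return -ENODEV;		/* no usable DMA mask */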
886
887static uint8_t arcmsr_hbaA_abort_allcmd(struct AdapterControlBlock *acb)
714{ 888{
715 struct MessageUnit_A __iomem *reg = acb->pmuA; 889 struct MessageUnit_A __iomem *reg = acb->pmuA;
716 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0); 890 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
717 if (!arcmsr_hba_wait_msgint_ready(acb)) { 891 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
718 printk(KERN_NOTICE 892 printk(KERN_NOTICE
719 "arcmsr%d: wait 'abort all outstanding command' timeout \n" 893 "arcmsr%d: wait 'abort all outstanding command' timeout\n"
720 , acb->host->host_no); 894 , acb->host->host_no);
721 return false; 895 return false;
722 } 896 }
723 return true; 897 return true;
724} 898}
725 899
726static uint8_t arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb) 900static uint8_t arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock *acb)
727{ 901{
728 struct MessageUnit_B *reg = acb->pmuB; 902 struct MessageUnit_B *reg = acb->pmuB;
729 903
730 writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell); 904 writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell);
731 if (!arcmsr_hbb_wait_msgint_ready(acb)) { 905 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
732 printk(KERN_NOTICE 906 printk(KERN_NOTICE
733 "arcmsr%d: wait 'abort all outstanding command' timeout \n" 907 "arcmsr%d: wait 'abort all outstanding command' timeout\n"
734 , acb->host->host_no); 908 , acb->host->host_no);
735 return false; 909 return false;
736 } 910 }
737 return true; 911 return true;
738} 912}
739static uint8_t arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *pACB) 913static uint8_t arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock *pACB)
740{ 914{
741 struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC; 915 struct MessageUnit_C __iomem *reg = pACB->pmuC;
742 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0); 916 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
743 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell); 917 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
744 if (!arcmsr_hbc_wait_msgint_ready(pACB)) { 918 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
745 printk(KERN_NOTICE 919 printk(KERN_NOTICE
746 "arcmsr%d: wait 'abort all outstanding command' timeout \n" 920 "arcmsr%d: wait 'abort all outstanding command' timeout\n"
747 , pACB->host->host_no); 921 , pACB->host->host_no);
748 return false; 922 return false;
749 } 923 }
750 return true; 924 return true;
751} 925}
926
927static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB)
928{
929 struct MessageUnit_D *reg = pACB->pmuD;
930
931 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, reg->inbound_msgaddr0);
932 if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
933 pr_notice("arcmsr%d: wait 'abort all outstanding "
934 "command' timeout\n", pACB->host->host_no);
935 return false;
936 }
937 return true;
938}
939
752static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb) 940static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
753{ 941{
754 uint8_t rtnval = 0; 942 uint8_t rtnval = 0;
755 switch (acb->adapter_type) { 943 switch (acb->adapter_type) {
756 case ACB_ADAPTER_TYPE_A: { 944 case ACB_ADAPTER_TYPE_A: {
757 rtnval = arcmsr_abort_hba_allcmd(acb); 945 rtnval = arcmsr_hbaA_abort_allcmd(acb);
758 } 946 }
759 break; 947 break;
760 948
761 case ACB_ADAPTER_TYPE_B: { 949 case ACB_ADAPTER_TYPE_B: {
762 rtnval = arcmsr_abort_hbb_allcmd(acb); 950 rtnval = arcmsr_hbaB_abort_allcmd(acb);
763 } 951 }
764 break; 952 break;
765 953
766 case ACB_ADAPTER_TYPE_C: { 954 case ACB_ADAPTER_TYPE_C: {
767 rtnval = arcmsr_abort_hbc_allcmd(acb); 955 rtnval = arcmsr_hbaC_abort_allcmd(acb);
768 } 956 }
769 } 957 break;
770 return rtnval;
771}
772 958
773static bool arcmsr_hbb_enable_driver_mode(struct AdapterControlBlock *pacb) 959 case ACB_ADAPTER_TYPE_D:
774{ 960 rtnval = arcmsr_hbaD_abort_allcmd(acb);
775 struct MessageUnit_B *reg = pacb->pmuB; 961 break;
776 writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
777 if (!arcmsr_hbb_wait_msgint_ready(pacb)) {
778 printk(KERN_ERR "arcmsr%d: can't set driver mode. \n", pacb->host->host_no);
779 return false;
780 } 962 }
781 return true; 963 return rtnval;
782} 964}
783 965
784static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb) 966static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
@@ -837,12 +1019,18 @@ static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
837 } 1019 }
838 break; 1020 break;
839 case ACB_ADAPTER_TYPE_C:{ 1021 case ACB_ADAPTER_TYPE_C:{
840 struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC; 1022 struct MessageUnit_C __iomem *reg = acb->pmuC;
841 /* disable all outbound interrupt */ 1023 /* disable all outbound interrupt */
842 orig_mask = readl(&reg->host_int_mask); /* disable outbound message0 int */ 1024 orig_mask = readl(&reg->host_int_mask); /* disable outbound message0 int */
843 writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask); 1025 writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
844 } 1026 }
845 break; 1027 break;
1028 case ACB_ADAPTER_TYPE_D: {
1029 struct MessageUnit_D *reg = acb->pmuD;
1030 /* disable all outbound interrupt */
1031 writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable);
1032 }
1033 break;
846 } 1034 }
847 return orig_mask; 1035 return orig_mask;
848} 1036}
@@ -933,7 +1121,7 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct Comma
933static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) 1121static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
934{ 1122{
935 int i = 0; 1123 int i = 0;
936 uint32_t flag_ccb; 1124 uint32_t flag_ccb, ccb_cdb_phy;
937 struct ARCMSR_CDB *pARCMSR_CDB; 1125 struct ARCMSR_CDB *pARCMSR_CDB;
938 bool error; 1126 bool error;
939 struct CommandControlBlock *pCCB; 1127 struct CommandControlBlock *pCCB;
@@ -961,8 +1149,9 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
961 /*clear all outbound posted Q*/ 1149 /*clear all outbound posted Q*/
962 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */ 1150 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
963 for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) { 1151 for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
964 if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) { 1152 flag_ccb = reg->done_qbuffer[i];
965 writel(0, &reg->done_qbuffer[i]); 1153 if (flag_ccb != 0) {
1154 reg->done_qbuffer[i] = 0;
966 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/ 1155 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
967 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); 1156 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
968 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false; 1157 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
@@ -975,11 +1164,7 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
975 } 1164 }
976 break; 1165 break;
977 case ACB_ADAPTER_TYPE_C: { 1166 case ACB_ADAPTER_TYPE_C: {
978 struct MessageUnit_C *reg = acb->pmuC; 1167 struct MessageUnit_C __iomem *reg = acb->pmuC;
979 struct ARCMSR_CDB *pARCMSR_CDB;
980 uint32_t flag_ccb, ccb_cdb_phy;
981 bool error;
982 struct CommandControlBlock *pCCB;
983 while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) { 1168 while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
984 /*need to do*/ 1169 /*need to do*/
985 flag_ccb = readl(&reg->outbound_queueport_low); 1170 flag_ccb = readl(&reg->outbound_queueport_low);
@@ -989,9 +1174,54 @@ static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
989 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false; 1174 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
990 arcmsr_drain_donequeue(acb, pCCB, error); 1175 arcmsr_drain_donequeue(acb, pCCB, error);
991 } 1176 }
992 } 1177 }
1178 break;
1179 case ACB_ADAPTER_TYPE_D: {
1180 struct MessageUnit_D *pmu = acb->pmuD;
1181 uint32_t outbound_write_pointer;
1182 uint32_t doneq_index, index_stripped, addressLow, residual, toggle;
1183 unsigned long flags;
1184
1185 residual = atomic_read(&acb->ccboutstandingcount);
1186 for (i = 0; i < residual; i++) {
1187 spin_lock_irqsave(&acb->doneq_lock, flags);
1188 outbound_write_pointer =
1189 pmu->done_qbuffer[0].addressLow + 1;
1190 doneq_index = pmu->doneq_index;
1191 if ((doneq_index & 0xFFF) !=
1192 (outbound_write_pointer & 0xFFF)) {
1193 toggle = doneq_index & 0x4000;
1194 index_stripped = (doneq_index & 0xFFF) + 1;
1195 index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
1196 pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
1197 ((toggle ^ 0x4000) + 1);
1198 doneq_index = pmu->doneq_index;
1199 spin_unlock_irqrestore(&acb->doneq_lock, flags);
1200 addressLow = pmu->done_qbuffer[doneq_index &
1201 0xFFF].addressLow;
1202 ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
1203 pARCMSR_CDB = (struct ARCMSR_CDB *)
1204 (acb->vir2phy_offset + ccb_cdb_phy);
1205 pCCB = container_of(pARCMSR_CDB,
1206 struct CommandControlBlock, arcmsr_cdb);
1207 error = (addressLow &
1208 ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
1209 true : false;
1210 arcmsr_drain_donequeue(acb, pCCB, error);
1211 writel(doneq_index,
1212 pmu->outboundlist_read_pointer);
1213 } else {
1214 spin_unlock_irqrestore(&acb->doneq_lock, flags);
1215 mdelay(10);
1216 }
1217 }
1218 pmu->postq_index = 0;
1219 pmu->doneq_index = 0x40FF;
1220 }
1221 break;
993 } 1222 }
994} 1223}
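
The type D drain above introduces the done-queue index convention reused later in arcmsr_hbaD_postqueue_isr: bits 0 to 11 hold the ring position, bit 14 is a generation toggle that flips on every wrap so producer and consumer can tell a full ring from an empty one, and slot 0 is reserved (a wrap restarts the ring at 1). Factored into a helper, the advance step reads (the helper name is hypothetical; the driver open-codes it):

	/* Sketch: advance a 12-bit ring index that carries a wrap toggle
	 * in bit 14; depth is ARCMSR_MAX_ARC1214_DONEQUEUE. */
	static u32 doneq_advance(u32 idx)
	{
		u32 toggle = idx & 0x4000;
		u32 pos = ((idx & 0xFFF) + 1) % ARCMSR_MAX_ARC1214_DONEQUEUE;

		/* on wrap: flip the toggle, restart the ring at slot 1 */
		return pos ? (pos | toggle) : ((toggle ^ 0x4000) + 1);
	}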
1224
995static void arcmsr_remove(struct pci_dev *pdev) 1225static void arcmsr_remove(struct pci_dev *pdev)
996{ 1226{
997 struct Scsi_Host *host = pci_get_drvdata(pdev); 1227 struct Scsi_Host *host = pci_get_drvdata(pdev);
@@ -1029,9 +1259,9 @@ static void arcmsr_remove(struct pci_dev *pdev)
1029 } 1259 }
1030 } 1260 }
1031 } 1261 }
1032 free_irq(pdev->irq, acb); 1262 arcmsr_free_irq(pdev, acb);
1033 arcmsr_free_ccb_pool(acb); 1263 arcmsr_free_ccb_pool(acb);
1034 arcmsr_free_hbb_mu(acb); 1264 arcmsr_free_mu(acb);
1035 arcmsr_unmap_pciregion(acb); 1265 arcmsr_unmap_pciregion(acb);
1036 pci_release_regions(pdev); 1266 pci_release_regions(pdev);
1037 scsi_host_put(host); 1267 scsi_host_put(host);
@@ -1045,6 +1275,7 @@ static void arcmsr_shutdown(struct pci_dev *pdev)
1045 (struct AdapterControlBlock *)host->hostdata; 1275 (struct AdapterControlBlock *)host->hostdata;
1046 del_timer_sync(&acb->eternal_timer); 1276 del_timer_sync(&acb->eternal_timer);
1047 arcmsr_disable_outbound_ints(acb); 1277 arcmsr_disable_outbound_ints(acb);
1278 arcmsr_free_irq(pdev, acb);
1048 flush_work(&acb->arcmsr_do_message_isr_bh); 1279 flush_work(&acb->arcmsr_do_message_isr_bh);
1049 arcmsr_stop_adapter_bgrb(acb); 1280 arcmsr_stop_adapter_bgrb(acb);
1050 arcmsr_flush_adapter_cache(acb); 1281 arcmsr_flush_adapter_cache(acb);
@@ -1091,11 +1322,19 @@ static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
1091 } 1322 }
1092 break; 1323 break;
1093 case ACB_ADAPTER_TYPE_C: { 1324 case ACB_ADAPTER_TYPE_C: {
1094 struct MessageUnit_C *reg = acb->pmuC; 1325 struct MessageUnit_C __iomem *reg = acb->pmuC;
1095 mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK|ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK); 1326 mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK|ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
1096 writel(intmask_org & mask, &reg->host_int_mask); 1327 writel(intmask_org & mask, &reg->host_int_mask);
1097 acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f; 1328 acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
1098 } 1329 }
1330 break;
1331 case ACB_ADAPTER_TYPE_D: {
1332 struct MessageUnit_D *reg = acb->pmuD;
1333
1334 mask = ARCMSR_ARC1214_ALL_INT_ENABLE;
1335 writel(intmask_org | mask, reg->pcief0_int_enable);
1336 break;
1337 }
1099 } 1338 }
1100} 1339}
1101 1340
@@ -1115,7 +1354,7 @@ static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
1115 arcmsr_cdb->TargetID = pcmd->device->id; 1354 arcmsr_cdb->TargetID = pcmd->device->id;
1116 arcmsr_cdb->LUN = pcmd->device->lun; 1355 arcmsr_cdb->LUN = pcmd->device->lun;
1117 arcmsr_cdb->Function = 1; 1356 arcmsr_cdb->Function = 1;
1118 arcmsr_cdb->Context = 0; 1357 arcmsr_cdb->msgContext = 0;
1119 memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len); 1358 memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
1120 1359
1121 nseg = scsi_dma_map(pcmd); 1360 nseg = scsi_dma_map(pcmd);
@@ -1156,7 +1395,7 @@ static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
1156 1395
1157static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb) 1396static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
1158{ 1397{
1159 uint32_t cdb_phyaddr_pattern = ccb->cdb_phyaddr_pattern; 1398 uint32_t cdb_phyaddr = ccb->cdb_phyaddr;
1160 struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb; 1399 struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
1161 atomic_inc(&acb->ccboutstandingcount); 1400 atomic_inc(&acb->ccboutstandingcount);
1162 ccb->startdone = ARCMSR_CCB_START; 1401 ccb->startdone = ARCMSR_CCB_START;
@@ -1165,25 +1404,24 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr
1165 struct MessageUnit_A __iomem *reg = acb->pmuA; 1404 struct MessageUnit_A __iomem *reg = acb->pmuA;
1166 1405
1167 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) 1406 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
1168 writel(cdb_phyaddr_pattern | ARCMSR_CCBPOST_FLAG_SGL_BSIZE, 1407 writel(cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
1169 &reg->inbound_queueport); 1408 &reg->inbound_queueport);
1170 else { 1409 else
1171 writel(cdb_phyaddr_pattern, &reg->inbound_queueport); 1410 writel(cdb_phyaddr, &reg->inbound_queueport);
1172 }
1173 }
1174 break; 1411 break;
1412 }
1175 1413
1176 case ACB_ADAPTER_TYPE_B: { 1414 case ACB_ADAPTER_TYPE_B: {
1177 struct MessageUnit_B *reg = acb->pmuB; 1415 struct MessageUnit_B *reg = acb->pmuB;
1178 uint32_t ending_index, index = reg->postq_index; 1416 uint32_t ending_index, index = reg->postq_index;
1179 1417
1180 ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE); 1418 ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
1181 writel(0, &reg->post_qbuffer[ending_index]); 1419 reg->post_qbuffer[ending_index] = 0;
1182 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) { 1420 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1183 writel(cdb_phyaddr_pattern | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,\ 1421 reg->post_qbuffer[index] =
1184 &reg->post_qbuffer[index]); 1422 cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE;
1185 } else { 1423 } else {
1186 writel(cdb_phyaddr_pattern, &reg->post_qbuffer[index]); 1424 reg->post_qbuffer[index] = cdb_phyaddr;
1187 } 1425 }
1188 index++; 1426 index++;
1189 index %= ARCMSR_MAX_HBB_POSTQUEUE;/* wrap to 0 past the last index */ 1427 index %= ARCMSR_MAX_HBB_POSTQUEUE;/* wrap to 0 past the last index */
@@ -1192,11 +1430,11 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr
1192 } 1430 }
1193 break; 1431 break;
1194 case ACB_ADAPTER_TYPE_C: { 1432 case ACB_ADAPTER_TYPE_C: {
1195 struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)acb->pmuC; 1433 struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
1196 uint32_t ccb_post_stamp, arc_cdb_size; 1434 uint32_t ccb_post_stamp, arc_cdb_size;
1197 1435
1198 arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size; 1436 arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
1199 ccb_post_stamp = (cdb_phyaddr_pattern | ((arc_cdb_size - 1) >> 6) | 1); 1437 ccb_post_stamp = (cdb_phyaddr | ((arc_cdb_size - 1) >> 6) | 1);
1200 if (acb->cdb_phyaddr_hi32) { 1438 if (acb->cdb_phyaddr_hi32) {
1201 writel(acb->cdb_phyaddr_hi32, &phbcmu->inbound_queueport_high); 1439 writel(acb->cdb_phyaddr_hi32, &phbcmu->inbound_queueport_high);
1202 writel(ccb_post_stamp, &phbcmu->inbound_queueport_low); 1440 writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
@@ -1204,62 +1442,102 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr
1204 writel(ccb_post_stamp, &phbcmu->inbound_queueport_low); 1442 writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
1205 } 1443 }
1206 } 1444 }
1445 break;
1446 case ACB_ADAPTER_TYPE_D: {
1447 struct MessageUnit_D *pmu = acb->pmuD;
1448 u16 index_stripped;
1449 u16 postq_index, toggle;
1450 unsigned long flags;
1451 struct InBound_SRB *pinbound_srb;
1452
1453 spin_lock_irqsave(&acb->postq_lock, flags);
1454 postq_index = pmu->postq_index;
1455 pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]);
1456 pinbound_srb->addressHigh = dma_addr_hi32(cdb_phyaddr);
1457 pinbound_srb->addressLow = dma_addr_lo32(cdb_phyaddr);
1458 pinbound_srb->length = ccb->arc_cdb_size >> 2;
1459 arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr);
1460 toggle = postq_index & 0x4000;
1461 index_stripped = postq_index + 1;
1462 index_stripped &= (ARCMSR_MAX_ARC1214_POSTQUEUE - 1);
1463 pmu->postq_index = index_stripped ? (index_stripped | toggle) :
1464 (toggle ^ 0x4000);
1465 writel(postq_index, pmu->inboundlist_write_pointer);
1466 spin_unlock_irqrestore(&acb->postq_lock, flags);
1467 break;
1468 }
1207 } 1469 }
1208} 1470}
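
Posting to the type D inbound list is the one producer-side spot that needs postq_lock: fill an InBound_SRB with the CCB bus address split into 32-bit halves plus the frame length in DWORDs, advance postq_index with the same toggle scheme as the done queue, and only then notify the hardware through inboundlist_write_pointer. Judging by their use here, the dma_addr_hi32/dma_addr_lo32 helpers amount to (a sketch, not the driver's definitions):

	/* Sketch: split a dma_addr_t into the two 32-bit SRB fields. */
	static inline u32 addr_hi32(dma_addr_t a) { return (u32)((u64)a >> 32); }
	static inline u32 addr_lo32(dma_addr_t a) { return (u32)a; }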
1209 1471
1210static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb) 1472static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb)
1211{ 1473{
1212 struct MessageUnit_A __iomem *reg = acb->pmuA; 1474 struct MessageUnit_A __iomem *reg = acb->pmuA;
1213 acb->acb_flags &= ~ACB_F_MSG_START_BGRB; 1475 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1214 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0); 1476 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
1215 if (!arcmsr_hba_wait_msgint_ready(acb)) { 1477 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
1216 printk(KERN_NOTICE 1478 printk(KERN_NOTICE
1217 "arcmsr%d: wait 'stop adapter background rebulid' timeout \n" 1479 "arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
1218 , acb->host->host_no); 1480 , acb->host->host_no);
1219 } 1481 }
1220} 1482}
1221 1483
1222static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb) 1484static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb)
1223{ 1485{
1224 struct MessageUnit_B *reg = acb->pmuB; 1486 struct MessageUnit_B *reg = acb->pmuB;
1225 acb->acb_flags &= ~ACB_F_MSG_START_BGRB; 1487 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1226 writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell); 1488 writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell);
1227 1489
1228 if (!arcmsr_hbb_wait_msgint_ready(acb)) { 1490 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
1229 printk(KERN_NOTICE 1491 printk(KERN_NOTICE
1230 "arcmsr%d: wait 'stop adapter background rebulid' timeout \n" 1492 "arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
1231 , acb->host->host_no); 1493 , acb->host->host_no);
1232 } 1494 }
1233} 1495}
1234 1496
1235static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *pACB) 1497static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB)
1236{ 1498{
1237 struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC; 1499 struct MessageUnit_C __iomem *reg = pACB->pmuC;
1238 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB; 1500 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
1239 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0); 1501 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
1240 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell); 1502 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
1241 if (!arcmsr_hbc_wait_msgint_ready(pACB)) { 1503 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
1242 printk(KERN_NOTICE 1504 printk(KERN_NOTICE
1243 "arcmsr%d: wait 'stop adapter background rebulid' timeout \n" 1505 "arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
1244 , pACB->host->host_no); 1506 , pACB->host->host_no);
1245 } 1507 }
1246 return; 1508 return;
1247} 1509}
1510
1511static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
1512{
1513 struct MessageUnit_D *reg = pACB->pmuD;
1514
1515 pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
1516 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0);
1517 if (!arcmsr_hbaD_wait_msgint_ready(pACB))
1518 pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
1519 "timeout\n", pACB->host->host_no);
1520}
1521
1248static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb) 1522static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
1249{ 1523{
1250 switch (acb->adapter_type) { 1524 switch (acb->adapter_type) {
1251 case ACB_ADAPTER_TYPE_A: { 1525 case ACB_ADAPTER_TYPE_A: {
1252 arcmsr_stop_hba_bgrb(acb); 1526 arcmsr_hbaA_stop_bgrb(acb);
1253 } 1527 }
1254 break; 1528 break;
1255 1529
1256 case ACB_ADAPTER_TYPE_B: { 1530 case ACB_ADAPTER_TYPE_B: {
1257 arcmsr_stop_hbb_bgrb(acb); 1531 arcmsr_hbaB_stop_bgrb(acb);
1258 } 1532 }
1259 break; 1533 break;
1260 case ACB_ADAPTER_TYPE_C: { 1534 case ACB_ADAPTER_TYPE_C: {
1261 arcmsr_stop_hbc_bgrb(acb); 1535 arcmsr_hbaC_stop_bgrb(acb);
1262 } 1536 }
1537 break;
1538 case ACB_ADAPTER_TYPE_D:
1539 arcmsr_hbaD_stop_bgrb(acb);
1540 break;
1263 } 1541 }
1264} 1542}
1265 1543
@@ -1268,7 +1546,7 @@ static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
1268 dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle); 1546 dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle);
1269} 1547}
1270 1548
1271void arcmsr_iop_message_read(struct AdapterControlBlock *acb) 1549static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
1272{ 1550{
1273 switch (acb->adapter_type) { 1551 switch (acb->adapter_type) {
1274 case ACB_ADAPTER_TYPE_A: { 1552 case ACB_ADAPTER_TYPE_A: {
@@ -1284,8 +1562,16 @@ void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
1284 break; 1562 break;
1285 case ACB_ADAPTER_TYPE_C: { 1563 case ACB_ADAPTER_TYPE_C: {
1286 struct MessageUnit_C __iomem *reg = acb->pmuC; 1564 struct MessageUnit_C __iomem *reg = acb->pmuC;
1565
1287 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell); 1566 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
1288 } 1567 }
1568 break;
1569 case ACB_ADAPTER_TYPE_D: {
1570 struct MessageUnit_D *reg = acb->pmuD;
1571 writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
1572 reg->inbound_doorbell);
1573 }
1574 break;
1289 } 1575 }
1290} 1576}
1291 1577
@@ -1320,6 +1606,12 @@ static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
1320 writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, &reg->inbound_doorbell); 1606 writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, &reg->inbound_doorbell);
1321 } 1607 }
1322 break; 1608 break;
1609 case ACB_ADAPTER_TYPE_D: {
1610 struct MessageUnit_D *reg = acb->pmuD;
1611 writel(ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY,
1612 reg->inbound_doorbell);
1613 }
1614 break;
1323 } 1615 }
1324} 1616}
1325 1617
@@ -1340,9 +1632,15 @@ struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
1340 } 1632 }
1341 break; 1633 break;
1342 case ACB_ADAPTER_TYPE_C: { 1634 case ACB_ADAPTER_TYPE_C: {
1343 struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)acb->pmuC; 1635 struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
1344 qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer; 1636 qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer;
1345 } 1637 }
1638 break;
1639 case ACB_ADAPTER_TYPE_D: {
1640 struct MessageUnit_D *reg = acb->pmuD;
1641 qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
1642 }
1643 break;
1346 } 1644 }
1347 return qbuffer; 1645 return qbuffer;
1348} 1646}
@@ -1364,96 +1662,208 @@ static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBloc
1364 } 1662 }
1365 break; 1663 break;
1366 case ACB_ADAPTER_TYPE_C: { 1664 case ACB_ADAPTER_TYPE_C: {
1367 struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC; 1665 struct MessageUnit_C __iomem *reg = acb->pmuC;
1368 pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer; 1666 pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
1369 } 1667 }
1370 1668 break;
1669 case ACB_ADAPTER_TYPE_D: {
1670 struct MessageUnit_D *reg = acb->pmuD;
1671 pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
1672 }
1673 break;
1371 } 1674 }
1372 return pqbuffer; 1675 return pqbuffer;
1373} 1676}
1374 1677
1375static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb) 1678static uint32_t
1679arcmsr_Read_iop_rqbuffer_in_DWORD(struct AdapterControlBlock *acb,
1680 struct QBUFFER __iomem *prbuffer)
1376{ 1681{
1377 struct QBUFFER __iomem *prbuffer; 1682 uint8_t *pQbuffer;
1378 struct QBUFFER *pQbuffer; 1683 uint8_t *buf1 = NULL;
1379 uint8_t __iomem *iop_data; 1684 uint32_t __iomem *iop_data;
1380 int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex; 1685 uint32_t iop_len, data_len, *buf2 = NULL;
1381 rqbuf_lastindex = acb->rqbuf_lastindex; 1686
1382 rqbuf_firstindex = acb->rqbuf_firstindex; 1687 iop_data = (uint32_t __iomem *)prbuffer->data;
1383 prbuffer = arcmsr_get_iop_rqbuffer(acb); 1688 iop_len = readl(&prbuffer->data_len);
1384 iop_data = (uint8_t __iomem *)prbuffer->data; 1689 if (iop_len > 0) {
1385 iop_len = prbuffer->data_len; 1690 buf1 = kmalloc(128, GFP_ATOMIC);
1386 my_empty_len = (rqbuf_firstindex - rqbuf_lastindex - 1) & (ARCMSR_MAX_QBUFFER - 1); 1691 buf2 = (uint32_t *)buf1;
1387 1692 if (buf1 == NULL)
1388 if (my_empty_len >= iop_len) 1693 return 0;
1389 { 1694 data_len = iop_len;
1390 while (iop_len > 0) { 1695 while (data_len >= 4) {
1391 pQbuffer = (struct QBUFFER *)&acb->rqbuffer[rqbuf_lastindex]; 1696 *buf2++ = readl(iop_data);
1392 memcpy(pQbuffer, iop_data, 1);
1393 rqbuf_lastindex++;
1394 rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1395 iop_data++; 1697 iop_data++;
1396 iop_len--; 1698 data_len -= 4;
1397 } 1699 }
1398 acb->rqbuf_lastindex = rqbuf_lastindex; 1700 if (data_len)
1399 arcmsr_iop_message_read(acb); 1701 *buf2 = readl(iop_data);
1702 buf2 = (uint32_t *)buf1;
1703 }
1704 while (iop_len > 0) {
1705 pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
1706 *pQbuffer = *buf1;
1707 acb->rqbuf_putIndex++;
1708 /* wrap index to 0 past the last slot */
1709 acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
1710 buf1++;
1711 iop_len--;
1400 } 1712 }
1713 kfree(buf2);
1714 /* let IOP know data has been read */
1715 arcmsr_iop_message_read(acb);
1716 return 1;
1717}
1718
1719uint32_t
1720arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
1721 struct QBUFFER __iomem *prbuffer) {
1401 1722
1402 else { 1723 uint8_t *pQbuffer;
1724 uint8_t __iomem *iop_data;
1725 uint32_t iop_len;
1726
1727 if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D))
1728 return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer);
1729 iop_data = (uint8_t __iomem *)prbuffer->data;
1730 iop_len = readl(&prbuffer->data_len);
1731 while (iop_len > 0) {
1732 pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
1733 *pQbuffer = readb(iop_data);
1734 acb->rqbuf_putIndex++;
1735 acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
1736 iop_data++;
1737 iop_len--;
1738 }
1739 arcmsr_iop_message_read(acb);
1740 return 1;
1741}
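
arcmsr_Read_iop_rqbuffer_in_DWORD exists because the C and D message buffers only tolerate 32-bit wide accesses: the payload is pulled out with readl() into a small kmalloc'd bounce buffer, then trickled byte by byte into the driver's ring; the readb() loop below it remains the A/B path. Stripped of the ring bookkeeping, the bounce step is (mmio_data, dst and len are hypothetical):

	/* Sketch: read 'len' bytes from MMIO that supports only
	 * 32-bit accesses, via a DWORD bounce buffer. */
	u32 bounce[32];			/* 128 bytes, one QBUFFER's worth */
	u32 __iomem *src = mmio_data;
	int i, dwords = DIV_ROUND_UP(len, 4);

	for (i = 0; i < dwords; i++)
		bounce[i] = readl(src + i);
	memcpy(dst, bounce, len);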
1742
1743static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
1744{
1745 unsigned long flags;
1746 struct QBUFFER __iomem *prbuffer;
1747 int32_t buf_empty_len;
1748
1749 spin_lock_irqsave(&acb->rqbuffer_lock, flags);
1750 prbuffer = arcmsr_get_iop_rqbuffer(acb);
1751 buf_empty_len = (acb->rqbuf_putIndex - acb->rqbuf_getIndex - 1) &
1752 (ARCMSR_MAX_QBUFFER - 1);
1753 if (buf_empty_len >= readl(&prbuffer->data_len)) {
1754 if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
1755 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
1756 } else
1403 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW; 1757 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
1758 spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
1759}
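
The overflow check above leans on the standard power-of-two ring identity: with a put index, a get index and one slot held in reserve, free space is (put - get - 1) masked by size - 1, which stays correct across wrap-around and keeps "full" distinguishable from "empty". For example:

	/* Sketch: free space in a power-of-two ring, one slot reserved. */
	#define RING_SIZE ARCMSR_MAX_QBUFFER	/* assumed a power of two */
	static inline u32 ring_free(u32 put, u32 get)
	{
		return (put - get - 1) & (RING_SIZE - 1);
	}
	/* put == get       -> RING_SIZE - 1 bytes free (ring empty)
	 * ring_free() == 0 -> ring full */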
1760
1761static void arcmsr_write_ioctldata2iop_in_DWORD(struct AdapterControlBlock *acb)
1762{
1763 uint8_t *pQbuffer;
1764 struct QBUFFER __iomem *pwbuffer;
1765 uint8_t *buf1 = NULL;
1766 uint32_t __iomem *iop_data;
1767 uint32_t allxfer_len = 0, data_len, *buf2 = NULL, data;
1768
1769 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
1770 buf1 = kmalloc(128, GFP_ATOMIC);
1771 buf2 = (uint32_t *)buf1;
1772 if (buf1 == NULL)
1773 return;
1774
1775 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
1776 pwbuffer = arcmsr_get_iop_wqbuffer(acb);
1777 iop_data = (uint32_t __iomem *)pwbuffer->data;
1778 while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
1779 && (allxfer_len < 124)) {
1780 pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
1781 *buf1 = *pQbuffer;
1782 acb->wqbuf_getIndex++;
1783 acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
1784 buf1++;
1785 allxfer_len++;
1786 }
1787 data_len = allxfer_len;
1788 buf1 = (uint8_t *)buf2;
1789 while (data_len >= 4) {
1790 data = *buf2++;
1791 writel(data, iop_data);
1792 iop_data++;
1793 data_len -= 4;
1794 }
1795 if (data_len) {
1796 data = *buf2;
1797 writel(data, iop_data);
1798 }
1799 writel(allxfer_len, &pwbuffer->data_len);
1800 kfree(buf1);
1801 arcmsr_iop_message_wrote(acb);
1404 } 1802 }
1405} 1803}
1406 1804
1407static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb) 1805void
1806arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb)
1408{ 1807{
1409 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED; 1808 uint8_t *pQbuffer;
1410 if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) { 1809 struct QBUFFER __iomem *pwbuffer;
1411 uint8_t *pQbuffer; 1810 uint8_t __iomem *iop_data;
1412 struct QBUFFER __iomem *pwbuffer; 1811 int32_t allxfer_len = 0;
1413 uint8_t __iomem *iop_data;
1414 int32_t allxfer_len = 0;
1415 1812
1813 if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) {
1814 arcmsr_write_ioctldata2iop_in_DWORD(acb);
1815 return;
1816 }
1817 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
1416 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED); 1818 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
1417 pwbuffer = arcmsr_get_iop_wqbuffer(acb); 1819 pwbuffer = arcmsr_get_iop_wqbuffer(acb);
1418 iop_data = (uint8_t __iomem *)pwbuffer->data; 1820 iop_data = (uint8_t __iomem *)pwbuffer->data;
1419 1821 while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
1420 while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex) && \ 1822 && (allxfer_len < 124)) {
1421 (allxfer_len < 124)) { 1823 pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
1422 pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex]; 1824 writeb(*pQbuffer, iop_data);
1423 memcpy(iop_data, pQbuffer, 1); 1825 acb->wqbuf_getIndex++;
1424 acb->wqbuf_firstindex++; 1826 acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
1425 acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1426 iop_data++; 1827 iop_data++;
1427 allxfer_len++; 1828 allxfer_len++;
1428 } 1829 }
1429 pwbuffer->data_len = allxfer_len; 1830 writel(allxfer_len, &pwbuffer->data_len);
1430
1431 arcmsr_iop_message_wrote(acb); 1831 arcmsr_iop_message_wrote(acb);
1432 } 1832 }
1833}
1433 1834
1434 if (acb->wqbuf_firstindex == acb->wqbuf_lastindex) { 1835static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
1836{
1837 unsigned long flags;
1838
1839 spin_lock_irqsave(&acb->wqbuffer_lock, flags);
1840 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
1841 if (acb->wqbuf_getIndex != acb->wqbuf_putIndex)
1842 arcmsr_write_ioctldata2iop(acb);
1843 if (acb->wqbuf_getIndex == acb->wqbuf_putIndex)
1435 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED; 1844 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
1436 } 1845 spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
1437} 1846}
1438 1847
1439static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb) 1848static void arcmsr_hbaA_doorbell_isr(struct AdapterControlBlock *acb)
1440{ 1849{
1441 uint32_t outbound_doorbell; 1850 uint32_t outbound_doorbell;
1442 struct MessageUnit_A __iomem *reg = acb->pmuA; 1851 struct MessageUnit_A __iomem *reg = acb->pmuA;
1443 outbound_doorbell = readl(&reg->outbound_doorbell); 1852 outbound_doorbell = readl(&reg->outbound_doorbell);
1444 writel(outbound_doorbell, &reg->outbound_doorbell); 1853 do {
1445 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) { 1854 writel(outbound_doorbell, &reg->outbound_doorbell);
1446 arcmsr_iop2drv_data_wrote_handle(acb); 1855 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
1447 } 1856 arcmsr_iop2drv_data_wrote_handle(acb);
1448 1857 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
1449 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) { 1858 arcmsr_iop2drv_data_read_handle(acb);
1450 arcmsr_iop2drv_data_read_handle(acb); 1859 outbound_doorbell = readl(&reg->outbound_doorbell);
1451 } 1860 } while (outbound_doorbell & (ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK
1861 | ARCMSR_OUTBOUND_IOP331_DATA_READ_OK));
1452} 1862}
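
All of the doorbell ISRs become ack-then-re-read loops in this patch: latch the causes, acknowledge them, service each one, then read the register again and repeat while any serviced bit reappears. That closes the window where an event raised between the read and the ack would be lost until the next interrupt. The shape of the loop (event names hypothetical):

	/* Sketch: race-tolerant doorbell service loop. */
	u32 db = readl(doorbell);
	do {
		writel(db, doorbell);	/* ack what we latched */
		if (db & EVENT_WRITE_OK)
			handle_data_wrote();
		if (db & EVENT_READ_OK)
			handle_data_read();
		db = readl(doorbell);	/* pick up late arrivals */
	} while (db & (EVENT_WRITE_OK | EVENT_READ_OK));
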
1453static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *pACB) 1863static void arcmsr_hbaC_doorbell_isr(struct AdapterControlBlock *pACB)
1454{ 1864{
1455 uint32_t outbound_doorbell; 1865 uint32_t outbound_doorbell;
1456 struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC; 1866 struct MessageUnit_C __iomem *reg = pACB->pmuC;
1457 /* 1867 /*
1458 ******************************************************************* 1868 *******************************************************************
1459 ** Maybe here we need to check whether wrqbuffer_lock is held 1869 ** Maybe here we need to check whether wrqbuffer_lock is held
@@ -1462,19 +1872,42 @@ static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *pACB)
1462 ******************************************************************* 1872 *******************************************************************
1463 */ 1873 */
1464 outbound_doorbell = readl(&reg->outbound_doorbell); 1874 outbound_doorbell = readl(&reg->outbound_doorbell);
1465 writel(outbound_doorbell, &reg->outbound_doorbell_clear);/*clear interrupt*/ 1875 do {
1466 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) { 1876 writel(outbound_doorbell, &reg->outbound_doorbell_clear);
1467 arcmsr_iop2drv_data_wrote_handle(pACB); 1877 readl(&reg->outbound_doorbell_clear);
1468 } 1878 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK)
1469 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) { 1879 arcmsr_iop2drv_data_wrote_handle(pACB);
1470 arcmsr_iop2drv_data_read_handle(pACB); 1880 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK)
1471 } 1881 arcmsr_iop2drv_data_read_handle(pACB);
1472 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { 1882 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE)
1473 arcmsr_hbc_message_isr(pACB); /* messenger of "driver to iop commands" */ 1883 arcmsr_hbaC_message_isr(pACB);
1474 } 1884 outbound_doorbell = readl(&reg->outbound_doorbell);
1475 return; 1885 } while (outbound_doorbell & (ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK
1886 | ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK
1887 | ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE));
1476} 1888}
1477static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb) 1889
1890static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB)
1891{
1892 uint32_t outbound_doorbell;
1893 struct MessageUnit_D *pmu = pACB->pmuD;
1894
1895 outbound_doorbell = readl(pmu->outbound_doorbell);
1896 do {
1897 writel(outbound_doorbell, pmu->outbound_doorbell);
1898 if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE)
1899 arcmsr_hbaD_message_isr(pACB);
1900 if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK)
1901 arcmsr_iop2drv_data_wrote_handle(pACB);
1902 if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK)
1903 arcmsr_iop2drv_data_read_handle(pACB);
1904 outbound_doorbell = readl(pmu->outbound_doorbell);
1905 } while (outbound_doorbell & (ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK
1906 | ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK
1907 | ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE));
1908}
1909
1910static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb)
1478{ 1911{
1479 uint32_t flag_ccb; 1912 uint32_t flag_ccb;
1480 struct MessageUnit_A __iomem *reg = acb->pmuA; 1913 struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -1488,7 +1921,7 @@ static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
1488 arcmsr_drain_donequeue(acb, pCCB, error); 1921 arcmsr_drain_donequeue(acb, pCCB, error);
1489 } 1922 }
1490} 1923}
1491static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb) 1924static void arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock *acb)
1492{ 1925{
1493 uint32_t index; 1926 uint32_t index;
1494 uint32_t flag_ccb; 1927 uint32_t flag_ccb;
@@ -1497,8 +1930,8 @@ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
1497 struct CommandControlBlock *pCCB; 1930 struct CommandControlBlock *pCCB;
1498 bool error; 1931 bool error;
1499 index = reg->doneq_index; 1932 index = reg->doneq_index;
1500 while ((flag_ccb = readl(&reg->done_qbuffer[index])) != 0) { 1933 while ((flag_ccb = reg->done_qbuffer[index]) != 0) {
1501 writel(0, &reg->done_qbuffer[index]); 1934 reg->done_qbuffer[index] = 0;
1502 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/ 1935 pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
1503 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); 1936 pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
1504 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false; 1937 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
@@ -1509,35 +1942,80 @@ static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
 	}
 }
 
-static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
+static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb)
 {
-	struct MessageUnit_C *phbcmu;
+	struct MessageUnit_C __iomem *phbcmu;
 	struct ARCMSR_CDB *arcmsr_cdb;
 	struct CommandControlBlock *ccb;
 	uint32_t flag_ccb, ccb_cdb_phy, throttling = 0;
 	int error;
 
-	phbcmu = (struct MessageUnit_C *)acb->pmuC;
+	phbcmu = acb->pmuC;
 	/* areca cdb command done */
 	/* Use correct offset and size for syncing */
 
-	while (readl(&phbcmu->host_int_status) &
-		ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR){
-		/* check if command done with no error*/
-		flag_ccb = readl(&phbcmu->outbound_queueport_low);
-		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);/*frame must be 32 bytes aligned*/
-		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
-		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
-		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
-		/* check if command done with no error */
-		arcmsr_drain_donequeue(acb, ccb, error);
-		if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
-			writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING, &phbcmu->inbound_doorbell);
-			break;
-		}
-		throttling++;
+	while ((flag_ccb = readl(&phbcmu->outbound_queueport_low)) !=
+			0xFFFFFFFF) {
+		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
+		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
+			+ ccb_cdb_phy);
+		ccb = container_of(arcmsr_cdb, struct CommandControlBlock,
+			arcmsr_cdb);
+		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
+			? true : false;
+		/* check if command done with no error */
+		arcmsr_drain_donequeue(acb, ccb, error);
+		throttling++;
+		if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
+			writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING,
+				&phbcmu->inbound_doorbell);
+			throttling = 0;
+		}
 	}
 }
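
The rewritten type-C completion loop above no longer bails out of the ISR when the throttling level is hit; it rings the IOP doorbell every ARCMSR_HBC_ISR_THROTTLING_LEVEL completions, resets the counter, and keeps draining until the queue port reads 0xFFFFFFFF. A standalone sketch of that pattern (illustrative only -- the queue array, pop_queueport() and ring_doorbell() below are invented stand-ins, not driver API):

    #include <stdint.h>
    #include <stdio.h>

    #define THROTTLING_LEVEL 4        /* stand-in for ARCMSR_HBC_ISR_THROTTLING_LEVEL */
    #define QUEUE_EMPTY 0xFFFFFFFFu   /* sentinel the queue port returns when drained */

    static uint32_t queue[] = { 0x100, 0x200, 0x300, 0x400, 0x500, 0x600, QUEUE_EMPTY };
    static unsigned int qpos;

    static uint32_t pop_queueport(void) { return queue[qpos++]; }
    static void ring_doorbell(void) { printf("doorbell: postqueue throttling\n"); }

    int main(void)
    {
        uint32_t flag, throttling = 0;

        while ((flag = pop_queueport()) != QUEUE_EMPTY) {
            printf("complete ccb frame 0x%x\n", flag & 0xFFFFFFF0u);
            if (++throttling == THROTTLING_LEVEL) {
                ring_doorbell();    /* nudge the IOP, then keep draining */
                throttling = 0;
            }
        }
        return 0;
    }
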
+
+static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
+{
+	u32 outbound_write_pointer, doneq_index, index_stripped, toggle;
+	uint32_t addressLow, ccb_cdb_phy;
+	int error;
+	struct MessageUnit_D *pmu;
+	struct ARCMSR_CDB *arcmsr_cdb;
+	struct CommandControlBlock *ccb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&acb->doneq_lock, flags);
+	pmu = acb->pmuD;
+	outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
+	doneq_index = pmu->doneq_index;
+	if ((doneq_index & 0xFFF) != (outbound_write_pointer & 0xFFF)) {
+		do {
+			toggle = doneq_index & 0x4000;
+			index_stripped = (doneq_index & 0xFFF) + 1;
+			index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
+			pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
+				((toggle ^ 0x4000) + 1);
+			doneq_index = pmu->doneq_index;
+			addressLow = pmu->done_qbuffer[doneq_index &
+				0xFFF].addressLow;
+			ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
+			arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
+				+ ccb_cdb_phy);
+			ccb = container_of(arcmsr_cdb,
+				struct CommandControlBlock, arcmsr_cdb);
+			error = (addressLow & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
+				? true : false;
+			arcmsr_drain_donequeue(acb, ccb, error);
+			writel(doneq_index, pmu->outboundlist_read_pointer);
+		} while ((doneq_index & 0xFFF) !=
+			(outbound_write_pointer & 0xFFF));
+	}
+	writel(ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR,
+		pmu->outboundlist_interrupt_cause);
+	readl(pmu->outboundlist_interrupt_cause);
+	spin_unlock_irqrestore(&acb->doneq_lock, flags);
+}
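
In arcmsr_hbaD_postqueue_isr() above, the done-queue index packs a 12-bit slot number together with a toggle flag at bit 0x4000 that flips each time the ring wraps; slot 0 is skipped on wrap because it holds the hardware write pointer. A minimal sketch of just the advance step (the queue depth of 257 is assumed here purely for illustration; the driver uses ARCMSR_MAX_ARC1214_DONEQUEUE):

    #include <stdint.h>
    #include <stdio.h>

    #define DONEQUEUE_DEPTH 257u    /* assumed stand-in for ARCMSR_MAX_ARC1214_DONEQUEUE */
    #define TOGGLE_BIT 0x4000u

    /* Advance a type-D done-queue index: low 12 bits hold the slot
     * (slot 0 is reserved for the write pointer), bit 0x4000 flips on
     * every wrap so producer and consumer can tell a full pass apart. */
    static uint32_t advance_doneq_index(uint32_t doneq_index)
    {
        uint32_t toggle = doneq_index & TOGGLE_BIT;
        uint32_t next = ((doneq_index & 0xFFFu) + 1u) % DONEQUEUE_DEPTH;

        return next ? (next | toggle) : ((toggle ^ TOGGLE_BIT) + 1u);
    }

    int main(void)
    {
        uint32_t idx = 255u;    /* near the end of the ring */
        int i;

        for (i = 0; i < 4; i++) {
            idx = advance_doneq_index(idx);
            printf("slot %3u toggle %u\n", idx & 0xFFFu, !!(idx & TOGGLE_BIT));
        }
        return 0;   /* prints slots 256, 1, 2, 3; toggle flips at the wrap */
    }
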
+
 /*
 **********************************************************************************
 ** Handle a message interrupt
@@ -1546,14 +2024,14 @@ static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
 ** We want this in order to compare the drivemap so that we can detect newly-attached drives.
 **********************************************************************************
 */
-static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb)
+static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb)
 {
-	struct MessageUnit_A *reg = acb->pmuA;
+	struct MessageUnit_A __iomem *reg = acb->pmuA;
 	/*clear interrupt and message state*/
 	writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
 	schedule_work(&acb->arcmsr_do_message_isr_bh);
 }
-static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb)
+static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_B *reg = acb->pmuB;
 
@@ -1570,114 +2048,142 @@ static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb)
 ** We want this in order to compare the drivemap so that we can detect newly-attached drives.
 **********************************************************************************
 */
-static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb)
+static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb)
 {
-	struct MessageUnit_C *reg = acb->pmuC;
+	struct MessageUnit_C __iomem *reg = acb->pmuC;
 	/*clear interrupt and message state*/
 	writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);
 	schedule_work(&acb->arcmsr_do_message_isr_bh);
 }
 
-static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
+static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
+{
+	struct MessageUnit_D *reg = acb->pmuD;
+
+	writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell);
+	readl(reg->outbound_doorbell);
+	schedule_work(&acb->arcmsr_do_message_isr_bh);
+}
+
+static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb)
 {
 	uint32_t outbound_intstatus;
 	struct MessageUnit_A __iomem *reg = acb->pmuA;
 	outbound_intstatus = readl(&reg->outbound_intstatus) &
 		acb->outbound_int_enable;
-	if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) {
-		return 1;
-	}
-	writel(outbound_intstatus, &reg->outbound_intstatus);
-	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
-		arcmsr_hba_doorbell_isr(acb);
-	}
-	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
-		arcmsr_hba_postqueue_isr(acb);
-	}
-	if(outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
-		/* messenger of "driver to iop commands" */
-		arcmsr_hba_message_isr(acb);
-	}
-	return 0;
+	if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))
+		return IRQ_NONE;
+	do {
+		writel(outbound_intstatus, &reg->outbound_intstatus);
+		if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
+			arcmsr_hbaA_doorbell_isr(acb);
+		if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
+			arcmsr_hbaA_postqueue_isr(acb);
+		if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT)
+			arcmsr_hbaA_message_isr(acb);
+		outbound_intstatus = readl(&reg->outbound_intstatus) &
+			acb->outbound_int_enable;
+	} while (outbound_intstatus & (ARCMSR_MU_OUTBOUND_DOORBELL_INT
+		| ARCMSR_MU_OUTBOUND_POSTQUEUE_INT
+		| ARCMSR_MU_OUTBOUND_MESSAGE0_INT));
+	return IRQ_HANDLED;
 }
 
-static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
+static int arcmsr_hbaB_handle_isr(struct AdapterControlBlock *acb)
 {
 	uint32_t outbound_doorbell;
 	struct MessageUnit_B *reg = acb->pmuB;
 	outbound_doorbell = readl(reg->iop2drv_doorbell) &
 		acb->outbound_int_enable;
 	if (!outbound_doorbell)
-		return 1;
-
-	writel(~outbound_doorbell, reg->iop2drv_doorbell);
-	/*in case the last action of doorbell interrupt clearance is cached,
-	this action can push HW to write down the clear bit*/
-	readl(reg->iop2drv_doorbell);
-	writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
-	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
-		arcmsr_iop2drv_data_wrote_handle(acb);
-	}
-	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
-		arcmsr_iop2drv_data_read_handle(acb);
-	}
-	if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
-		arcmsr_hbb_postqueue_isr(acb);
-	}
-	if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
-		/* messenger of "driver to iop commands" */
-		arcmsr_hbb_message_isr(acb);
-	}
-	return 0;
+		return IRQ_NONE;
+	do {
+		writel(~outbound_doorbell, reg->iop2drv_doorbell);
+		writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
+		if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
+			arcmsr_iop2drv_data_wrote_handle(acb);
+		if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
+			arcmsr_iop2drv_data_read_handle(acb);
+		if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
+			arcmsr_hbaB_postqueue_isr(acb);
+		if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE)
+			arcmsr_hbaB_message_isr(acb);
+		outbound_doorbell = readl(reg->iop2drv_doorbell) &
+			acb->outbound_int_enable;
+	} while (outbound_doorbell & (ARCMSR_IOP2DRV_DATA_WRITE_OK
+		| ARCMSR_IOP2DRV_DATA_READ_OK
+		| ARCMSR_IOP2DRV_CDB_DONE
+		| ARCMSR_IOP2DRV_MESSAGE_CMD_DONE));
+	return IRQ_HANDLED;
 }
 
-static int arcmsr_handle_hbc_isr(struct AdapterControlBlock *pACB)
+static int arcmsr_hbaC_handle_isr(struct AdapterControlBlock *pACB)
 {
 	uint32_t host_interrupt_status;
-	struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
+	struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
 	/*
 	*********************************************
 	** check outbound intstatus
 	*********************************************
 	*/
-	host_interrupt_status = readl(&phbcmu->host_int_status);
-	if (!host_interrupt_status) {
-		/*it must be share irq*/
-		return 1;
-	}
-	/* MU ioctl transfer doorbell interrupts*/
-	if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
-		arcmsr_hbc_doorbell_isr(pACB);   /* messenger of "ioctl message read write" */
-	}
-	/* MU post queue interrupts*/
-	if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
-		arcmsr_hbc_postqueue_isr(pACB);  /* messenger of "scsi commands" */
-	}
-	return 0;
+	host_interrupt_status = readl(&phbcmu->host_int_status) &
+		(ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
+		ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR);
+	if (!host_interrupt_status)
+		return IRQ_NONE;
+	do {
+		if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)
+			arcmsr_hbaC_doorbell_isr(pACB);
+		/* MU post queue interrupts*/
+		if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)
+			arcmsr_hbaC_postqueue_isr(pACB);
+		host_interrupt_status = readl(&phbcmu->host_int_status);
+	} while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
+		ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR));
+	return IRQ_HANDLED;
 }
 
+static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB)
+{
+	u32 host_interrupt_status;
+	struct MessageUnit_D *pmu = pACB->pmuD;
+
+	host_interrupt_status = readl(pmu->host_int_status) &
+		(ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
+		ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR);
+	if (!host_interrupt_status)
+		return IRQ_NONE;
+	do {
+		/* MU post queue interrupts*/
+		if (host_interrupt_status &
+			ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR)
+			arcmsr_hbaD_postqueue_isr(pACB);
+		if (host_interrupt_status &
+			ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR)
+			arcmsr_hbaD_doorbell_isr(pACB);
+		host_interrupt_status = readl(pmu->host_int_status);
+	} while (host_interrupt_status &
+		(ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
+		ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR));
+	return IRQ_HANDLED;
+}
+
 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
 {
 	switch (acb->adapter_type) {
-	case ACB_ADAPTER_TYPE_A: {
-		if (arcmsr_handle_hba_isr(acb)) {
-			return IRQ_NONE;
-		}
-	}
-	break;
-
-	case ACB_ADAPTER_TYPE_B: {
-		if (arcmsr_handle_hbb_isr(acb)) {
-			return IRQ_NONE;
-		}
-	}
-	break;
-	case ACB_ADAPTER_TYPE_C: {
-		if (arcmsr_handle_hbc_isr(acb)) {
-			return IRQ_NONE;
-		}
-	}
+	case ACB_ADAPTER_TYPE_A:
+		return arcmsr_hbaA_handle_isr(acb);
+		break;
+	case ACB_ADAPTER_TYPE_B:
+		return arcmsr_hbaB_handle_isr(acb);
+		break;
+	case ACB_ADAPTER_TYPE_C:
+		return arcmsr_hbaC_handle_isr(acb);
+	case ACB_ADAPTER_TYPE_D:
+		return arcmsr_hbaD_handle_isr(acb);
+	default:
+		return IRQ_NONE;
 	}
-	return IRQ_HANDLED;
 }
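
arcmsr_interrupt() above now just forwards the per-family return value, which matters on a shared interrupt line: returning IRQ_NONE tells the kernel the raised bits belonged to another device. A compact model of the dispatch (the enums below are user-space stand-ins for irqreturn_t and the ACB_ADAPTER_TYPE_* constants, not kernel API):

    #include <stdio.h>

    enum irqret { IRQ_NONE, IRQ_HANDLED };
    enum acb_type { TYPE_A, TYPE_B, TYPE_C, TYPE_D, TYPE_UNKNOWN };

    /* Each family handler would loop until its status register is quiet
     * and report IRQ_NONE when its device raised nothing. */
    static enum irqret handle_a(void) { return IRQ_HANDLED; }
    static enum irqret handle_b(void) { return IRQ_HANDLED; }
    static enum irqret handle_c(void) { return IRQ_HANDLED; }
    static enum irqret handle_d(void) { return IRQ_HANDLED; }

    static enum irqret dispatch(enum acb_type t)
    {
        switch (t) {
        case TYPE_A: return handle_a();
        case TYPE_B: return handle_b();
        case TYPE_C: return handle_c();
        case TYPE_D: return handle_d();
        default:     return IRQ_NONE;   /* unknown hardware: not ours */
        }
    }

    int main(void)
    {
        printf("%s\n", dispatch(TYPE_D) == IRQ_HANDLED ? "handled" : "none");
        return 0;
    }
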
 
 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
@@ -1695,296 +2201,273 @@ static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
 	}
 }
 
-void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
+
+void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *acb)
 {
-	int32_t wqbuf_firstindex, wqbuf_lastindex;
-	uint8_t *pQbuffer;
-	struct QBUFFER __iomem *pwbuffer;
-	uint8_t __iomem *iop_data;
-	int32_t allxfer_len = 0;
-	pwbuffer = arcmsr_get_iop_wqbuffer(acb);
-	iop_data = (uint8_t __iomem *)pwbuffer->data;
-	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
-		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
-		wqbuf_firstindex = acb->wqbuf_firstindex;
-		wqbuf_lastindex = acb->wqbuf_lastindex;
-		while ((wqbuf_firstindex != wqbuf_lastindex) && (allxfer_len < 124)) {
-			pQbuffer = &acb->wqbuffer[wqbuf_firstindex];
-			memcpy(iop_data, pQbuffer, 1);
-			wqbuf_firstindex++;
-			wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
-			iop_data++;
-			allxfer_len++;
-		}
-		acb->wqbuf_firstindex = wqbuf_firstindex;
-		pwbuffer->data_len = allxfer_len;
-		arcmsr_iop_message_wrote(acb);
-	}
+	uint32_t i;
+
+	if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
+		for (i = 0; i < 15; i++) {
+			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
+				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
+				acb->rqbuf_getIndex = 0;
+				acb->rqbuf_putIndex = 0;
+				arcmsr_iop_message_read(acb);
+				mdelay(30);
+			} else if (acb->rqbuf_getIndex !=
+				   acb->rqbuf_putIndex) {
+				acb->rqbuf_getIndex = 0;
+				acb->rqbuf_putIndex = 0;
+				mdelay(30);
+			} else
+				break;
+		}
+	}
 }
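
arcmsr_clear_iop2drv_rqueue_buffer() above bounds its drain at 15 passes, re-arming the ring and waiting 30 ms each time the IOP refills it. The same bounded-retry shape stripped to its essentials (the two helpers model an invented device, not the driver):

    #include <stdbool.h>
    #include <stdio.h>

    /* Invented stand-ins: pretend the device refills the ring twice. */
    static int refills = 2;
    static bool device_overflowed(void) { return refills > 0; }
    static void ack_and_reset_ring(void) { refills--; printf("reset ring, ack overflow\n"); }

    int main(void)
    {
        int i;

        for (i = 0; i < 15; i++) {      /* bounded: never spin forever */
            if (device_overflowed())
                ack_and_reset_ring();   /* real code also sleeps 30 ms here */
            else
                break;
        }
        printf("clean after %d pass(es)\n", i);
        return 0;
    }
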
 
 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
 		struct scsi_cmnd *cmd)
 {
-	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
-	int retvalue = 0, transfer_len = 0;
 	char *buffer;
+	unsigned short use_sg;
+	int retvalue = 0, transfer_len = 0;
+	unsigned long flags;
+	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
+	uint32_t controlcode = (uint32_t)cmd->cmnd[5] << 24 |
+		(uint32_t)cmd->cmnd[6] << 16 |
+		(uint32_t)cmd->cmnd[7] << 8 |
+		(uint32_t)cmd->cmnd[8];
 	struct scatterlist *sg;
-	uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 |
-		(uint32_t ) cmd->cmnd[6] << 16 |
-		(uint32_t ) cmd->cmnd[7] << 8 |
-		(uint32_t ) cmd->cmnd[8];
-	/* 4 bytes: Areca io control code */
+
+	use_sg = scsi_sg_count(cmd);
 	sg = scsi_sglist(cmd);
 	buffer = kmap_atomic(sg_page(sg)) + sg->offset;
-	if (scsi_sg_count(cmd) > 1) {
+	if (use_sg > 1) {
 		retvalue = ARCMSR_MESSAGE_FAIL;
 		goto message_out;
 	}
 	transfer_len += sg->length;
-
 	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
 		retvalue = ARCMSR_MESSAGE_FAIL;
+		pr_info("%s: ARCMSR_MESSAGE_FAIL!\n", __func__);
 		goto message_out;
 	}
-	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
-	switch(controlcode) {
-
+	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)buffer;
+	switch (controlcode) {
 	case ARCMSR_MESSAGE_READ_RQBUFFER: {
 		unsigned char *ver_addr;
-		uint8_t *pQbuffer, *ptmpQbuffer;
-		int32_t allxfer_len = 0;
-
-		ver_addr = kmalloc(1032, GFP_ATOMIC);
+		uint8_t *ptmpQbuffer;
+		uint32_t allxfer_len = 0;
+		ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
 		if (!ver_addr) {
 			retvalue = ARCMSR_MESSAGE_FAIL;
+			pr_info("%s: memory not enough!\n", __func__);
 			goto message_out;
 		}
-
 		ptmpQbuffer = ver_addr;
-		while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
-			&& (allxfer_len < 1031)) {
-			pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
-			memcpy(ptmpQbuffer, pQbuffer, 1);
-			acb->rqbuf_firstindex++;
-			acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
-			ptmpQbuffer++;
-			allxfer_len++;
+		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
+		if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) {
+			unsigned int tail = acb->rqbuf_getIndex;
+			unsigned int head = acb->rqbuf_putIndex;
+			unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER);
+
+			allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER);
+			if (allxfer_len > ARCMSR_API_DATA_BUFLEN)
+				allxfer_len = ARCMSR_API_DATA_BUFLEN;
+
+			if (allxfer_len <= cnt_to_end)
+				memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len);
+			else {
+				memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end);
+				memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end);
+			}
+			acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER;
 		}
+		memcpy(pcmdmessagefld->messagedatabuffer, ver_addr,
+			allxfer_len);
 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
-
 			struct QBUFFER __iomem *prbuffer;
-			uint8_t __iomem *iop_data;
-			int32_t iop_len;
-
 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
 			prbuffer = arcmsr_get_iop_rqbuffer(acb);
-			iop_data = prbuffer->data;
-			iop_len = readl(&prbuffer->data_len);
-			while (iop_len > 0) {
-				acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
-				acb->rqbuf_lastindex++;
-				acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
-				iop_data++;
-				iop_len--;
-			}
-			arcmsr_iop_message_read(acb);
-		}
-		memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, allxfer_len);
-		pcmdmessagefld->cmdmessage.Length = allxfer_len;
-		if(acb->fw_flag == FW_DEADLOCK) {
-			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
-		}else{
-			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
+			if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
+				acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
 		}
+		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
 		kfree(ver_addr);
+		pcmdmessagefld->cmdmessage.Length = allxfer_len;
+		if (acb->fw_flag == FW_DEADLOCK)
+			pcmdmessagefld->cmdmessage.ReturnCode =
+				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+		else
+			pcmdmessagefld->cmdmessage.ReturnCode =
+				ARCMSR_MESSAGE_RETURNCODE_OK;
 		break;
-
+	}
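
The READ_RQBUFFER path above swaps the old one-byte-at-a-time drain for circ_buf arithmetic: CIRC_CNT() is the number of bytes stored and CIRC_CNT_TO_END() the contiguous run before the physical end of the ring, so at most two memcpy() calls empty it. A self-contained demonstration (the helper functions repeat the usual macro arithmetic, which assumes a power-of-two ring size; the 16-byte ring and its contents are invented):

    #include <stdio.h>
    #include <string.h>

    static unsigned int circ_cnt(unsigned int head, unsigned int tail, unsigned int size)
    {
        return (head - tail) & (size - 1);      /* bytes stored */
    }

    static unsigned int circ_cnt_to_end(unsigned int head, unsigned int tail, unsigned int size)
    {
        unsigned int end = size - tail;         /* run before the wrap */
        unsigned int n = (head + end) & (size - 1);

        return n < end ? n : end;
    }

    int main(void)
    {
        enum { RING = 16 };                     /* power of two, as required */
        char ring[RING], out[RING + 1];
        unsigned int tail = 12, head = 5;       /* 9 bytes stored, wrapping at 16 */
        unsigned int cnt = circ_cnt(head, tail, RING);
        unsigned int to_end = circ_cnt_to_end(head, tail, RING);

        memcpy(ring + 12, "wrap", 4);           /* slots 12..15 */
        memcpy(ring, "aroun", 5);               /* slots 0..4 */

        if (cnt <= to_end)
            memcpy(out, ring + tail, cnt);
        else {
            memcpy(out, ring + tail, to_end);
            memcpy(out + to_end, ring, cnt - to_end);
        }
        out[cnt] = '\0';
        printf("drained %u bytes: %s\n", cnt, out);  /* "wraparoun" */
        return 0;
    }
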
 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
 		unsigned char *ver_addr;
-		int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
+		int32_t user_len, cnt2end;
 		uint8_t *pQbuffer, *ptmpuserbuffer;
-
-		ver_addr = kmalloc(1032, GFP_ATOMIC);
+		ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
 		if (!ver_addr) {
 			retvalue = ARCMSR_MESSAGE_FAIL;
 			goto message_out;
 		}
-		if(acb->fw_flag == FW_DEADLOCK) {
-			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
-		}else{
-			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_OK;
-		}
 		ptmpuserbuffer = ver_addr;
 		user_len = pcmdmessagefld->cmdmessage.Length;
-		memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
-		wqbuf_lastindex = acb->wqbuf_lastindex;
-		wqbuf_firstindex = acb->wqbuf_firstindex;
-		if (wqbuf_lastindex != wqbuf_firstindex) {
+		memcpy(ptmpuserbuffer,
+			pcmdmessagefld->messagedatabuffer, user_len);
+		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
+		if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) {
 			struct SENSE_DATA *sensebuffer =
 				(struct SENSE_DATA *)cmd->sense_buffer;
-			arcmsr_post_ioctldata2iop(acb);
+			arcmsr_write_ioctldata2iop(acb);
 			/* has error report sensedata */
-			sensebuffer->ErrorCode = 0x70;
+			sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
 			sensebuffer->SenseKey = ILLEGAL_REQUEST;
 			sensebuffer->AdditionalSenseLength = 0x0A;
 			sensebuffer->AdditionalSenseCode = 0x20;
 			sensebuffer->Valid = 1;
 			retvalue = ARCMSR_MESSAGE_FAIL;
 		} else {
-			my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
-				&(ARCMSR_MAX_QBUFFER - 1);
-			if (my_empty_len >= user_len) {
-				while (user_len > 0) {
-					pQbuffer =
-					&acb->wqbuffer[acb->wqbuf_lastindex];
-					memcpy(pQbuffer, ptmpuserbuffer, 1);
-					acb->wqbuf_lastindex++;
-					acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
-					ptmpuserbuffer++;
-					user_len--;
-				}
-				if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
-					acb->acb_flags &=
-						~ACB_F_MESSAGE_WQBUFFER_CLEARED;
-					arcmsr_post_ioctldata2iop(acb);
-				}
-			} else {
-				/* has error report sensedata */
-				struct SENSE_DATA *sensebuffer =
-					(struct SENSE_DATA *)cmd->sense_buffer;
-				sensebuffer->ErrorCode = 0x70;
-				sensebuffer->SenseKey = ILLEGAL_REQUEST;
-				sensebuffer->AdditionalSenseLength = 0x0A;
-				sensebuffer->AdditionalSenseCode = 0x20;
-				sensebuffer->Valid = 1;
-				retvalue = ARCMSR_MESSAGE_FAIL;
-			}
+			pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex];
+			cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex;
+			if (user_len > cnt2end) {
+				memcpy(pQbuffer, ptmpuserbuffer, cnt2end);
+				ptmpuserbuffer += cnt2end;
+				user_len -= cnt2end;
+				acb->wqbuf_putIndex = 0;
+				pQbuffer = acb->wqbuffer;
+			}
+			memcpy(pQbuffer, ptmpuserbuffer, user_len);
+			acb->wqbuf_putIndex += user_len;
+			acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
+			if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
+				acb->acb_flags &=
+					~ACB_F_MESSAGE_WQBUFFER_CLEARED;
+				arcmsr_write_ioctldata2iop(acb);
+			}
 		}
-		kfree(ver_addr);
-	}
+		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
+		kfree(ver_addr);
+		if (acb->fw_flag == FW_DEADLOCK)
+			pcmdmessagefld->cmdmessage.ReturnCode =
+				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+		else
+			pcmdmessagefld->cmdmessage.ReturnCode =
+				ARCMSR_MESSAGE_RETURNCODE_OK;
 		break;
-
+	}
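
Writing into wqbuffer above is the mirror image of the read path: one copy up to the physical end of the ring, a second from slot 0, then the put index reduced modulo the ring size. The same split-copy in isolation (the 16-byte ring is invented for the example; free-space checking is omitted for brevity):

    #include <stdio.h>
    #include <string.h>

    enum { RING = 16 };
    static char wqbuffer[RING];

    /* Copy user_len bytes in at put, splitting at the end of the ring;
     * returns the new put index. */
    static unsigned int ring_write(unsigned int put, const char *src, unsigned int user_len)
    {
        unsigned int cnt2end = RING - put;

        if (user_len > cnt2end) {
            memcpy(wqbuffer + put, src, cnt2end);
            src += cnt2end;
            user_len -= cnt2end;
            put = 0;
        }
        memcpy(wqbuffer + put, src, user_len);
        return (put + user_len) % RING;
    }

    int main(void)
    {
        unsigned int put = ring_write(13, "abcdef", 6);

        printf("new putIndex = %u, slot0..2 = %.3s\n", put, wqbuffer); /* 3, def */
        return 0;
    }
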
 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
 		uint8_t *pQbuffer = acb->rqbuffer;
-		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
-			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
-			arcmsr_iop_message_read(acb);
-		}
+
+		arcmsr_clear_iop2drv_rqueue_buffer(acb);
+		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
 		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
-		acb->rqbuf_firstindex = 0;
-		acb->rqbuf_lastindex = 0;
+		acb->rqbuf_getIndex = 0;
+		acb->rqbuf_putIndex = 0;
 		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
-		if(acb->fw_flag == FW_DEADLOCK) {
+		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
+		if (acb->fw_flag == FW_DEADLOCK)
 			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
-		}else{
+				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+		else
 			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_OK;
-		}
-	}
+				ARCMSR_MESSAGE_RETURNCODE_OK;
 		break;
-
+	}
 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
 		uint8_t *pQbuffer = acb->wqbuffer;
-		if(acb->fw_flag == FW_DEADLOCK) {
+		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
+		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
+			ACB_F_MESSAGE_WQBUFFER_READED);
+		acb->wqbuf_getIndex = 0;
+		acb->wqbuf_putIndex = 0;
+		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
+		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
+		if (acb->fw_flag == FW_DEADLOCK)
 			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
-		}else{
+				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+		else
 			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_OK;
-		}
-
-		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
-			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
-			arcmsr_iop_message_read(acb);
-		}
-		acb->acb_flags |=
-			(ACB_F_MESSAGE_WQBUFFER_CLEARED |
-			ACB_F_MESSAGE_WQBUFFER_READED);
-		acb->wqbuf_firstindex = 0;
-		acb->wqbuf_lastindex = 0;
-		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
-	}
+				ARCMSR_MESSAGE_RETURNCODE_OK;
 		break;
-
+	}
 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
 		uint8_t *pQbuffer;
-
-		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
-			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
-			arcmsr_iop_message_read(acb);
-		}
-		acb->acb_flags |=
-			(ACB_F_MESSAGE_WQBUFFER_CLEARED
-			| ACB_F_MESSAGE_RQBUFFER_CLEARED
-			| ACB_F_MESSAGE_WQBUFFER_READED);
-		acb->rqbuf_firstindex = 0;
-		acb->rqbuf_lastindex = 0;
-		acb->wqbuf_firstindex = 0;
-		acb->wqbuf_lastindex = 0;
+		arcmsr_clear_iop2drv_rqueue_buffer(acb);
+		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
+		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
+		acb->rqbuf_getIndex = 0;
+		acb->rqbuf_putIndex = 0;
 		pQbuffer = acb->rqbuffer;
 		memset(pQbuffer, 0, sizeof(struct QBUFFER));
+		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
+		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
+		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
+			ACB_F_MESSAGE_WQBUFFER_READED);
+		acb->wqbuf_getIndex = 0;
+		acb->wqbuf_putIndex = 0;
 		pQbuffer = acb->wqbuffer;
 		memset(pQbuffer, 0, sizeof(struct QBUFFER));
-		if(acb->fw_flag == FW_DEADLOCK) {
+		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
+		if (acb->fw_flag == FW_DEADLOCK)
 			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
-		}else{
+				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+		else
 			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_OK;
-		}
-	}
+				ARCMSR_MESSAGE_RETURNCODE_OK;
 		break;
-
+	}
 	case ARCMSR_MESSAGE_RETURN_CODE_3F: {
-		if(acb->fw_flag == FW_DEADLOCK) {
+		if (acb->fw_flag == FW_DEADLOCK)
 			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
-		}else{
+				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+		else
 			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_3F;
-		}
+				ARCMSR_MESSAGE_RETURNCODE_3F;
 		break;
 	}
 	case ARCMSR_MESSAGE_SAY_HELLO: {
 		int8_t *hello_string = "Hello! I am ARCMSR";
-		if(acb->fw_flag == FW_DEADLOCK) {
+		if (acb->fw_flag == FW_DEADLOCK)
 			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
-		}else{
+				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+		else
 			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_OK;
-		}
-		memcpy(pcmdmessagefld->messagedatabuffer, hello_string
-			, (int16_t)strlen(hello_string));
-	}
+				ARCMSR_MESSAGE_RETURNCODE_OK;
+		memcpy(pcmdmessagefld->messagedatabuffer,
+			hello_string, (int16_t)strlen(hello_string));
 		break;
-
-	case ARCMSR_MESSAGE_SAY_GOODBYE:
-		if(acb->fw_flag == FW_DEADLOCK) {
+	}
+	case ARCMSR_MESSAGE_SAY_GOODBYE: {
+		if (acb->fw_flag == FW_DEADLOCK)
 			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
-		}
+				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+		else
+			pcmdmessagefld->cmdmessage.ReturnCode =
+				ARCMSR_MESSAGE_RETURNCODE_OK;
 		arcmsr_iop_parking(acb);
 		break;
-
-	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
-		if(acb->fw_flag == FW_DEADLOCK) {
+	}
+	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
+		if (acb->fw_flag == FW_DEADLOCK)
 			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
-		}
+				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+		else
+			pcmdmessagefld->cmdmessage.ReturnCode =
+				ARCMSR_MESSAGE_RETURNCODE_OK;
 		arcmsr_flush_adapter_cache(acb);
 		break;
-
+	}
 	default:
 		retvalue = ARCMSR_MESSAGE_FAIL;
+		pr_info("%s: unknown controlcode!\n", __func__);
+	}
+message_out:
+	if (use_sg) {
+		struct scatterlist *sg = scsi_sglist(cmd);
+
+		kunmap_atomic(buffer - sg->offset);
 	}
-	message_out:
-	sg = scsi_sglist(cmd);
-	kunmap_atomic(buffer - sg->offset);
 	return retvalue;
 }
 
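
The ioctl dispatch above assembles its 32-bit Areca control code big-endian from CDB bytes 5 through 8. A worked example of that assembly (the byte values are invented):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Invented CDB tail: bytes 5..8 of the ioctl CDB. */
        uint8_t cmnd[16] = { 0 };
        uint32_t controlcode;

        cmnd[5] = 0x00; cmnd[6] = 0x1F; cmnd[7] = 0x4F; cmnd[8] = 0x01;
        controlcode = (uint32_t)cmnd[5] << 24 |
                      (uint32_t)cmnd[6] << 16 |
                      (uint32_t)cmnd[7] << 8 |
                      (uint32_t)cmnd[8];
        printf("controlcode = 0x%08X\n", (unsigned)controlcode); /* 0x001F4F01 */
        return 0;
    }
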
@@ -1999,7 +2482,7 @@ static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock
 		list_del_init(&ccb->list);
 	}else{
 		spin_unlock_irqrestore(&acb->ccblist_lock, flags);
-		return 0;
+		return NULL;
 	}
 	spin_unlock_irqrestore(&acb->ccblist_lock, flags);
 	return ccb;
@@ -2079,9 +2562,6 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
 		arcmsr_handle_virtual_command(acb, cmd);
 		return 0;
 	}
-	if (atomic_read(&acb->ccboutstandingcount) >=
-			ARCMSR_MAX_OUTSTANDING_CMD)
-		return SCSI_MLQUEUE_HOST_BUSY;
 	ccb = arcmsr_get_freeccb(acb);
 	if (!ccb)
 		return SCSI_MLQUEUE_HOST_BUSY;
@@ -2096,7 +2576,7 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
 
 static DEF_SCSI_QCMD(arcmsr_queue_command)
 
-static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb)
+static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_A __iomem *reg = acb->pmuA;
 	char *acb_firm_model = acb->firm_model;
@@ -2107,7 +2587,7 @@ static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb)
 	char __iomem *iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]);
 	int count;
 	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
-	if (!arcmsr_hba_wait_msgint_ready(acb)) {
+	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
 		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
 			miscellaneous data' timeout \n", acb->host->host_no);
 		return false;
@@ -2135,10 +2615,10 @@ static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb)
 		iop_device_map++;
 		count--;
 	}
-	printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
+	pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
 		acb->host->host_no,
-		acb->firm_version,
-		acb->firm_model);
+		acb->firm_model,
+		acb->firm_version);
 	acb->signature = readl(&reg->message_rwbuffer[0]);
 	acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
 	acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
@@ -2147,7 +2627,7 @@ static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb)
 	acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]);  /*firm_cfg_version,25,100-103*/
 	return true;
 }
-static bool arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
+static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_B *reg = acb->pmuB;
 	struct pci_dev *pdev = acb->pdev;
@@ -2163,12 +2643,18 @@ static bool arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
 	char __iomem *iop_device_map;
 	/*firm_version,21,84-99*/
 	int count;
-	dma_coherent = dma_alloc_coherent(&pdev->dev, sizeof(struct MessageUnit_B), &dma_coherent_handle, GFP_KERNEL);
+
+	acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32);
+	dma_coherent = dma_alloc_coherent(&pdev->dev, acb->roundup_ccbsize,
+			&dma_coherent_handle, GFP_KERNEL);
 	if (!dma_coherent){
-		printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error for hbb mu\n", acb->host->host_no);
+		printk(KERN_NOTICE
+			"arcmsr%d: dma_alloc_coherent got error for hbb mu\n",
+			acb->host->host_no);
 		return false;
 	}
-	acb->dma_coherent_handle_hbb_mu = dma_coherent_handle;
+	acb->dma_coherent_handle2 = dma_coherent_handle;
+	acb->dma_coherent2 = dma_coherent;
 	reg = (struct MessageUnit_B *)dma_coherent;
 	acb->pmuB = reg;
 	reg->drv2iop_doorbell= (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_DRV2IOP_DOORBELL);
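
Both the type-B path above and the new type-D path allocate their message unit at a size rounded up to a 32-byte multiple before calling dma_alloc_coherent(). The rounding is the standard align-up idiom; a sketch (the 0x1F5 input is a made-up size, not sizeof(struct MessageUnit_B)):

    #include <stdio.h>
    #include <stddef.h>

    /* Round x up to the next multiple of a (same result as the kernel's
     * roundup() for positive values). */
    static size_t round_up_to(size_t x, size_t a)
    {
        return ((x + a - 1) / a) * a;
    }

    int main(void)
    {
        printf("%zu -> %zu\n", (size_t)0x1F5, round_up_to(0x1F5, 32)); /* 501 -> 512 */
        return 0;
    }
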
@@ -2183,7 +2669,7 @@ static bool arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
 	iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]);	/*firm_version,21,84-99*/
 
 	writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
-	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
+	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
 		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
 			miscellaneous data' timeout \n", acb->host->host_no);
 		return false;
@@ -2211,10 +2697,10 @@ static bool arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
 		count--;
 	}
 
-	printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
+	pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
 		acb->host->host_no,
-		acb->firm_version,
-		acb->firm_model);
+		acb->firm_model,
+		acb->firm_version);
 
 	acb->signature = readl(&reg->message_rwbuffer[1]);
 	/*firm_signature,1,00-03*/
@@ -2231,14 +2717,14 @@ static bool arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
 	return true;
 }
 
-static bool arcmsr_get_hbc_config(struct AdapterControlBlock *pACB)
+static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
 {
 	uint32_t intmask_org, Index, firmware_state = 0;
-	struct MessageUnit_C *reg = pACB->pmuC;
+	struct MessageUnit_C __iomem *reg = pACB->pmuC;
 	char *acb_firm_model = pACB->firm_model;
 	char *acb_firm_version = pACB->firm_version;
-	char *iop_firm_model = (char *)(&reg->msgcode_rwbuffer[15]);	/*firm_model,15,60-67*/
-	char *iop_firm_version = (char *)(&reg->msgcode_rwbuffer[17]);	/*firm_version,17,68-83*/
+	char __iomem *iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]);	/*firm_model,15,60-67*/
+	char __iomem *iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]);	/*firm_version,17,68-83*/
 	int count;
 	/* disable all outbound interrupt */
 	intmask_org = readl(&reg->host_int_mask);	/* disable outbound message0 int */
@@ -2277,10 +2763,10 @@ static bool arcmsr_get_hbc_config(struct AdapterControlBlock *pACB)
 		iop_firm_version++;
 		count--;
 	}
-	printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
+	pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
 		pACB->host->host_no,
-		pACB->firm_version,
-		pACB->firm_model);
+		pACB->firm_model,
+		pACB->firm_version);
 	pACB->firm_request_len = readl(&reg->msgcode_rwbuffer[1]);	/*firm_request_len,1,04-07*/
 	pACB->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]);	/*firm_numbers_queue,2,08-11*/
 	pACB->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]);	/*firm_sdram_size,3,12-15*/
@@ -2289,17 +2775,166 @@ static bool arcmsr_get_hbc_config(struct AdapterControlBlock *pACB)
 	/*all interrupt service will be enable at arcmsr_iop_init*/
 	return true;
 }
+
+static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
+{
+	char *acb_firm_model = acb->firm_model;
+	char *acb_firm_version = acb->firm_version;
+	char *acb_device_map = acb->device_map;
+	char __iomem *iop_firm_model;
+	char __iomem *iop_firm_version;
+	char __iomem *iop_device_map;
+	u32 count;
+	struct MessageUnit_D *reg;
+	void *dma_coherent2;
+	dma_addr_t dma_coherent_handle2;
+	struct pci_dev *pdev = acb->pdev;
+
+	acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32);
+	dma_coherent2 = dma_alloc_coherent(&pdev->dev, acb->roundup_ccbsize,
+		&dma_coherent_handle2, GFP_KERNEL);
+	if (!dma_coherent2) {
+		pr_notice("DMA allocation failed...\n");
+		return false;
+	}
+	memset(dma_coherent2, 0, acb->roundup_ccbsize);
+	acb->dma_coherent_handle2 = dma_coherent_handle2;
+	acb->dma_coherent2 = dma_coherent2;
+	reg = (struct MessageUnit_D *)dma_coherent2;
+	acb->pmuD = reg;
+	reg->chip_id = acb->mem_base0 + ARCMSR_ARC1214_CHIP_ID;
+	reg->cpu_mem_config = acb->mem_base0 +
+		ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION;
+	reg->i2o_host_interrupt_mask = acb->mem_base0 +
+		ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK;
+	reg->sample_at_reset = acb->mem_base0 + ARCMSR_ARC1214_SAMPLE_RESET;
+	reg->reset_request = acb->mem_base0 + ARCMSR_ARC1214_RESET_REQUEST;
+	reg->host_int_status = acb->mem_base0 +
+		ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS;
+	reg->pcief0_int_enable = acb->mem_base0 +
+		ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE;
+	reg->inbound_msgaddr0 = acb->mem_base0 +
+		ARCMSR_ARC1214_INBOUND_MESSAGE0;
+	reg->inbound_msgaddr1 = acb->mem_base0 +
+		ARCMSR_ARC1214_INBOUND_MESSAGE1;
+	reg->outbound_msgaddr0 = acb->mem_base0 +
+		ARCMSR_ARC1214_OUTBOUND_MESSAGE0;
+	reg->outbound_msgaddr1 = acb->mem_base0 +
+		ARCMSR_ARC1214_OUTBOUND_MESSAGE1;
+	reg->inbound_doorbell = acb->mem_base0 +
+		ARCMSR_ARC1214_INBOUND_DOORBELL;
+	reg->outbound_doorbell = acb->mem_base0 +
+		ARCMSR_ARC1214_OUTBOUND_DOORBELL;
+	reg->outbound_doorbell_enable = acb->mem_base0 +
+		ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE;
+	reg->inboundlist_base_low = acb->mem_base0 +
+		ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW;
+	reg->inboundlist_base_high = acb->mem_base0 +
+		ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH;
+	reg->inboundlist_write_pointer = acb->mem_base0 +
+		ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER;
+	reg->outboundlist_base_low = acb->mem_base0 +
+		ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW;
+	reg->outboundlist_base_high = acb->mem_base0 +
+		ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH;
+	reg->outboundlist_copy_pointer = acb->mem_base0 +
+		ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER;
+	reg->outboundlist_read_pointer = acb->mem_base0 +
+		ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER;
+	reg->outboundlist_interrupt_cause = acb->mem_base0 +
+		ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE;
+	reg->outboundlist_interrupt_enable = acb->mem_base0 +
+		ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE;
+	reg->message_wbuffer = acb->mem_base0 + ARCMSR_ARC1214_MESSAGE_WBUFFER;
+	reg->message_rbuffer = acb->mem_base0 + ARCMSR_ARC1214_MESSAGE_RBUFFER;
+	reg->msgcode_rwbuffer = acb->mem_base0 +
+		ARCMSR_ARC1214_MESSAGE_RWBUFFER;
+	iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]);
+	iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]);
+	iop_device_map = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
+	if (readl(acb->pmuD->outbound_doorbell) &
+		ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
+		writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
+			acb->pmuD->outbound_doorbell);/*clear interrupt*/
+	}
+	/* post "get config" instruction */
+	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
+	/* wait message ready */
+	if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
+		pr_notice("arcmsr%d: wait get adapter firmware "
+			"miscellaneous data timeout\n", acb->host->host_no);
+		dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
+			acb->dma_coherent2, acb->dma_coherent_handle2);
+		return false;
+	}
+	count = 8;
+	while (count) {
+		*acb_firm_model = readb(iop_firm_model);
+		acb_firm_model++;
+		iop_firm_model++;
+		count--;
+	}
+	count = 16;
+	while (count) {
+		*acb_firm_version = readb(iop_firm_version);
+		acb_firm_version++;
+		iop_firm_version++;
+		count--;
+	}
+	count = 16;
+	while (count) {
+		*acb_device_map = readb(iop_device_map);
+		acb_device_map++;
+		iop_device_map++;
+		count--;
+	}
+	acb->signature = readl(&reg->msgcode_rwbuffer[1]);
+	/*firm_signature,1,00-03*/
+	acb->firm_request_len = readl(&reg->msgcode_rwbuffer[2]);
+	/*firm_request_len,1,04-07*/
+	acb->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[3]);
+	/*firm_numbers_queue,2,08-11*/
+	acb->firm_sdram_size = readl(&reg->msgcode_rwbuffer[4]);
+	/*firm_sdram_size,3,12-15*/
+	acb->firm_hd_channels = readl(&reg->msgcode_rwbuffer[5]);
+	/*firm_hd_channels,4,16-19*/
+	acb->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]);
+	pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
+		acb->host->host_no,
+		acb->firm_model,
+		acb->firm_version);
+	return true;
+}
+
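
The model, version and device-map fields that arcmsr_hbaD_get_config() above copies out of msgcode_rwbuffer are fixed-width byte arrays with no terminating NUL, which is why the driver loops with readb() for exactly 8 or 16 bytes. A user-space rendering of the same idea (the 8-byte window and its contents are invented):

    #include <stdio.h>

    int main(void)
    {
        /* Invented stand-in for the 8-byte firm_model window. */
        const unsigned char mmio_model[8] = { 'A','R','C','-','1','2','1','4' };
        char model[8 + 1];
        unsigned int i;

        for (i = 0; i < 8; i++)     /* the driver uses readb() here */
            model[i] = (char)mmio_model[i];
        model[8] = '\0';            /* fixed-width field: terminate before printing */
        printf("Model %s\n", model);
        return 0;
    }
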
 static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
 {
-	if (acb->adapter_type == ACB_ADAPTER_TYPE_A)
-		return arcmsr_get_hba_config(acb);
-	else if (acb->adapter_type == ACB_ADAPTER_TYPE_B)
-		return arcmsr_get_hbb_config(acb);
+	bool rtn = false;
+
+	switch (acb->adapter_type) {
+	case ACB_ADAPTER_TYPE_A:
+		rtn = arcmsr_hbaA_get_config(acb);
+		break;
+	case ACB_ADAPTER_TYPE_B:
+		rtn = arcmsr_hbaB_get_config(acb);
+		break;
+	case ACB_ADAPTER_TYPE_C:
+		rtn = arcmsr_hbaC_get_config(acb);
+		break;
+	case ACB_ADAPTER_TYPE_D:
+		rtn = arcmsr_hbaD_get_config(acb);
+		break;
+	default:
+		break;
+	}
+	if (acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD)
+		acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD;
 	else
-		return arcmsr_get_hbc_config(acb);
+		acb->maxOutstanding = acb->firm_numbers_queue - 1;
+	acb->host->can_queue = acb->maxOutstanding;
+	return rtn;
 }
 
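
arcmsr_get_firmware_spec() above now derives the SCSI host's can_queue from the firmware-reported queue depth, clamped at the driver ceiling and otherwise reduced by one slot. The clamp on its own (the 1024 ceiling is a stand-in for ARCMSR_MAX_OUTSTANDING_CMD, queue depths invented):

    #include <stdio.h>

    #define MAX_OUTSTANDING_CMD 1024  /* stand-in for ARCMSR_MAX_OUTSTANDING_CMD */

    static unsigned int clamp_can_queue(unsigned int firm_numbers_queue)
    {
        if (firm_numbers_queue > MAX_OUTSTANDING_CMD)
            return MAX_OUTSTANDING_CMD;
        return firm_numbers_queue - 1;  /* keep one slot back from the firmware depth */
    }

    int main(void)
    {
        printf("fw 256  -> can_queue %u\n", clamp_can_queue(256));   /* 255 */
        printf("fw 4096 -> can_queue %u\n", clamp_can_queue(4096));  /* 1024 */
        return 0;
    }
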
-static int arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
+static int arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock *acb,
 					struct CommandControlBlock *poll_ccb)
 {
 	struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -2328,7 +2963,7 @@ static int arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
 		}
 		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
 		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
-		poll_ccb_done = (ccb == poll_ccb) ? 1:0;
+		poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
 		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
 			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
 				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
@@ -2355,7 +2990,7 @@ static int arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
 	return rtn;
 }
 
-static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
+static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb,
 					struct CommandControlBlock *poll_ccb)
 {
 	struct MessageUnit_B *reg = acb->pmuB;
@@ -2371,7 +3006,8 @@ static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
 	writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
 	while(1){
 		index = reg->doneq_index;
-		if ((flag_ccb = readl(&reg->done_qbuffer[index])) == 0) {
+		flag_ccb = reg->done_qbuffer[index];
+		if (flag_ccb == 0) {
 			if (poll_ccb_done){
 				rtn = SUCCESS;
 				break;
@@ -2384,7 +3020,7 @@ static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
 				goto polling_hbb_ccb_retry;
 			}
 		}
-		writel(0, &reg->done_qbuffer[index]);
+		reg->done_qbuffer[index] = 0;
 		index++;
 		/*if last index number set it to 0 */
 		index %= ARCMSR_MAX_HBB_POSTQUEUE;
@@ -2392,7 +3028,7 @@ static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
 		/* check if command done with no error*/
 		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
 		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
-		poll_ccb_done = (ccb == poll_ccb) ? 1:0;
+		poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
 		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
 			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
 				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
@@ -2419,9 +3055,10 @@ static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
 	return rtn;
 }
 
-static int arcmsr_polling_hbc_ccbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_ccb)
+static int arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock *acb,
+		struct CommandControlBlock *poll_ccb)
 {
-	struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
+	struct MessageUnit_C __iomem *reg = acb->pmuC;
 	uint32_t flag_ccb, ccb_cdb_phy;
 	struct ARCMSR_CDB *arcmsr_cdb;
 	bool error;
@@ -2448,7 +3085,7 @@ polling_hbc_ccb_retry:
 		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
 		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);/*frame must be 32 bytes aligned*/
 		pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
-		poll_ccb_done = (pCCB == poll_ccb) ? 1 : 0;
+		poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
 		/* check ifcommand done with no error*/
 		if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
 			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
@@ -2475,6 +3112,81 @@ polling_hbc_ccb_retry:
 	}
 	return rtn;
 }
+
+static int arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock *acb,
+				struct CommandControlBlock *poll_ccb)
+{
+	bool error;
+	uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb, ccb_cdb_phy;
+	int rtn, doneq_index, index_stripped, outbound_write_pointer, toggle;
+	unsigned long flags;
+	struct ARCMSR_CDB *arcmsr_cdb;
+	struct CommandControlBlock *pCCB;
+	struct MessageUnit_D *pmu = acb->pmuD;
+
+polling_hbaD_ccb_retry:
+	poll_count++;
+	while (1) {
+		spin_lock_irqsave(&acb->doneq_lock, flags);
+		outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
+		doneq_index = pmu->doneq_index;
+		if ((outbound_write_pointer & 0xFFF) == (doneq_index & 0xFFF)) {
+			spin_unlock_irqrestore(&acb->doneq_lock, flags);
+			if (poll_ccb_done) {
+				rtn = SUCCESS;
+				break;
+			} else {
+				msleep(25);
+				if (poll_count > 40) {
+					rtn = FAILED;
+					break;
+				}
+				goto polling_hbaD_ccb_retry;
+			}
+		}
+		toggle = doneq_index & 0x4000;
+		index_stripped = (doneq_index & 0xFFF) + 1;
+		index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
+		pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
+			((toggle ^ 0x4000) + 1);
+		doneq_index = pmu->doneq_index;
+		spin_unlock_irqrestore(&acb->doneq_lock, flags);
+		flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow;
+		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
+		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset +
+			ccb_cdb_phy);
+		pCCB = container_of(arcmsr_cdb, struct CommandControlBlock,
+			arcmsr_cdb);
+		poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
+		if ((pCCB->acb != acb) ||
+			(pCCB->startdone != ARCMSR_CCB_START)) {
+			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
+				pr_notice("arcmsr%d: scsi id = %d "
+					"lun = %d ccb = '0x%p' poll command "
+					"abort successfully\n"
+					, acb->host->host_no
+					, pCCB->pcmd->device->id
+					, (u32)pCCB->pcmd->device->lun
+					, pCCB);
+				pCCB->pcmd->result = DID_ABORT << 16;
+				arcmsr_ccb_complete(pCCB);
+				continue;
+			}
+			pr_notice("arcmsr%d: polling an illegal "
+				"ccb command done ccb = '0x%p' "
+				"ccboutstandingcount = %d\n"
+				, acb->host->host_no
+				, pCCB
+				, atomic_read(&acb->ccboutstandingcount));
+			continue;
+		}
+		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
+			? true : false;
+		arcmsr_report_ccb_state(acb, pCCB, error);
+	}
+	return rtn;
+}
+
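
arcmsr_hbaD_polling_ccbdone() above budgets its wait: each pass over an empty queue sleeps 25 ms and the poll gives up after 40 passes, roughly one second. The same budget in a standalone loop (the device model is invented):

    #include <stdbool.h>
    #include <stdio.h>

    static int passes_until_done = 3;  /* invented device: completes on the 3rd look */

    static bool queue_empty(void) { return passes_until_done-- > 0; }

    int main(void)
    {
        unsigned int poll_count = 0;

        while (queue_empty()) {
            /* real code: msleep(25); 40 * 25 ms ~= 1 s budget */
            if (++poll_count > 40) {
                printf("FAILED: timed out\n");
                return 1;
            }
        }
        printf("SUCCESS after %u empty pass(es)\n", poll_count);
        return 0;
    }
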
 static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
 					struct CommandControlBlock *poll_ccb)
 {
@@ -2482,17 +3194,21 @@ static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
 	switch (acb->adapter_type) {
 
 	case ACB_ADAPTER_TYPE_A: {
-		rtn = arcmsr_polling_hba_ccbdone(acb, poll_ccb);
+		rtn = arcmsr_hbaA_polling_ccbdone(acb, poll_ccb);
 		}
 		break;
 
 	case ACB_ADAPTER_TYPE_B: {
-		rtn = arcmsr_polling_hbb_ccbdone(acb, poll_ccb);
+		rtn = arcmsr_hbaB_polling_ccbdone(acb, poll_ccb);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
-		rtn = arcmsr_polling_hbc_ccbdone(acb, poll_ccb);
+		rtn = arcmsr_hbaC_polling_ccbdone(acb, poll_ccb);
 		}
+		break;
+	case ACB_ADAPTER_TYPE_D:
+		rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
+		break;
 	}
 	return rtn;
 }
@@ -2500,6 +3216,7 @@ static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
2500static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) 3216static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
2501{ 3217{
2502 uint32_t cdb_phyaddr, cdb_phyaddr_hi32; 3218 uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
3219 dma_addr_t dma_coherent_handle;
2503 3220
2504 /* 3221 /*
2505 ******************************************************************** 3222 ********************************************************************
@@ -2507,8 +3224,17 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
2507 ** if freeccb.HighPart is not zero 3224 ** if freeccb.HighPart is not zero
2508 ******************************************************************** 3225 ********************************************************************
2509 */ 3226 */
2510 cdb_phyaddr = lower_32_bits(acb->dma_coherent_handle); 3227 switch (acb->adapter_type) {
2511 cdb_phyaddr_hi32 = upper_32_bits(acb->dma_coherent_handle); 3228 case ACB_ADAPTER_TYPE_B:
3229 case ACB_ADAPTER_TYPE_D:
3230 dma_coherent_handle = acb->dma_coherent_handle2;
3231 break;
3232 default:
3233 dma_coherent_handle = acb->dma_coherent_handle;
3234 break;
3235 }
3236 cdb_phyaddr = lower_32_bits(dma_coherent_handle);
3237 cdb_phyaddr_hi32 = upper_32_bits(dma_coherent_handle);
2512 acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32; 3238 acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
2513 /* 3239 /*
2514 *********************************************************************** 3240 ***********************************************************************
@@ -2520,65 +3246,62 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
2520 case ACB_ADAPTER_TYPE_A: { 3246 case ACB_ADAPTER_TYPE_A: {
2521 if (cdb_phyaddr_hi32 != 0) { 3247 if (cdb_phyaddr_hi32 != 0) {
2522 struct MessageUnit_A __iomem *reg = acb->pmuA; 3248 struct MessageUnit_A __iomem *reg = acb->pmuA;
2523 uint32_t intmask_org;
2524 intmask_org = arcmsr_disable_outbound_ints(acb);
2525 writel(ARCMSR_SIGNATURE_SET_CONFIG, \ 3249 writel(ARCMSR_SIGNATURE_SET_CONFIG, \
2526 &reg->message_rwbuffer[0]); 3250 &reg->message_rwbuffer[0]);
2527 writel(cdb_phyaddr_hi32, &reg->message_rwbuffer[1]); 3251 writel(cdb_phyaddr_hi32, &reg->message_rwbuffer[1]);
2528 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \ 3252 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \
2529 &reg->inbound_msgaddr0); 3253 &reg->inbound_msgaddr0);
2530 if (!arcmsr_hba_wait_msgint_ready(acb)) { 3254 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
2531 printk(KERN_NOTICE "arcmsr%d: ""set ccb high \ 3255 printk(KERN_NOTICE "arcmsr%d: ""set ccb high \
2532 part physical address timeout\n", 3256 part physical address timeout\n",
2533 acb->host->host_no); 3257 acb->host->host_no);
2534 return 1; 3258 return 1;
2535 } 3259 }
2536 arcmsr_enable_outbound_ints(acb, intmask_org);
2537 } 3260 }
2538 } 3261 }
2539 break; 3262 break;
2540 3263
2541 case ACB_ADAPTER_TYPE_B: { 3264 case ACB_ADAPTER_TYPE_B: {
2542 unsigned long post_queue_phyaddr;
2543 uint32_t __iomem *rwbuffer; 3265 uint32_t __iomem *rwbuffer;
2544 3266
2545 struct MessageUnit_B *reg = acb->pmuB; 3267 struct MessageUnit_B *reg = acb->pmuB;
2546 uint32_t intmask_org;
2547 intmask_org = arcmsr_disable_outbound_ints(acb);
2548 reg->postq_index = 0; 3268 reg->postq_index = 0;
2549 reg->doneq_index = 0; 3269 reg->doneq_index = 0;
2550 writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell); 3270 writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell);
2551 if (!arcmsr_hbb_wait_msgint_ready(acb)) { 3271 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
2552 printk(KERN_NOTICE "arcmsr%d:can not set diver mode\n", \ 3272 printk(KERN_NOTICE "arcmsr%d:can not set diver mode\n", \
2553 acb->host->host_no); 3273 acb->host->host_no);
2554 return 1; 3274 return 1;
2555 } 3275 }
2556 post_queue_phyaddr = acb->dma_coherent_handle_hbb_mu;
2557 rwbuffer = reg->message_rwbuffer; 3276 rwbuffer = reg->message_rwbuffer;
2558 /* driver "set config" signature */ 3277 /* driver "set config" signature */
2559 writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++); 3278 writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
2560 /* normal should be zero */ 3279 /* normal should be zero */
2561 writel(cdb_phyaddr_hi32, rwbuffer++); 3280 writel(cdb_phyaddr_hi32, rwbuffer++);
2562 /* postQ size (256 + 8)*4 */ 3281 /* postQ size (256 + 8)*4 */
2563 writel(post_queue_phyaddr, rwbuffer++); 3282 writel(cdb_phyaddr, rwbuffer++);
2564 /* doneQ size (256 + 8)*4 */ 3283 /* doneQ size (256 + 8)*4 */
2565 writel(post_queue_phyaddr + 1056, rwbuffer++); 3284 writel(cdb_phyaddr + 1056, rwbuffer++);
2566 /* ccb maxQ size must be --> [(256 + 8)*4]*/ 3285 /* ccb maxQ size must be --> [(256 + 8)*4]*/
2567 writel(1056, rwbuffer); 3286 writel(1056, rwbuffer);
2568 3287
2569 writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell); 3288 writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell);
2570 if (!arcmsr_hbb_wait_msgint_ready(acb)) { 3289 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
2571 printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \ 3290 printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
2572 timeout \n",acb->host->host_no); 3291 timeout \n",acb->host->host_no);
2573 return 1; 3292 return 1;
2574 } 3293 }
2575 arcmsr_hbb_enable_driver_mode(acb); 3294 writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
2576 arcmsr_enable_outbound_ints(acb, intmask_org); 3295 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
3296 pr_err("arcmsr%d: can't set driver mode.\n",
3297 acb->host->host_no);
3298 return 1;
3299 }
2577 } 3300 }
2578 break; 3301 break;
2579 case ACB_ADAPTER_TYPE_C: { 3302 case ACB_ADAPTER_TYPE_C: {
2580 if (cdb_phyaddr_hi32 != 0) { 3303 if (cdb_phyaddr_hi32 != 0) {
2581 struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC; 3304 struct MessageUnit_C __iomem *reg = acb->pmuC;
2582 3305
2583 printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n", 3306 printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n",
2584 acb->adapter_index, cdb_phyaddr_hi32); 3307 acb->adapter_index, cdb_phyaddr_hi32);
@@ -2586,13 +3309,34 @@ static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
2586 writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[1]); 3309 writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[1]);
2587 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0); 3310 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
2588 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell); 3311 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
2589 if (!arcmsr_hbc_wait_msgint_ready(acb)) { 3312 if (!arcmsr_hbaC_wait_msgint_ready(acb)) {
2590 printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \ 3313 printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
2591 timeout \n", acb->host->host_no); 3314 timeout \n", acb->host->host_no);
2592 return 1; 3315 return 1;
2593 } 3316 }
2594 } 3317 }
2595 } 3318 }
3319 break;
3320 case ACB_ADAPTER_TYPE_D: {
3321 uint32_t __iomem *rwbuffer;
3322 struct MessageUnit_D *reg = acb->pmuD;
3323 reg->postq_index = 0;
3324 reg->doneq_index = 0;
3325 rwbuffer = reg->msgcode_rwbuffer;
3326 writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
3327 writel(cdb_phyaddr_hi32, rwbuffer++);
3328 writel(cdb_phyaddr, rwbuffer++);
3329 writel(cdb_phyaddr + (ARCMSR_MAX_ARC1214_POSTQUEUE *
3330 sizeof(struct InBound_SRB)), rwbuffer++);
3331 writel(0x100, rwbuffer);
3332 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, reg->inbound_msgaddr0);
3333 if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
3334 pr_notice("arcmsr%d: 'set command Q window' timeout\n",
3335 acb->host->host_no);
3336 return 1;
3337 }
3338 }
3339 break;
2596 } 3340 }
2597 return 0; 3341 return 0;
2598} 3342}
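
For the new type D branch, the SET_CONFIG handshake above writes a five-word message into msgcode_rwbuffer[] before ringing inbound_msgaddr0. A sketch of the layout as inferred from this hunk alone (the struct and its field names are illustrative, not taken from a datasheet):

/* illustrative layout of the words written above, in order */
struct hbaD_set_config_msg {
	u32 signature;	/* ARCMSR_SIGNATURE_SET_CONFIG */
	u32 cdb_phy_hi;	/* upper 32 bits of the coherent DMA area */
	u32 postq_phy;	/* post queue base, lower 32 bits */
	u32 doneq_phy;	/* postq_phy + ARCMSR_MAX_ARC1214_POSTQUEUE *
			 * sizeof(struct InBound_SRB) */
	u32 qdepth;	/* 0x100 per the hunk above */
};

Note also the switch added at the top of arcmsr_iop_confirm(): types B and D derive cdb_phyaddr from the second coherent region (dma_coherent_handle2), while A and C keep using the primary handle.
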
@@ -2619,15 +3363,24 @@ static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
2619 } 3363 }
2620 break; 3364 break;
2621 case ACB_ADAPTER_TYPE_C: { 3365 case ACB_ADAPTER_TYPE_C: {
2622 struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC; 3366 struct MessageUnit_C __iomem *reg = acb->pmuC;
2623 do { 3367 do {
2624 firmware_state = readl(&reg->outbound_msgaddr1); 3368 firmware_state = readl(&reg->outbound_msgaddr1);
2625 } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0); 3369 } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
2626 } 3370 }
3371 break;
3372 case ACB_ADAPTER_TYPE_D: {
3373 struct MessageUnit_D *reg = acb->pmuD;
3374 do {
3375 firmware_state = readl(reg->outbound_msgaddr1);
3376 } while ((firmware_state &
3377 ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0);
3378 }
3379 break;
2627 } 3380 }
2628} 3381}
2629 3382
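
Both the type C and the new type D branches above spin on the firmware-ready bit with no upper bound. A bounded variant, shown purely as an illustration (the in-tree loops deliberately wait forever, since the adapter is unusable until the bit sets):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

static int wait_firmware_ready_ms(void __iomem *msgaddr1, u32 ok_bit,
				  unsigned int max_ms)
{
	unsigned int waited;

	for (waited = 0; (readl(msgaddr1) & ok_bit) == 0; waited += 10) {
		if (waited >= max_ms)
			return -ETIMEDOUT;	/* firmware never came up */
		msleep(10);
	}
	return 0;
}
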
2630static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb) 3383static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb)
2631{ 3384{
2632 struct MessageUnit_A __iomem *reg = acb->pmuA; 3385 struct MessageUnit_A __iomem *reg = acb->pmuA;
2633 if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){ 3386 if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
@@ -2649,9 +3402,9 @@ static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
2649 return; 3402 return;
2650} 3403}
2651 3404
2652static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb) 3405static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb)
2653{ 3406{
2654 struct MessageUnit_B __iomem *reg = acb->pmuB; 3407 struct MessageUnit_B *reg = acb->pmuB;
2655 if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){ 3408 if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
2656 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); 3409 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
2657 return; 3410 return;
@@ -2671,7 +3424,7 @@ static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb)
2671 return; 3424 return;
2672} 3425}
2673 3426
2674static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb) 3427static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb)
2675{ 3428{
2676 struct MessageUnit_C __iomem *reg = acb->pmuC; 3429 struct MessageUnit_C __iomem *reg = acb->pmuC;
2677 if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) { 3430 if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) {
@@ -2694,69 +3447,119 @@ static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb)
2694 return; 3447 return;
2695} 3448}
2696 3449
3450static void arcmsr_hbaD_request_device_map(struct AdapterControlBlock *acb)
3451{
3452 struct MessageUnit_D *reg = acb->pmuD;
3453
3454 if (unlikely(atomic_read(&acb->rq_map_token) == 0) ||
3455 ((acb->acb_flags & ACB_F_BUS_RESET) != 0) ||
3456 ((acb->acb_flags & ACB_F_ABORT) != 0)) {
3457 mod_timer(&acb->eternal_timer,
3458 jiffies + msecs_to_jiffies(6 * HZ));
3459 } else {
3460 acb->fw_flag = FW_NORMAL;
3461 if (atomic_read(&acb->ante_token_value) ==
3462 atomic_read(&acb->rq_map_token)) {
3463 atomic_set(&acb->rq_map_token, 16);
3464 }
3465 atomic_set(&acb->ante_token_value,
3466 atomic_read(&acb->rq_map_token));
3467 if (atomic_dec_and_test(&acb->rq_map_token)) {
3468 mod_timer(&acb->eternal_timer, jiffies +
3469 msecs_to_jiffies(6 * HZ));
3470 return;
3471 }
3472 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG,
3473 reg->inbound_msgaddr0);
3474 mod_timer(&acb->eternal_timer, jiffies +
3475 msecs_to_jiffies(6 * HZ));
3476 }
3477}
3478
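
arcmsr_hbaD_request_device_map() above follows the same token watchdog as the A/B/C variants: the eternal timer fires every 6 seconds, rq_map_token counts down one per tick, the count is topped back up to 16 whenever it matches last tick's snapshot in ante_token_value, and the GET_CONFIG poll is skipped once the countdown reaches zero. One tick, sketched (true means "send GET_CONFIG this round"):

#include <linux/atomic.h>

static bool devmap_watchdog_tick(atomic_t *rq_map_token, atomic_t *ante_token)
{
	/* matches last tick's snapshot: top the countdown back up */
	if (atomic_read(ante_token) == atomic_read(rq_map_token))
		atomic_set(rq_map_token, 16);
	atomic_set(ante_token, atomic_read(rq_map_token));
	/* countdown hit zero: only re-arm the timer, skip the message */
	return !atomic_dec_and_test(rq_map_token);
}
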
2697static void arcmsr_request_device_map(unsigned long pacb) 3479static void arcmsr_request_device_map(unsigned long pacb)
2698{ 3480{
2699 struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb; 3481 struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb;
2700 switch (acb->adapter_type) { 3482 switch (acb->adapter_type) {
2701 case ACB_ADAPTER_TYPE_A: { 3483 case ACB_ADAPTER_TYPE_A: {
2702 arcmsr_request_hba_device_map(acb); 3484 arcmsr_hbaA_request_device_map(acb);
2703 } 3485 }
2704 break; 3486 break;
2705 case ACB_ADAPTER_TYPE_B: { 3487 case ACB_ADAPTER_TYPE_B: {
2706 arcmsr_request_hbb_device_map(acb); 3488 arcmsr_hbaB_request_device_map(acb);
2707 } 3489 }
2708 break; 3490 break;
2709 case ACB_ADAPTER_TYPE_C: { 3491 case ACB_ADAPTER_TYPE_C: {
2710 arcmsr_request_hbc_device_map(acb); 3492 arcmsr_hbaC_request_device_map(acb);
2711 } 3493 }
3494 break;
3495 case ACB_ADAPTER_TYPE_D:
3496 arcmsr_hbaD_request_device_map(acb);
3497 break;
2712 } 3498 }
2713} 3499}
2714 3500
2715static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb) 3501static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb)
2716{ 3502{
2717 struct MessageUnit_A __iomem *reg = acb->pmuA; 3503 struct MessageUnit_A __iomem *reg = acb->pmuA;
2718 acb->acb_flags |= ACB_F_MSG_START_BGRB; 3504 acb->acb_flags |= ACB_F_MSG_START_BGRB;
2719 writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0); 3505 writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
2720 if (!arcmsr_hba_wait_msgint_ready(acb)) { 3506 if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
2721 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \ 3507 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
2722 rebulid' timeout \n", acb->host->host_no); 3508 rebulid' timeout \n", acb->host->host_no);
2723 } 3509 }
2724} 3510}
2725 3511
2726static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb) 3512static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb)
2727{ 3513{
2728 struct MessageUnit_B *reg = acb->pmuB; 3514 struct MessageUnit_B *reg = acb->pmuB;
2729 acb->acb_flags |= ACB_F_MSG_START_BGRB; 3515 acb->acb_flags |= ACB_F_MSG_START_BGRB;
2730 writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell); 3516 writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
2731 if (!arcmsr_hbb_wait_msgint_ready(acb)) { 3517 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
2732 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \ 3518 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
2733 rebulid' timeout \n",acb->host->host_no); 3519 rebulid' timeout \n",acb->host->host_no);
2734 } 3520 }
2735} 3521}
2736 3522
2737static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *pACB) 3523static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB)
2738{ 3524{
2739 struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC; 3525 struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
2740 pACB->acb_flags |= ACB_F_MSG_START_BGRB; 3526 pACB->acb_flags |= ACB_F_MSG_START_BGRB;
2741 writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0); 3527 writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0);
2742 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell); 3528 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
2743 if (!arcmsr_hbc_wait_msgint_ready(pACB)) { 3529 if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
2744 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \ 3530 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
2745 rebulid' timeout \n", pACB->host->host_no); 3531 rebulid' timeout \n", pACB->host->host_no);
2746 } 3532 }
2747 return; 3533 return;
2748} 3534}
3535
3536static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
3537{
3538 struct MessageUnit_D *pmu = pACB->pmuD;
3539
3540 pACB->acb_flags |= ACB_F_MSG_START_BGRB;
3541 writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0);
3542 if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
3543 pr_notice("arcmsr%d: wait 'start adapter "
3544 "background rebulid' timeout\n", pACB->host->host_no);
3545 }
3546}
3547
2749static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb) 3548static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
2750{ 3549{
2751 switch (acb->adapter_type) { 3550 switch (acb->adapter_type) {
2752 case ACB_ADAPTER_TYPE_A: 3551 case ACB_ADAPTER_TYPE_A:
2753 arcmsr_start_hba_bgrb(acb); 3552 arcmsr_hbaA_start_bgrb(acb);
2754 break; 3553 break;
2755 case ACB_ADAPTER_TYPE_B: 3554 case ACB_ADAPTER_TYPE_B:
2756 arcmsr_start_hbb_bgrb(acb); 3555 arcmsr_hbaB_start_bgrb(acb);
2757 break; 3556 break;
2758 case ACB_ADAPTER_TYPE_C: 3557 case ACB_ADAPTER_TYPE_C:
2759 arcmsr_start_hbc_bgrb(acb); 3558 arcmsr_hbaC_start_bgrb(acb);
3559 break;
3560 case ACB_ADAPTER_TYPE_D:
3561 arcmsr_hbaD_start_bgrb(acb);
3562 break;
2760 } 3563 }
2761} 3564}
2762 3565
@@ -2783,13 +3586,48 @@ static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
2783 } 3586 }
2784 break; 3587 break;
2785 case ACB_ADAPTER_TYPE_C: { 3588 case ACB_ADAPTER_TYPE_C: {
2786 struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC; 3589 struct MessageUnit_C __iomem *reg = acb->pmuC;
2787 uint32_t outbound_doorbell; 3590 uint32_t outbound_doorbell, i;
2788 /* empty doorbell Qbuffer if door bell ringed */ 3591 /* empty doorbell Qbuffer if door bell ringed */
2789 outbound_doorbell = readl(&reg->outbound_doorbell); 3592 outbound_doorbell = readl(&reg->outbound_doorbell);
2790 writel(outbound_doorbell, &reg->outbound_doorbell_clear); 3593 writel(outbound_doorbell, &reg->outbound_doorbell_clear);
2791 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell); 3594 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
3595 for (i = 0; i < 200; i++) {
3596 msleep(20);
3597 outbound_doorbell = readl(&reg->outbound_doorbell);
3598 if (outbound_doorbell &
3599 ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
3600 writel(outbound_doorbell,
3601 &reg->outbound_doorbell_clear);
3602 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK,
3603 &reg->inbound_doorbell);
3604 } else
3605 break;
3606 }
3607 }
3608 break;
3609 case ACB_ADAPTER_TYPE_D: {
3610 struct MessageUnit_D *reg = acb->pmuD;
3611 uint32_t outbound_doorbell, i;
3612 /* empty doorbell Qbuffer if door bell ringed */
3613 outbound_doorbell = readl(reg->outbound_doorbell);
3614 writel(outbound_doorbell, reg->outbound_doorbell);
3615 writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
3616 reg->inbound_doorbell);
3617 for (i = 0; i < 200; i++) {
3618 msleep(20);
3619 outbound_doorbell = readl(reg->outbound_doorbell);
3620 if (outbound_doorbell &
3621 ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) {
3622 writel(outbound_doorbell,
3623 reg->outbound_doorbell);
3624 writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
3625 reg->inbound_doorbell);
3626 } else
3627 break;
2792 } 3628 }
3629 }
3630 break;
2793 } 3631 }
2794} 3632}
2795 3633
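
The two drain loops added above (type C and type D) extend the one-shot doorbell ack into a bounded poll: up to 200 iterations of 20 ms, about 4 seconds, acking every further "data write ok" ring so no stale Qbuffer data survives a reset. The shape of the loop, with the register targets as parameters because type C acks through a separate outbound_doorbell_clear register while type D writes back to outbound_doorbell itself:

#include <linux/delay.h>
#include <linux/io.h>

static void drain_doorbell_qbuffer(void __iomem *out_db, void __iomem *ack_db,
				   void __iomem *in_db, u32 write_ok_bit,
				   u32 read_ok_cmd)
{
	int i;

	for (i = 0; i < 200; i++) {
		u32 db;

		msleep(20);
		db = readl(out_db);
		if (!(db & write_ok_bit))
			break;			/* queue buffer drained */
		writel(db, ack_db);		/* ack/clear the doorbell */
		writel(read_ok_cmd, in_db);	/* tell the IOP it was read */
	}
}
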
@@ -2802,7 +3640,7 @@ static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
2802 { 3640 {
2803 struct MessageUnit_B *reg = acb->pmuB; 3641 struct MessageUnit_B *reg = acb->pmuB;
2804 writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell); 3642 writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell);
2805 if (!arcmsr_hbb_wait_msgint_ready(acb)) { 3643 if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
2806 printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT"); 3644 printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT");
2807 return; 3645 return;
2808 } 3646 }
@@ -2820,6 +3658,7 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
2820 int i, count = 0; 3658 int i, count = 0;
2821 struct MessageUnit_A __iomem *pmuA = acb->pmuA; 3659 struct MessageUnit_A __iomem *pmuA = acb->pmuA;
2822 struct MessageUnit_C __iomem *pmuC = acb->pmuC; 3660 struct MessageUnit_C __iomem *pmuC = acb->pmuC;
3661 struct MessageUnit_D *pmuD = acb->pmuD;
2823 3662
2824 /* backup pci config data */ 3663 /* backup pci config data */
2825 printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no); 3664 printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no);
@@ -2840,6 +3679,8 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
2840 writel(0xD, &pmuC->write_sequence); 3679 writel(0xD, &pmuC->write_sequence);
2841 } while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5)); 3680 } while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
2842 writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic); 3681 writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
3682 } else if ((acb->dev_id == 0x1214)) {
3683 writel(0x20, pmuD->reset_request);
2843 } else { 3684 } else {
2844 pci_write_config_byte(acb->pdev, 0x84, 0x20); 3685 pci_write_config_byte(acb->pdev, 0x84, 0x20);
2845 } 3686 }
@@ -3016,9 +3857,7 @@ sleep:
3016 arcmsr_get_firmware_spec(acb); 3857 arcmsr_get_firmware_spec(acb);
3017 arcmsr_start_adapter_bgrb(acb); 3858 arcmsr_start_adapter_bgrb(acb);
3018 /* clear Qbuffer if door bell ringed */ 3859 /* clear Qbuffer if door bell ringed */
3019 outbound_doorbell = readl(&reg->outbound_doorbell); 3860 arcmsr_clear_doorbell_queue_buffer(acb);
3020 writel(outbound_doorbell, &reg->outbound_doorbell_clear); /*clear interrupt */
3021 writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
3022 /* enable outbound Post Queue,outbound doorbell Interrupt */ 3861 /* enable outbound Post Queue,outbound doorbell Interrupt */
3023 arcmsr_enable_outbound_ints(acb, intmask_org); 3862 arcmsr_enable_outbound_ints(acb, intmask_org);
3024 atomic_set(&acb->rq_map_token, 16); 3863 atomic_set(&acb->rq_map_token, 16);
@@ -3038,6 +3877,66 @@ sleep:
3038 } 3877 }
3039 break; 3878 break;
3040 } 3879 }
3880 case ACB_ADAPTER_TYPE_D: {
3881 if (acb->acb_flags & ACB_F_BUS_RESET) {
3882 long timeout;
3883 pr_notice("arcmsr: there is an bus reset"
3884 " eh proceeding.......\n");
3885 timeout = wait_event_timeout(wait_q, (acb->acb_flags
3886 & ACB_F_BUS_RESET) == 0, 220 * HZ);
3887 if (timeout)
3888 return SUCCESS;
3889 }
3890 acb->acb_flags |= ACB_F_BUS_RESET;
3891 if (!arcmsr_iop_reset(acb)) {
3892 struct MessageUnit_D *reg;
3893 reg = acb->pmuD;
3894 arcmsr_hardware_reset(acb);
3895 acb->acb_flags &= ~ACB_F_IOP_INITED;
3896 nap:
3897 ssleep(ARCMSR_SLEEPTIME);
3898 if ((readl(reg->sample_at_reset) & 0x80) != 0) {
3899 pr_err("arcmsr%d: waiting for "
3900 "hw bus reset return, retry=%d\n",
3901 acb->host->host_no, retry_count);
3902 if (retry_count > ARCMSR_RETRYCOUNT) {
3903 acb->fw_flag = FW_DEADLOCK;
3904 pr_err("arcmsr%d: waiting for hw bus"
3905 " reset return, "
3906 "RETRY TERMINATED!!\n",
3907 acb->host->host_no);
3908 return FAILED;
3909 }
3910 retry_count++;
3911 goto nap;
3912 }
3913 acb->acb_flags |= ACB_F_IOP_INITED;
3914 /* disable all outbound interrupt */
3915 intmask_org = arcmsr_disable_outbound_ints(acb);
3916 arcmsr_get_firmware_spec(acb);
3917 arcmsr_start_adapter_bgrb(acb);
3918 arcmsr_clear_doorbell_queue_buffer(acb);
3919 arcmsr_enable_outbound_ints(acb, intmask_org);
3920 atomic_set(&acb->rq_map_token, 16);
3921 atomic_set(&acb->ante_token_value, 16);
3922 acb->fw_flag = FW_NORMAL;
3923 mod_timer(&acb->eternal_timer,
3924 jiffies + msecs_to_jiffies(6 * HZ));
3925 acb->acb_flags &= ~ACB_F_BUS_RESET;
3926 rtn = SUCCESS;
3927 pr_err("arcmsr: scsi bus reset "
3928 "eh returns with success\n");
3929 } else {
3930 acb->acb_flags &= ~ACB_F_BUS_RESET;
3931 atomic_set(&acb->rq_map_token, 16);
3932 atomic_set(&acb->ante_token_value, 16);
3933 acb->fw_flag = FW_NORMAL;
3934 mod_timer(&acb->eternal_timer,
3935 jiffies + msecs_to_jiffies(6 * HZ));
3936 rtn = SUCCESS;
3937 }
3938 break;
3939 }
3041 } 3940 }
3042 return rtn; 3941 return rtn;
3043} 3942}
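
In the type D bus-reset branch above, completion of the hardware reset is detected by sampling bit 0x80 of sample_at_reset after each ARCMSR_SLEEPTIME nap, giving up after ARCMSR_RETRYCOUNT rounds (both constants come from the driver's headers; their values are not shown in this hunk). The wait, factored out as a sketch:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

static int hbaD_wait_hw_reset(void __iomem *sample_at_reset,
			      unsigned int sleep_s, int max_retries)
{
	int retry = 0;

	do {
		ssleep(sleep_s);
		if ((readl(sample_at_reset) & 0x80) == 0)
			return 0;		/* reset completed */
	} while (retry++ < max_retries);
	return -ETIMEDOUT;			/* caller marks FW_DEADLOCK */
}
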
@@ -3056,8 +3955,10 @@ static int arcmsr_abort(struct scsi_cmnd *cmd)
3056 (struct AdapterControlBlock *)cmd->device->host->hostdata; 3955 (struct AdapterControlBlock *)cmd->device->host->hostdata;
3057 int i = 0; 3956 int i = 0;
3058 int rtn = FAILED; 3957 int rtn = FAILED;
3958 uint32_t intmask_org;
3959
3059 printk(KERN_NOTICE 3960 printk(KERN_NOTICE
3060 "arcmsr%d: abort device command of scsi id = %d lun = %d \n", 3961 "arcmsr%d: abort device command of scsi id = %d lun = %d\n",
3061 acb->host->host_no, cmd->device->id, (u32)cmd->device->lun); 3962 acb->host->host_no, cmd->device->id, (u32)cmd->device->lun);
3062 acb->acb_flags |= ACB_F_ABORT; 3963 acb->acb_flags |= ACB_F_ABORT;
3063 acb->num_aborts++; 3964 acb->num_aborts++;
@@ -3067,9 +3968,12 @@ static int arcmsr_abort(struct scsi_cmnd *cmd)
3067 ** we need to handle it as soon as possible and exit 3968 ** we need to handle it as soon as possible and exit
3068 ************************************************ 3969 ************************************************
3069 */ 3970 */
3070 if (!atomic_read(&acb->ccboutstandingcount)) 3971 if (!atomic_read(&acb->ccboutstandingcount)) {
3972 acb->acb_flags &= ~ACB_F_ABORT;
3071 return rtn; 3973 return rtn;
3974 }
3072 3975
3976 intmask_org = arcmsr_disable_outbound_ints(acb);
3073 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { 3977 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3074 struct CommandControlBlock *ccb = acb->pccb_pool[i]; 3978 struct CommandControlBlock *ccb = acb->pccb_pool[i];
3075 if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) { 3979 if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
@@ -3079,6 +3983,7 @@ static int arcmsr_abort(struct scsi_cmnd *cmd)
3079 } 3983 }
3080 } 3984 }
3081 acb->acb_flags &= ~ACB_F_ABORT; 3985 acb->acb_flags &= ~ACB_F_ABORT;
3986 arcmsr_enable_outbound_ints(acb, intmask_org);
3082 return rtn; 3987 return rtn;
3083} 3988}
3084 3989
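
The abort-handler changes above fix two things: the early "nothing outstanding" return now clears ACB_F_ABORT before leaving, and the scan over the CCB pool is bracketed by masking and restoring the outbound interrupts so it cannot race the completion ISR. The bracket, sketched with the per-CCB work elided just as it is in the hunk:

static int arcmsr_abort_masked(struct AdapterControlBlock *acb,
			       struct scsi_cmnd *cmd)
{
	uint32_t intmask_org = arcmsr_disable_outbound_ints(acb);
	int i, rtn = FAILED;

	for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
		struct CommandControlBlock *ccb = acb->pccb_pool[i];

		if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
			/* ... abort and poll this CCB (elided above),
			 * setting rtn accordingly ... */
			break;
		}
	}
	acb->acb_flags &= ~ACB_F_ABORT;
	arcmsr_enable_outbound_ints(acb, intmask_org);
	return rtn;
}
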
@@ -3108,19 +4013,20 @@ static const char *arcmsr_info(struct Scsi_Host *host)
3108 case PCI_DEVICE_ID_ARECA_1280: 4013 case PCI_DEVICE_ID_ARECA_1280:
3109 type = "SATA"; 4014 type = "SATA";
3110 break; 4015 break;
4016 case PCI_DEVICE_ID_ARECA_1214:
3111 case PCI_DEVICE_ID_ARECA_1380: 4017 case PCI_DEVICE_ID_ARECA_1380:
3112 case PCI_DEVICE_ID_ARECA_1381: 4018 case PCI_DEVICE_ID_ARECA_1381:
3113 case PCI_DEVICE_ID_ARECA_1680: 4019 case PCI_DEVICE_ID_ARECA_1680:
3114 case PCI_DEVICE_ID_ARECA_1681: 4020 case PCI_DEVICE_ID_ARECA_1681:
3115 case PCI_DEVICE_ID_ARECA_1880: 4021 case PCI_DEVICE_ID_ARECA_1880:
3116 type = "SAS"; 4022 type = "SAS/SATA";
3117 break; 4023 break;
3118 default: 4024 default:
3119 type = "X-TYPE"; 4025 type = "unknown";
4026 raid6 = 0;
3120 break; 4027 break;
3121 } 4028 }
3122 sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s", 4029 sprintf(buf, "Areca %s RAID Controller %s\narcmsr version %s\n",
3123 type, raid6 ? "( RAID6 capable)" : "", 4030 type, raid6 ? "(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION);
3124 ARCMSR_DRIVER_VERSION);
3125 return buf; 4031 return buf;
3126} 4032}
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index 860f527d8f26..81e83a65a193 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2013 Emulex 2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 1432ed5e9fc6..80d97f3d2ed9 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2013 Emulex 2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -275,6 +275,19 @@ bool is_link_state_evt(u32 trailer)
275 ASYNC_EVENT_CODE_LINK_STATE); 275 ASYNC_EVENT_CODE_LINK_STATE);
276} 276}
277 277
278static bool is_iscsi_evt(u32 trailer)
279{
280 return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
281 ASYNC_TRAILER_EVENT_CODE_MASK) ==
282 ASYNC_EVENT_CODE_ISCSI;
283}
284
285static int iscsi_evt_type(u32 trailer)
286{
287 return (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
288 ASYNC_TRAILER_EVENT_TYPE_MASK;
289}
290
278static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl) 291static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
279{ 292{
280 if (compl->flags != 0) { 293 if (compl->flags != 0) {
@@ -438,7 +451,7 @@ void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
438 } else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) || 451 } else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
439 ((evt->port_link_status & ASYNC_EVENT_LOGICAL) && 452 ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
440 (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) { 453 (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
441 phba->state = BE_ADAPTER_LINK_UP; 454 phba->state = BE_ADAPTER_LINK_UP | BE_ADAPTER_CHECK_BOOT;
442 455
443 beiscsi_log(phba, KERN_ERR, 456 beiscsi_log(phba, KERN_ERR,
444 BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, 457 BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
@@ -461,7 +474,28 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
461 /* Interpret compl as a async link evt */ 474 /* Interpret compl as a async link evt */
462 beiscsi_async_link_state_process(phba, 475 beiscsi_async_link_state_process(phba,
463 (struct be_async_event_link_state *) compl); 476 (struct be_async_event_link_state *) compl);
464 else 477 else if (is_iscsi_evt(compl->flags)) {
478 switch (iscsi_evt_type(compl->flags)) {
479 case ASYNC_EVENT_NEW_ISCSI_TGT_DISC:
480 case ASYNC_EVENT_NEW_ISCSI_CONN:
481 case ASYNC_EVENT_NEW_TCP_CONN:
482 phba->state |= BE_ADAPTER_CHECK_BOOT;
483 beiscsi_log(phba, KERN_ERR,
484 BEISCSI_LOG_CONFIG |
485 BEISCSI_LOG_MBOX,
486 "BC_%d : Async iscsi Event,"
487 " flags handled = 0x%08x\n",
488 compl->flags);
489 break;
490 default:
491 beiscsi_log(phba, KERN_ERR,
492 BEISCSI_LOG_CONFIG |
493 BEISCSI_LOG_MBOX,
494 "BC_%d : Unsupported Async"
495 " Event, flags = 0x%08x\n",
496 compl->flags);
497 }
498 } else
465 beiscsi_log(phba, KERN_ERR, 499 beiscsi_log(phba, KERN_ERR,
466 BEISCSI_LOG_CONFIG | 500 BEISCSI_LOG_CONFIG |
467 BEISCSI_LOG_MBOX, 501 BEISCSI_LOG_MBOX,
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index cc7405c0eca0..98897434bcb4 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2013 Emulex 2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -26,9 +26,9 @@
26 * The commands are serviced by the ARM processor in the OneConnect's MPU. 26 * The commands are serviced by the ARM processor in the OneConnect's MPU.
27 */ 27 */
28struct be_sge { 28struct be_sge {
29 u32 pa_lo; 29 __le32 pa_lo;
30 u32 pa_hi; 30 __le32 pa_hi;
31 u32 len; 31 __le32 len;
32}; 32};
33 33
34#define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */ 34#define MCC_WRB_SGE_CNT_SHIFT 3 /* bits 3 - 7 of dword 0 */
@@ -118,6 +118,14 @@ struct be_mcc_compl {
118#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */ 118#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
119#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF 119#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
120#define ASYNC_EVENT_CODE_LINK_STATE 0x1 120#define ASYNC_EVENT_CODE_LINK_STATE 0x1
121#define ASYNC_EVENT_CODE_ISCSI 0x4
122
123#define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16 /* bits 16 - 23 */
124#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xF
125#define ASYNC_EVENT_NEW_ISCSI_TGT_DISC 0x4
126#define ASYNC_EVENT_NEW_ISCSI_CONN 0x5
127#define ASYNC_EVENT_NEW_TCP_CONN 0x7
128
121struct be_async_event_trailer { 129struct be_async_event_trailer {
122 u32 code; 130 u32 code;
123}; 131};
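
The new trailer constants above slot into the same flags word the existing link-state test uses: bits 8-15 carry the event code, and for code 0x4 (iSCSI) the type sits in the 16-23 field, of which ASYNC_TRAILER_EVENT_TYPE_MASK keeps only the low four bits. Decoding a sample completion, matching the is_iscsi_evt()/iscsi_evt_type() helpers added in be_cmds.c earlier in this patch:

#include <linux/printk.h>
#include <linux/types.h>

static void decode_trailer_example(void)
{
	/* code 0x4 (ISCSI) in bits 8-15, type 0x7 (NEW_TCP_CONN) in 16-23 */
	u32 flags = (0x7 << 16) | (0x4 << 8);	/* = 0x00070400 */
	u32 code = (flags >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		   ASYNC_TRAILER_EVENT_CODE_MASK;
	u32 type = (flags >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
		   ASYNC_TRAILER_EVENT_TYPE_MASK;

	/* code == ASYNC_EVENT_CODE_ISCSI, type == ASYNC_EVENT_NEW_TCP_CONN,
	 * so beiscsi_process_mcc() would set BE_ADAPTER_CHECK_BOOT */
	pr_debug("async evt code %u type %u\n", code, type);
}
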
@@ -624,11 +632,11 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
624/******************** Modify EQ Delay *******************/ 632/******************** Modify EQ Delay *******************/
625struct be_cmd_req_modify_eq_delay { 633struct be_cmd_req_modify_eq_delay {
626 struct be_cmd_req_hdr hdr; 634 struct be_cmd_req_hdr hdr;
627 u32 num_eq; 635 __le32 num_eq;
628 struct { 636 struct {
629 u32 eq_id; 637 __le32 eq_id;
630 u32 phase; 638 __le32 phase;
631 u32 delay_multiplier; 639 __le32 delay_multiplier;
632 } delay[MAX_CPUS]; 640 } delay[MAX_CPUS];
633} __packed; 641} __packed;
634 642
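
Retyping the be_sge and be_cmd_req_modify_eq_delay fields as __le32 above makes the wire endianness checkable: sparse (make C=1) now warns on any direct assignment from a CPU-endian value. Filling an SGE then has to go through the byte-order helpers, sketched here with the field names from the hunk:

#include <linux/dma-mapping.h>
#include <linux/kernel.h>

static void be_sge_fill(struct be_sge *sge, dma_addr_t pa, u32 len)
{
	sge->pa_lo = cpu_to_le32(lower_32_bits(pa));
	sge->pa_hi = cpu_to_le32(upper_32_bits(pa));
	sge->len   = cpu_to_le32(len);
}
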
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 86162811812d..b7391a3f9f0b 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2013 Emulex 2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -1274,6 +1274,31 @@ int beiscsi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
1274} 1274}
1275 1275
1276/** 1276/**
1277 * beiscsi_flush_cq()- Flush the CQ created.
1278 * @phba: ptr device priv structure.
1279 *
1280 * Before the connection resource are freed flush
1281 * all the CQ enteries
1282 **/
1283static void beiscsi_flush_cq(struct beiscsi_hba *phba)
1284{
1285 uint16_t i;
1286 struct be_eq_obj *pbe_eq;
1287 struct hwi_controller *phwi_ctrlr;
1288 struct hwi_context_memory *phwi_context;
1289
1290 phwi_ctrlr = phba->phwi_ctrlr;
1291 phwi_context = phwi_ctrlr->phwi_ctxt;
1292
1293 for (i = 0; i < phba->num_cpus; i++) {
1294 pbe_eq = &phwi_context->be_eq[i];
1295 blk_iopoll_disable(&pbe_eq->iopoll);
1296 beiscsi_process_cq(pbe_eq);
1297 blk_iopoll_enable(&pbe_eq->iopoll);
1298 }
1299}
1300
1301/**
1277 * beiscsi_close_conn - Upload the connection 1302 * beiscsi_close_conn - Upload the connection
1278 * @ep: The iscsi endpoint 1303 * @ep: The iscsi endpoint
1279 * @flag: The type of connection closure 1304 * @flag: The type of connection closure
@@ -1294,6 +1319,10 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
1294 } 1319 }
1295 1320
1296 ret = beiscsi_mccq_compl(phba, tag, NULL, NULL); 1321 ret = beiscsi_mccq_compl(phba, tag, NULL, NULL);
1322
1323 /* Flush the CQ entries */
1324 beiscsi_flush_cq(phba);
1325
1297 return ret; 1326 return ret;
1298} 1327}
1299 1328
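
beiscsi_flush_cq(), added above, drains completions before the connection's resources go away; the per-EQ step is the classic disable/drain/enable pattern, relying on blk_iopoll_disable() not returning while a poll is still scheduled on that instance:

static void flush_one_eq(struct be_eq_obj *pbe_eq)
{
	blk_iopoll_disable(&pbe_eq->iopoll);	/* fence concurrent polling */
	beiscsi_process_cq(pbe_eq);		/* reap whatever is queued */
	blk_iopoll_enable(&pbe_eq->iopoll);
}
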
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 31ddc8494398..e0b3b2d1f27a 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2013 Emulex 2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 915c26b23ab6..30d74a06b993 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2013 Emulex 2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -2068,7 +2068,7 @@ static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
2068 * return 2068 * return
2069 * Number of Completion Entries processed. 2069 * Number of Completion Entries processed.
2070 **/ 2070 **/
2071static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) 2071unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
2072{ 2072{
2073 struct be_queue_info *cq; 2073 struct be_queue_info *cq;
2074 struct sol_cqe *sol; 2074 struct sol_cqe *sol;
@@ -2110,6 +2110,18 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
2110 2110
2111 cri_index = BE_GET_CRI_FROM_CID(cid); 2111 cri_index = BE_GET_CRI_FROM_CID(cid);
2112 ep = phba->ep_array[cri_index]; 2112 ep = phba->ep_array[cri_index];
2113
2114 if (ep == NULL) {
2115 /* connection has already been freed
2116 * just move on to next one
2117 */
2118 beiscsi_log(phba, KERN_WARNING,
2119 BEISCSI_LOG_INIT,
2120 "BM_%d : proc cqe of disconn ep: cid %d\n",
2121 cid);
2122 goto proc_next_cqe;
2123 }
2124
2113 beiscsi_ep = ep->dd_data; 2125 beiscsi_ep = ep->dd_data;
2114 beiscsi_conn = beiscsi_ep->conn; 2126 beiscsi_conn = beiscsi_ep->conn;
2115 2127
@@ -2219,6 +2231,7 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
2219 break; 2231 break;
2220 } 2232 }
2221 2233
2234proc_next_cqe:
2222 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0); 2235 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
2223 queue_tail_inc(cq); 2236 queue_tail_inc(cq);
2224 sol = queue_tail_node(cq); 2237 sol = queue_tail_node(cq);
@@ -4377,6 +4390,10 @@ static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
4377{ 4390{
4378 struct iscsi_boot_kobj *boot_kobj; 4391 struct iscsi_boot_kobj *boot_kobj;
4379 4392
4393 /* it has been created previously */
4394 if (phba->boot_kset)
4395 return 0;
4396
4380 /* get boot info using mgmt cmd */ 4397 /* get boot info using mgmt cmd */
4381 if (beiscsi_get_boot_info(phba)) 4398 if (beiscsi_get_boot_info(phba))
4382 /* Try to see if we can carry on without this */ 4399 /* Try to see if we can carry on without this */
@@ -5206,6 +5223,7 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba,
5206 free_irq(phba->pcidev->irq, phba); 5223 free_irq(phba->pcidev->irq, phba);
5207 } 5224 }
5208 pci_disable_msix(phba->pcidev); 5225 pci_disable_msix(phba->pcidev);
5226 cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
5209 5227
5210 for (i = 0; i < phba->num_cpus; i++) { 5228 for (i = 0; i < phba->num_cpus; i++) {
5211 pbe_eq = &phwi_context->be_eq[i]; 5229 pbe_eq = &phwi_context->be_eq[i];
@@ -5227,7 +5245,6 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba,
5227 hwi_cleanup(phba); 5245 hwi_cleanup(phba);
5228 } 5246 }
5229 5247
5230 cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
5231} 5248}
5232 5249
5233static void beiscsi_remove(struct pci_dev *pcidev) 5250static void beiscsi_remove(struct pci_dev *pcidev)
@@ -5276,9 +5293,9 @@ static void beiscsi_msix_enable(struct beiscsi_hba *phba)
5276 for (i = 0; i <= phba->num_cpus; i++) 5293 for (i = 0; i <= phba->num_cpus; i++)
5277 phba->msix_entries[i].entry = i; 5294 phba->msix_entries[i].entry = i;
5278 5295
5279 status = pci_enable_msix(phba->pcidev, phba->msix_entries, 5296 status = pci_enable_msix_range(phba->pcidev, phba->msix_entries,
5280 (phba->num_cpus + 1)); 5297 phba->num_cpus + 1, phba->num_cpus + 1);
5281 if (!status) 5298 if (status > 0)
5282 phba->msix_enabled = true; 5299 phba->msix_enabled = true;
5283 5300
5284 return; 5301 return;
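
This hunk (and the csiostor one further down) migrates from pci_enable_msix() to pci_enable_msix_range(), whose contract is simpler: it returns the number of vectors actually allocated, between minvec and maxvec, or a negative errno, so the old retry-with-the-hinted-count dance disappears. be2iscsi wants an exact count, so it passes num_cpus + 1 as both bounds; the pattern, isolated:

#include <linux/pci.h>

static int enable_exact_msix(struct pci_dev *pdev, struct msix_entry *entries,
			     int nvec)
{
	/* minvec == maxvec: succeed with exactly nvec vectors or fail */
	int rc = pci_enable_msix_range(pdev, entries, nvec, nvec);

	return rc < 0 ? rc : 0;		/* rc == nvec on success */
}
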
@@ -5335,6 +5352,14 @@ static void be_eqd_update(struct beiscsi_hba *phba)
5335 } 5352 }
5336} 5353}
5337 5354
5355static void be_check_boot_session(struct beiscsi_hba *phba)
5356{
5357 if (beiscsi_setup_boot_info(phba))
5358 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5359 "BM_%d : Could not set up "
5360 "iSCSI boot info on async event.\n");
5361}
5362
5338/* 5363/*
5339 * beiscsi_hw_health_check()- Check adapter health 5364 * beiscsi_hw_health_check()- Check adapter health
5340 * @work: work item to check HW health 5365 * @work: work item to check HW health
@@ -5350,6 +5375,11 @@ beiscsi_hw_health_check(struct work_struct *work)
5350 5375
5351 be_eqd_update(phba); 5376 be_eqd_update(phba);
5352 5377
5378 if (phba->state & BE_ADAPTER_CHECK_BOOT) {
5379 phba->state &= ~BE_ADAPTER_CHECK_BOOT;
5380 be_check_boot_session(phba);
5381 }
5382
5353 beiscsi_ue_detect(phba); 5383 beiscsi_ue_detect(phba);
5354 5384
5355 schedule_delayed_work(&phba->beiscsi_hw_check_task, 5385 schedule_delayed_work(&phba->beiscsi_hw_check_task,
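
The BE_ADAPTER_CHECK_BOOT flag added in this patch decouples the two contexts involved: the MCC event handler runs in completion context and only marks the flag, while the periodic health-check work above consumes it and re-runs boot discovery, where sleeping mailbox commands are allowed. The consuming side, isolated:

static void maybe_refresh_boot_session(struct beiscsi_hba *phba)
{
	if (phba->state & BE_ADAPTER_CHECK_BOOT) {
		phba->state &= ~BE_ADAPTER_CHECK_BOOT;
		be_check_boot_session(phba);	/* may sleep: mgmt command */
	}
}
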
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 9ceab426eec9..7ee0ffc38514 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2013 Emulex 2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -36,7 +36,7 @@
36#include <scsi/scsi_transport_iscsi.h> 36#include <scsi/scsi_transport_iscsi.h>
37 37
38#define DRV_NAME "be2iscsi" 38#define DRV_NAME "be2iscsi"
39#define BUILD_STR "10.2.273.0" 39#define BUILD_STR "10.4.114.0"
40#define BE_NAME "Emulex OneConnect" \ 40#define BE_NAME "Emulex OneConnect" \
41 "Open-iSCSI Driver version" BUILD_STR 41 "Open-iSCSI Driver version" BUILD_STR
42#define DRV_DESC BE_NAME " " "Driver" 42#define DRV_DESC BE_NAME " " "Driver"
@@ -104,6 +104,7 @@
104#define BE_ADAPTER_LINK_DOWN 0x002 104#define BE_ADAPTER_LINK_DOWN 0x002
105#define BE_ADAPTER_PCI_ERR 0x004 105#define BE_ADAPTER_PCI_ERR 0x004
106#define BE_ADAPTER_STATE_SHUTDOWN 0x008 106#define BE_ADAPTER_STATE_SHUTDOWN 0x008
107#define BE_ADAPTER_CHECK_BOOT 0x010
107 108
108 109
109#define BEISCSI_CLEAN_UNLOAD 0x01 110#define BEISCSI_CLEAN_UNLOAD 0x01
@@ -839,6 +840,9 @@ void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
839void hwi_ring_cq_db(struct beiscsi_hba *phba, 840void hwi_ring_cq_db(struct beiscsi_hba *phba,
840 unsigned int id, unsigned int num_processed, 841 unsigned int id, unsigned int num_processed,
841 unsigned char rearm, unsigned char event); 842 unsigned char rearm, unsigned char event);
843
844unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq);
845
842static inline bool beiscsi_error(struct beiscsi_hba *phba) 846static inline bool beiscsi_error(struct beiscsi_hba *phba)
843{ 847{
844 return phba->ue_detected || phba->fw_timeout; 848 return phba->ue_detected || phba->fw_timeout;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 665afcb74a56..681d4e8f003a 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2013 Emulex 2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -943,17 +943,20 @@ mgmt_static_ip_modify(struct beiscsi_hba *phba,
943 943
944 if (ip_action == IP_ACTION_ADD) { 944 if (ip_action == IP_ACTION_ADD) {
945 memcpy(req->ip_params.ip_record.ip_addr.addr, ip_param->value, 945 memcpy(req->ip_params.ip_record.ip_addr.addr, ip_param->value,
946 ip_param->len); 946 sizeof(req->ip_params.ip_record.ip_addr.addr));
947 947
948 if (subnet_param) 948 if (subnet_param)
949 memcpy(req->ip_params.ip_record.ip_addr.subnet_mask, 949 memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
950 subnet_param->value, subnet_param->len); 950 subnet_param->value,
951 sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
951 } else { 952 } else {
952 memcpy(req->ip_params.ip_record.ip_addr.addr, 953 memcpy(req->ip_params.ip_record.ip_addr.addr,
953 if_info->ip_addr.addr, ip_param->len); 954 if_info->ip_addr.addr,
955 sizeof(req->ip_params.ip_record.ip_addr.addr));
954 956
955 memcpy(req->ip_params.ip_record.ip_addr.subnet_mask, 957 memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
956 if_info->ip_addr.subnet_mask, ip_param->len); 958 if_info->ip_addr.subnet_mask,
959 sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
957 } 960 }
958 961
959 rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); 962 rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
@@ -981,7 +984,7 @@ static int mgmt_modify_gateway(struct beiscsi_hba *phba, uint8_t *gt_addr,
981 req->action = gtway_action; 984 req->action = gtway_action;
982 req->ip_addr.ip_type = BE2_IPV4; 985 req->ip_addr.ip_type = BE2_IPV4;
983 986
984 memcpy(req->ip_addr.addr, gt_addr, param_len); 987 memcpy(req->ip_addr.addr, gt_addr, sizeof(req->ip_addr.addr));
985 988
986 return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); 989 return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
987} 990}
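
The three memcpy() fixes above switch the copy length from the caller-supplied ip_param->len to sizeof() of the destination field, so an oversized parameter can no longer overflow the firmware request. A defensive variant that also tolerates a short source, shown with an illustrative record type (the real layout is the driver's, and the in-tree fix copies sizeof(dest) bytes unconditionally):

#include <linux/kernel.h>
#include <linux/string.h>

struct ip_record_sketch {
	u8 addr[16];	/* sized for IPv6, as an assumption */
};

static void ip_record_set_addr(struct ip_record_sketch *rec,
			       const u8 *val, size_t len)
{
	memset(rec->addr, 0, sizeof(rec->addr));
	memcpy(rec->addr, val, min(len, sizeof(rec->addr)));
}
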
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 24a8fc577477..bd81446936fc 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2013 Emulex 2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 4c5891e66038..0679782d9d15 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1654,6 +1654,10 @@ static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
1654 u64 addr; 1654 u64 addr;
1655 int i; 1655 int i;
1656 1656
1657 /*
1658 * Use dma_map_sg directly to ensure we're using the correct
1659 * dev struct off of pcidev.
1660 */
1657 sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc), 1661 sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc),
1658 scsi_sg_count(sc), sc->sc_data_direction); 1662 scsi_sg_count(sc), sc->sc_data_direction);
1659 scsi_for_each_sg(sc, sg, sg_count, i) { 1663 scsi_for_each_sg(sc, sg, sg_count, i) {
@@ -1703,9 +1707,16 @@ static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
1703static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req) 1707static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
1704{ 1708{
1705 struct scsi_cmnd *sc = io_req->sc_cmd; 1709 struct scsi_cmnd *sc = io_req->sc_cmd;
1710 struct bnx2fc_interface *interface = io_req->port->priv;
1711 struct bnx2fc_hba *hba = interface->hba;
1706 1712
1707 if (io_req->bd_tbl->bd_valid && sc) { 1713 /*
1708 scsi_dma_unmap(sc); 1714 * Use dma_unmap_sg directly to ensure we're using the correct
1715 * dev struct off of pcidev.
1716 */
1717 if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
1718 dma_unmap_sg(&hba->pcidev->dev, scsi_sglist(sc),
1719 scsi_sg_count(sc), sc->sc_data_direction);
1709 io_req->bd_tbl->bd_valid = 0; 1720 io_req->bd_tbl->bd_valid = 0;
1710 } 1721 }
1711} 1722}
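
The unmap fix above restores the invariant the map side requires (dma_map_sg() on &hba->pcidev->dev, earlier in this diff): unmapping must use the same device, nents, and direction as the mapping, which scsi_dma_unmap() cannot guarantee here because it unmaps against the SCSI midlayer's device. Isolated:

#include <linux/dma-mapping.h>
#include <scsi/scsi_cmnd.h>

static void bnx2fc_unmap_io_sg(struct device *dev, struct scsi_cmnd *sc)
{
	if (scsi_sg_count(sc))
		dma_unmap_sg(dev, scsi_sglist(sc), scsi_sg_count(sc),
			     sc->sc_data_direction);
}
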
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 40e22497d249..7a36388822aa 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -2235,6 +2235,9 @@ static umode_t bnx2i_attr_is_visible(int param_type, int param)
2235 case ISCSI_PARAM_TGT_RESET_TMO: 2235 case ISCSI_PARAM_TGT_RESET_TMO:
2236 case ISCSI_PARAM_IFACE_NAME: 2236 case ISCSI_PARAM_IFACE_NAME:
2237 case ISCSI_PARAM_INITIATOR_NAME: 2237 case ISCSI_PARAM_INITIATOR_NAME:
2238 case ISCSI_PARAM_BOOT_ROOT:
2239 case ISCSI_PARAM_BOOT_NIC:
2240 case ISCSI_PARAM_BOOT_TARGET:
2238 return S_IRUGO; 2241 return S_IRUGO;
2239 default: 2242 default:
2240 return 0; 2243 return 0;
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
index 49b1daa4476e..5db2d85195b1 100644
--- a/drivers/scsi/csiostor/csio_hw.h
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -94,7 +94,7 @@ enum {
94}; 94};
95 95
96struct csio_msix_entries { 96struct csio_msix_entries {
97 unsigned short vector; /* Vector assigned by pci_enable_msix */ 97 unsigned short vector; /* Assigned MSI-X vector */
98 void *dev_id; /* Priv object associated w/ this msix*/ 98 void *dev_id; /* Priv object associated w/ this msix*/
99 char desc[24]; /* Description of this vector */ 99 char desc[24]; /* Description of this vector */
100}; 100};
diff --git a/drivers/scsi/csiostor/csio_isr.c b/drivers/scsi/csiostor/csio_isr.c
index 7ee9777ae2c5..a8c748a35f9c 100644
--- a/drivers/scsi/csiostor/csio_isr.c
+++ b/drivers/scsi/csiostor/csio_isr.c
@@ -499,7 +499,7 @@ csio_reduce_sqsets(struct csio_hw *hw, int cnt)
499static int 499static int
500csio_enable_msix(struct csio_hw *hw) 500csio_enable_msix(struct csio_hw *hw)
501{ 501{
502 int rv, i, j, k, n, min, cnt; 502 int i, j, k, n, min, cnt;
503 struct csio_msix_entries *entryp; 503 struct csio_msix_entries *entryp;
504 struct msix_entry *entries; 504 struct msix_entry *entries;
505 int extra = CSIO_EXTRA_VECS; 505 int extra = CSIO_EXTRA_VECS;
@@ -521,21 +521,15 @@ csio_enable_msix(struct csio_hw *hw)
521 521
522 csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt); 522 csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);
523 523
524 while ((rv = pci_enable_msix(hw->pdev, entries, cnt)) >= min) 524 cnt = pci_enable_msix_range(hw->pdev, entries, min, cnt);
525 cnt = rv; 525 if (cnt < 0) {
526 if (!rv) {
527 if (cnt < (hw->num_sqsets + extra)) {
528 csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
529 csio_reduce_sqsets(hw, cnt - extra);
530 }
531 } else {
532 if (rv > 0) {
533 pci_disable_msix(hw->pdev);
534 csio_info(hw, "Not using MSI-X, remainder:%d\n", rv);
535 }
536
537 kfree(entries); 526 kfree(entries);
538 return -ENOMEM; 527 return cnt;
528 }
529
530 if (cnt < (hw->num_sqsets + extra)) {
531 csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
532 csio_reduce_sqsets(hw, cnt - extra);
539 } 533 }
540 534
541 /* Save off vectors */ 535 /* Save off vectors */
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index addd1dddce14..6a2001d6b442 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1852,7 +1852,7 @@ static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
1852 u32 credits; 1852 u32 credits;
1853 1853
1854 log_debug(1 << CXGBI_DBG_PDU_RX, 1854 log_debug(1 << CXGBI_DBG_PDU_RX,
1855 "csk 0x%p,%u,0x%lu,%u, seq %u, wup %u, thre %u, %u.\n", 1855 "csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n",
1856 csk, csk->state, csk->flags, csk->tid, csk->copied_seq, 1856 csk, csk->state, csk->flags, csk->tid, csk->copied_seq,
1857 csk->rcv_wup, cdev->rx_credit_thres, 1857 csk->rcv_wup, cdev->rx_credit_thres,
1858 cdev->rcv_win); 1858 cdev->rcv_win);
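
A one-character format fix: csk->flags is an unsigned long bitmask, so dumping it in hex needs %lx; the old "0x%lu" printed a decimal value under a 0x prefix, which is actively misleading in a log.

#include <linux/printk.h>

static void show_csk_flags(unsigned long flags)
{
	pr_debug("csk flags 0x%lx\n", flags);	/* e.g. "csk flags 0x2c" */
}
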
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 67283ef418ac..072f0ec2851e 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -2363,6 +2363,7 @@ static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2363 host->unique_id = (u32)sys_tbl_pa + pHba->unit; 2363 host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2364 host->sg_tablesize = pHba->sg_tablesize; 2364 host->sg_tablesize = pHba->sg_tablesize;
2365 host->can_queue = pHba->post_fifo_size; 2365 host->can_queue = pHba->post_fifo_size;
2366 host->use_cmd_list = 1;
2366 2367
2367 return 0; 2368 return 0;
2368} 2369}
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 813dd5c998e4..943ad3a19661 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -837,7 +837,6 @@ struct hostdata {
837static struct Scsi_Host *sh[MAX_BOARDS]; 837static struct Scsi_Host *sh[MAX_BOARDS];
838static const char *driver_name = "EATA"; 838static const char *driver_name = "EATA";
839static char sha[MAX_BOARDS]; 839static char sha[MAX_BOARDS];
840static DEFINE_SPINLOCK(driver_lock);
841 840
842/* Initialize num_boards so that ihdlr can work while detect is in progress */ 841/* Initialize num_boards so that ihdlr can work while detect is in progress */
843static unsigned int num_boards = MAX_BOARDS; 842static unsigned int num_boards = MAX_BOARDS;
@@ -1097,8 +1096,6 @@ static int port_detect(unsigned long port_base, unsigned int j,
1097 goto fail; 1096 goto fail;
1098 } 1097 }
1099 1098
1100 spin_lock_irq(&driver_lock);
1101
1102 if (do_dma(port_base, 0, READ_CONFIG_PIO)) { 1099 if (do_dma(port_base, 0, READ_CONFIG_PIO)) {
1103#if defined(DEBUG_DETECT) 1100#if defined(DEBUG_DETECT)
1104 printk("%s: detect, do_dma failed at 0x%03lx.\n", name, 1101 printk("%s: detect, do_dma failed at 0x%03lx.\n", name,
@@ -1264,10 +1261,7 @@ static int port_detect(unsigned long port_base, unsigned int j,
1264 } 1261 }
1265#endif 1262#endif
1266 1263
1267 spin_unlock_irq(&driver_lock);
1268 sh[j] = shost = scsi_register(tpnt, sizeof(struct hostdata)); 1264 sh[j] = shost = scsi_register(tpnt, sizeof(struct hostdata));
1269 spin_lock_irq(&driver_lock);
1270
1271 if (shost == NULL) { 1265 if (shost == NULL) {
1272 printk("%s: unable to register host, detaching.\n", name); 1266 printk("%s: unable to register host, detaching.\n", name);
1273 goto freedma; 1267 goto freedma;
@@ -1344,8 +1338,6 @@ static int port_detect(unsigned long port_base, unsigned int j,
1344 else 1338 else
1345 sprintf(dma_name, "DMA %u", dma_channel); 1339 sprintf(dma_name, "DMA %u", dma_channel);
1346 1340
1347 spin_unlock_irq(&driver_lock);
1348
1349 for (i = 0; i < shost->can_queue; i++) 1341 for (i = 0; i < shost->can_queue; i++)
1350 ha->cp[i].cp_dma_addr = pci_map_single(ha->pdev, 1342 ha->cp[i].cp_dma_addr = pci_map_single(ha->pdev,
1351 &ha->cp[i], 1343 &ha->cp[i],
@@ -1438,7 +1430,6 @@ static int port_detect(unsigned long port_base, unsigned int j,
1438 freeirq: 1430 freeirq:
1439 free_irq(irq, &sha[j]); 1431 free_irq(irq, &sha[j]);
1440 freelock: 1432 freelock:
1441 spin_unlock_irq(&driver_lock);
1442 release_region(port_base, REGION_SIZE); 1433 release_region(port_base, REGION_SIZE);
1443 fail: 1434 fail:
1444 return 0; 1435 return 0;
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index 74277c20f6a5..bdc89899561a 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -96,14 +96,32 @@ int fcoe_link_speed_update(struct fc_lport *lport)
96 struct ethtool_cmd ecmd; 96 struct ethtool_cmd ecmd;
97 97
98 if (!__ethtool_get_settings(netdev, &ecmd)) { 98 if (!__ethtool_get_settings(netdev, &ecmd)) {
99 lport->link_supported_speeds &= 99 lport->link_supported_speeds &= ~(FC_PORTSPEED_1GBIT |
100 ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); 100 FC_PORTSPEED_10GBIT |
101 FC_PORTSPEED_20GBIT |
102 FC_PORTSPEED_40GBIT);
103
101 if (ecmd.supported & (SUPPORTED_1000baseT_Half | 104 if (ecmd.supported & (SUPPORTED_1000baseT_Half |
102 SUPPORTED_1000baseT_Full)) 105 SUPPORTED_1000baseT_Full |
106 SUPPORTED_1000baseKX_Full))
103 lport->link_supported_speeds |= FC_PORTSPEED_1GBIT; 107 lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
104 if (ecmd.supported & SUPPORTED_10000baseT_Full) 108
105 lport->link_supported_speeds |= 109 if (ecmd.supported & (SUPPORTED_10000baseT_Full |
106 FC_PORTSPEED_10GBIT; 110 SUPPORTED_10000baseKX4_Full |
111 SUPPORTED_10000baseKR_Full |
112 SUPPORTED_10000baseR_FEC))
113 lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
114
115 if (ecmd.supported & (SUPPORTED_20000baseMLD2_Full |
116 SUPPORTED_20000baseKR2_Full))
117 lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
118
119 if (ecmd.supported & (SUPPORTED_40000baseKR4_Full |
120 SUPPORTED_40000baseCR4_Full |
121 SUPPORTED_40000baseSR4_Full |
122 SUPPORTED_40000baseLR4_Full))
123 lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
124
107 switch (ethtool_cmd_speed(&ecmd)) { 125 switch (ethtool_cmd_speed(&ecmd)) {
108 case SPEED_1000: 126 case SPEED_1000:
109 lport->link_speed = FC_PORTSPEED_1GBIT; 127 lport->link_speed = FC_PORTSPEED_1GBIT;
@@ -111,6 +129,15 @@ int fcoe_link_speed_update(struct fc_lport *lport)
111 case SPEED_10000: 129 case SPEED_10000:
112 lport->link_speed = FC_PORTSPEED_10GBIT; 130 lport->link_speed = FC_PORTSPEED_10GBIT;
113 break; 131 break;
132 case 20000:
133 lport->link_speed = FC_PORTSPEED_20GBIT;
134 break;
135 case 40000:
136 lport->link_speed = FC_PORTSPEED_40GBIT;
137 break;
138 default:
139 lport->link_speed = FC_PORTSPEED_UNKNOWN;
140 break;
114 } 141 }
115 return 0; 142 return 0;
116 } 143 }
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 1d3521e13d77..bf8d34c26f13 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -39,7 +39,7 @@
39 39
40#define DRV_NAME "fnic" 40#define DRV_NAME "fnic"
41#define DRV_DESCRIPTION "Cisco FCoE HBA Driver" 41#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
42#define DRV_VERSION "1.6.0.10" 42#define DRV_VERSION "1.6.0.11"
43#define PFX DRV_NAME ": " 43#define PFX DRV_NAME ": "
44#define DFX DRV_NAME "%d: " 44#define DFX DRV_NAME "%d: "
45 45
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 1b948f633fc5..f3984b48f8e9 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -35,7 +35,7 @@
35#include "cq_enet_desc.h" 35#include "cq_enet_desc.h"
36#include "cq_exch_desc.h" 36#include "cq_exch_desc.h"
37 37
38static u8 fcoe_all_fcfs[ETH_ALEN]; 38static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
39struct workqueue_struct *fnic_fip_queue; 39struct workqueue_struct *fnic_fip_queue;
40struct workqueue_struct *fnic_event_queue; 40struct workqueue_struct *fnic_event_queue;
41 41
@@ -101,13 +101,14 @@ void fnic_handle_link(struct work_struct *work)
101 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, 101 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
102 "link up\n"); 102 "link up\n");
103 fcoe_ctlr_link_up(&fnic->ctlr); 103 fcoe_ctlr_link_up(&fnic->ctlr);
104 } else 104 } else {
105 /* UP -> UP */ 105 /* UP -> UP */
106 spin_unlock_irqrestore(&fnic->fnic_lock, flags); 106 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
107 fnic_fc_trace_set_data( 107 fnic_fc_trace_set_data(
108 fnic->lport->host->host_no, FNIC_FC_LE, 108 fnic->lport->host->host_no, FNIC_FC_LE,
109 "Link Status: UP_UP", 109 "Link Status: UP_UP",
110 strlen("Link Status: UP_UP")); 110 strlen("Link Status: UP_UP"));
111 }
111 } 112 }
112 } else if (fnic->link_status) { 113 } else if (fnic->link_status) {
113 /* DOWN -> UP */ 114 /* DOWN -> UP */
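The one-line fnic_fcs.c change above fixes a classic zero-initialization bug: a file-scope array without an initializer is zero-filled, so fcoe_all_fcfs was the null MAC 00:00:00:00:00:00 and FIP frames addressed with it went nowhere useful. A minimal illustration of the difference (the byte values shown are illustrative; the driver takes them from the FIP_ALL_FCF_MACS macro in the FIP header rather than open-coding them):

#include <linux/if_ether.h>

/* Zero-filled by default: this is the null MAC, the pre-fix state. */
static u8 dst_uninitialized[ETH_ALEN];

/* Explicitly initialized: frames now go to the intended multicast
 * group address instead of 00:00:00:00:00:00. */
static u8 dst_all_fcfs[ETH_ALEN] = { 0x01, 0x10, 0x18, 0x01, 0x00, 0x02 };
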
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 8b1b38751b49..acf1f95cb5c5 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -743,7 +743,7 @@ void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
743 743
744 fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x %c%8x\t"; 744 fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x %c%8x\t";
745 len += snprintf(fnic_dbgfs_prt->buffer + len, 745 len += snprintf(fnic_dbgfs_prt->buffer + len,
746 (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len, 746 max_size - len,
747 fmt, 747 fmt,
748 tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900, 748 tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900,
749 tm.tm_hour, tm.tm_min, tm.tm_sec, 749 tm.tm_hour, tm.tm_min, tm.tm_sec,
@@ -767,8 +767,7 @@ void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
767 j == ethhdr_len + fcoehdr_len + fchdr_len || 767 j == ethhdr_len + fcoehdr_len + fchdr_len ||
768 (i > 3 && j%fchdr_len == 0)) { 768 (i > 3 && j%fchdr_len == 0)) {
769 len += snprintf(fnic_dbgfs_prt->buffer 769 len += snprintf(fnic_dbgfs_prt->buffer
770 + len, (fnic_fc_trace_max_pages 770 + len, max_size - len,
771 * PAGE_SIZE * 3) - len,
772 "\n\t\t\t\t\t\t\t\t"); 771 "\n\t\t\t\t\t\t\t\t");
773 i++; 772 i++;
774 } 773 }
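Both fnic_trace.c hunks above are instances of the bounded-append idiom: when building output with repeated snprintf() calls, each call must be given the space still free (max_size - len), not the buffer's total size, and the running length has to be clamped because snprintf() returns the length it would have written on truncation. A minimal sketch of the idiom (generic names, not fnic code):

#include <linux/kernel.h>

/* Append one record into buf of total size max_size; returns the new
 * length. Passing max_size - len keeps every call inside the buffer,
 * and the clamp stops len from running past the terminator after a
 * truncated write. */
static size_t append_record(char *buf, size_t max_size, size_t len, int seq)
{
	len += snprintf(buf + len, max_size - len, "rec=%d\n", seq);
	if (len > max_size - 1)
		len = max_size - 1;
	return len;
}
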
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 6b35d0dfe64c..cef5d49b59cd 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -5971,10 +5971,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
5971 5971
5972 /* Save the PCI command register */ 5972 /* Save the PCI command register */
5973 pci_read_config_word(pdev, 4, &command_register); 5973 pci_read_config_word(pdev, 4, &command_register);
5974 /* Turn the board off. This is so that later pci_restore_state()
5975 * won't turn the board on before the rest of config space is ready.
5976 */
5977 pci_disable_device(pdev);
5978 pci_save_state(pdev); 5974 pci_save_state(pdev);
5979 5975
5980 /* find the first memory BAR, so we can find the cfg table */ 5976 /* find the first memory BAR, so we can find the cfg table */
@@ -6022,11 +6018,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
6022 goto unmap_cfgtable; 6018 goto unmap_cfgtable;
6023 6019
6024 pci_restore_state(pdev); 6020 pci_restore_state(pdev);
6025 rc = pci_enable_device(pdev);
6026 if (rc) {
6027 dev_warn(&pdev->dev, "failed to enable device.\n");
6028 goto unmap_cfgtable;
6029 }
6030 pci_write_config_word(pdev, 4, command_register); 6021 pci_write_config_word(pdev, 4, command_register);
6031 6022
6032 /* Some devices (notably the HP Smart Array 5i Controller) 6023 /* Some devices (notably the HP Smart Array 5i Controller)
@@ -6159,26 +6150,22 @@ static void hpsa_interrupt_mode(struct ctlr_info *h)
6159 h->msix_vector = MAX_REPLY_QUEUES; 6150 h->msix_vector = MAX_REPLY_QUEUES;
6160 if (h->msix_vector > num_online_cpus()) 6151 if (h->msix_vector > num_online_cpus())
6161 h->msix_vector = num_online_cpus(); 6152 h->msix_vector = num_online_cpus();
6162 err = pci_enable_msix(h->pdev, hpsa_msix_entries, 6153 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
6163 h->msix_vector); 6154 1, h->msix_vector);
6164 if (err > 0) { 6155 if (err < 0) {
6156 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
6157 h->msix_vector = 0;
6158 goto single_msi_mode;
6159 } else if (err < h->msix_vector) {
6165 dev_warn(&h->pdev->dev, "only %d MSI-X vectors " 6160 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
6166 "available\n", err); 6161 "available\n", err);
6167 h->msix_vector = err;
6168 err = pci_enable_msix(h->pdev, hpsa_msix_entries,
6169 h->msix_vector);
6170 }
6171 if (!err) {
6172 for (i = 0; i < h->msix_vector; i++)
6173 h->intr[i] = hpsa_msix_entries[i].vector;
6174 return;
6175 } else {
6176 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
6177 err);
6178 h->msix_vector = 0;
6179 goto default_int_mode;
6180 } 6162 }
6163 h->msix_vector = err;
6164 for (i = 0; i < h->msix_vector; i++)
6165 h->intr[i] = hpsa_msix_entries[i].vector;
6166 return;
6181 } 6167 }
6168single_msi_mode:
6182 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { 6169 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
6183 dev_info(&h->pdev->dev, "MSI\n"); 6170 dev_info(&h->pdev->dev, "MSI\n");
6184 if (!pci_enable_msi(h->pdev)) 6171 if (!pci_enable_msi(h->pdev))
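pci_enable_msix_range() absorbs the old try/shrink/retry loop that the removed lines implemented by hand: the core either allocates between minvec and maxvec vectors and returns how many, or returns a negative errno, so a single call and one error check replace the two-step dance. A hedged sketch of the consumer side (generic names; hpsa's real entries array and fallback label are in the hunk above):

#include <linux/pci.h>

/* Request up to 'want' MSI-X vectors, accepting as few as one. On
 * success the return value is the vector count actually granted; a
 * negative value means MSI-X is unusable and the caller should fall
 * back to MSI or legacy INTx. */
static int enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
		       int want)
{
	int nvec = pci_enable_msix_range(pdev, entries, 1, want);

	if (nvec < 0)
		return nvec;	/* fall back to MSI/INTx */
	return nvec;		/* 1 <= nvec <= want */
}
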
@@ -6541,6 +6528,23 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
6541 if (!reset_devices) 6528 if (!reset_devices)
6542 return 0; 6529 return 0;
6543 6530
 6531 /* kdump kernel is loading; we don't know what state the
 6532 * pci interface is in. The dev->enable_cnt is zero, so
 6533 * we call enable+disable, wait a while and switch it on.
 6534 */

6535 rc = pci_enable_device(pdev);
6536 if (rc) {
6537 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
6538 return -ENODEV;
6539 }
6540 pci_disable_device(pdev);
6541 msleep(260); /* a randomly chosen number */
6542 rc = pci_enable_device(pdev);
6543 if (rc) {
6544 dev_warn(&pdev->dev, "failed to enable device.\n");
6545 return -ENODEV;
6546 }
6547 pci_set_master(pdev);
6544 /* Reset the controller with a PCI power-cycle or via doorbell */ 6548 /* Reset the controller with a PCI power-cycle or via doorbell */
6545 rc = hpsa_kdump_hard_reset_controller(pdev); 6549 rc = hpsa_kdump_hard_reset_controller(pdev);
6546 6550
@@ -6549,10 +6553,11 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
6549 * "performant mode". Or, it might be 640x, which can't reset 6553 * "performant mode". Or, it might be 640x, which can't reset
6550 * due to concerns about shared bbwc between 6402/6404 pair. 6554 * due to concerns about shared bbwc between 6402/6404 pair.
6551 */ 6555 */
6552 if (rc == -ENOTSUPP) 6556 if (rc) {
6553 return rc; /* just try to do the kdump anyhow. */ 6557 if (rc != -ENOTSUPP) /* just try to do the kdump anyhow. */
6554 if (rc) 6558 rc = -ENODEV;
6555 return -ENODEV; 6559 goto out_disable;
6560 }
6556 6561
6557 /* Now try to get the controller to respond to a no-op */ 6562 /* Now try to get the controller to respond to a no-op */
6558 dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n"); 6563 dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
@@ -6563,7 +6568,11 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev)
6563 dev_warn(&pdev->dev, "no-op failed%s\n", 6568 dev_warn(&pdev->dev, "no-op failed%s\n",
6564 (i < 11 ? "; re-trying" : "")); 6569 (i < 11 ? "; re-trying" : ""));
6565 } 6570 }
6566 return 0; 6571
6572out_disable:
6573
6574 pci_disable_device(pdev);
6575 return rc;
6567} 6576}
6568 6577
6569static int hpsa_allocate_cmd_pool(struct ctlr_info *h) 6578static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
@@ -6743,6 +6752,7 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
6743 iounmap(h->transtable); 6752 iounmap(h->transtable);
6744 if (h->cfgtable) 6753 if (h->cfgtable)
6745 iounmap(h->cfgtable); 6754 iounmap(h->cfgtable);
6755 pci_disable_device(h->pdev);
6746 pci_release_regions(h->pdev); 6756 pci_release_regions(h->pdev);
6747 kfree(h); 6757 kfree(h);
6748} 6758}
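The hpsa_init_reset_devices() changes above move the enable/disable bounce out of the hard-reset path and pair every successful pci_enable_device() with a pci_disable_device() on the error exits, keeping the device's enable count balanced even when the kdump kernel inherits unknown PCI state. A generic sketch of that prologue, assuming nothing beyond the core PCI API:

#include <linux/delay.h>
#include <linux/pci.h>

/* Bounce the device once to shake off whatever state the crashed
 * kernel left it in, then enable it for real. Any later failure path
 * must call pci_disable_device() so the enable count stays balanced. */
static int kdump_pci_prepare(struct pci_dev *pdev)
{
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	pci_disable_device(pdev);
	msleep(260);	/* empirical settle time, as in the hunk above */

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	pci_set_master(pdev);
	return 0;
}
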
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 924b0ba74dfe..2a9578c116b7 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2440,6 +2440,7 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2440{ 2440{
2441 u32 ioasc; 2441 u32 ioasc;
2442 int error_index; 2442 int error_index;
2443 struct ipr_hostrcb_type_21_error *error;
2443 2444
2444 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY) 2445 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2445 return; 2446 return;
@@ -2464,6 +2465,15 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2464 if (!ipr_error_table[error_index].log_hcam) 2465 if (!ipr_error_table[error_index].log_hcam)
2465 return; 2466 return;
2466 2467
2468 if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2469 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2470 error = &hostrcb->hcam.u.error64.u.type_21_error;
2471
2472 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2473 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2474 return;
2475 }
2476
2467 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error); 2477 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2468 2478
2469 /* Set indication we have logged an error */ 2479 /* Set indication we have logged an error */
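The new ipr filter keys on the SCSI sense key carried in the hostrcb: the sense data sits in big-endian 32-bit words, so byte 2 of the fixed-format sense buffer (the sense-key byte) is bits 15:8 of the first word after be32_to_cpu(). A minimal sketch of that extraction (the low-nibble mask is the defensive variant; the hunk compares the whole byte):

#include <linux/types.h>
#include <asm/byteorder.h>
#include <scsi/scsi.h>

/* sense_data[] packs the fixed-format sense buffer into big-endian
 * words; byte 2 of the buffer carries the sense key in its low nibble. */
static bool sense_is_illegal_request(const __be32 *sense_data)
{
	u8 byte2 = (be32_to_cpu(sense_data[0]) & 0x0000ff00) >> 8;

	return (byte2 & 0x0f) == ILLEGAL_REQUEST;
}
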
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 31ed126f7143..d0201ceb4aac 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -130,6 +130,7 @@
130#define IPR_IOASC_HW_DEV_BUS_STATUS 0x04448500 130#define IPR_IOASC_HW_DEV_BUS_STATUS 0x04448500
131#define IPR_IOASC_IOASC_MASK 0xFFFFFF00 131#define IPR_IOASC_IOASC_MASK 0xFFFFFF00
132#define IPR_IOASC_SCSI_STATUS_MASK 0x000000FF 132#define IPR_IOASC_SCSI_STATUS_MASK 0x000000FF
133#define IPR_IOASC_HW_CMD_FAILED 0x046E0000
133#define IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT 0x05240000 134#define IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT 0x05240000
134#define IPR_IOASC_IR_RESOURCE_HANDLE 0x05250000 135#define IPR_IOASC_IR_RESOURCE_HANDLE 0x05250000
135#define IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA 0x05258100 136#define IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA 0x05258100
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index a669f2d11c31..427af0f24b0f 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -726,13 +726,18 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
726 switch(param) { 726 switch(param) {
727 case ISCSI_PARAM_CONN_PORT: 727 case ISCSI_PARAM_CONN_PORT:
728 case ISCSI_PARAM_CONN_ADDRESS: 728 case ISCSI_PARAM_CONN_ADDRESS:
729 case ISCSI_PARAM_LOCAL_PORT:
729 spin_lock_bh(&conn->session->frwd_lock); 730 spin_lock_bh(&conn->session->frwd_lock);
730 if (!tcp_sw_conn || !tcp_sw_conn->sock) { 731 if (!tcp_sw_conn || !tcp_sw_conn->sock) {
731 spin_unlock_bh(&conn->session->frwd_lock); 732 spin_unlock_bh(&conn->session->frwd_lock);
732 return -ENOTCONN; 733 return -ENOTCONN;
733 } 734 }
734 rc = kernel_getpeername(tcp_sw_conn->sock, 735 if (param == ISCSI_PARAM_LOCAL_PORT)
735 (struct sockaddr *)&addr, &len); 736 rc = kernel_getsockname(tcp_sw_conn->sock,
737 (struct sockaddr *)&addr, &len);
738 else
739 rc = kernel_getpeername(tcp_sw_conn->sock,
740 (struct sockaddr *)&addr, &len);
736 spin_unlock_bh(&conn->session->frwd_lock); 741 spin_unlock_bh(&conn->session->frwd_lock);
737 if (rc) 742 if (rc)
738 return rc; 743 return rc;
@@ -895,6 +900,7 @@ static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
895 case ISCSI_PARAM_DATADGST_EN: 900 case ISCSI_PARAM_DATADGST_EN:
896 case ISCSI_PARAM_CONN_ADDRESS: 901 case ISCSI_PARAM_CONN_ADDRESS:
897 case ISCSI_PARAM_CONN_PORT: 902 case ISCSI_PARAM_CONN_PORT:
903 case ISCSI_PARAM_LOCAL_PORT:
898 case ISCSI_PARAM_EXP_STATSN: 904 case ISCSI_PARAM_EXP_STATSN:
899 case ISCSI_PARAM_PERSISTENT_ADDRESS: 905 case ISCSI_PARAM_PERSISTENT_ADDRESS:
900 case ISCSI_PARAM_PERSISTENT_PORT: 906 case ISCSI_PARAM_PERSISTENT_PORT:
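ISCSI_PARAM_LOCAL_PORT reports our own end of the TCP connection, so the code selects kernel_getsockname() (local endpoint) where the existing parameters use kernel_getpeername() (remote endpoint); both take the same arguments in the three-argument form this kernel uses. A small sketch of the distinction (generic helper, not iscsi_tcp code):

#include <linux/types.h>
#include <linux/net.h>
#include <linux/socket.h>

/* Fetch either endpoint of a connected kernel socket into addr.
 * Only the reported end differs between the two calls. */
static int get_endpoint(struct socket *sock, struct sockaddr_storage *addr,
			int *len, bool local)
{
	*len = sizeof(*addr);
	if (local)
		return kernel_getsockname(sock, (struct sockaddr *)addr, len);
	return kernel_getpeername(sock, (struct sockaddr *)addr, len);
}
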
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
index 8d65a51a7598..c11a638f32e6 100644
--- a/drivers/scsi/libfc/fc_libfc.c
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -296,9 +296,9 @@ void fc_fc4_deregister_provider(enum fc_fh_type type, struct fc4_prov *prov)
296 BUG_ON(type >= FC_FC4_PROV_SIZE); 296 BUG_ON(type >= FC_FC4_PROV_SIZE);
297 mutex_lock(&fc_prov_mutex); 297 mutex_lock(&fc_prov_mutex);
298 if (prov->recv) 298 if (prov->recv)
299 rcu_assign_pointer(fc_passive_prov[type], NULL); 299 RCU_INIT_POINTER(fc_passive_prov[type], NULL);
300 else 300 else
301 rcu_assign_pointer(fc_active_prov[type], NULL); 301 RCU_INIT_POINTER(fc_active_prov[type], NULL);
302 mutex_unlock(&fc_prov_mutex); 302 mutex_unlock(&fc_prov_mutex);
303 synchronize_rcu(); 303 synchronize_rcu();
304} 304}
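Swapping rcu_assign_pointer(..., NULL) for RCU_INIT_POINTER(..., NULL) is safe because publishing NULL needs no memory barrier: there is no pointed-to object whose initialization has to be ordered before the pointer becomes visible (and the macro also avoids a sparse warning). The barrier-carrying form is still required when publishing a real object. A generic sketch of the two cases:

#include <linux/rcupdate.h>

struct item { int v; };
static struct item __rcu *slot;

static void publish(struct item *it)
{
	/* Stores initializing *it must be visible before the pointer:
	 * use the barrier-carrying primitive. */
	rcu_assign_pointer(slot, it);
}

static void retract(void)
{
	/* NULL exposes no data, so no ordering is needed. */
	RCU_INIT_POINTER(slot, NULL);
	synchronize_rcu();	/* let readers drain before freeing */
}
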
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 191b59793519..0d8bc6c66650 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -3505,6 +3505,7 @@ int iscsi_conn_get_addr_param(struct sockaddr_storage *addr,
3505 len = sprintf(buf, "%pI6\n", &sin6->sin6_addr); 3505 len = sprintf(buf, "%pI6\n", &sin6->sin6_addr);
3506 break; 3506 break;
3507 case ISCSI_PARAM_CONN_PORT: 3507 case ISCSI_PARAM_CONN_PORT:
3508 case ISCSI_PARAM_LOCAL_PORT:
3508 if (sin) 3509 if (sin)
3509 len = sprintf(buf, "%hu\n", be16_to_cpu(sin->sin_port)); 3510 len = sprintf(buf, "%hu\n", be16_to_cpu(sin->sin_port));
3510 else 3511 else
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 6eed9e76a166..2f9b96826ac0 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3385,7 +3385,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
3385 if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1)) 3385 if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1))
3386 return -EINVAL; 3386 return -EINVAL;
3387 3387
3388 strcpy(bucket_data, buf); 3388 strncpy(bucket_data, buf, LPFC_MAX_DATA_CTRL_LEN);
3389 str_ptr = &bucket_data[0]; 3389 str_ptr = &bucket_data[0];
3390 /* Ignore this token - this is command token */ 3390 /* Ignore this token - this is command token */
3391 token = strsep(&str_ptr, "\t "); 3391 token = strsep(&str_ptr, "\t ");
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 5b5c825d9576..a7bf359aa0c6 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -656,7 +656,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
656 struct lpfc_nodelist *ndlp = rdata->pnode; 656 struct lpfc_nodelist *ndlp = rdata->pnode;
657 uint32_t elscmd; 657 uint32_t elscmd;
658 uint32_t cmdsize; 658 uint32_t cmdsize;
659 uint32_t rspsize;
660 struct lpfc_iocbq *cmdiocbq; 659 struct lpfc_iocbq *cmdiocbq;
661 uint16_t rpi = 0; 660 uint16_t rpi = 0;
662 struct bsg_job_data *dd_data; 661 struct bsg_job_data *dd_data;
@@ -687,7 +686,6 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
687 686
688 elscmd = job->request->rqst_data.r_els.els_code; 687 elscmd = job->request->rqst_data.r_els.els_code;
689 cmdsize = job->request_payload.payload_len; 688 cmdsize = job->request_payload.payload_len;
690 rspsize = job->reply_payload.payload_len;
691 689
692 if (!lpfc_nlp_get(ndlp)) { 690 if (!lpfc_nlp_get(ndlp)) {
693 rc = -ENODEV; 691 rc = -ENODEV;
@@ -2251,7 +2249,6 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
2251 i = 0; 2249 i = 0;
2252 while (phba->link_state != LPFC_LINK_DOWN) { 2250 while (phba->link_state != LPFC_LINK_DOWN) {
2253 if (i++ > timeout) { 2251 if (i++ > timeout) {
2254 rc = -ETIMEDOUT;
2255 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 2252 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2256 "3140 Timeout waiting for link to " 2253 "3140 Timeout waiting for link to "
2257 "diagnostic mode_end, timeout:%d ms\n", 2254 "diagnostic mode_end, timeout:%d ms\n",
@@ -2291,7 +2288,6 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
2291 LPFC_MBOXQ_t *pmboxq; 2288 LPFC_MBOXQ_t *pmboxq;
2292 struct sli4_link_diag *link_diag_test_cmd; 2289 struct sli4_link_diag *link_diag_test_cmd;
2293 uint32_t req_len, alloc_len; 2290 uint32_t req_len, alloc_len;
2294 uint32_t timeout;
2295 struct lpfc_mbx_run_link_diag_test *run_link_diag_test; 2291 struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
2296 union lpfc_sli4_cfg_shdr *shdr; 2292 union lpfc_sli4_cfg_shdr *shdr;
2297 uint32_t shdr_status, shdr_add_status; 2293 uint32_t shdr_status, shdr_add_status;
@@ -2342,7 +2338,6 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
2342 2338
2343 link_diag_test_cmd = (struct sli4_link_diag *) 2339 link_diag_test_cmd = (struct sli4_link_diag *)
2344 job->request->rqst_data.h_vendor.vendor_cmd; 2340 job->request->rqst_data.h_vendor.vendor_cmd;
2345 timeout = link_diag_test_cmd->timeout * 100;
2346 2341
2347 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1); 2342 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2348 2343
@@ -2693,14 +2688,13 @@ lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
2693 INIT_LIST_HEAD(&dmabuf->list); 2688 INIT_LIST_HEAD(&dmabuf->list);
2694 2689
2695 /* now, allocate dma buffer */ 2690 /* now, allocate dma buffer */
2696 dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE, 2691 dmabuf->virt = dma_zalloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2697 &(dmabuf->phys), GFP_KERNEL); 2692 &(dmabuf->phys), GFP_KERNEL);
2698 2693
2699 if (!dmabuf->virt) { 2694 if (!dmabuf->virt) {
2700 kfree(dmabuf); 2695 kfree(dmabuf);
2701 return NULL; 2696 return NULL;
2702 } 2697 }
2703 memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE);
2704 2698
2705 return dmabuf; 2699 return dmabuf;
2706} 2700}
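dma_zalloc_coherent() is simply the zeroing wrapper around dma_alloc_coherent(), so the explicit memset() after a successful allocation becomes redundant, as in this hunk and the matching lpfc_init.c conversions later in the series. A minimal sketch of the pairing (generic device and size):

#include <linux/dma-mapping.h>

/* One call allocates and zeroes a coherent buffer; releasing it is
 * unchanged from the non-zeroing variant. */
static void *alloc_zeroed(struct device *dev, size_t size, dma_addr_t *phys)
{
	return dma_zalloc_coherent(dev, size, phys, GFP_KERNEL);
}

static void free_buf(struct device *dev, size_t size, void *virt,
		     dma_addr_t phys)
{
	dma_free_coherent(dev, size, virt, phys);
}
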
@@ -2828,8 +2822,10 @@ diag_cmd_data_alloc(struct lpfc_hba *phba,
2828 size -= cnt; 2822 size -= cnt;
2829 } 2823 }
2830 2824
2831 mlist->flag = i; 2825 if (mlist) {
2832 return mlist; 2826 mlist->flag = i;
2827 return mlist;
2828 }
2833out: 2829out:
2834 diag_cmd_data_free(phba, mlist); 2830 diag_cmd_data_free(phba, mlist);
2835 return NULL; 2831 return NULL;
@@ -3344,7 +3340,7 @@ job_error:
3344 * will wake up thread waiting on the wait queue pointed by context1 3340 * will wake up thread waiting on the wait queue pointed by context1
3345 * of the mailbox. 3341 * of the mailbox.
3346 **/ 3342 **/
3347void 3343static void
3348lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) 3344lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3349{ 3345{
3350 struct bsg_job_data *dd_data; 3346 struct bsg_job_data *dd_data;
@@ -4593,7 +4589,7 @@ sli_cfg_ext_error:
4593 * being reset) and complete the job, otherwise issue the mailbox command and 4589 * being reset) and complete the job, otherwise issue the mailbox command and
4594 * let our completion handler finish the command. 4590 * let our completion handler finish the command.
4595 **/ 4591 **/
4596static uint32_t 4592static int
4597lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job, 4593lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4598 struct lpfc_vport *vport) 4594 struct lpfc_vport *vport)
4599{ 4595{
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index db5604f01a1a..00665a5d92fd 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -451,7 +451,6 @@ int lpfc_send_rrq(struct lpfc_hba *, struct lpfc_node_rrq *);
451int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *, 451int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *,
452 uint16_t, uint16_t, uint16_t); 452 uint16_t, uint16_t, uint16_t);
453uint16_t lpfc_sli4_xri_inrange(struct lpfc_hba *, uint16_t); 453uint16_t lpfc_sli4_xri_inrange(struct lpfc_hba *, uint16_t);
454void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
455void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *); 454void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
456struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t, 455struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
457 uint32_t); 456 uint32_t);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index da61d8dc0449..61a32cd23f79 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1439,7 +1439,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
1439 /* #2 HBA attribute entry */ 1439 /* #2 HBA attribute entry */
1440 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); 1440 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
1441 ae->ad.bits.AttrType = be16_to_cpu(MANUFACTURER); 1441 ae->ad.bits.AttrType = be16_to_cpu(MANUFACTURER);
1442 strcpy(ae->un.Manufacturer, "Emulex Corporation"); 1442 strncpy(ae->un.Manufacturer, "Emulex Corporation", 64);
1443 len = strlen(ae->un.Manufacturer); 1443 len = strlen(ae->un.Manufacturer);
1444 len += (len & 3) ? (4 - (len & 3)) : 4; 1444 len += (len & 3) ? (4 - (len & 3)) : 4;
1445 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len); 1445 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
@@ -1449,7 +1449,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
1449 /* #3 HBA attribute entry */ 1449 /* #3 HBA attribute entry */
1450 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); 1450 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
1451 ae->ad.bits.AttrType = be16_to_cpu(SERIAL_NUMBER); 1451 ae->ad.bits.AttrType = be16_to_cpu(SERIAL_NUMBER);
1452 strcpy(ae->un.SerialNumber, phba->SerialNumber); 1452 strncpy(ae->un.SerialNumber, phba->SerialNumber, 64);
1453 len = strlen(ae->un.SerialNumber); 1453 len = strlen(ae->un.SerialNumber);
1454 len += (len & 3) ? (4 - (len & 3)) : 4; 1454 len += (len & 3) ? (4 - (len & 3)) : 4;
1455 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len); 1455 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
@@ -1459,7 +1459,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
1459 /* #4 HBA attribute entry */ 1459 /* #4 HBA attribute entry */
1460 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); 1460 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
1461 ae->ad.bits.AttrType = be16_to_cpu(MODEL); 1461 ae->ad.bits.AttrType = be16_to_cpu(MODEL);
1462 strcpy(ae->un.Model, phba->ModelName); 1462 strncpy(ae->un.Model, phba->ModelName, 256);
1463 len = strlen(ae->un.Model); 1463 len = strlen(ae->un.Model);
1464 len += (len & 3) ? (4 - (len & 3)) : 4; 1464 len += (len & 3) ? (4 - (len & 3)) : 4;
1465 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len); 1465 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
@@ -1469,7 +1469,7 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
1469 /* #5 HBA attribute entry */ 1469 /* #5 HBA attribute entry */
1470 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); 1470 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
1471 ae->ad.bits.AttrType = be16_to_cpu(MODEL_DESCRIPTION); 1471 ae->ad.bits.AttrType = be16_to_cpu(MODEL_DESCRIPTION);
1472 strcpy(ae->un.ModelDescription, phba->ModelDesc); 1472 strncpy(ae->un.ModelDescription, phba->ModelDesc, 256);
1473 len = strlen(ae->un.ModelDescription); 1473 len = strlen(ae->un.ModelDescription);
1474 len += (len & 3) ? (4 - (len & 3)) : 4; 1474 len += (len & 3) ? (4 - (len & 3)) : 4;
1475 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len); 1475 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
@@ -1500,7 +1500,8 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
1500 /* #7 HBA attribute entry */ 1500 /* #7 HBA attribute entry */
1501 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); 1501 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
1502 ae->ad.bits.AttrType = be16_to_cpu(DRIVER_VERSION); 1502 ae->ad.bits.AttrType = be16_to_cpu(DRIVER_VERSION);
1503 strcpy(ae->un.DriverVersion, lpfc_release_version); 1503 strncpy(ae->un.DriverVersion,
1504 lpfc_release_version, 256);
1504 len = strlen(ae->un.DriverVersion); 1505 len = strlen(ae->un.DriverVersion);
1505 len += (len & 3) ? (4 - (len & 3)) : 4; 1506 len += (len & 3) ? (4 - (len & 3)) : 4;
1506 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len); 1507 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
@@ -1510,7 +1511,8 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
1510 /* #8 HBA attribute entry */ 1511 /* #8 HBA attribute entry */
1511 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size); 1512 ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
1512 ae->ad.bits.AttrType = be16_to_cpu(OPTION_ROM_VERSION); 1513 ae->ad.bits.AttrType = be16_to_cpu(OPTION_ROM_VERSION);
1513 strcpy(ae->un.OptionROMVersion, phba->OptionROMVersion); 1514 strncpy(ae->un.OptionROMVersion,
1515 phba->OptionROMVersion, 256);
1514 len = strlen(ae->un.OptionROMVersion); 1516 len = strlen(ae->un.OptionROMVersion);
1515 len += (len & 3) ? (4 - (len & 3)) : 4; 1517 len += (len & 3) ? (4 - (len & 3)) : 4;
1516 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len); 1518 ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
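Note that strncpy(), as used in these conversions, does not NUL-terminate when the source fills the buffer; the call sites here stay safe because the destination fields are sized above any source the driver produces (or, as in lpfc_attr.c, an explicit length check rejects long input first). Where that guarantee is absent, the usual pattern reserves the last byte (a generic sketch with a hypothetical field size):

#include <linux/string.h>

#define FIELD_LEN 64	/* hypothetical destination size */

/* Bounded copy that always leaves a terminator, unlike bare strncpy()
 * when strlen(src) >= sizeof(dst). */
static void copy_field(char dst[FIELD_LEN], const char *src)
{
	strncpy(dst, src, FIELD_LEN - 1);
	dst[FIELD_LEN - 1] = '\0';
}
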
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index b0aedce3f54b..786a2aff7b59 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -269,7 +269,7 @@ static int
269lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size) 269lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
270{ 270{
271 int len = 0; 271 int len = 0;
272 int cnt, i, j, found, posted, low; 272 int i, j, found, posted, low;
273 uint32_t phys, raw_index, getidx; 273 uint32_t phys, raw_index, getidx;
274 struct lpfc_hbq_init *hip; 274 struct lpfc_hbq_init *hip;
275 struct hbq_s *hbqs; 275 struct hbq_s *hbqs;
@@ -279,7 +279,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
279 279
280 if (phba->sli_rev != 3) 280 if (phba->sli_rev != 3)
281 return 0; 281 return 0;
282 cnt = LPFC_HBQINFO_SIZE; 282
283 spin_lock_irq(&phba->hbalock); 283 spin_lock_irq(&phba->hbalock);
284 284
285 /* toggle between multiple hbqs, if any */ 285 /* toggle between multiple hbqs, if any */
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 1a6fe524940d..6977027979be 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -78,7 +78,8 @@ struct lpfc_nodelist {
78 struct list_head nlp_listp; 78 struct list_head nlp_listp;
79 struct lpfc_name nlp_portname; 79 struct lpfc_name nlp_portname;
80 struct lpfc_name nlp_nodename; 80 struct lpfc_name nlp_nodename;
81 uint32_t nlp_flag; /* entry flags */ 81 uint32_t nlp_flag; /* entry flags */
82 uint32_t nlp_add_flag; /* additional flags */
82 uint32_t nlp_DID; /* FC D_ID of entry */ 83 uint32_t nlp_DID; /* FC D_ID of entry */
83 uint32_t nlp_last_elscmd; /* Last ELS cmd sent */ 84 uint32_t nlp_last_elscmd; /* Last ELS cmd sent */
84 uint16_t nlp_type; 85 uint16_t nlp_type;
@@ -157,6 +158,9 @@ struct lpfc_node_rrq {
157#define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */ 158#define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */
158#define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */ 159#define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */
159 160
161/* Defines for nlp_add_flag (uint32) */
162#define NLP_IN_DEV_LOSS 0x00000001 /* Dev Loss processing in progress */
163
160/* ndlp usage management macros */ 164/* ndlp usage management macros */
161#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \ 165#define NLP_CHK_NODE_ACT(ndlp) (((ndlp)->nlp_usg_map \
162 & NLP_USG_NODE_ACT_BIT) \ 166 & NLP_USG_NODE_ACT_BIT) \
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 7a5d81a65be8..4c25485aa934 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1084,7 +1084,8 @@ stop_rr_fcf_flogi:
1084 * accessing it. 1084 * accessing it.
1085 */ 1085 */
1086 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 1086 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
1087 1087 if (!prsp)
1088 goto out;
1088 sp = prsp->virt + sizeof(uint32_t); 1089 sp = prsp->virt + sizeof(uint32_t);
1089 1090
1090 /* FLOGI completes successfully */ 1091 /* FLOGI completes successfully */
@@ -1828,7 +1829,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1828 IOCB_t *irsp; 1829 IOCB_t *irsp;
1829 struct lpfc_nodelist *ndlp; 1830 struct lpfc_nodelist *ndlp;
1830 struct lpfc_dmabuf *prsp; 1831 struct lpfc_dmabuf *prsp;
1831 int disc, rc, did, type; 1832 int disc, rc;
1832 1833
1833 /* we pass cmdiocb to state machine which needs rspiocb as well */ 1834 /* we pass cmdiocb to state machine which needs rspiocb as well */
1834 cmdiocb->context_un.rsp_iocb = rspiocb; 1835 cmdiocb->context_un.rsp_iocb = rspiocb;
@@ -1873,10 +1874,6 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1873 goto out; 1874 goto out;
1874 } 1875 }
1875 1876
1876 /* ndlp could be freed in DSM, save these values now */
1877 type = ndlp->nlp_type;
1878 did = ndlp->nlp_DID;
1879
1880 if (irsp->ulpStatus) { 1877 if (irsp->ulpStatus) {
1881 /* Check for retry */ 1878 /* Check for retry */
1882 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { 1879 if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
@@ -2269,8 +2266,6 @@ lpfc_adisc_done(struct lpfc_vport *vport)
2269void 2266void
2270lpfc_more_adisc(struct lpfc_vport *vport) 2267lpfc_more_adisc(struct lpfc_vport *vport)
2271{ 2268{
2272 int sentadisc;
2273
2274 if (vport->num_disc_nodes) 2269 if (vport->num_disc_nodes)
2275 vport->num_disc_nodes--; 2270 vport->num_disc_nodes--;
2276 /* Continue discovery with <num_disc_nodes> ADISCs to go */ 2271 /* Continue discovery with <num_disc_nodes> ADISCs to go */
@@ -2283,7 +2278,7 @@ lpfc_more_adisc(struct lpfc_vport *vport)
2283 if (vport->fc_flag & FC_NLP_MORE) { 2278 if (vport->fc_flag & FC_NLP_MORE) {
2284 lpfc_set_disctmo(vport); 2279 lpfc_set_disctmo(vport);
2285 /* go thru NPR nodes and issue any remaining ELS ADISCs */ 2280 /* go thru NPR nodes and issue any remaining ELS ADISCs */
2286 sentadisc = lpfc_els_disc_adisc(vport); 2281 lpfc_els_disc_adisc(vport);
2287 } 2282 }
2288 if (!vport->num_disc_nodes) 2283 if (!vport->num_disc_nodes)
2289 lpfc_adisc_done(vport); 2284 lpfc_adisc_done(vport);
@@ -3027,10 +3022,9 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
3027{ 3022{
3028 struct lpfc_vport *vport = ndlp->vport; 3023 struct lpfc_vport *vport = ndlp->vport;
3029 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3024 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3030 uint32_t cmd, did, retry; 3025 uint32_t cmd, retry;
3031 3026
3032 spin_lock_irq(shost->host_lock); 3027 spin_lock_irq(shost->host_lock);
3033 did = ndlp->nlp_DID;
3034 cmd = ndlp->nlp_last_elscmd; 3028 cmd = ndlp->nlp_last_elscmd;
3035 ndlp->nlp_last_elscmd = 0; 3029 ndlp->nlp_last_elscmd = 0;
3036 3030
@@ -5288,10 +5282,9 @@ lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5288 IOCB_t *icmd; 5282 IOCB_t *icmd;
5289 RNID *rn; 5283 RNID *rn;
5290 struct ls_rjt stat; 5284 struct ls_rjt stat;
5291 uint32_t cmd, did; 5285 uint32_t cmd;
5292 5286
5293 icmd = &cmdiocb->iocb; 5287 icmd = &cmdiocb->iocb;
5294 did = icmd->un.elsreq64.remoteID;
5295 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; 5288 pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5296 lp = (uint32_t *) pcmd->virt; 5289 lp = (uint32_t *) pcmd->virt;
5297 5290
@@ -6693,6 +6686,13 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
6693 6686
6694 phba->fc_stat.elsRcvFrame++; 6687 phba->fc_stat.elsRcvFrame++;
6695 6688
6689 /*
6690 * Do not process any unsolicited ELS commands
6691 * if the ndlp is in DEV_LOSS
6692 */
6693 if (ndlp->nlp_add_flag & NLP_IN_DEV_LOSS)
6694 goto dropit;
6695
6696 elsiocb->context1 = lpfc_nlp_get(ndlp); 6696 elsiocb->context1 = lpfc_nlp_get(ndlp);
6697 elsiocb->vport = vport; 6697 elsiocb->vport = vport;
6698 6698
@@ -7514,6 +7514,8 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7514 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; 7514 vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
7515 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE); 7515 lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
7516 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list); 7516 prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
7517 if (!prsp)
7518 goto out;
7517 sp = prsp->virt + sizeof(uint32_t); 7519 sp = prsp->virt + sizeof(uint32_t);
7518 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp); 7520 fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
7519 memcpy(&vport->fabric_portname, &sp->portName, 7521 memcpy(&vport->fabric_portname, &sp->portName,
@@ -8187,9 +8189,11 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
8187 list_del(&sglq_entry->list); 8189 list_del(&sglq_entry->list);
8188 ndlp = sglq_entry->ndlp; 8190 ndlp = sglq_entry->ndlp;
8189 sglq_entry->ndlp = NULL; 8191 sglq_entry->ndlp = NULL;
8192 spin_lock(&pring->ring_lock);
8190 list_add_tail(&sglq_entry->list, 8193 list_add_tail(&sglq_entry->list,
8191 &phba->sli4_hba.lpfc_sgl_list); 8194 &phba->sli4_hba.lpfc_sgl_list);
8192 sglq_entry->state = SGL_FREED; 8195 sglq_entry->state = SGL_FREED;
8196 spin_unlock(&pring->ring_lock);
8193 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 8197 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
8194 spin_unlock_irqrestore(&phba->hbalock, iflag); 8198 spin_unlock_irqrestore(&phba->hbalock, iflag);
8195 lpfc_set_rrq_active(phba, ndlp, 8199 lpfc_set_rrq_active(phba, ndlp,
@@ -8208,12 +8212,15 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
8208 spin_unlock_irqrestore(&phba->hbalock, iflag); 8212 spin_unlock_irqrestore(&phba->hbalock, iflag);
8209 return; 8213 return;
8210 } 8214 }
8215 spin_lock(&pring->ring_lock);
8211 sglq_entry = __lpfc_get_active_sglq(phba, lxri); 8216 sglq_entry = __lpfc_get_active_sglq(phba, lxri);
8212 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) { 8217 if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
8218 spin_unlock(&pring->ring_lock);
8213 spin_unlock_irqrestore(&phba->hbalock, iflag); 8219 spin_unlock_irqrestore(&phba->hbalock, iflag);
8214 return; 8220 return;
8215 } 8221 }
8216 sglq_entry->state = SGL_XRI_ABORTED; 8222 sglq_entry->state = SGL_XRI_ABORTED;
8223 spin_unlock(&pring->ring_lock);
8217 spin_unlock_irqrestore(&phba->hbalock, iflag); 8224 spin_unlock_irqrestore(&phba->hbalock, iflag);
8218 return; 8225 return;
8219} 8226}
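The sgl-list hunks above (and the matching ones in lpfc_init.c below) all take pring->ring_lock while hbalock is already held, fixing one nesting order for every path that touches lpfc_sgl_list; a path acquiring them in the opposite order would be an ABBA deadlock waiting to happen. A schematic of the discipline with generic locks:

#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(outer_lock);	/* stands in for hbalock */
static DEFINE_SPINLOCK(inner_lock);	/* stands in for ring_lock */
static LIST_HEAD(shared_list);

/* Every writer takes outer before inner, so no two paths can wait on
 * each other's lock. */
static void move_all(struct list_head *dst)
{
	unsigned long flags;

	spin_lock_irqsave(&outer_lock, flags);
	spin_lock(&inner_lock);
	list_splice_init(&shared_list, dst);
	spin_unlock(&inner_lock);
	spin_unlock_irqrestore(&outer_lock, flags);
}
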
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2a17e31265b8..5452f1f4220e 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -150,9 +150,30 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
150 150
151 /* If the WWPN of the rport and ndlp don't match, ignore it */ 151 /* If the WWPN of the rport and ndlp don't match, ignore it */
152 if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) { 152 if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) {
153 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
154 "6789 rport name %lx != node port name %lx",
155 (unsigned long)rport->port_name,
156 (unsigned long)wwn_to_u64(
157 ndlp->nlp_portname.u.wwn));
158 put_node = rdata->pnode != NULL;
159 put_rport = ndlp->rport != NULL;
160 rdata->pnode = NULL;
161 ndlp->rport = NULL;
162 if (put_node)
163 lpfc_nlp_put(ndlp);
153 put_device(&rport->dev); 164 put_device(&rport->dev);
154 return; 165 return;
155 } 166 }
167
168 put_node = rdata->pnode != NULL;
169 put_rport = ndlp->rport != NULL;
170 rdata->pnode = NULL;
171 ndlp->rport = NULL;
172 if (put_node)
173 lpfc_nlp_put(ndlp);
174 if (put_rport)
175 put_device(&rport->dev);
176 return;
156 } 177 }
157 178
158 evtp = &ndlp->dev_loss_evt; 179 evtp = &ndlp->dev_loss_evt;
@@ -161,6 +182,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
161 return; 182 return;
162 183
163 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 184 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
185 ndlp->nlp_add_flag |= NLP_IN_DEV_LOSS;
164 186
165 spin_lock_irq(&phba->hbalock); 187 spin_lock_irq(&phba->hbalock);
166 /* We need to hold the node by incrementing the reference 188 /* We need to hold the node by incrementing the reference
@@ -201,8 +223,10 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
201 223
202 rport = ndlp->rport; 224 rport = ndlp->rport;
203 225
204 if (!rport) 226 if (!rport) {
227 ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
205 return fcf_inuse; 228 return fcf_inuse;
229 }
206 230
207 rdata = rport->dd_data; 231 rdata = rport->dd_data;
208 name = (uint8_t *) &ndlp->nlp_portname; 232 name = (uint8_t *) &ndlp->nlp_portname;
@@ -235,6 +259,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
235 put_rport = ndlp->rport != NULL; 259 put_rport = ndlp->rport != NULL;
236 rdata->pnode = NULL; 260 rdata->pnode = NULL;
237 ndlp->rport = NULL; 261 ndlp->rport = NULL;
262 ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
238 if (put_node) 263 if (put_node)
239 lpfc_nlp_put(ndlp); 264 lpfc_nlp_put(ndlp);
240 if (put_rport) 265 if (put_rport)
@@ -250,6 +275,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
250 *name, *(name+1), *(name+2), *(name+3), 275 *name, *(name+1), *(name+2), *(name+3),
251 *(name+4), *(name+5), *(name+6), *(name+7), 276 *(name+4), *(name+5), *(name+6), *(name+7),
252 ndlp->nlp_DID); 277 ndlp->nlp_DID);
278 ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
253 return fcf_inuse; 279 return fcf_inuse;
254 } 280 }
255 281
@@ -259,6 +285,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
259 put_rport = ndlp->rport != NULL; 285 put_rport = ndlp->rport != NULL;
260 rdata->pnode = NULL; 286 rdata->pnode = NULL;
261 ndlp->rport = NULL; 287 ndlp->rport = NULL;
288 ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
262 if (put_node) 289 if (put_node)
263 lpfc_nlp_put(ndlp); 290 lpfc_nlp_put(ndlp);
264 if (put_rport) 291 if (put_rport)
@@ -269,6 +296,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
269 if (ndlp->nlp_sid != NLP_NO_SID) { 296 if (ndlp->nlp_sid != NLP_NO_SID) {
270 warn_on = 1; 297 warn_on = 1;
271 /* flush the target */ 298 /* flush the target */
299 ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
272 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], 300 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
273 ndlp->nlp_sid, 0, LPFC_CTX_TGT); 301 ndlp->nlp_sid, 0, LPFC_CTX_TGT);
274 } 302 }
@@ -297,6 +325,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
297 put_rport = ndlp->rport != NULL; 325 put_rport = ndlp->rport != NULL;
298 rdata->pnode = NULL; 326 rdata->pnode = NULL;
299 ndlp->rport = NULL; 327 ndlp->rport = NULL;
328 ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
300 if (put_node) 329 if (put_node)
301 lpfc_nlp_put(ndlp); 330 lpfc_nlp_put(ndlp);
302 if (put_rport) 331 if (put_rport)
@@ -995,7 +1024,6 @@ lpfc_linkup(struct lpfc_hba *phba)
995 struct lpfc_vport **vports; 1024 struct lpfc_vport **vports;
996 int i; 1025 int i;
997 1026
998 lpfc_cleanup_wt_rrqs(phba);
999 phba->link_state = LPFC_LINK_UP; 1027 phba->link_state = LPFC_LINK_UP;
1000 1028
1001 /* Unblock fabric iocbs if they are blocked */ 1029 /* Unblock fabric iocbs if they are blocked */
@@ -2042,7 +2070,8 @@ lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
2042 * returns: 2070 * returns:
2043 * 0=success 1=failure 2071 * 0=success 1=failure
2044 **/ 2072 **/
2045int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, uint16_t fcf_index, 2073static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba,
2074 uint16_t fcf_index,
2046 struct fcf_record *new_fcf_record) 2075 struct fcf_record *new_fcf_record)
2047{ 2076{
2048 uint16_t current_fcf_pri; 2077 uint16_t current_fcf_pri;
@@ -2146,7 +2175,6 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2146 uint16_t fcf_index, next_fcf_index; 2175 uint16_t fcf_index, next_fcf_index;
2147 struct lpfc_fcf_rec *fcf_rec = NULL; 2176 struct lpfc_fcf_rec *fcf_rec = NULL;
2148 uint16_t vlan_id; 2177 uint16_t vlan_id;
2149 uint32_t seed;
2150 bool select_new_fcf; 2178 bool select_new_fcf;
2151 int rc; 2179 int rc;
2152 2180
@@ -2383,9 +2411,6 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2383 phba->fcf.fcf_flag |= FCF_AVAILABLE; 2411 phba->fcf.fcf_flag |= FCF_AVAILABLE;
2384 /* Setup initial running random FCF selection count */ 2412 /* Setup initial running random FCF selection count */
2385 phba->fcf.eligible_fcf_cnt = 1; 2413 phba->fcf.eligible_fcf_cnt = 1;
2386 /* Seeding the random number generator for random selection */
2387 seed = (uint32_t)(0xFFFFFFFF & jiffies);
2388 prandom_seed(seed);
2389 } 2414 }
2390 spin_unlock_irq(&phba->hbalock); 2415 spin_unlock_irq(&phba->hbalock);
2391 goto read_next_fcf; 2416 goto read_next_fcf;
@@ -2678,7 +2703,7 @@ out:
2678 * 2703 *
2679 * This function handles completion of init vfi mailbox command. 2704 * This function handles completion of init vfi mailbox command.
2680 */ 2705 */
2681void 2706static void
2682lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 2707lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2683{ 2708{
2684 struct lpfc_vport *vport = mboxq->vport; 2709 struct lpfc_vport *vport = mboxq->vport;
@@ -4438,7 +4463,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
4438 * This function will issue an ELS LOGO command after completing 4463 * This function will issue an ELS LOGO command after completing
4439 * the UNREG_RPI. 4464 * the UNREG_RPI.
4440 **/ 4465 **/
4441void 4466static void
4442lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 4467lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4443{ 4468{
4444 struct lpfc_vport *vport = pmb->vport; 4469 struct lpfc_vport *vport = pmb->vport;
@@ -5006,7 +5031,6 @@ lpfc_disc_start(struct lpfc_vport *vport)
5006 struct lpfc_hba *phba = vport->phba; 5031 struct lpfc_hba *phba = vport->phba;
5007 uint32_t num_sent; 5032 uint32_t num_sent;
5008 uint32_t clear_la_pending; 5033 uint32_t clear_la_pending;
5009 int did_changed;
5010 5034
5011 if (!lpfc_is_link_up(phba)) { 5035 if (!lpfc_is_link_up(phba)) {
5012 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, 5036 lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
@@ -5025,11 +5049,6 @@ lpfc_disc_start(struct lpfc_vport *vport)
5025 5049
5026 lpfc_set_disctmo(vport); 5050 lpfc_set_disctmo(vport);
5027 5051
5028 if (vport->fc_prevDID == vport->fc_myDID)
5029 did_changed = 0;
5030 else
5031 did_changed = 1;
5032
5033 vport->fc_prevDID = vport->fc_myDID; 5052 vport->fc_prevDID = vport->fc_myDID;
5034 vport->num_disc_nodes = 0; 5053 vport->num_disc_nodes = 0;
5035 5054
@@ -6318,7 +6337,7 @@ lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
6318 uint8_t *buff, 6337 uint8_t *buff,
6319 uint32_t size) 6338 uint32_t size)
6320{ 6339{
6321 uint32_t offset = 0, rec_length; 6340 uint32_t offset = 0;
6322 uint8_t *rec_ptr; 6341 uint8_t *rec_ptr;
6323 6342
6324 /* 6343 /*
@@ -6345,8 +6364,6 @@ lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
6345 } 6364 }
6346 offset += 4; 6365 offset += 4;
6347 6366
6348 rec_length = buff[offset + 1];
6349
6350 /* Read FCoE param record */ 6367 /* Read FCoE param record */
6351 rec_ptr = lpfc_get_rec_conf23(&buff[offset], 6368 rec_ptr = lpfc_get_rec_conf23(&buff[offset],
6352 size - offset, FCOE_PARAM_TYPE); 6369 size - offset, FCOE_PARAM_TYPE);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index a5769a9960ac..0b2c53af85c7 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -306,10 +306,10 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
306 dist = dist_char[prg->dist]; 306 dist = dist_char[prg->dist];
307 307
308 if ((prg->dist == 3) && (prg->num == 0)) 308 if ((prg->dist == 3) && (prg->num == 0))
309 sprintf(phba->OptionROMVersion, "%d.%d%d", 309 snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
310 prg->ver, prg->rev, prg->lev); 310 prg->ver, prg->rev, prg->lev);
311 else 311 else
312 sprintf(phba->OptionROMVersion, "%d.%d%d%c%d", 312 snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
313 prg->ver, prg->rev, prg->lev, 313 prg->ver, prg->rev, prg->lev,
314 dist, prg->num); 314 dist, prg->num);
315 mempool_free(pmboxq, phba->mbox_mem_pool); 315 mempool_free(pmboxq, phba->mbox_mem_pool);
@@ -649,7 +649,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
649 * 0 - success 649 * 0 - success
650 * Any other value - error 650 * Any other value - error
651 **/ 651 **/
652int 652static int
653lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag) 653lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
654{ 654{
655 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag); 655 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
@@ -750,7 +750,7 @@ lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
750 * 0 - success 750 * 0 - success
751 * Any other value - error 751 * Any other value - error
752 **/ 752 **/
753int 753static int
754lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag) 754lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
755{ 755{
756 LPFC_MBOXQ_t *pmb; 756 LPFC_MBOXQ_t *pmb;
@@ -988,9 +988,12 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
988 LIST_HEAD(aborts); 988 LIST_HEAD(aborts);
989 unsigned long iflag = 0; 989 unsigned long iflag = 0;
990 struct lpfc_sglq *sglq_entry = NULL; 990 struct lpfc_sglq *sglq_entry = NULL;
991 struct lpfc_sli *psli = &phba->sli;
992 struct lpfc_sli_ring *pring;
991 993
992 lpfc_hba_free_post_buf(phba); 994 lpfc_hba_free_post_buf(phba);
993 lpfc_hba_clean_txcmplq(phba); 995 lpfc_hba_clean_txcmplq(phba);
996 pring = &psli->ring[LPFC_ELS_RING];
994 997
995 /* At this point in time the HBA is either reset or DOA. Either 998 /* At this point in time the HBA is either reset or DOA. Either
996 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be 999 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
@@ -1008,8 +1011,10 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
1008 &phba->sli4_hba.lpfc_abts_els_sgl_list, list) 1011 &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
1009 sglq_entry->state = SGL_FREED; 1012 sglq_entry->state = SGL_FREED;
1010 1013
1014 spin_lock(&pring->ring_lock);
1011 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list, 1015 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
1012 &phba->sli4_hba.lpfc_sgl_list); 1016 &phba->sli4_hba.lpfc_sgl_list);
1017 spin_unlock(&pring->ring_lock);
1013 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock); 1018 spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
1014 /* abts_scsi_buf_list_lock required because worker thread uses this 1019 /* abts_scsi_buf_list_lock required because worker thread uses this
1015 * list. 1020 * list.
@@ -3047,6 +3052,7 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
3047 LIST_HEAD(els_sgl_list); 3052 LIST_HEAD(els_sgl_list);
3048 LIST_HEAD(scsi_sgl_list); 3053 LIST_HEAD(scsi_sgl_list);
3049 int rc; 3054 int rc;
3055 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3050 3056
3051 /* 3057 /*
3052 * update on pci function's els xri-sgl list 3058 * update on pci function's els xri-sgl list
@@ -3087,7 +3093,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
3087 list_add_tail(&sglq_entry->list, &els_sgl_list); 3093 list_add_tail(&sglq_entry->list, &els_sgl_list);
3088 } 3094 }
3089 spin_lock_irq(&phba->hbalock); 3095 spin_lock_irq(&phba->hbalock);
3096 spin_lock(&pring->ring_lock);
3090 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); 3097 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
3098 spin_unlock(&pring->ring_lock);
3091 spin_unlock_irq(&phba->hbalock); 3099 spin_unlock_irq(&phba->hbalock);
3092 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) { 3100 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
3093 /* els xri-sgl shrunk */ 3101 /* els xri-sgl shrunk */
@@ -3097,7 +3105,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
3097 "%d to %d\n", phba->sli4_hba.els_xri_cnt, 3105 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
3098 els_xri_cnt); 3106 els_xri_cnt);
3099 spin_lock_irq(&phba->hbalock); 3107 spin_lock_irq(&phba->hbalock);
3108 spin_lock(&pring->ring_lock);
3100 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list); 3109 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list);
3110 spin_unlock(&pring->ring_lock);
3101 spin_unlock_irq(&phba->hbalock); 3111 spin_unlock_irq(&phba->hbalock);
3102 /* release extra els sgls from list */ 3112 /* release extra els sgls from list */
3103 for (i = 0; i < xri_cnt; i++) { 3113 for (i = 0; i < xri_cnt; i++) {
@@ -3110,7 +3120,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
3110 } 3120 }
3111 } 3121 }
3112 spin_lock_irq(&phba->hbalock); 3122 spin_lock_irq(&phba->hbalock);
3123 spin_lock(&pring->ring_lock);
3113 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list); 3124 list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
3125 spin_unlock(&pring->ring_lock);
3114 spin_unlock_irq(&phba->hbalock); 3126 spin_unlock_irq(&phba->hbalock);
3115 } else 3127 } else
3116 lpfc_printf_log(phba, KERN_INFO, LOG_SLI, 3128 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -3165,9 +3177,11 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
3165 for (i = 0; i < scsi_xri_cnt; i++) { 3177 for (i = 0; i < scsi_xri_cnt; i++) {
3166 list_remove_head(&scsi_sgl_list, psb, 3178 list_remove_head(&scsi_sgl_list, psb,
3167 struct lpfc_scsi_buf, list); 3179 struct lpfc_scsi_buf, list);
3168 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, psb->data, 3180 if (psb) {
3169 psb->dma_handle); 3181 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
3170 kfree(psb); 3182 psb->data, psb->dma_handle);
3183 kfree(psb);
3184 }
3171 } 3185 }
3172 spin_lock_irq(&phba->scsi_buf_list_get_lock); 3186 spin_lock_irq(&phba->scsi_buf_list_get_lock);
3173 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt; 3187 phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
@@ -3550,7 +3564,7 @@ lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
3550 * list, and then the worker thread shall be woken up for processing from the 3564 * list, and then the worker thread shall be woken up for processing from the
3551 * worker thread context. 3565 * worker thread context.
3552 **/ 3566 **/
3553void 3567static void
3554lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr) 3568lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
3555{ 3569{
3556 struct lpfc_hba *phba = (struct lpfc_hba *)ptr; 3570 struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
@@ -5680,10 +5694,13 @@ static void
5680lpfc_free_els_sgl_list(struct lpfc_hba *phba) 5694lpfc_free_els_sgl_list(struct lpfc_hba *phba)
5681{ 5695{
5682 LIST_HEAD(sglq_list); 5696 LIST_HEAD(sglq_list);
5697 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
5683 5698
5684 /* Retrieve all els sgls from driver list */ 5699 /* Retrieve all els sgls from driver list */
5685 spin_lock_irq(&phba->hbalock); 5700 spin_lock_irq(&phba->hbalock);
5701 spin_lock(&pring->ring_lock);
5686 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list); 5702 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
5703 spin_unlock(&pring->ring_lock);
5687 spin_unlock_irq(&phba->hbalock); 5704 spin_unlock_irq(&phba->hbalock);
5688 5705
5689 /* Now free the sgl list */ 5706 /* Now free the sgl list */
@@ -5848,16 +5865,14 @@ lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
5848 if (!dmabuf) 5865 if (!dmabuf)
5849 return NULL; 5866 return NULL;
5850 5867
5851 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 5868 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
5852 LPFC_HDR_TEMPLATE_SIZE, 5869 LPFC_HDR_TEMPLATE_SIZE,
5853 &dmabuf->phys, 5870 &dmabuf->phys, GFP_KERNEL);
5854 GFP_KERNEL);
5855 if (!dmabuf->virt) { 5871 if (!dmabuf->virt) {
5856 rpi_hdr = NULL; 5872 rpi_hdr = NULL;
5857 goto err_free_dmabuf; 5873 goto err_free_dmabuf;
5858 } 5874 }
5859 5875
5860 memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
5861 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) { 5876 if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
5862 rpi_hdr = NULL; 5877 rpi_hdr = NULL;
5863 goto err_free_coherent; 5878 goto err_free_coherent;
@@ -6246,14 +6261,11 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
6246 } 6261 }
6247 6262
6248 /* Allocate memory for SLI-2 structures */ 6263 /* Allocate memory for SLI-2 structures */
6249 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, 6264 phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
6250 SLI2_SLIM_SIZE, 6265 &phba->slim2p.phys, GFP_KERNEL);
6251 &phba->slim2p.phys,
6252 GFP_KERNEL);
6253 if (!phba->slim2p.virt) 6266 if (!phba->slim2p.virt)
6254 goto out_iounmap; 6267 goto out_iounmap;
6255 6268
6256 memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
6257 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx); 6269 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
6258 phba->mbox_ext = (phba->slim2p.virt + 6270 phba->mbox_ext = (phba->slim2p.virt +
6259 offsetof(struct lpfc_sli2_slim, mbx_ext_words)); 6271 offsetof(struct lpfc_sli2_slim, mbx_ext_words));
@@ -6618,15 +6630,12 @@ lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
6618 * plus an alignment restriction of 16 bytes. 6630 * plus an alignment restriction of 16 bytes.
6619 */ 6631 */
6620 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1); 6632 bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
6621 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, 6633 dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
6622 bmbx_size, 6634 &dmabuf->phys, GFP_KERNEL);
6623 &dmabuf->phys,
6624 GFP_KERNEL);
6625 if (!dmabuf->virt) { 6635 if (!dmabuf->virt) {
6626 kfree(dmabuf); 6636 kfree(dmabuf);
6627 return -ENOMEM; 6637 return -ENOMEM;
6628 } 6638 }
6629 memset(dmabuf->virt, 0, bmbx_size);
6630 6639
6631 /* 6640 /*
6632 * Initialize the bootstrap mailbox pointers now so that the register 6641 * Initialize the bootstrap mailbox pointers now so that the register
@@ -6710,7 +6719,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
6710 struct lpfc_mbx_get_func_cfg *get_func_cfg; 6719 struct lpfc_mbx_get_func_cfg *get_func_cfg;
6711 struct lpfc_rsrc_desc_fcfcoe *desc; 6720 struct lpfc_rsrc_desc_fcfcoe *desc;
6712 char *pdesc_0; 6721 char *pdesc_0;
6713 uint32_t desc_count;
6714 int length, i, rc = 0, rc2; 6722 int length, i, rc = 0, rc2;
6715 6723
6716 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 6724 pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -6841,7 +6849,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
6841 6849
6842 /* search for fc_fcoe resource descriptor */ 6850 /* search for fc_fcoe resource descriptor */
6843 get_func_cfg = &pmb->u.mqe.un.get_func_cfg; 6851 get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
6844 desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
6845 6852
6846 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0]; 6853 pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
6847 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0; 6854 desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
@@ -7417,7 +7424,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 		if (rc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0523 Failed setup of fast-path EQ "
-				"(%d), rc = 0x%x\n", fcp_eqidx, rc);
+				"(%d), rc = 0x%x\n", fcp_eqidx,
+				(uint32_t)rc);
 			goto out_destroy_hba_eq;
 		}
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -7448,7 +7456,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 		if (rc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0527 Failed setup of fast-path FCP "
-				"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
+				"CQ (%d), rc = 0x%x\n", fcp_cqidx,
+				(uint32_t)rc);
 			goto out_destroy_fcp_cq;
 		}
 
@@ -7488,7 +7497,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 		if (rc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0535 Failed setup of fast-path FCP "
-				"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
+				"WQ (%d), rc = 0x%x\n", fcp_wqidx,
+				(uint32_t)rc);
 			goto out_destroy_fcp_wq;
 		}
 
@@ -7521,7 +7531,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 	if (rc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			"0529 Failed setup of slow-path mailbox CQ: "
-			"rc = 0x%x\n", rc);
+			"rc = 0x%x\n", (uint32_t)rc);
 		goto out_destroy_fcp_wq;
 	}
 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -7541,7 +7551,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 	if (rc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			"0531 Failed setup of slow-path ELS CQ: "
-			"rc = 0x%x\n", rc);
+			"rc = 0x%x\n", (uint32_t)rc);
 		goto out_destroy_mbx_cq;
 	}
 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -7585,7 +7595,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 	if (rc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			"0537 Failed setup of slow-path ELS WQ: "
-			"rc = 0x%x\n", rc);
+			"rc = 0x%x\n", (uint32_t)rc);
 		goto out_destroy_mbx_wq;
 	}
 
@@ -7617,7 +7627,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 	if (rc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			"0541 Failed setup of Receive Queue: "
-			"rc = 0x%x\n", rc);
+			"rc = 0x%x\n", (uint32_t)rc);
 		goto out_destroy_fcp_wq;
 	}
 
@@ -7896,7 +7906,8 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
 	LPFC_MBOXQ_t *mboxq;
 	uint32_t rc = 0, if_type;
 	uint32_t shdr_status, shdr_add_status;
-	uint32_t rdy_chk, num_resets = 0, reset_again = 0;
+	uint32_t rdy_chk;
+	uint32_t port_reset = 0;
 	union lpfc_sli4_cfg_shdr *shdr;
 	struct lpfc_register reg_data;
 	uint16_t devid;
@@ -7936,9 +7947,42 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
 		}
 		break;
 	case LPFC_SLI_INTF_IF_TYPE_2:
-		for (num_resets = 0;
-		     num_resets < MAX_IF_TYPE_2_RESETS;
-		     num_resets++) {
+wait:
+		/*
+		 * Poll the Port Status Register and wait for RDY for
+		 * up to 30 seconds. If the port doesn't respond, treat
+		 * it as an error.
+		 */
+		for (rdy_chk = 0; rdy_chk < 3000; rdy_chk++) {
+			if (lpfc_readl(phba->sli4_hba.u.if_type2.
+				STATUSregaddr, &reg_data.word0)) {
+				rc = -ENODEV;
+				goto out;
+			}
+			if (bf_get(lpfc_sliport_status_rdy, &reg_data))
+				break;
+			msleep(20);
+		}
+
+		if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
+			phba->work_status[0] = readl(
+				phba->sli4_hba.u.if_type2.ERR1regaddr);
+			phba->work_status[1] = readl(
+				phba->sli4_hba.u.if_type2.ERR2regaddr);
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2890 Port not ready, port status reg "
+				"0x%x error 1=0x%x, error 2=0x%x\n",
+				reg_data.word0,
+				phba->work_status[0],
+				phba->work_status[1]);
+			rc = -ENODEV;
+			goto out;
+		}
+
+		if (!port_reset) {
+			/*
+			 * Reset the port now
+			 */
 			reg_data.word0 = 0;
 			bf_set(lpfc_sliport_ctrl_end, &reg_data,
 			       LPFC_SLIPORT_LITTLE_ENDIAN);
@@ -7949,64 +7993,16 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
 			/* flush */
 			pci_read_config_word(phba->pcidev,
 					     PCI_DEVICE_ID, &devid);
-			/*
-			 * Poll the Port Status Register and wait for RDY for
-			 * up to 10 seconds. If the port doesn't respond, treat
-			 * it as an error. If the port responds with RN, start
-			 * the loop again.
-			 */
-			for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
-				msleep(10);
-				if (lpfc_readl(phba->sli4_hba.u.if_type2.
-					STATUSregaddr, &reg_data.word0)) {
-					rc = -ENODEV;
-					goto out;
-				}
-				if (bf_get(lpfc_sliport_status_rn, &reg_data))
-					reset_again++;
-				if (bf_get(lpfc_sliport_status_rdy, &reg_data))
-					break;
-			}
-
-			/*
-			 * If the port responds to the init request with
-			 * reset needed, delay for a bit and restart the loop.
-			 */
-			if (reset_again && (rdy_chk < 1000)) {
-				msleep(10);
-				reset_again = 0;
-				continue;
-			}
 
-			/* Detect any port errors. */
-			if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
-			    (rdy_chk >= 1000)) {
-				phba->work_status[0] = readl(
-					phba->sli4_hba.u.if_type2.ERR1regaddr);
-				phba->work_status[1] = readl(
-					phba->sli4_hba.u.if_type2.ERR2regaddr);
-				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"2890 Port error detected during port "
-					"reset(%d): wait_tmo:%d ms, "
-					"port status reg 0x%x, "
-					"error 1=0x%x, error 2=0x%x\n",
-					num_resets, rdy_chk*10,
-					reg_data.word0,
-					phba->work_status[0],
-					phba->work_status[1]);
-				rc = -ENODEV;
-			}
-
-			/*
-			 * Terminate the outer loop provided the Port indicated
-			 * ready within 10 seconds.
-			 */
-			if (rdy_chk < 1000)
-				break;
+			port_reset = 1;
+			msleep(20);
+			goto wait;
+		} else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
+			rc = -ENODEV;
+			goto out;
 		}
-		/* delay driver action following IF_TYPE_2 function reset */
-		msleep(100);
 		break;
+
 	case LPFC_SLI_INTF_IF_TYPE_1:
 	default:
 		break;
@@ -8014,11 +8010,10 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
 
 out:
 	/* Catch the not-ready port failure after a port reset. */
-	if (num_resets >= MAX_IF_TYPE_2_RESETS) {
+	if (rc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			"3317 HBA not functional: IP Reset Failed "
-			"after (%d) retries, try: "
-			"echo fw_reset > board_mode\n", num_resets);
+			"try: echo fw_reset > board_mode\n");
 		rc = -ENODEV;
 	}
 
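The net effect of this rework: instead of up to MAX_IF_TYPE_2_RESETS reset/poll iterations, the function polls for RDY, issues the port reset exactly once, and jumps back to the same poll via the wait: label. A stripped-down sketch of that control flow; poll_rdy() and issue_port_reset() are stand-ins for the register accesses shown above, not real lpfc helpers:

/* Sketch of the single-reset flow, assuming the stand-in helpers below. */
static bool poll_rdy(struct lpfc_hba *phba, int tries);	/* ~20 ms per try */
static void issue_port_reset(struct lpfc_hba *phba);	/* INIT_PORT write */

static int function_reset_flow(struct lpfc_hba *phba)
{
	int port_reset = 0;

wait:
	if (!poll_rdy(phba, 3000))		/* up to ~30 seconds */
		return -ENODEV;			/* port never reported RDY */

	if (!port_reset) {
		issue_port_reset(phba);		/* reset exactly once */
		port_reset = 1;
		goto wait;			/* re-poll for RDY after reset */
	}
	return 0;				/* ready, reset already done */
}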
@@ -8211,9 +8206,9 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
-* with SLI-3 interface specs. The kernel function pci_enable_msix() is
-* called to enable the MSI-X vectors. Note that pci_enable_msix(), once
-* invoked, enables either all or nothing, depending on the current
+* with SLI-3 interface specs. The kernel function pci_enable_msix_exact()
+* is called to enable the MSI-X vectors. Note that pci_enable_msix_exact(),
+* once invoked, enables either all or nothing, depending on the current
 * availability of PCI vector resources. The device driver is responsible
 * for calling the individual request_irq() to register each MSI-X vector
 * with a interrupt handler, which is done in this function. Note that
@@ -8237,8 +8232,8 @@ lpfc_sli_enable_msix(struct lpfc_hba *phba)
 		phba->msix_entries[i].entry = i;
 
 	/* Configure MSI-X capability structure */
-	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
-			     ARRAY_SIZE(phba->msix_entries));
+	rc = pci_enable_msix_exact(phba->pcidev, phba->msix_entries,
+				   LPFC_MSIX_VECTORS);
 	if (rc) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 			"0420 PCI enable MSI-X failed (%d)\n", rc);
@@ -8775,16 +8770,14 @@ out:
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
-* with SLI-4 interface spec. The kernel function pci_enable_msix() is called
-* to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
-* enables either all or nothing, depending on the current availability of
-* PCI vector resources. The device driver is responsible for calling the
-* individual request_irq() to register each MSI-X vector with a interrupt
-* handler, which is done in this function. Note that later when device is
-* unloading, the driver should always call free_irq() on all MSI-X vectors
-* it has done request_irq() on before calling pci_disable_msix(). Failure
-* to do so results in a BUG_ON() and a device will be left with MSI-X
-* enabled and leaks its vectors.
+* with SLI-4 interface spec. The kernel function pci_enable_msix_range()
+* is called to enable the MSI-X vectors. The device driver is responsible
+* for calling the individual request_irq() to register each MSI-X vector
+* with a interrupt handler, which is done in this function. Note that
+* later when device is unloading, the driver should always call free_irq()
+* on all MSI-X vectors it has done request_irq() on before calling
+* pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
+* will be left with MSI-X enabled and leaks its vectors.
 *
 * Return codes
 * 0 - successful
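The warning kept in this comment is worth making concrete: every vector that was request_irq()'d must be freed before MSI-X is disabled. A minimal teardown sketch, with illustrative names:

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Sketch of the required teardown order, not lpfc's actual unwind path. */
static void teardown_msix(struct pci_dev *pdev, struct msix_entry *entries,
			  int vectors, void *dev_id)
{
	int i;

	for (i = 0; i < vectors; i++)
		free_irq(entries[i].vector, dev_id);	/* must come first */
	pci_disable_msix(pdev);				/* now safe to disable */
}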
@@ -8805,17 +8798,14 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 		phba->sli4_hba.msix_entries[index].entry = index;
 		vectors++;
 	}
-enable_msix_vectors:
-	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
-			     vectors);
-	if (rc > 1) {
-		vectors = rc;
-		goto enable_msix_vectors;
-	} else if (rc) {
+	rc = pci_enable_msix_range(phba->pcidev, phba->sli4_hba.msix_entries,
+				   2, vectors);
+	if (rc < 0) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 			"0484 PCI enable MSI-X failed (%d)\n", rc);
 		goto vec_fail_out;
 	}
+	vectors = rc;
 
 	/* Log MSI-X vector assignment */
 	for (index = 0; index < vectors; index++)
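This removes the old open-coded retry: pci_enable_msix() reported how many vectors it could have granted, forcing callers to loop and try again, while pci_enable_msix_range() negotiates in one call and returns either the count actually granted (at least minvec) or a negative errno. A sketch of the calling convention, mirroring the (2, vectors) range used above; the function name is illustrative:

#include <linux/pci.h>

static int enable_msix_sketch(struct pci_dev *pdev,
			      struct msix_entry *entries, int maxvec)
{
	int rc = pci_enable_msix_range(pdev, entries, 2, maxvec);

	if (rc < 0)
		return rc;	/* fewer than 2 vectors were available */
	return rc;		/* rc vectors are now enabled */
}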
@@ -8828,7 +8818,8 @@ enable_msix_vectors:
 	/* Assign MSI-X vectors to interrupt handlers */
 	for (index = 0; index < vectors; index++) {
 		memset(&phba->sli4_hba.handler_name[index], 0, 16);
-		sprintf((char *)&phba->sli4_hba.handler_name[index],
+		snprintf((char *)&phba->sli4_hba.handler_name[index],
+			 LPFC_SLI4_HANDLER_NAME_SZ,
 			 LPFC_DRIVER_HANDLER_NAME"%d", index);
 
 		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
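The sprintf() to snprintf() switch bounds the write into the fixed-size handler_name buffer; the size argument guarantees truncation plus NUL termination if the formatted name ever outgrows it. The pattern in isolation, with an illustrative prefix:

#include <linux/kernel.h>

/* Sketch only: bounded formatting of a per-vector handler name. */
static void format_handler_name(char *buf, size_t len, int index)
{
	snprintf(buf, len, "hdlr%d", index);	/* truncates, always NUL-terminated */
}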
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 1f292e29d566..06241f590c1e 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1811,12 +1811,12 @@ lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
 	 * page, this is used as a priori size of SLI4_PAGE_SIZE for
 	 * the later DMA memory free.
 	 */
-	viraddr = dma_alloc_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
-				     &phyaddr, GFP_KERNEL);
+	viraddr = dma_zalloc_coherent(&phba->pcidev->dev,
+				      SLI4_PAGE_SIZE, &phyaddr,
+				      GFP_KERNEL);
 	/* In case of malloc fails, proceed with whatever we have */
 	if (!viraddr)
 		break;
-	memset(viraddr, 0, SLI4_PAGE_SIZE);
 	mbox->sge_array->addr[pagen] = viraddr;
 	/* Keep the first page for later sub-header construction */
 	if (pagen == 0)
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index c342f6afd747..5cc1103d811e 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1031,6 +1031,8 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
 
 	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+	if (!prsp)
+		goto out;
 
 	lp = (uint32_t *) prsp->virt;
 	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
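The guard matters because the lookup yields NULL when the PLOGI command carries no response buffer, and the very next line dereferences prsp->virt. The generic form of the fix, assuming the lpfc_dmabuf type from the driver and using the stock list helpers:

#include <linux/list.h>

/* Sketch: never dereference a first-entry lookup before checking for
 * an empty list.
 */
static void *first_virt_or_null(struct list_head *head)
{
	struct lpfc_dmabuf *d;

	if (list_empty(head))
		return NULL;	/* the case the new check catches */
	d = list_first_entry(head, struct lpfc_dmabuf, list);
	return d->virt;
}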
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 7862c5540861..b99399fe2548 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -306,7 +306,7 @@ lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
 * depth for a scsi device. This function sets the queue depth to the new
 * value and sends an event out to log the queue depth change.
 **/
-int
+static int
 lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
@@ -380,12 +380,14 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
 {
 	unsigned long flags;
 	uint32_t evt_posted;
+	unsigned long expires;
 
 	spin_lock_irqsave(&phba->hbalock, flags);
 	atomic_inc(&phba->num_rsrc_err);
 	phba->last_rsrc_error_time = jiffies;
 
-	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
+	expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
+	if (time_after(expires, jiffies)) {
 		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return;
 	}
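The point of this hunk is wraparound safety: a raw "deadline > jiffies" comparison gives the wrong answer once the jiffies counter wraps, whereas time_after() subtracts in signed arithmetic and stays correct across the wrap. A minimal sketch, with an illustrative function name:

#include <linux/jiffies.h>

static bool within_ramp_down_window(unsigned long last_ramp_down,
				    unsigned long interval)
{
	unsigned long expires = last_ramp_down + interval;

	return time_after(expires, jiffies);	/* wrap-safe comparison */
}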
@@ -741,7 +743,7 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
741 * 743 *
742 * Returns: 0 = failure, non-zero number of successfully posted buffers. 744 * Returns: 0 = failure, non-zero number of successfully posted buffers.
743 **/ 745 **/
-int
+static int
745lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba, 747lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
746 struct list_head *post_sblist, int sb_count) 748 struct list_head *post_sblist, int sb_count)
747{ 749{
@@ -2965,7 +2967,7 @@ err:
2965 * on the specified data using a CRC algorithmn 2967 * on the specified data using a CRC algorithmn
2966 * using crc_t10dif. 2968 * using crc_t10dif.
2967 */ 2969 */
-uint16_t
+static uint16_t
2969lpfc_bg_crc(uint8_t *data, int count) 2971lpfc_bg_crc(uint8_t *data, int count)
2970{ 2972{
2971 uint16_t crc = 0; 2973 uint16_t crc = 0;
@@ -2981,7 +2983,7 @@ lpfc_bg_crc(uint8_t *data, int count)
2981 * on the specified data using a CSUM algorithmn 2983 * on the specified data using a CSUM algorithmn
2982 * using ip_compute_csum. 2984 * using ip_compute_csum.
2983 */ 2985 */
-uint16_t
+static uint16_t
2985lpfc_bg_csum(uint8_t *data, int count) 2987lpfc_bg_csum(uint8_t *data, int count)
2986{ 2988{
2987 uint16_t ret; 2989 uint16_t ret;
@@ -2994,7 +2996,7 @@ lpfc_bg_csum(uint8_t *data, int count)
2994 * This function examines the protection data to try to determine 2996 * This function examines the protection data to try to determine
2995 * what type of T10-DIF error occurred. 2997 * what type of T10-DIF error occurred.
2996 */ 2998 */
-void
+static void
2998lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) 3000lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
2999{ 3001{
3000 struct scatterlist *sgpe; /* s/g prot entry */ 3002 struct scatterlist *sgpe; /* s/g prot entry */
@@ -3464,7 +3466,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
3464 */ 3466 */
3465 if ((phba->cfg_fof) && ((struct lpfc_device_data *) 3467 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3466 scsi_cmnd->device->hostdata)->oas_enabled) 3468 scsi_cmnd->device->hostdata)->oas_enabled)
-		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_OAS;
+		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3468 return 0; 3470 return 0;
3469} 3471}
3470 3472
@@ -3604,6 +3606,14 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3604 */ 3606 */
3605 iocb_cmd->un.fcpi.fcpi_parm = fcpdl; 3607 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
3606 3608
3609 /*
3610 * If the OAS driver feature is enabled and the lun is enabled for
3611 * OAS, set the oas iocb related flags.
3612 */
3613 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3614 scsi_cmnd->device->hostdata)->oas_enabled)
3615 lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
3616
3607 return 0; 3617 return 0;
3608err: 3618err:
3609 if (lpfc_cmd->seg_cnt) 3619 if (lpfc_cmd->seg_cnt)
@@ -4874,6 +4884,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
4874 /* ABTS WQE must go to the same WQ as the WQE to be aborted */ 4884 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
4875 abtsiocb->fcp_wqidx = iocb->fcp_wqidx; 4885 abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
4876 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; 4886 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
4887 if (iocb->iocb_flag & LPFC_IO_FOF)
4888 abtsiocb->iocb_flag |= LPFC_IO_FOF;
4877 4889
4878 if (lpfc_is_link_up(phba)) 4890 if (lpfc_is_link_up(phba))
4879 icmd->ulpCommand = CMD_ABORT_XRI_CN; 4891 icmd->ulpCommand = CMD_ABORT_XRI_CN;
@@ -5327,7 +5339,13 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
 	if (status == FAILED) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
 			"0722 Target Reset rport failure: rdata x%p\n", rdata);
-		return FAILED;
+		spin_lock_irq(shost->host_lock);
+		pnode->nlp_flag &= ~NLP_NPR_ADISC;
+		pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+		spin_unlock_irq(shost->host_lock);
+		lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+					    LPFC_CTX_TGT);
+		return FAST_IO_FAIL;
 	}
 
 	scsi_event.event_type = FC_REG_SCSI_EVENT;
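Rather than returning FAILED outright when the rport lookup fails, the new path clears the node's ADISC and FCP-2 state, flushes outstanding I/O for the target, and returns FAST_IO_FAIL so the midlayer fails fast instead of retrying. The shape of that error-handler convention, sketched with stand-in helpers (target_gone() and flush_target_io() are not lpfc functions):

#include <scsi/scsi.h>

static int target_reset_outcome(struct scsi_cmnd *cmnd)
{
	if (target_gone(cmnd)) {		/* stand-in check */
		flush_target_io(cmnd);		/* abort what is still in flight */
		return FAST_IO_FAIL;		/* fail fast, no retries */
	}
	/* ... otherwise issue the task management command ... */
	return SUCCESS;
}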
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 32ada0505576..207a43d952fa 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -187,7 +187,6 @@ lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
187{ 187{
188 struct lpfc_mqe *temp_mqe; 188 struct lpfc_mqe *temp_mqe;
189 struct lpfc_register doorbell; 189 struct lpfc_register doorbell;
190 uint32_t host_index;
191 190
192 /* sanity check on queue memory */ 191 /* sanity check on queue memory */
193 if (unlikely(!q)) 192 if (unlikely(!q))
@@ -202,7 +201,6 @@ lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
202 q->phba->mbox = (MAILBOX_t *)temp_mqe; 201 q->phba->mbox = (MAILBOX_t *)temp_mqe;
203 202
204 /* Update the host index before invoking device */ 203 /* Update the host index before invoking device */
205 host_index = q->host_index;
206 q->host_index = ((q->host_index + 1) % q->entry_count); 204 q->host_index = ((q->host_index + 1) % q->entry_count);
207 205
208 /* Ring Doorbell */ 206 /* Ring Doorbell */
@@ -786,42 +784,6 @@ lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
786} 784}
787 785
788/** 786/**
789 * lpfc_cleanup_wt_rrqs - Remove all rrq's from the active list.
790 * @phba: Pointer to HBA context object.
791 *
792 * Remove all rrqs from the phba->active_rrq_list and free them by
793 * calling __lpfc_clr_active_rrq
794 *
795 **/
796void
797lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
798{
799 struct lpfc_node_rrq *rrq;
800 struct lpfc_node_rrq *nextrrq;
801 unsigned long next_time;
802 unsigned long iflags;
803 LIST_HEAD(rrq_list);
804
805 if (phba->sli_rev != LPFC_SLI_REV4)
806 return;
807 spin_lock_irqsave(&phba->hbalock, iflags);
808 phba->hba_flag &= ~HBA_RRQ_ACTIVE;
809 next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2));
810 list_splice_init(&phba->active_rrq_list, &rrq_list);
811 spin_unlock_irqrestore(&phba->hbalock, iflags);
812
813 list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
814 list_del(&rrq->list);
815 lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
816 }
817 if ((!list_empty(&phba->active_rrq_list)) &&
818 (!(phba->pport->load_flag & FC_UNLOADING)))
819
820 mod_timer(&phba->rrq_tmr, next_time);
821}
822
823
824/**
825 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap. 787 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
826 * @phba: Pointer to HBA context object. 788 * @phba: Pointer to HBA context object.
827 * @ndlp: Targets nodelist pointer for this exchange. 789 * @ndlp: Targets nodelist pointer for this exchange.
@@ -937,7 +899,7 @@ out:
937 * @phba: Pointer to HBA context object. 899 * @phba: Pointer to HBA context object.
938 * @piocb: Pointer to the iocbq. 900 * @piocb: Pointer to the iocbq.
939 * 901 *
- * This function is called with hbalock held. This function
+ * This function is called with the ring lock held. This function
941 * gets a new driver sglq object from the sglq list. If the 903 * gets a new driver sglq object from the sglq list. If the
942 * list is not empty then it is successful, it returns pointer to the newly 904 * list is not empty then it is successful, it returns pointer to the newly
943 * allocated sglq object else it returns NULL. 905 * allocated sglq object else it returns NULL.
@@ -1053,10 +1015,12 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
 			spin_unlock_irqrestore(
 				&phba->sli4_hba.abts_sgl_list_lock, iflag);
 		} else {
+			spin_lock_irqsave(&pring->ring_lock, iflag);
 			sglq->state = SGL_FREED;
 			sglq->ndlp = NULL;
 			list_add_tail(&sglq->list,
 				&phba->sli4_hba.lpfc_sgl_list);
+			spin_unlock_irqrestore(&pring->ring_lock, iflag);
 
 			/* Check if TXQ queue needs to be serviced */
 			if (!list_empty(&pring->txq))
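The sglq free list is now returned to under the ring's ring_lock, keeping the put path consistent with the get path that already runs under that lock (and with the list_splice sites below). The pattern in isolation:

#include <linux/list.h>
#include <linux/spinlock.h>

/* Sketch: return an element to a shared free list under the same lock
 * its consumers take.
 */
static void put_free(spinlock_t *lock, struct list_head *free_list,
		     struct list_head *elem)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(elem, free_list);
	spin_unlock_irqrestore(lock, flags);
}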
@@ -2469,11 +2433,9 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2469 IOCB_t * irsp; 2433 IOCB_t * irsp;
2470 WORD5 * w5p; 2434 WORD5 * w5p;
2471 uint32_t Rctl, Type; 2435 uint32_t Rctl, Type;
2472 uint32_t match;
2473 struct lpfc_iocbq *iocbq; 2436 struct lpfc_iocbq *iocbq;
2474 struct lpfc_dmabuf *dmzbuf; 2437 struct lpfc_dmabuf *dmzbuf;
2475 2438
2476 match = 0;
2477 irsp = &(saveq->iocb); 2439 irsp = &(saveq->iocb);
2478 2440
2479 if (irsp->ulpCommand == CMD_ASYNC_STATUS) { 2441 if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
@@ -2899,7 +2861,7 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2899void lpfc_poll_eratt(unsigned long ptr) 2861void lpfc_poll_eratt(unsigned long ptr)
2900{ 2862{
2901 struct lpfc_hba *phba; 2863 struct lpfc_hba *phba;
-	uint32_t eratt = 0, rem;
+	uint32_t eratt = 0;
2903 uint64_t sli_intr, cnt; 2865 uint64_t sli_intr, cnt;
2904 2866
2905 phba = (struct lpfc_hba *)ptr; 2867 phba = (struct lpfc_hba *)ptr;
@@ -2914,7 +2876,7 @@ void lpfc_poll_eratt(unsigned long ptr)
 	cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
 
 	/* 64-bit integer division not supporte on 32-bit x86 - use do_div */
-	rem = do_div(cnt, LPFC_ERATT_POLL_INTERVAL);
+	do_div(cnt, LPFC_ERATT_POLL_INTERVAL);
 	phba->sli.slistat.sli_ips = cnt;
 
 	phba->sli.slistat.sli_prev_intr = sli_intr;
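Context for the hunk above: 32-bit builds have no native 64-by-32 division for a u64, so do_div() is used; it divides its first argument in place and returns the remainder, which this call site no longer keeps. Illustrative use:

#include <asm/div64.h>
#include <linux/types.h>

static u64 intr_rate(u64 delta_intr, u32 interval)
{
	do_div(delta_intr, interval);	/* quotient left in delta_intr */
	return delta_intr;		/* remainder (return value) discarded */
}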
@@ -4864,15 +4826,12 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
 	 * mailbox command.
 	 */
 	dma_size = *vpd_size;
-	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
-					  dma_size,
-					  &dmabuf->phys,
-					  GFP_KERNEL);
+	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
+					   &dmabuf->phys, GFP_KERNEL);
 	if (!dmabuf->virt) {
 		kfree(dmabuf);
 		return -ENOMEM;
 	}
-	memset(dmabuf->virt, 0, dma_size);
 
 	/*
 	 * The SLI4 implementation of READ_REV conflicts at word1,
@@ -5990,9 +5949,6 @@ lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
5990 curr_blks++; 5949 curr_blks++;
5991 } 5950 }
5992 5951
5993 /* Calculate the total requested length of the dma memory. */
5994 req_len = curr_blks * sizeof(uint16_t);
5995
5996 /* 5952 /*
5997 * Calculate the size of an embedded mailbox. The uint32_t 5953 * Calculate the size of an embedded mailbox. The uint32_t
5998 * accounts for extents-specific word. 5954 * accounts for extents-specific word.
@@ -6101,14 +6057,18 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
6101 struct lpfc_sglq *sglq_entry_first = NULL; 6057 struct lpfc_sglq *sglq_entry_first = NULL;
6102 int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0; 6058 int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0;
6103 int last_xritag = NO_XRI; 6059 int last_xritag = NO_XRI;
6060 struct lpfc_sli_ring *pring;
6104 LIST_HEAD(prep_sgl_list); 6061 LIST_HEAD(prep_sgl_list);
6105 LIST_HEAD(blck_sgl_list); 6062 LIST_HEAD(blck_sgl_list);
6106 LIST_HEAD(allc_sgl_list); 6063 LIST_HEAD(allc_sgl_list);
6107 LIST_HEAD(post_sgl_list); 6064 LIST_HEAD(post_sgl_list);
6108 LIST_HEAD(free_sgl_list); 6065 LIST_HEAD(free_sgl_list);
6109 6066
6067 pring = &phba->sli.ring[LPFC_ELS_RING];
6110 spin_lock_irq(&phba->hbalock); 6068 spin_lock_irq(&phba->hbalock);
6069 spin_lock(&pring->ring_lock);
6111 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list); 6070 list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
6071 spin_unlock(&pring->ring_lock);
6112 spin_unlock_irq(&phba->hbalock); 6072 spin_unlock_irq(&phba->hbalock);
6113 6073
6114 total_cnt = phba->sli4_hba.els_xri_cnt; 6074 total_cnt = phba->sli4_hba.els_xri_cnt;
@@ -6210,8 +6170,10 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
6210 /* push els sgls posted to the availble list */ 6170 /* push els sgls posted to the availble list */
6211 if (!list_empty(&post_sgl_list)) { 6171 if (!list_empty(&post_sgl_list)) {
6212 spin_lock_irq(&phba->hbalock); 6172 spin_lock_irq(&phba->hbalock);
6173 spin_lock(&pring->ring_lock);
6213 list_splice_init(&post_sgl_list, 6174 list_splice_init(&post_sgl_list,
6214 &phba->sli4_hba.lpfc_sgl_list); 6175 &phba->sli4_hba.lpfc_sgl_list);
6176 spin_unlock(&pring->ring_lock);
6215 spin_unlock_irq(&phba->hbalock); 6177 spin_unlock_irq(&phba->hbalock);
6216 } else { 6178 } else {
6217 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 6179 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -6797,13 +6759,16 @@ void
 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
 {
 	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
-	MAILBOX_t *mb = &pmbox->u.mb;
+	MAILBOX_t *mb = NULL;
+
 	struct lpfc_sli *psli = &phba->sli;
 
 	/* If the mailbox completed, process the completion and return */
 	if (lpfc_sli4_process_missed_mbox_completions(phba))
 		return;
 
+	if (pmbox != NULL)
+		mb = &pmbox->u.mb;
 	/* Check the pmbox pointer first. There is a race condition
 	 * between the mbox timeout handler getting executed in the
 	 * worklist and the mailbox actually completing. When this
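The bug being fixed: mbox_active can legitimately go NULL between the timeout firing and the handler running, so taking &pmbox->u.mb at declaration time dereferenced a possibly-NULL pointer. The shape of the fix, sketched:

/* Sketch: defer the member-address computation until after the NULL test. */
static MAILBOX_t *mbox_of(LPFC_MBOXQ_t *pmbox)
{
	return pmbox ? &pmbox->u.mb : NULL;
}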
@@ -8138,7 +8103,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
8138 * 8103 *
8139 * Return: index into SLI4 fast-path FCP queue index. 8104 * Return: index into SLI4 fast-path FCP queue index.
8140 **/ 8105 **/
-static inline uint32_t
+static inline int
8142lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba) 8107lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
8143{ 8108{
8144 struct lpfc_vector_map_info *cpup; 8109 struct lpfc_vector_map_info *cpup;
@@ -8152,7 +8117,6 @@ lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
8152 cpup += cpu; 8117 cpup += cpu;
8153 return cpup->channel_id; 8118 return cpup->channel_id;
8154 } 8119 }
8155 chann = cpu;
8156 } 8120 }
8157 chann = atomic_add_return(1, &phba->fcp_qidx); 8121 chann = atomic_add_return(1, &phba->fcp_qidx);
8158 chann = (chann % phba->cfg_fcp_io_channel); 8122 chann = (chann % phba->cfg_fcp_io_channel);
@@ -8784,6 +8748,37 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8784 return 0; 8748 return 0;
8785} 8749}
8786 8750
8751int
8752lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number,
8753 struct lpfc_iocbq *piocb)
8754{
8755 uint32_t idx;
8756
8757 if (phba->sli_rev == LPFC_SLI_REV4) {
8758 if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
8759 /*
8760 * fcp_wqidx should already be setup based on what
8761 * completion queue we want to use.
8762 */
8763 if (!(phba->cfg_fof) ||
8764 (!(piocb->iocb_flag & LPFC_IO_FOF))) {
8765 if (unlikely(!phba->sli4_hba.fcp_wq))
8766 return LPFC_HBA_ERROR;
8767 idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
8768 piocb->fcp_wqidx = idx;
8769 ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
8770 } else {
8771 if (unlikely(!phba->sli4_hba.oas_wq))
8772 return LPFC_HBA_ERROR;
8773 idx = 0;
8774 piocb->fcp_wqidx = idx;
8775 ring_number = LPFC_FCP_OAS_RING;
8776 }
8777 }
8778 }
8779 return ring_number;
8780}
8781
8787/** 8782/**
8788 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb 8783 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
8789 * @phba: Pointer to HBA context object. 8784 * @phba: Pointer to HBA context object.
@@ -8809,61 +8804,42 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
 	int rc, idx;
 
 	if (phba->sli_rev == LPFC_SLI_REV4) {
-		if (piocb->iocb_flag & LPFC_IO_FCP) {
-			if (!phba->cfg_fof || (!(piocb->iocb_flag &
-				LPFC_IO_OAS))) {
-				if (unlikely(!phba->sli4_hba.fcp_wq))
-					return IOCB_ERROR;
-				idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
-				piocb->fcp_wqidx = idx;
-				ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
-			} else {
-				if (unlikely(!phba->sli4_hba.oas_wq))
-					return IOCB_ERROR;
-				idx = 0;
-				piocb->fcp_wqidx = 0;
-				ring_number = LPFC_FCP_OAS_RING;
-			}
-			pring = &phba->sli.ring[ring_number];
-			spin_lock_irqsave(&pring->ring_lock, iflags);
-			rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
-				flag);
-			spin_unlock_irqrestore(&pring->ring_lock, iflags);
+		ring_number = lpfc_sli_calc_ring(phba, ring_number, piocb);
+		if (unlikely(ring_number == LPFC_HBA_ERROR))
+			return IOCB_ERROR;
+		idx = piocb->fcp_wqidx;
 
-			if (lpfc_fcp_look_ahead) {
-				fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx];
+		pring = &phba->sli.ring[ring_number];
+		spin_lock_irqsave(&pring->ring_lock, iflags);
+		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
+		spin_unlock_irqrestore(&pring->ring_lock, iflags);
 
-				if (atomic_dec_and_test(&fcp_eq_hdl->
-					fcp_eq_in_use)) {
+		if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) {
+			fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx];
 
-					/* Get associated EQ with this index */
-					fpeq = phba->sli4_hba.hba_eq[idx];
+			if (atomic_dec_and_test(&fcp_eq_hdl->
+				fcp_eq_in_use)) {
 
-					/* Turn off interrupts from this EQ */
-					lpfc_sli4_eq_clr_intr(fpeq);
+				/* Get associated EQ with this index */
+				fpeq = phba->sli4_hba.hba_eq[idx];
 
-					/*
-					 * Process all the events on FCP EQ
-					 */
-					while ((eqe = lpfc_sli4_eq_get(fpeq))) {
-						lpfc_sli4_hba_handle_eqe(phba,
-							eqe, idx);
-						fpeq->EQ_processed++;
-					}
+				/* Turn off interrupts from this EQ */
+				lpfc_sli4_eq_clr_intr(fpeq);
 
-					/* Always clear and re-arm the EQ */
-					lpfc_sli4_eq_release(fpeq,
-						LPFC_QUEUE_REARM);
-				}
-				atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
-			}
-		} else {
-			pring = &phba->sli.ring[ring_number];
-			spin_lock_irqsave(&pring->ring_lock, iflags);
-			rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
-				flag);
-			spin_unlock_irqrestore(&pring->ring_lock, iflags);
+				/*
+				 * Process all the events on FCP EQ
+				 */
+				while ((eqe = lpfc_sli4_eq_get(fpeq))) {
+					lpfc_sli4_hba_handle_eqe(phba,
+						eqe, idx);
+					fpeq->EQ_processed++;
+				}
 
+				/* Always clear and re-arm the EQ */
+				lpfc_sli4_eq_release(fpeq,
+					LPFC_QUEUE_REARM);
+			}
+			atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
 		}
 	} else {
 		/* For now, SLI2/3 will still use hbalock */
@@ -9746,6 +9722,7 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9746 struct lpfc_iocbq *abtsiocbp; 9722 struct lpfc_iocbq *abtsiocbp;
9747 IOCB_t *icmd = NULL; 9723 IOCB_t *icmd = NULL;
9748 IOCB_t *iabt = NULL; 9724 IOCB_t *iabt = NULL;
9725 int ring_number;
9749 int retval; 9726 int retval;
9750 unsigned long iflags; 9727 unsigned long iflags;
9751 9728
@@ -9786,6 +9763,8 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9786 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx; 9763 abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
9787 if (cmdiocb->iocb_flag & LPFC_IO_FCP) 9764 if (cmdiocb->iocb_flag & LPFC_IO_FCP)
9788 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX; 9765 abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
9766 if (cmdiocb->iocb_flag & LPFC_IO_FOF)
9767 abtsiocbp->iocb_flag |= LPFC_IO_FOF;
9789 9768
9790 if (phba->link_state >= LPFC_LINK_UP) 9769 if (phba->link_state >= LPFC_LINK_UP)
9791 iabt->ulpCommand = CMD_ABORT_XRI_CN; 9770 iabt->ulpCommand = CMD_ABORT_XRI_CN;
@@ -9802,6 +9781,11 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
9802 abtsiocbp->iotag); 9781 abtsiocbp->iotag);
9803 9782
9804 if (phba->sli_rev == LPFC_SLI_REV4) { 9783 if (phba->sli_rev == LPFC_SLI_REV4) {
9784 ring_number =
9785 lpfc_sli_calc_ring(phba, pring->ringno, abtsiocbp);
9786 if (unlikely(ring_number == LPFC_HBA_ERROR))
9787 return 0;
9788 pring = &phba->sli.ring[ring_number];
9805 /* Note: both hbalock and ring_lock need to be set here */ 9789 /* Note: both hbalock and ring_lock need to be set here */
9806 spin_lock_irqsave(&pring->ring_lock, iflags); 9790 spin_lock_irqsave(&pring->ring_lock, iflags);
9807 retval = __lpfc_sli_issue_iocb(phba, pring->ringno, 9791 retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
@@ -10099,6 +10083,8 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
10099 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; 10083 abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
10100 if (iocbq->iocb_flag & LPFC_IO_FCP) 10084 if (iocbq->iocb_flag & LPFC_IO_FCP)
10101 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX; 10085 abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
10086 if (iocbq->iocb_flag & LPFC_IO_FOF)
10087 abtsiocb->iocb_flag |= LPFC_IO_FOF;
10102 10088
10103 if (lpfc_is_link_up(phba)) 10089 if (lpfc_is_link_up(phba))
10104 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN; 10090 abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
@@ -10146,7 +10132,9 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
10146 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd) 10132 uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
10147{ 10133{
10148 struct lpfc_hba *phba = vport->phba; 10134 struct lpfc_hba *phba = vport->phba;
10135 struct lpfc_scsi_buf *lpfc_cmd;
10149 struct lpfc_iocbq *abtsiocbq; 10136 struct lpfc_iocbq *abtsiocbq;
10137 struct lpfc_nodelist *ndlp;
10150 struct lpfc_iocbq *iocbq; 10138 struct lpfc_iocbq *iocbq;
10151 IOCB_t *icmd; 10139 IOCB_t *icmd;
10152 int sum, i, ret_val; 10140 int sum, i, ret_val;
@@ -10198,8 +10186,14 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
 		abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx;
 		if (iocbq->iocb_flag & LPFC_IO_FCP)
 			abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
+		if (iocbq->iocb_flag & LPFC_IO_FOF)
+			abtsiocbq->iocb_flag |= LPFC_IO_FOF;
 
-		if (lpfc_is_link_up(phba))
+		lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
+		ndlp = lpfc_cmd->rdata->pnode;
+
+		if (lpfc_is_link_up(phba) &&
+		    (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
 			abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
 		else
 			abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
@@ -12611,6 +12605,9 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
12611 * Process all the event on FCP fast-path EQ 12605 * Process all the event on FCP fast-path EQ
12612 */ 12606 */
12613 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 12607 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
12608 if (eqe == NULL)
12609 break;
12610
12614 lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx); 12611 lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx);
12615 if (!(++ecount % fpeq->entry_repost)) 12612 if (!(++ecount % fpeq->entry_repost))
12616 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); 12613 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
@@ -12760,14 +12757,13 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
 		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
 		if (!dmabuf)
 			goto out_fail;
-		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
-						  hw_page_size, &dmabuf->phys,
-						  GFP_KERNEL);
+		dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
+						   hw_page_size, &dmabuf->phys,
+						   GFP_KERNEL);
 		if (!dmabuf->virt) {
 			kfree(dmabuf);
 			goto out_fail;
 		}
-		memset(dmabuf->virt, 0, hw_page_size);
12771 dmabuf->buffer_tag = x; 12767 dmabuf->buffer_tag = x;
12772 list_add_tail(&dmabuf->list, &queue->page_list); 12768 list_add_tail(&dmabuf->list, &queue->page_list);
12773 /* initialize queue's entry array */ 12769 /* initialize queue's entry array */
@@ -12845,7 +12841,7 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
12845 * memory this function will return -ENOMEM. If the queue create mailbox command 12841 * memory this function will return -ENOMEM. If the queue create mailbox command
12846 * fails this function will return -ENXIO. 12842 * fails this function will return -ENXIO.
12847 **/ 12843 **/
-uint32_t
+int
12849lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq) 12845lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
12850{ 12846{
12851 struct lpfc_mbx_modify_eq_delay *eq_delay; 12847 struct lpfc_mbx_modify_eq_delay *eq_delay;
@@ -12931,7 +12927,7 @@ lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
12931 * memory this function will return -ENOMEM. If the queue create mailbox command 12927 * memory this function will return -ENOMEM. If the queue create mailbox command
12932 * fails this function will return -ENXIO. 12928 * fails this function will return -ENXIO.
12933 **/ 12929 **/
-uint32_t
+int
12935lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax) 12931lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
12936{ 12932{
12937 struct lpfc_mbx_eq_create *eq_create; 12933 struct lpfc_mbx_eq_create *eq_create;
@@ -13053,7 +13049,7 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
13053 * memory this function will return -ENOMEM. If the queue create mailbox command 13049 * memory this function will return -ENOMEM. If the queue create mailbox command
13054 * fails this function will return -ENXIO. 13050 * fails this function will return -ENXIO.
13055 **/ 13051 **/
-uint32_t
+int
13057lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq, 13053lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
13058 struct lpfc_queue *eq, uint32_t type, uint32_t subtype) 13054 struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
13059{ 13055{
@@ -13394,7 +13390,7 @@ out:
13394 * memory this function will return -ENOMEM. If the queue create mailbox command 13390 * memory this function will return -ENOMEM. If the queue create mailbox command
13395 * fails this function will return -ENXIO. 13391 * fails this function will return -ENXIO.
13396 **/ 13392 **/
-uint32_t
+int
13398lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, 13394lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
13399 struct lpfc_queue *cq, uint32_t subtype) 13395 struct lpfc_queue *cq, uint32_t subtype)
13400{ 13396{
@@ -13630,7 +13626,7 @@ lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
13630 * memory this function will return -ENOMEM. If the queue create mailbox command 13626 * memory this function will return -ENOMEM. If the queue create mailbox command
13631 * fails this function will return -ENXIO. 13627 * fails this function will return -ENXIO.
13632 **/ 13628 **/
-uint32_t
+int
13634lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, 13630lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
13635 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype) 13631 struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
13636{ 13632{
@@ -13895,7 +13891,7 @@ out:
13895 * On success this function will return a zero. If the queue destroy mailbox 13891 * On success this function will return a zero. If the queue destroy mailbox
13896 * command fails this function will return -ENXIO. 13892 * command fails this function will return -ENXIO.
13897 **/ 13893 **/
-uint32_t
+int
13899lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq) 13895lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
13900{ 13896{
13901 LPFC_MBOXQ_t *mbox; 13897 LPFC_MBOXQ_t *mbox;
@@ -13951,7 +13947,7 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
13951 * On success this function will return a zero. If the queue destroy mailbox 13947 * On success this function will return a zero. If the queue destroy mailbox
13952 * command fails this function will return -ENXIO. 13948 * command fails this function will return -ENXIO.
13953 **/ 13949 **/
-uint32_t
+int
13955lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq) 13951lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
13956{ 13952{
13957 LPFC_MBOXQ_t *mbox; 13953 LPFC_MBOXQ_t *mbox;
@@ -14005,7 +14001,7 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
14005 * On success this function will return a zero. If the queue destroy mailbox 14001 * On success this function will return a zero. If the queue destroy mailbox
14006 * command fails this function will return -ENXIO. 14002 * command fails this function will return -ENXIO.
14007 **/ 14003 **/
-uint32_t
+int
14009lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) 14005lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
14010{ 14006{
14011 LPFC_MBOXQ_t *mbox; 14007 LPFC_MBOXQ_t *mbox;
@@ -14059,7 +14055,7 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
14059 * On success this function will return a zero. If the queue destroy mailbox 14055 * On success this function will return a zero. If the queue destroy mailbox
14060 * command fails this function will return -ENXIO. 14056 * command fails this function will return -ENXIO.
14061 **/ 14057 **/
-uint32_t
+int
14063lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) 14059lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
14064{ 14060{
14065 LPFC_MBOXQ_t *mbox; 14061 LPFC_MBOXQ_t *mbox;
@@ -14112,7 +14108,7 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
14112 * On success this function will return a zero. If the queue destroy mailbox 14108 * On success this function will return a zero. If the queue destroy mailbox
14113 * command fails this function will return -ENXIO. 14109 * command fails this function will return -ENXIO.
14114 **/ 14110 **/
-uint32_t
+int
14116lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq, 14112lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
14117 struct lpfc_queue *drq) 14113 struct lpfc_queue *drq)
14118{ 14114{
@@ -14252,7 +14248,6 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
14252 "2511 POST_SGL mailbox failed with " 14248 "2511 POST_SGL mailbox failed with "
14253 "status x%x add_status x%x, mbx status x%x\n", 14249 "status x%x add_status x%x, mbx status x%x\n",
14254 shdr_status, shdr_add_status, rc); 14250 shdr_status, shdr_add_status, rc);
14255 rc = -ENXIO;
14256 } 14251 }
14257 return 0; 14252 return 0;
14258} 14253}
@@ -14270,7 +14265,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
14270 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful 14265 * A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
14271 * LPFC_RPI_ALLOC_ERROR if no rpis are available. 14266 * LPFC_RPI_ALLOC_ERROR if no rpis are available.
14272 **/ 14267 **/
-uint16_t
+static uint16_t
14274lpfc_sli4_alloc_xri(struct lpfc_hba *phba) 14269lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
14275{ 14270{
14276 unsigned long xri; 14271 unsigned long xri;
@@ -14300,7 +14295,7 @@ lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
14300 * This routine is invoked to release an xri to the pool of 14295 * This routine is invoked to release an xri to the pool of
14301 * available rpis maintained by the driver. 14296 * available rpis maintained by the driver.
14302 **/ 14297 **/
-void
+static void
14304__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri) 14299__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
14305{ 14300{
14306 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) { 14301 if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
@@ -14720,7 +14715,7 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
14720 * the driver uses this time stamp to indicate if any received sequences have 14715 * the driver uses this time stamp to indicate if any received sequences have
14721 * timed out. 14716 * timed out.
14722 **/ 14717 **/
-void
+static void
14724lpfc_update_rcv_time_stamp(struct lpfc_vport *vport) 14719lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
14725{ 14720{
14726 struct lpfc_dmabuf *h_buf; 14721 struct lpfc_dmabuf *h_buf;
@@ -15019,7 +15014,7 @@ uint16_t
15019lpfc_sli4_xri_inrange(struct lpfc_hba *phba, 15014lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
15020 uint16_t xri) 15015 uint16_t xri)
15021{ 15016{
-	int i;
+	uint16_t i;
15023 15018
15024 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) { 15019 for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
15025 if (xri == phba->sli4_hba.xri_ids[i]) 15020 if (xri == phba->sli4_hba.xri_ids[i])
@@ -15189,7 +15184,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
15189 * unsolicited sequence has been aborted. After that, it will issue a basic 15184 * unsolicited sequence has been aborted. After that, it will issue a basic
15190 * accept to accept the abort. 15185 * accept to accept the abort.
15191 **/ 15186 **/
-void
+static void
15193lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport, 15188lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
15194 struct hbq_dmabuf *dmabuf) 15189 struct hbq_dmabuf *dmabuf)
15195{ 15190{
@@ -15734,7 +15729,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
15734 * This routine is invoked to release an rpi to the pool of 15729 * This routine is invoked to release an rpi to the pool of
15735 * available rpis maintained by the driver. 15730 * available rpis maintained by the driver.
15736 **/ 15731 **/
-void
+static void
15738__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) 15733__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
15739{ 15734{
15740 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) { 15735 if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
@@ -16172,7 +16167,7 @@ fail_fcf_read:
16172 * returns: 16167 * returns:
16173 * 1=success 0=failure 16168 * 1=success 0=failure
16174 **/ 16169 **/
-int
+static int
16176lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba) 16171lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
16177{ 16172{
16178 uint16_t next_fcf_pri; 16173 uint16_t next_fcf_pri;
@@ -16403,7 +16398,7 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
16403 * command. If the mailbox command returned failure, it will try to stop the 16398 * command. If the mailbox command returned failure, it will try to stop the
16404 * FCF rediscover wait timer. 16399 * FCF rediscover wait timer.
16405 **/ 16400 **/
-void
+static void
16407lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 16402lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
16408{ 16403{
16409 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf; 16404 struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
@@ -16956,7 +16951,7 @@ lpfc_drain_txq(struct lpfc_hba *phba)
16956 char *fail_msg = NULL; 16951 char *fail_msg = NULL;
16957 struct lpfc_sglq *sglq; 16952 struct lpfc_sglq *sglq;
16958 union lpfc_wqe wqe; 16953 union lpfc_wqe wqe;
-	int txq_cnt = 0;
+	uint32_t txq_cnt = 0;
16960 16955
16961 spin_lock_irqsave(&pring->ring_lock, iflags); 16956 spin_lock_irqsave(&pring->ring_lock, iflags);
16962 list_for_each_entry(piocbq, &pring->txq, list) { 16957 list_for_each_entry(piocbq, &pring->txq, list) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index edb48832c39b..4a01452415cf 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -79,6 +79,7 @@ struct lpfc_iocbq {
79#define LPFC_FIP_ELS_ID_SHIFT 14 79#define LPFC_FIP_ELS_ID_SHIFT 14
80 80
81#define LPFC_IO_OAS 0x10000 /* OAS FCP IO */ 81#define LPFC_IO_OAS 0x10000 /* OAS FCP IO */
82#define LPFC_IO_FOF 0x20000 /* FOF FCP IO */
82 83
83 uint32_t drvrTimeout; /* driver timeout in seconds */ 84 uint32_t drvrTimeout; /* driver timeout in seconds */
84 uint32_t fcp_wqidx; /* index to FCP work queue */ 85 uint32_t fcp_wqidx; /* index to FCP work queue */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 7f50aa04d66a..22ceb2b05ba1 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -670,22 +670,22 @@ void lpfc_sli4_hba_reset(struct lpfc_hba *);
 struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
 					 uint32_t);
 void lpfc_sli4_queue_free(struct lpfc_queue *);
-uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
-uint32_t lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint16_t);
-uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
-			struct lpfc_queue *, uint32_t, uint32_t);
+int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
+int lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint16_t);
+int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
+		   struct lpfc_queue *, uint32_t, uint32_t);
 int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
 		       struct lpfc_queue *, uint32_t);
-uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
-			struct lpfc_queue *, uint32_t);
-uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
-			struct lpfc_queue *, struct lpfc_queue *, uint32_t);
+int lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
+		   struct lpfc_queue *, uint32_t);
+int lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
+		   struct lpfc_queue *, struct lpfc_queue *, uint32_t);
 void lpfc_rq_adjust_repost(struct lpfc_hba *, struct lpfc_queue *, int);
-uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
-uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
-uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
-uint32_t lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *);
-uint32_t lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
-			 struct lpfc_queue *);
+int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+int lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+int lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+int lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
+		    struct lpfc_queue *);
 int lpfc_sli4_queue_setup(struct lpfc_hba *);
 void lpfc_sli4_queue_unset(struct lpfc_hba *);
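These prototype changes match the function bodies above: each routine returns 0 or a negative errno (-ENOMEM, -ENXIO), so plain int is the honest type and "rc < 0" tests behave; a uint32_t return silently turns error codes into large positive values. Sketch of the convention, with stand-in predicates:

#include <linux/errno.h>

static int queue_create_sketch(bool alloc_ok, bool mbox_ok)
{
	if (!alloc_ok)
		return -ENOMEM;	/* allocation failure */
	if (!mbox_ok)
		return -ENXIO;	/* mailbox command failure */
	return 0;
}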
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 41675c1193e7..89413add2252 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
-#define LPFC_DRIVER_VERSION "10.2.8001.0."
+#define LPFC_DRIVER_VERSION "10.4.8000.0."
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23 23
24/* Used for SLI 2/3 */ 24/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 32166c2c7854..a49914de4b95 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
33/* 33/*
34 * MegaRAID SAS Driver meta data 34 * MegaRAID SAS Driver meta data
35 */ 35 */
-#define MEGASAS_VERSION				"06.803.01.00-rc1"
-#define MEGASAS_RELDATE				"Mar. 10, 2014"
-#define MEGASAS_EXT_VERSION			"Mon. Mar. 10 17:00:00 PDT 2014"
+#define MEGASAS_VERSION				"06.805.06.00-rc1"
+#define MEGASAS_RELDATE				"Sep. 4, 2014"
+#define MEGASAS_EXT_VERSION			"Thu. Sep. 4 17:00:00 PDT 2014"
39 39
40/* 40/*
41 * Device IDs 41 * Device IDs
@@ -105,6 +105,9 @@
105#define MFI_STATE_READY 0xB0000000 105#define MFI_STATE_READY 0xB0000000
106#define MFI_STATE_OPERATIONAL 0xC0000000 106#define MFI_STATE_OPERATIONAL 0xC0000000
107#define MFI_STATE_FAULT 0xF0000000 107#define MFI_STATE_FAULT 0xF0000000
108#define MFI_STATE_FORCE_OCR 0x00000080
109#define MFI_STATE_DMADONE 0x00000008
110#define MFI_STATE_CRASH_DUMP_DONE 0x00000004
108#define MFI_RESET_REQUIRED 0x00000001 111#define MFI_RESET_REQUIRED 0x00000001
109#define MFI_RESET_ADAPTER 0x00000002 112#define MFI_RESET_ADAPTER 0x00000002
110#define MEGAMFI_FRAME_SIZE 64 113#define MEGAMFI_FRAME_SIZE 64
@@ -191,6 +194,9 @@
191#define MR_DCMD_CLUSTER_RESET_LD 0x08010200 194#define MR_DCMD_CLUSTER_RESET_LD 0x08010200
192#define MR_DCMD_PD_LIST_QUERY 0x02010100 195#define MR_DCMD_PD_LIST_QUERY 0x02010100
193 196
197#define MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS 0x01190100
198#define MR_DRIVER_SET_APP_CRASHDUMP_MODE (0xF0010000 | 0x0600)
199
194/* 200/*
195 * Global functions 201 * Global functions
196 */ 202 */
@@ -264,6 +270,25 @@ enum MFI_STAT {
264}; 270};
265 271
266/* 272/*
273 * Crash dump related defines
274 */
275#define MAX_CRASH_DUMP_SIZE 512
276#define CRASH_DMA_BUF_SIZE (1024 * 1024)
277
278enum MR_FW_CRASH_DUMP_STATE {
279 UNAVAILABLE = 0,
280 AVAILABLE = 1,
281 COPYING = 2,
282 COPIED = 3,
283 COPY_ERROR = 4,
284};
285
286enum _MR_CRASH_BUF_STATUS {
287 MR_CRASH_BUF_TURN_OFF = 0,
288 MR_CRASH_BUF_TURN_ON = 1,
289};
290
291/*
267 * Number of mailbox bytes in DCMD message frame 292 * Number of mailbox bytes in DCMD message frame
268 */ 293 */
269#define MFI_MBOX_SIZE 12 294#define MFI_MBOX_SIZE 12
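The crash dump is carried in 1 MB DMA chunks (CRASH_DMA_BUF_SIZE), at most MAX_CRASH_DUMP_SIZE (512) of them, and MR_FW_CRASH_DUMP_STATE tracks how far an application has gotten in copying it out. A sketch of the intended progression (inferred from the state names and the sysfs handlers later in this patch; the helper is illustrative, not driver code):

static int demo_crash_state_transition(int cur, int next)
{
	/* A dump surfaces as AVAILABLE; the application moves it to
	 * COPYING while reading, then to COPIED or COPY_ERROR. Both
	 * terminal states let the driver free the host buffer. */
	if (cur == AVAILABLE && next == COPYING)
		return 0;
	if (cur == COPYING && (next == COPIED || next == COPY_ERROR))
		return 0;
	return -EINVAL;
}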
@@ -365,7 +390,6 @@ enum MR_LD_QUERY_TYPE {
365#define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db 390#define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db
366#define MR_EVT_LD_OFFLINE 0x00fc 391#define MR_EVT_LD_OFFLINE 0x00fc
367#define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152 392#define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152
368#define MAX_LOGICAL_DRIVES 64
369 393
370enum MR_PD_STATE { 394enum MR_PD_STATE {
371 MR_PD_STATE_UNCONFIGURED_GOOD = 0x00, 395 MR_PD_STATE_UNCONFIGURED_GOOD = 0x00,
@@ -443,14 +467,14 @@ struct MR_LD_LIST {
443 u8 state; 467 u8 state;
444 u8 reserved[3]; 468 u8 reserved[3];
445 u64 size; 469 u64 size;
446 } ldList[MAX_LOGICAL_DRIVES]; 470 } ldList[MAX_LOGICAL_DRIVES_EXT];
447} __packed; 471} __packed;
448 472
449struct MR_LD_TARGETID_LIST { 473struct MR_LD_TARGETID_LIST {
450 u32 size; 474 u32 size;
451 u32 count; 475 u32 count;
452 u8 pad[3]; 476 u8 pad[3];
453 u8 targetId[MAX_LOGICAL_DRIVES]; 477 u8 targetId[MAX_LOGICAL_DRIVES_EXT];
454}; 478};
455 479
456 480
@@ -916,6 +940,15 @@ struct megasas_ctrl_info {
916 * HA cluster information 940 * HA cluster information
917 */ 941 */
918 struct { 942 struct {
943#if defined(__BIG_ENDIAN_BITFIELD)
944 u32 reserved:26;
945 u32 premiumFeatureMismatch:1;
946 u32 ctrlPropIncompatible:1;
947 u32 fwVersionMismatch:1;
948 u32 hwIncompatible:1;
949 u32 peerIsIncompatible:1;
950 u32 peerIsPresent:1;
951#else
919 u32 peerIsPresent:1; 952 u32 peerIsPresent:1;
920 u32 peerIsIncompatible:1; 953 u32 peerIsIncompatible:1;
921 u32 hwIncompatible:1; 954 u32 hwIncompatible:1;
@@ -923,6 +956,7 @@ struct megasas_ctrl_info {
923 u32 ctrlPropIncompatible:1; 956 u32 ctrlPropIncompatible:1;
924 u32 premiumFeatureMismatch:1; 957 u32 premiumFeatureMismatch:1;
925 u32 reserved:26; 958 u32 reserved:26;
959#endif
926 } cluster; 960 } cluster;
927 961
928 char clusterId[16]; /*7D4h */ 962 char clusterId[16]; /*7D4h */
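The #if defined(__BIG_ENDIAN_BITFIELD) mirror above is the usual kernel answer to C's implementation-defined bitfield ordering: declaring the same fields in reverse on big-endian builds keeps each flag at the same bit of the 32-bit word that firmware populates. The pattern in isolation (names invented for illustration):

struct demo_flags {
#if defined(__BIG_ENDIAN_BITFIELD)
	u32 reserved:30;
	u32 flag_b:1;
	u32 flag_a:1;
#else
	u32 flag_a:1;	/* bit 0 of the word on either build */
	u32 flag_b:1;	/* bit 1 */
	u32 reserved:30;
#endif
};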
@@ -933,7 +967,27 @@ struct megasas_ctrl_info {
933 u8 reserved; /*0x7E7*/ 967 u8 reserved; /*0x7E7*/
934 } iov; 968 } iov;
935 969
936 u8 pad[0x800-0x7E8]; /*0x7E8 pad to 2k */ 970 struct {
971#if defined(__BIG_ENDIAN_BITFIELD)
972 u32 reserved:25;
973 u32 supportCrashDump:1;
974 u32 supportMaxExtLDs:1;
975 u32 supportT10RebuildAssist:1;
976 u32 supportDisableImmediateIO:1;
977 u32 supportThermalPollInterval:1;
978 u32 supportPersonalityChange:2;
979#else
980 u32 supportPersonalityChange:2;
981 u32 supportThermalPollInterval:1;
982 u32 supportDisableImmediateIO:1;
983 u32 supportT10RebuildAssist:1;
984 u32 supportMaxExtLDs:1;
985 u32 supportCrashDump:1;
986 u32 reserved:25;
987#endif
988 } adapterOperations3;
989
990 u8 pad[0x800-0x7EC];
937} __packed; 991} __packed;
938 992
939/* 993/*
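The new adapterOperations3 block occupies four bytes at offset 0x7E8, which is exactly why the trailing pad shrinks from 0x800-0x7E8 to 0x800-0x7EC: the structure must stay 2 KB so offsets shared with firmware do not move. The arithmetic as a compilable userspace check (illustrative only):

#include <assert.h>

int main(void)
{
	assert(0x7E8 + 4 == 0x7EC);		  /* one u32 of bitfields */
	assert(0x7EC + (0x800 - 0x7EC) == 0x800); /* still 2048 bytes */
	return 0;
}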
@@ -942,13 +996,12 @@ struct megasas_ctrl_info {
942 * =============================== 996 * ===============================
943 */ 997 */
944#define MEGASAS_MAX_PD_CHANNELS 2 998#define MEGASAS_MAX_PD_CHANNELS 2
945#define MEGASAS_MAX_LD_CHANNELS 1 999#define MEGASAS_MAX_LD_CHANNELS 2
946#define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \ 1000#define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \
947 MEGASAS_MAX_LD_CHANNELS) 1001 MEGASAS_MAX_LD_CHANNELS)
948#define MEGASAS_MAX_DEV_PER_CHANNEL 128 1002#define MEGASAS_MAX_DEV_PER_CHANNEL 128
949#define MEGASAS_DEFAULT_INIT_ID -1 1003#define MEGASAS_DEFAULT_INIT_ID -1
950#define MEGASAS_MAX_LUN 8 1004#define MEGASAS_MAX_LUN 8
951#define MEGASAS_MAX_LD 64
952#define MEGASAS_DEFAULT_CMD_PER_LUN 256 1005#define MEGASAS_DEFAULT_CMD_PER_LUN 256
953#define MEGASAS_MAX_PD (MEGASAS_MAX_PD_CHANNELS * \ 1006#define MEGASAS_MAX_PD (MEGASAS_MAX_PD_CHANNELS * \
954 MEGASAS_MAX_DEV_PER_CHANNEL) 1007 MEGASAS_MAX_DEV_PER_CHANNEL)
@@ -961,6 +1014,14 @@ struct megasas_ctrl_info {
961 1014
962#define MEGASAS_FW_BUSY 1 1015#define MEGASAS_FW_BUSY 1
963 1016
1017#define VD_EXT_DEBUG 0
1018
1019enum MR_MFI_MPT_PTHR_FLAGS {
1020 MFI_MPT_DETACHED = 0,
1021 MFI_LIST_ADDED = 1,
1022 MFI_MPT_ATTACHED = 2,
1023};
1024
964/* Frame Type */ 1025/* Frame Type */
965#define IO_FRAME 0 1026#define IO_FRAME 0
966#define PTHRU_FRAME 1 1027#define PTHRU_FRAME 1
@@ -978,7 +1039,7 @@ struct megasas_ctrl_info {
978#define MEGASAS_IOCTL_CMD 0 1039#define MEGASAS_IOCTL_CMD 0
979#define MEGASAS_DEFAULT_CMD_TIMEOUT 90 1040#define MEGASAS_DEFAULT_CMD_TIMEOUT 90
980#define MEGASAS_THROTTLE_QUEUE_DEPTH 16 1041#define MEGASAS_THROTTLE_QUEUE_DEPTH 16
981 1042#define MEGASAS_BLOCKED_CMD_TIMEOUT 60
982/* 1043/*
983 * FW reports the maximum of number of commands that it can accept (maximum 1044 * FW reports the maximum of number of commands that it can accept (maximum
984 * commands that can be outstanding) at any time. The driver must report a 1045 * commands that can be outstanding) at any time. The driver must report a
@@ -1133,13 +1194,19 @@ union megasas_sgl_frame {
1133typedef union _MFI_CAPABILITIES { 1194typedef union _MFI_CAPABILITIES {
1134 struct { 1195 struct {
1135#if defined(__BIG_ENDIAN_BITFIELD) 1196#if defined(__BIG_ENDIAN_BITFIELD)
1136 u32 reserved:30; 1197 u32 reserved:27;
1198 u32 support_ndrive_r1_lb:1;
1199 u32 support_max_255lds:1;
1200 u32 reserved1:1;
1137 u32 support_additional_msix:1; 1201 u32 support_additional_msix:1;
1138 u32 support_fp_remote_lun:1; 1202 u32 support_fp_remote_lun:1;
1139#else 1203#else
1140 u32 support_fp_remote_lun:1; 1204 u32 support_fp_remote_lun:1;
1141 u32 support_additional_msix:1; 1205 u32 support_additional_msix:1;
1142 u32 reserved:30; 1206 u32 reserved1:1;
1207 u32 support_max_255lds:1;
1208 u32 support_ndrive_r1_lb:1;
1209 u32 reserved:27;
1143#endif 1210#endif
1144 } mfi_capabilities; 1211 } mfi_capabilities;
1145 u32 reg; 1212 u32 reg;
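Pairing the bitfield struct with a raw u32 reg in one union lets the driver set capabilities by name and then hand the packed word to firmware in a single store. A userspace-compilable sketch of the same pattern on a little-endian build (types and names are stand-ins, not the driver's):

#include <stdint.h>
#include <stdio.h>

typedef union demo_capabilities {
	struct {
		uint32_t support_fp_remote_lun:1;
		uint32_t support_additional_msix:1;
		uint32_t reserved1:1;
		uint32_t support_max_255lds:1;
		uint32_t support_ndrive_r1_lb:1;
		uint32_t reserved:27;
	} bits;
	uint32_t reg;
} demo_capabilities;

int main(void)
{
	demo_capabilities caps = { .reg = 0 };

	caps.bits.support_max_255lds = 1;	/* bit 3 on little-endian */
	printf("reg = 0x%08x\n", (unsigned int)caps.reg); /* 0x00000008 */
	return 0;
}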
@@ -1559,6 +1626,20 @@ struct megasas_instance {
1559 u32 *reply_queue; 1626 u32 *reply_queue;
1560 dma_addr_t reply_queue_h; 1627 dma_addr_t reply_queue_h;
1561 1628
1629 u32 *crash_dump_buf;
1630 dma_addr_t crash_dump_h;
1631 void *crash_buf[MAX_CRASH_DUMP_SIZE];
1632 u32 crash_buf_pages;
1633 unsigned int fw_crash_buffer_size;
1634 unsigned int fw_crash_state;
1635 unsigned int fw_crash_buffer_offset;
1636 u32 drv_buf_index;
1637 u32 drv_buf_alloc;
1638 u32 crash_dump_fw_support;
1639 u32 crash_dump_drv_support;
1640 u32 crash_dump_app_support;
1641 spinlock_t crashdump_lock;
1642
1562 struct megasas_register_set __iomem *reg_set; 1643 struct megasas_register_set __iomem *reg_set;
1563 u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY]; 1644 u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
1564 struct megasas_pd_list pd_list[MEGASAS_MAX_PD]; 1645 struct megasas_pd_list pd_list[MEGASAS_MAX_PD];
@@ -1577,7 +1658,7 @@ struct megasas_instance {
1577 struct megasas_cmd **cmd_list; 1658 struct megasas_cmd **cmd_list;
1578 struct list_head cmd_pool; 1659 struct list_head cmd_pool;
1579 /* used to sync fire the cmd to fw */ 1660 /* used to sync fire the cmd to fw */
1580 spinlock_t cmd_pool_lock; 1661 spinlock_t mfi_pool_lock;
1581 /* used to sync fire the cmd to fw */ 1662 /* used to sync fire the cmd to fw */
1582 spinlock_t hba_lock; 1663 spinlock_t hba_lock;
1583 /* used to synch producer, consumer ptrs in dpc */ 1664 /* used to synch producer, consumer ptrs in dpc */
@@ -1606,6 +1687,7 @@ struct megasas_instance {
1606 struct megasas_instance_template *instancet; 1687 struct megasas_instance_template *instancet;
1607 struct tasklet_struct isr_tasklet; 1688 struct tasklet_struct isr_tasklet;
1608 struct work_struct work_init; 1689 struct work_struct work_init;
1690 struct work_struct crash_init;
1609 1691
1610 u8 flag; 1692 u8 flag;
1611 u8 unload; 1693 u8 unload;
@@ -1613,6 +1695,14 @@ struct megasas_instance {
1613 u8 issuepend_done; 1695 u8 issuepend_done;
1614 u8 disableOnlineCtrlReset; 1696 u8 disableOnlineCtrlReset;
1615 u8 UnevenSpanSupport; 1697 u8 UnevenSpanSupport;
1698
1699 u8 supportmax256vd;
1700 u16 fw_supported_vd_count;
1701 u16 fw_supported_pd_count;
1702
1703 u16 drv_supported_vd_count;
1704 u16 drv_supported_pd_count;
1705
1616 u8 adprecovery; 1706 u8 adprecovery;
1617 unsigned long last_time; 1707 unsigned long last_time;
1618 u32 mfiStatus; 1708 u32 mfiStatus;
@@ -1622,6 +1712,8 @@ struct megasas_instance {
1622 1712
1623 /* Ptr to hba specific information */ 1713 /* Ptr to hba specific information */
1624 void *ctrl_context; 1714 void *ctrl_context;
1715 u32 ctrl_context_pages;
1716 struct megasas_ctrl_info *ctrl_info;
1625 unsigned int msix_vectors; 1717 unsigned int msix_vectors;
1626 struct msix_entry msixentry[MEGASAS_MAX_MSIX_QUEUES]; 1718 struct msix_entry msixentry[MEGASAS_MAX_MSIX_QUEUES];
1627 struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES]; 1719 struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES];
@@ -1633,8 +1725,6 @@ struct megasas_instance {
1633 struct timer_list sriov_heartbeat_timer; 1725 struct timer_list sriov_heartbeat_timer;
1634 char skip_heartbeat_timer_del; 1726 char skip_heartbeat_timer_del;
1635 u8 requestorId; 1727 u8 requestorId;
1636 u64 initiator_sas_address;
1637 u64 ld_sas_address[64];
1638 char PlasmaFW111; 1728 char PlasmaFW111;
1639 char mpio; 1729 char mpio;
1640 int throttlequeuedepth; 1730 int throttlequeuedepth;
@@ -1661,6 +1751,7 @@ struct MR_LD_VF_AFFILIATION {
1661/* Plasma 1.11 FW backward compatibility structures */ 1751/* Plasma 1.11 FW backward compatibility structures */
1662#define IOV_111_OFFSET 0x7CE 1752#define IOV_111_OFFSET 0x7CE
1663#define MAX_VIRTUAL_FUNCTIONS 8 1753#define MAX_VIRTUAL_FUNCTIONS 8
1754#define MR_LD_ACCESS_HIDDEN 15
1664 1755
1665struct IOV_111 { 1756struct IOV_111 {
1666 u8 maxVFsSupported; 1757 u8 maxVFsSupported;
@@ -1754,6 +1845,11 @@ struct megasas_cmd {
1754 1845
1755 struct list_head list; 1846 struct list_head list;
1756 struct scsi_cmnd *scmd; 1847 struct scsi_cmnd *scmd;
1848
1849 void *mpt_pthr_cmd_blocked;
1850 atomic_t mfi_mpt_pthr;
1851 u8 is_wait_event;
1852
1757 struct megasas_instance *instance; 1853 struct megasas_instance *instance;
1758 union { 1854 union {
1759 struct { 1855 struct {
@@ -1823,12 +1919,33 @@ u8
1823MR_BuildRaidContext(struct megasas_instance *instance, 1919MR_BuildRaidContext(struct megasas_instance *instance,
1824 struct IO_REQUEST_INFO *io_info, 1920 struct IO_REQUEST_INFO *io_info,
1825 struct RAID_CONTEXT *pRAID_Context, 1921 struct RAID_CONTEXT *pRAID_Context,
1826 struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN); 1922 struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN);
1827u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map); 1923u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map);
1828struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map); 1924struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
1829u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map); 1925u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map);
1830u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map); 1926u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map);
1831u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map); 1927u16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map);
1832u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map); 1928u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
1929
1930u16 get_updated_dev_handle(struct megasas_instance *instance,
1931 struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *in_info);
1932void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map,
1933 struct LD_LOAD_BALANCE_INFO *lbInfo);
1934int megasas_get_ctrl_info(struct megasas_instance *instance,
1935 struct megasas_ctrl_info *ctrl_info);
1936int megasas_set_crash_dump_params(struct megasas_instance *instance,
1937 u8 crash_buf_state);
1938void megasas_free_host_crash_buffer(struct megasas_instance *instance);
1939void megasas_fusion_crash_dump_wq(struct work_struct *work);
1940
1941void megasas_return_cmd_fusion(struct megasas_instance *instance,
1942 struct megasas_cmd_fusion *cmd);
1943int megasas_issue_blocked_cmd(struct megasas_instance *instance,
1944 struct megasas_cmd *cmd, int timeout);
1945void __megasas_return_cmd(struct megasas_instance *instance,
1946 struct megasas_cmd *cmd);
1947
1948void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance,
1949 struct megasas_cmd *cmd_mfi, struct megasas_cmd_fusion *cmd_fusion);
1833 1950
1834#endif /*LSI_MEGARAID_SAS_H */ 1951#endif /*LSI_MEGARAID_SAS_H */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 22a04e37b70a..f6a69a3b1b3f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas_base.c 20 * FILE: megaraid_sas_base.c
21 * Version : 06.803.01.00-rc1 21 * Version : 06.805.06.00-rc1
22 * 22 *
23 * Authors: LSI Corporation 23 * Authors: LSI Corporation
24 * Sreenivas Bagalkote 24 * Sreenivas Bagalkote
@@ -89,6 +89,10 @@ module_param(resetwaittime, int, S_IRUGO);
89MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout " 89MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
90 "before resetting adapter. Default: 180"); 90 "before resetting adapter. Default: 180");
91 91
92int smp_affinity_enable = 1;
93module_param(smp_affinity_enable, int, S_IRUGO);
 94MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable. Default: enable(1)");
95
92MODULE_LICENSE("GPL"); 96MODULE_LICENSE("GPL");
93MODULE_VERSION(MEGASAS_VERSION); 97MODULE_VERSION(MEGASAS_VERSION);
94MODULE_AUTHOR("megaraidlinux@lsi.com"); 98MODULE_AUTHOR("megaraidlinux@lsi.com");
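Note that smp_affinity_enable is declared S_IRUGO, i.e. read-only at runtime, so the feature can only be turned off at load time (for example, modprobe megaraid_sas smp_affinity_enable=0); the IRQ-affinity behavior it gates is assumed to live in the interrupt setup paths elsewhere in this patch.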
@@ -206,43 +210,66 @@ struct megasas_cmd *megasas_get_cmd(struct megasas_instance
206 unsigned long flags; 210 unsigned long flags;
207 struct megasas_cmd *cmd = NULL; 211 struct megasas_cmd *cmd = NULL;
208 212
209 spin_lock_irqsave(&instance->cmd_pool_lock, flags); 213 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
210 214
211 if (!list_empty(&instance->cmd_pool)) { 215 if (!list_empty(&instance->cmd_pool)) {
212 cmd = list_entry((&instance->cmd_pool)->next, 216 cmd = list_entry((&instance->cmd_pool)->next,
213 struct megasas_cmd, list); 217 struct megasas_cmd, list);
214 list_del_init(&cmd->list); 218 list_del_init(&cmd->list);
219 atomic_set(&cmd->mfi_mpt_pthr, MFI_MPT_DETACHED);
215 } else { 220 } else {
216 printk(KERN_ERR "megasas: Command pool empty!\n"); 221 printk(KERN_ERR "megasas: Command pool empty!\n");
217 } 222 }
218 223
219 spin_unlock_irqrestore(&instance->cmd_pool_lock, flags); 224 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
220 return cmd; 225 return cmd;
221} 226}
222 227
223/** 228/**
224 * megasas_return_cmd - Return a cmd to free command pool 229 * __megasas_return_cmd - Return a cmd to free command pool
225 * @instance: Adapter soft state 230 * @instance: Adapter soft state
226 * @cmd: Command packet to be returned to free command pool 231 * @cmd: Command packet to be returned to free command pool
227 */ 232 */
228inline void 233inline void
229megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) 234__megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
230{ 235{
 231 unsigned long flags; 236 /*
 232 237 * Don't free the MFI frame if the corresponding
 233 spin_lock_irqsave(&instance->cmd_pool_lock, flags); 238 * MPT frame is not yet freed (this matters only for fusion adapters).
 239 * For MFI adapters, any allocated MFI frame
 240 * will have cmd->mfi_mpt_pthr set to MFI_MPT_DETACHED.
 241 */
242 if (atomic_read(&cmd->mfi_mpt_pthr) != MFI_MPT_DETACHED)
243 return;
234 244
235 cmd->scmd = NULL; 245 cmd->scmd = NULL;
236 cmd->frame_count = 0; 246 cmd->frame_count = 0;
247 cmd->is_wait_event = 0;
248 cmd->mpt_pthr_cmd_blocked = NULL;
249
237 if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) && 250 if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
238 (instance->pdev->device != PCI_DEVICE_ID_LSI_PLASMA) &&
239 (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) && 251 (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
240 (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) && 252 (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&
241 (reset_devices)) 253 (reset_devices))
242 cmd->frame->hdr.cmd = MFI_CMD_INVALID; 254 cmd->frame->hdr.cmd = MFI_CMD_INVALID;
243 list_add_tail(&cmd->list, &instance->cmd_pool);
244 255
245 spin_unlock_irqrestore(&instance->cmd_pool_lock, flags); 256 atomic_set(&cmd->mfi_mpt_pthr, MFI_LIST_ADDED);
257 list_add(&cmd->list, (&instance->cmd_pool)->next);
258}
259
260/**
261 * megasas_return_cmd - Return a cmd to free command pool
262 * @instance: Adapter soft state
263 * @cmd: Command packet to be returned to free command pool
264 */
265inline void
266megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
267{
268 unsigned long flags;
269
270 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
271 __megasas_return_cmd(instance, cmd);
272 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
246} 273}
247 274
248 275
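Splitting the return path into a bare __megasas_return_cmd() plus a locking megasas_return_cmd() wrapper follows the kernel's double-underscore idiom: the __ variant assumes mfi_pool_lock is already held, so callers that must return a command from within a locked section can do so without self-deadlock. The idiom in miniature (demo types, not driver code):

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_pool {
	spinlock_t lock;
	struct list_head free_list;
};

struct demo_item {
	struct list_head list;
};

/* Caller must already hold pool->lock. */
static void __demo_return(struct demo_pool *pool, struct demo_item *item)
{
	list_add(&item->list, &pool->free_list);
}

/* Locking wrapper for everyone else. */
static void demo_return(struct demo_pool *pool, struct demo_item *item)
{
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	__demo_return(pool, item);
	spin_unlock_irqrestore(&pool->lock, flags);
}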
@@ -921,13 +948,14 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
921 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs 948 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
922 * Used to issue ioctl commands. 949 * Used to issue ioctl commands.
923 */ 950 */
924static int 951int
925megasas_issue_blocked_cmd(struct megasas_instance *instance, 952megasas_issue_blocked_cmd(struct megasas_instance *instance,
926 struct megasas_cmd *cmd, int timeout) 953 struct megasas_cmd *cmd, int timeout)
927{ 954{
928 int ret = 0; 955 int ret = 0;
929 cmd->cmd_status = ENODATA; 956 cmd->cmd_status = ENODATA;
930 957
958 cmd->is_wait_event = 1;
931 instance->instancet->issue_dcmd(instance, cmd); 959 instance->instancet->issue_dcmd(instance, cmd);
932 if (timeout) { 960 if (timeout) {
933 ret = wait_event_timeout(instance->int_cmd_wait_q, 961 ret = wait_event_timeout(instance->int_cmd_wait_q,
@@ -1536,7 +1564,7 @@ out_return_cmd:
1536 * @done: Callback entry point 1564 * @done: Callback entry point
1537 */ 1565 */
1538static int 1566static int
1539megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *)) 1567megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1540{ 1568{
1541 struct megasas_instance *instance; 1569 struct megasas_instance *instance;
1542 unsigned long flags; 1570 unsigned long flags;
@@ -1558,7 +1586,7 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd
1558 } else { 1586 } else {
1559 spin_unlock_irqrestore(&instance->hba_lock, flags); 1587 spin_unlock_irqrestore(&instance->hba_lock, flags);
1560 scmd->result = DID_NO_CONNECT << 16; 1588 scmd->result = DID_NO_CONNECT << 16;
1561 done(scmd); 1589 scmd->scsi_done(scmd);
1562 return 0; 1590 return 0;
1563 } 1591 }
1564 } 1592 }
@@ -1566,7 +1594,7 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd
1566 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { 1594 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
1567 spin_unlock_irqrestore(&instance->hba_lock, flags); 1595 spin_unlock_irqrestore(&instance->hba_lock, flags);
1568 scmd->result = DID_NO_CONNECT << 16; 1596 scmd->result = DID_NO_CONNECT << 16;
1569 done(scmd); 1597 scmd->scsi_done(scmd);
1570 return 0; 1598 return 0;
1571 } 1599 }
1572 1600
@@ -1577,11 +1605,11 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd
1577 1605
1578 spin_unlock_irqrestore(&instance->hba_lock, flags); 1606 spin_unlock_irqrestore(&instance->hba_lock, flags);
1579 1607
1580 scmd->scsi_done = done;
1581 scmd->result = 0; 1608 scmd->result = 0;
1582 1609
1583 if (MEGASAS_IS_LOGICAL(scmd) && 1610 if (MEGASAS_IS_LOGICAL(scmd) &&
1584 (scmd->device->id >= MEGASAS_MAX_LD || scmd->device->lun)) { 1611 (scmd->device->id >= instance->fw_supported_vd_count ||
1612 scmd->device->lun)) {
1585 scmd->result = DID_BAD_TARGET << 16; 1613 scmd->result = DID_BAD_TARGET << 16;
1586 goto out_done; 1614 goto out_done;
1587 } 1615 }
@@ -1606,12 +1634,10 @@ megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd
1606 return 0; 1634 return 0;
1607 1635
1608 out_done: 1636 out_done:
1609 done(scmd); 1637 scmd->scsi_done(scmd);
1610 return 0; 1638 return 0;
1611} 1639}
1612 1640
1613static DEF_SCSI_QCMD(megasas_queue_command)
1614
1615static struct megasas_instance *megasas_lookup_instance(u16 host_no) 1641static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1616{ 1642{
1617 int i; 1643 int i;
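Dropping DEF_SCSI_QCMD converts the driver to the lockless queuecommand convention: the midlayer no longer takes host_lock around the call, the completion callback arrives in scmd->scsi_done rather than as a parameter, and the entry point receives the Scsi_Host directly. The new-style prototype in a minimal sketch (illustrative; the real function's logic is above):

static int demo_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
{
	/* Completion callback now lives in the command itself. */
	scmd->result = DID_NO_CONNECT << 16;
	scmd->scsi_done(scmd);
	return 0;
}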
@@ -1628,36 +1654,12 @@ static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1628 1654
1629static int megasas_slave_configure(struct scsi_device *sdev) 1655static int megasas_slave_configure(struct scsi_device *sdev)
1630{ 1656{
1631 u16 pd_index = 0;
1632 struct megasas_instance *instance ;
1633
1634 instance = megasas_lookup_instance(sdev->host->host_no);
1635
1636 /*
1637 * Don't export physical disk devices to the disk driver.
1638 *
1639 * FIXME: Currently we don't export them to the midlayer at all.
1640 * That will be fixed once LSI engineers have audited the
1641 * firmware for possible issues.
1642 */
1643 if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
1644 sdev->type == TYPE_DISK) {
1645 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1646 sdev->id;
1647 if (instance->pd_list[pd_index].driveState ==
1648 MR_PD_STATE_SYSTEM) {
1649 blk_queue_rq_timeout(sdev->request_queue,
1650 MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
1651 return 0;
1652 }
1653 return -ENXIO;
1654 }
1655
1656 /* 1657 /*
1657 * The RAID firmware may require extended timeouts. 1658 * The RAID firmware may require extended timeouts.
1658 */ 1659 */
1659 blk_queue_rq_timeout(sdev->request_queue, 1660 blk_queue_rq_timeout(sdev->request_queue,
1660 MEGASAS_DEFAULT_CMD_TIMEOUT * HZ); 1661 MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
1662
1661 return 0; 1663 return 0;
1662} 1664}
1663 1665
@@ -1666,18 +1668,15 @@ static int megasas_slave_alloc(struct scsi_device *sdev)
1666 u16 pd_index = 0; 1668 u16 pd_index = 0;
1667 struct megasas_instance *instance ; 1669 struct megasas_instance *instance ;
1668 instance = megasas_lookup_instance(sdev->host->host_no); 1670 instance = megasas_lookup_instance(sdev->host->host_no);
1669 if ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) && 1671 if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
1670 (sdev->type == TYPE_DISK)) {
1671 /* 1672 /*
1672 * Open the OS scan to the SYSTEM PD 1673 * Open the OS scan to the SYSTEM PD
1673 */ 1674 */
1674 pd_index = 1675 pd_index =
1675 (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + 1676 (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1676 sdev->id; 1677 sdev->id;
1677 if ((instance->pd_list[pd_index].driveState == 1678 if (instance->pd_list[pd_index].driveState ==
1678 MR_PD_STATE_SYSTEM) && 1679 MR_PD_STATE_SYSTEM) {
1679 (instance->pd_list[pd_index].driveType ==
1680 TYPE_DISK)) {
1681 return 0; 1680 return 0;
1682 } 1681 }
1683 return -ENXIO; 1682 return -ENXIO;
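For reference, pd_index is a flat slot number, channel * MEGASAS_MAX_DEV_PER_CHANNEL + id, so with 128 devices per channel a disk at channel 1, id 5 lands in pd_list[133]. A one-line userspace check of that arithmetic (illustrative):

#include <assert.h>

int main(void)
{
	assert(1 * 128 + 5 == 133);	/* channel 1, id 5 -> slot 133 */
	return 0;
}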
@@ -1825,16 +1824,12 @@ void megasas_do_ocr(struct megasas_instance *instance)
1825 process_fw_state_change_wq(&instance->work_init); 1824 process_fw_state_change_wq(&instance->work_init);
1826} 1825}
1827 1826
1828/* This function will get the current SR-IOV LD/VF affiliation */ 1827static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
1829static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, 1828 int initial)
1830 int initial)
1831{ 1829{
1832 struct megasas_cmd *cmd; 1830 struct megasas_cmd *cmd;
1833 struct megasas_dcmd_frame *dcmd; 1831 struct megasas_dcmd_frame *dcmd;
1834 struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
1835 struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL; 1832 struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
1836 struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
1837 dma_addr_t new_affiliation_h;
1838 dma_addr_t new_affiliation_111_h; 1833 dma_addr_t new_affiliation_111_h;
1839 int ld, retval = 0; 1834 int ld, retval = 0;
1840 u8 thisVf; 1835 u8 thisVf;
@@ -1842,15 +1837,15 @@ static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
1842 cmd = megasas_get_cmd(instance); 1837 cmd = megasas_get_cmd(instance);
1843 1838
1844 if (!cmd) { 1839 if (!cmd) {
 1845 printk(KERN_DEBUG "megasas: megasas_get_ld_vf_" 1840 printk(KERN_DEBUG "megasas: megasas_get_ld_vf_affiliation_111: "
1846 "affiliation: Failed to get cmd for scsi%d.\n", 1841 "Failed to get cmd for scsi%d.\n",
1847 instance->host->host_no); 1842 instance->host->host_no);
1848 return -ENOMEM; 1843 return -ENOMEM;
1849 } 1844 }
1850 1845
1851 dcmd = &cmd->frame->dcmd; 1846 dcmd = &cmd->frame->dcmd;
1852 1847
1853 if (!instance->vf_affiliation && !instance->vf_affiliation_111) { 1848 if (!instance->vf_affiliation_111) {
1854 printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF " 1849 printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF "
1855 "affiliation for scsi%d.\n", instance->host->host_no); 1850 "affiliation for scsi%d.\n", instance->host->host_no);
1856 megasas_return_cmd(instance, cmd); 1851 megasas_return_cmd(instance, cmd);
@@ -1858,38 +1853,22 @@ static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
1858 } 1853 }
1859 1854
1860 if (initial) 1855 if (initial)
1861 if (instance->PlasmaFW111)
1862 memset(instance->vf_affiliation_111, 0, 1856 memset(instance->vf_affiliation_111, 0,
1863 sizeof(struct MR_LD_VF_AFFILIATION_111)); 1857 sizeof(struct MR_LD_VF_AFFILIATION_111));
1864 else
1865 memset(instance->vf_affiliation, 0,
1866 (MAX_LOGICAL_DRIVES + 1) *
1867 sizeof(struct MR_LD_VF_AFFILIATION));
1868 else { 1858 else {
1869 if (instance->PlasmaFW111) 1859 new_affiliation_111 =
1870 new_affiliation_111 = 1860 pci_alloc_consistent(instance->pdev,
1871 pci_alloc_consistent(instance->pdev, 1861 sizeof(struct MR_LD_VF_AFFILIATION_111),
1872 sizeof(struct MR_LD_VF_AFFILIATION_111), 1862 &new_affiliation_111_h);
1873 &new_affiliation_111_h); 1863 if (!new_affiliation_111) {
1874 else
1875 new_affiliation =
1876 pci_alloc_consistent(instance->pdev,
1877 (MAX_LOGICAL_DRIVES + 1) *
1878 sizeof(struct MR_LD_VF_AFFILIATION),
1879 &new_affiliation_h);
1880 if (!new_affiliation && !new_affiliation_111) {
1881 printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate " 1864 printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate "
1882 "memory for new affiliation for scsi%d.\n", 1865 "memory for new affiliation for scsi%d.\n",
1883 instance->host->host_no); 1866 instance->host->host_no);
1884 megasas_return_cmd(instance, cmd); 1867 megasas_return_cmd(instance, cmd);
1885 return -ENOMEM; 1868 return -ENOMEM;
1886 } 1869 }
1887 if (instance->PlasmaFW111) 1870 memset(new_affiliation_111, 0,
1888 memset(new_affiliation_111, 0, 1871 sizeof(struct MR_LD_VF_AFFILIATION_111));
1889 sizeof(struct MR_LD_VF_AFFILIATION_111));
1890 else
1891 memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
1892 sizeof(struct MR_LD_VF_AFFILIATION));
1893 } 1872 }
1894 1873
1895 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 1874 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -1900,34 +1879,17 @@ static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
1900 dcmd->flags = MFI_FRAME_DIR_BOTH; 1879 dcmd->flags = MFI_FRAME_DIR_BOTH;
1901 dcmd->timeout = 0; 1880 dcmd->timeout = 0;
1902 dcmd->pad_0 = 0; 1881 dcmd->pad_0 = 0;
1903 if (instance->PlasmaFW111) { 1882 dcmd->data_xfer_len = sizeof(struct MR_LD_VF_AFFILIATION_111);
1904 dcmd->data_xfer_len = sizeof(struct MR_LD_VF_AFFILIATION_111); 1883 dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111;
1905 dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111;
1906 } else {
1907 dcmd->data_xfer_len = (MAX_LOGICAL_DRIVES + 1) *
1908 sizeof(struct MR_LD_VF_AFFILIATION);
1909 dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS;
1910 }
1911 1884
1912 if (initial) { 1885 if (initial)
1913 if (instance->PlasmaFW111) 1886 dcmd->sgl.sge32[0].phys_addr =
1914 dcmd->sgl.sge32[0].phys_addr = 1887 instance->vf_affiliation_111_h;
1915 instance->vf_affiliation_111_h;
1916 else
1917 dcmd->sgl.sge32[0].phys_addr =
1918 instance->vf_affiliation_h;
1919 } else {
1920 if (instance->PlasmaFW111)
1921 dcmd->sgl.sge32[0].phys_addr = new_affiliation_111_h;
1922 else
1923 dcmd->sgl.sge32[0].phys_addr = new_affiliation_h;
1924 }
1925 if (instance->PlasmaFW111)
1926 dcmd->sgl.sge32[0].length =
1927 sizeof(struct MR_LD_VF_AFFILIATION_111);
1928 else 1888 else
1929 dcmd->sgl.sge32[0].length = (MAX_LOGICAL_DRIVES + 1) * 1889 dcmd->sgl.sge32[0].phys_addr = new_affiliation_111_h;
1930 sizeof(struct MR_LD_VF_AFFILIATION); 1890
1891 dcmd->sgl.sge32[0].length =
1892 sizeof(struct MR_LD_VF_AFFILIATION_111);
1931 1893
1932 printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for " 1894 printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
1933 "scsi%d\n", instance->host->host_no); 1895 "scsi%d\n", instance->host->host_no);
@@ -1943,80 +1905,222 @@ static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
1943 } 1905 }
1944 1906
1945 if (!initial) { 1907 if (!initial) {
1946 if (instance->PlasmaFW111) { 1908 thisVf = new_affiliation_111->thisVf;
1947 if (!new_affiliation_111->vdCount) { 1909 for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
1948 printk(KERN_WARNING "megasas: SR-IOV: Got new " 1910 if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
1949 "LD/VF affiliation for passive path " 1911 new_affiliation_111->map[ld].policy[thisVf]) {
1912 printk(KERN_WARNING "megasas: SR-IOV: "
1913 "Got new LD/VF affiliation "
1950 "for scsi%d.\n", 1914 "for scsi%d.\n",
1951 instance->host->host_no);
1952 retval = 1;
1953 goto out;
1954 }
1955 thisVf = new_affiliation_111->thisVf;
1956 for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
1957 if (instance->vf_affiliation_111->map[ld].policy[thisVf] != new_affiliation_111->map[ld].policy[thisVf]) {
1958 printk(KERN_WARNING "megasas: SR-IOV: "
1959 "Got new LD/VF affiliation "
1960 "for scsi%d.\n",
1961 instance->host->host_no);
1962 memcpy(instance->vf_affiliation_111,
1963 new_affiliation_111,
1964 sizeof(struct MR_LD_VF_AFFILIATION_111));
1965 retval = 1;
1966 goto out;
1967 }
1968 } else {
1969 if (!new_affiliation->ldCount) {
1970 printk(KERN_WARNING "megasas: SR-IOV: Got new "
1971 "LD/VF affiliation for passive "
1972 "path for scsi%d.\n",
1973 instance->host->host_no); 1915 instance->host->host_no);
1916 memcpy(instance->vf_affiliation_111,
1917 new_affiliation_111,
1918 sizeof(struct MR_LD_VF_AFFILIATION_111));
1974 retval = 1; 1919 retval = 1;
1975 goto out; 1920 goto out;
1976 } 1921 }
1977 newmap = new_affiliation->map; 1922 }
1978 savedmap = instance->vf_affiliation->map; 1923out:
1979 thisVf = new_affiliation->thisVf; 1924 if (new_affiliation_111) {
1980 for (ld = 0 ; ld < new_affiliation->ldCount; ld++) { 1925 pci_free_consistent(instance->pdev,
1981 if (savedmap->policy[thisVf] != 1926 sizeof(struct MR_LD_VF_AFFILIATION_111),
1982 newmap->policy[thisVf]) { 1927 new_affiliation_111,
1983 printk(KERN_WARNING "megasas: SR-IOV: " 1928 new_affiliation_111_h);
1984 "Got new LD/VF affiliation " 1929 }
1985 "for scsi%d.\n", 1930
1986 instance->host->host_no); 1931 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
1987 memcpy(instance->vf_affiliation, 1932 megasas_return_mfi_mpt_pthr(instance, cmd,
1988 new_affiliation, 1933 cmd->mpt_pthr_cmd_blocked);
1989 new_affiliation->size); 1934 else
1990 retval = 1; 1935 megasas_return_cmd(instance, cmd);
1991 goto out; 1936
1937 return retval;
1938}
1939
1940static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
1941 int initial)
1942{
1943 struct megasas_cmd *cmd;
1944 struct megasas_dcmd_frame *dcmd;
1945 struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
1946 struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
1947 dma_addr_t new_affiliation_h;
1948 int i, j, retval = 0, found = 0, doscan = 0;
1949 u8 thisVf;
1950
1951 cmd = megasas_get_cmd(instance);
1952
1953 if (!cmd) {
 1954 printk(KERN_DEBUG "megasas: megasas_get_ld_vf_affiliation_12: "
1955 "Failed to get cmd for scsi%d.\n",
1956 instance->host->host_no);
1957 return -ENOMEM;
1958 }
1959
1960 dcmd = &cmd->frame->dcmd;
1961
1962 if (!instance->vf_affiliation) {
1963 printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF "
1964 "affiliation for scsi%d.\n", instance->host->host_no);
1965 megasas_return_cmd(instance, cmd);
1966 return -ENOMEM;
1967 }
1968
1969 if (initial)
1970 memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
1971 sizeof(struct MR_LD_VF_AFFILIATION));
1972 else {
1973 new_affiliation =
1974 pci_alloc_consistent(instance->pdev,
1975 (MAX_LOGICAL_DRIVES + 1) *
1976 sizeof(struct MR_LD_VF_AFFILIATION),
1977 &new_affiliation_h);
1978 if (!new_affiliation) {
1979 printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate "
1980 "memory for new affiliation for scsi%d.\n",
1981 instance->host->host_no);
1982 megasas_return_cmd(instance, cmd);
1983 return -ENOMEM;
1984 }
1985 memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
1986 sizeof(struct MR_LD_VF_AFFILIATION));
1987 }
1988
1989 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1990
1991 dcmd->cmd = MFI_CMD_DCMD;
1992 dcmd->cmd_status = 0xFF;
1993 dcmd->sge_count = 1;
1994 dcmd->flags = MFI_FRAME_DIR_BOTH;
1995 dcmd->timeout = 0;
1996 dcmd->pad_0 = 0;
1997 dcmd->data_xfer_len = (MAX_LOGICAL_DRIVES + 1) *
1998 sizeof(struct MR_LD_VF_AFFILIATION);
1999 dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS;
2000
2001 if (initial)
2002 dcmd->sgl.sge32[0].phys_addr = instance->vf_affiliation_h;
2003 else
2004 dcmd->sgl.sge32[0].phys_addr = new_affiliation_h;
2005
2006 dcmd->sgl.sge32[0].length = (MAX_LOGICAL_DRIVES + 1) *
2007 sizeof(struct MR_LD_VF_AFFILIATION);
2008
2009 printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
2010 "scsi%d\n", instance->host->host_no);
2011
2012 megasas_issue_blocked_cmd(instance, cmd, 0);
2013
2014 if (dcmd->cmd_status) {
2015 printk(KERN_WARNING "megasas: SR-IOV: LD/VF affiliation DCMD"
2016 " failed with status 0x%x for scsi%d.\n",
2017 dcmd->cmd_status, instance->host->host_no);
2018 retval = 1; /* Do a scan if we couldn't get affiliation */
2019 goto out;
2020 }
2021
2022 if (!initial) {
2023 if (!new_affiliation->ldCount) {
2024 printk(KERN_WARNING "megasas: SR-IOV: Got new LD/VF "
2025 "affiliation for passive path for scsi%d.\n",
2026 instance->host->host_no);
2027 retval = 1;
2028 goto out;
2029 }
2030 newmap = new_affiliation->map;
2031 savedmap = instance->vf_affiliation->map;
2032 thisVf = new_affiliation->thisVf;
2033 for (i = 0 ; i < new_affiliation->ldCount; i++) {
2034 found = 0;
2035 for (j = 0; j < instance->vf_affiliation->ldCount;
2036 j++) {
2037 if (newmap->ref.targetId ==
2038 savedmap->ref.targetId) {
2039 found = 1;
2040 if (newmap->policy[thisVf] !=
2041 savedmap->policy[thisVf]) {
2042 doscan = 1;
2043 goto out;
2044 }
1992 } 2045 }
1993 savedmap = (struct MR_LD_VF_MAP *) 2046 savedmap = (struct MR_LD_VF_MAP *)
1994 ((unsigned char *)savedmap + 2047 ((unsigned char *)savedmap +
1995 savedmap->size); 2048 savedmap->size);
2049 }
2050 if (!found && newmap->policy[thisVf] !=
2051 MR_LD_ACCESS_HIDDEN) {
2052 doscan = 1;
2053 goto out;
2054 }
2055 newmap = (struct MR_LD_VF_MAP *)
2056 ((unsigned char *)newmap + newmap->size);
2057 }
2058
2059 newmap = new_affiliation->map;
2060 savedmap = instance->vf_affiliation->map;
2061
2062 for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
2063 found = 0;
2064 for (j = 0 ; j < new_affiliation->ldCount; j++) {
2065 if (savedmap->ref.targetId ==
2066 newmap->ref.targetId) {
2067 found = 1;
2068 if (savedmap->policy[thisVf] !=
2069 newmap->policy[thisVf]) {
2070 doscan = 1;
2071 goto out;
2072 }
2073 }
1996 newmap = (struct MR_LD_VF_MAP *) 2074 newmap = (struct MR_LD_VF_MAP *)
1997 ((unsigned char *)newmap + 2075 ((unsigned char *)newmap +
1998 newmap->size); 2076 newmap->size);
1999 } 2077 }
2078 if (!found && savedmap->policy[thisVf] !=
2079 MR_LD_ACCESS_HIDDEN) {
2080 doscan = 1;
2081 goto out;
2082 }
2083 savedmap = (struct MR_LD_VF_MAP *)
2084 ((unsigned char *)savedmap +
2085 savedmap->size);
2000 } 2086 }
2001 } 2087 }
2002out: 2088out:
2003 if (new_affiliation) { 2089 if (doscan) {
2004 if (instance->PlasmaFW111) 2090 printk(KERN_WARNING "megasas: SR-IOV: Got new LD/VF "
2005 pci_free_consistent(instance->pdev, 2091 "affiliation for scsi%d.\n", instance->host->host_no);
2006 sizeof(struct MR_LD_VF_AFFILIATION_111), 2092 memcpy(instance->vf_affiliation, new_affiliation,
2007 new_affiliation_111, 2093 new_affiliation->size);
2008 new_affiliation_111_h); 2094 retval = 1;
2009 else
2010 pci_free_consistent(instance->pdev,
2011 (MAX_LOGICAL_DRIVES + 1) *
2012 sizeof(struct MR_LD_VF_AFFILIATION),
2013 new_affiliation, new_affiliation_h);
2014 } 2095 }
2015 megasas_return_cmd(instance, cmd); 2096
2097 if (new_affiliation)
2098 pci_free_consistent(instance->pdev,
2099 (MAX_LOGICAL_DRIVES + 1) *
2100 sizeof(struct MR_LD_VF_AFFILIATION),
2101 new_affiliation, new_affiliation_h);
2102 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
2103 megasas_return_mfi_mpt_pthr(instance, cmd,
2104 cmd->mpt_pthr_cmd_blocked);
2105 else
2106 megasas_return_cmd(instance, cmd);
2016 2107
2017 return retval; 2108 return retval;
2018} 2109}
2019 2110
2111/* This function will get the current SR-IOV LD/VF affiliation */
2112static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
2113 int initial)
2114{
2115 int retval;
2116
2117 if (instance->PlasmaFW111)
2118 retval = megasas_get_ld_vf_affiliation_111(instance, initial);
2119 else
2120 retval = megasas_get_ld_vf_affiliation_12(instance, initial);
2121 return retval;
2122}
2123
2020/* This function will tell FW to start the SR-IOV heartbeat */ 2124/* This function will tell FW to start the SR-IOV heartbeat */
2021int megasas_sriov_start_heartbeat(struct megasas_instance *instance, 2125int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2022 int initial) 2126 int initial)
@@ -2459,7 +2563,12 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
2459 cmd->abort_aen = 0; 2563 cmd->abort_aen = 0;
2460 2564
2461 instance->aen_cmd = NULL; 2565 instance->aen_cmd = NULL;
2462 megasas_return_cmd(instance, cmd); 2566
2567 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
2568 megasas_return_mfi_mpt_pthr(instance, cmd,
2569 cmd->mpt_pthr_cmd_blocked);
2570 else
2571 megasas_return_cmd(instance, cmd);
2463 2572
2464 if ((instance->unload == 0) && 2573 if ((instance->unload == 0) &&
2465 ((instance->issuepend_done == 1))) { 2574 ((instance->issuepend_done == 1))) {
@@ -2491,6 +2600,152 @@ static int megasas_change_queue_depth(struct scsi_device *sdev,
2491 return queue_depth; 2600 return queue_depth;
2492} 2601}
2493 2602
2603static ssize_t
2604megasas_fw_crash_buffer_store(struct device *cdev,
2605 struct device_attribute *attr, const char *buf, size_t count)
2606{
2607 struct Scsi_Host *shost = class_to_shost(cdev);
2608 struct megasas_instance *instance =
2609 (struct megasas_instance *) shost->hostdata;
2610 int val = 0;
2611 unsigned long flags;
2612
2613 if (kstrtoint(buf, 0, &val) != 0)
2614 return -EINVAL;
2615
2616 spin_lock_irqsave(&instance->crashdump_lock, flags);
2617 instance->fw_crash_buffer_offset = val;
2618 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2619 return strlen(buf);
2620}
2621
2622static ssize_t
2623megasas_fw_crash_buffer_show(struct device *cdev,
2624 struct device_attribute *attr, char *buf)
2625{
2626 struct Scsi_Host *shost = class_to_shost(cdev);
2627 struct megasas_instance *instance =
2628 (struct megasas_instance *) shost->hostdata;
2629 u32 size;
2630 unsigned long buff_addr;
2631 unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
2632 unsigned long src_addr;
2633 unsigned long flags;
2634 u32 buff_offset;
2635
2636 spin_lock_irqsave(&instance->crashdump_lock, flags);
2637 buff_offset = instance->fw_crash_buffer_offset;
2638 if (!instance->crash_dump_buf &&
2639 !((instance->fw_crash_state == AVAILABLE) ||
2640 (instance->fw_crash_state == COPYING))) {
2641 dev_err(&instance->pdev->dev,
2642 "Firmware crash dump is not available\n");
2643 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2644 return -EINVAL;
2645 }
2646
2647 buff_addr = (unsigned long) buf;
2648
2649 if (buff_offset >
2650 (instance->fw_crash_buffer_size * dmachunk)) {
2651 dev_err(&instance->pdev->dev,
2652 "Firmware crash dump offset is out of range\n");
2653 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2654 return 0;
2655 }
2656
2657 size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
2658 size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
2659
2660 src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
2661 (buff_offset % dmachunk);
2662 memcpy(buf, (void *)src_addr, size);
2663 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2664
2665 return size;
2666}
2667
2668static ssize_t
2669megasas_fw_crash_buffer_size_show(struct device *cdev,
2670 struct device_attribute *attr, char *buf)
2671{
2672 struct Scsi_Host *shost = class_to_shost(cdev);
2673 struct megasas_instance *instance =
2674 (struct megasas_instance *) shost->hostdata;
2675
2676 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
2677 ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
2678}
2679
2680static ssize_t
2681megasas_fw_crash_state_store(struct device *cdev,
2682 struct device_attribute *attr, const char *buf, size_t count)
2683{
2684 struct Scsi_Host *shost = class_to_shost(cdev);
2685 struct megasas_instance *instance =
2686 (struct megasas_instance *) shost->hostdata;
2687 int val = 0;
2688 unsigned long flags;
2689
2690 if (kstrtoint(buf, 0, &val) != 0)
2691 return -EINVAL;
2692
 2693 if (val <= AVAILABLE || val > COPY_ERROR) {
 2694 dev_err(&instance->pdev->dev, "application set an invalid "
 2695 "firmware crash state\n");
2696 return -EINVAL;
2697 }
2698
2699 instance->fw_crash_state = val;
2700
2701 if ((val == COPIED) || (val == COPY_ERROR)) {
2702 spin_lock_irqsave(&instance->crashdump_lock, flags);
2703 megasas_free_host_crash_buffer(instance);
2704 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
2705 if (val == COPY_ERROR)
2706 dev_info(&instance->pdev->dev, "application failed to "
2707 "copy Firmware crash dump\n");
2708 else
2709 dev_info(&instance->pdev->dev, "Firmware crash dump "
2710 "copied successfully\n");
2711 }
2712 return strlen(buf);
2713}
2714
2715static ssize_t
2716megasas_fw_crash_state_show(struct device *cdev,
2717 struct device_attribute *attr, char *buf)
2718{
2719 struct Scsi_Host *shost = class_to_shost(cdev);
2720 struct megasas_instance *instance =
2721 (struct megasas_instance *) shost->hostdata;
2722 return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
2723}
2724
2725static ssize_t
2726megasas_page_size_show(struct device *cdev,
2727 struct device_attribute *attr, char *buf)
2728{
2729 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
2730}
2731
2732static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
2733 megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
2734static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
2735 megasas_fw_crash_buffer_size_show, NULL);
2736static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
2737 megasas_fw_crash_state_show, megasas_fw_crash_state_store);
2738static DEVICE_ATTR(page_size, S_IRUGO,
2739 megasas_page_size_show, NULL);
2740
2741struct device_attribute *megaraid_host_attrs[] = {
2742 &dev_attr_fw_crash_buffer_size,
2743 &dev_attr_fw_crash_buffer,
2744 &dev_attr_fw_crash_state,
2745 &dev_attr_page_size,
2746 NULL,
2747};
2748
2494/* 2749/*
2495 * Scsi host template for megaraid_sas driver 2750 * Scsi host template for megaraid_sas driver
2496 */ 2751 */
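Together the four attributes form a pull interface: an application watches fw_crash_state until a dump is AVAILABLE, selects a byte offset by writing to fw_crash_buffer, reads back up to a page at a time, and finally writes COPIED (3) or COPY_ERROR (4) to fw_crash_state so the driver frees the host buffer. A hedged userspace sketch (the host0 path and single-chunk read are assumptions):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char page[4096];
	ssize_t n;
	int fd;

	fd = open("/sys/class/scsi_host/host0/fw_crash_buffer", O_RDWR);
	if (fd < 0)
		return 1;
	if (write(fd, "0", 1) < 0)		/* select byte offset 0 */
		return 1;
	n = pread(fd, page, sizeof(page), 0);	/* read one chunk back */
	printf("read %zd bytes of crash dump\n", n);
	close(fd);
	return 0;
}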
@@ -2506,6 +2761,7 @@ static struct scsi_host_template megasas_template = {
2506 .eh_bus_reset_handler = megasas_reset_bus_host, 2761 .eh_bus_reset_handler = megasas_reset_bus_host,
2507 .eh_host_reset_handler = megasas_reset_bus_host, 2762 .eh_host_reset_handler = megasas_reset_bus_host,
2508 .eh_timed_out = megasas_reset_timer, 2763 .eh_timed_out = megasas_reset_timer,
2764 .shost_attrs = megaraid_host_attrs,
2509 .bios_param = megasas_bios_param, 2765 .bios_param = megasas_bios_param,
2510 .use_clustering = ENABLE_CLUSTERING, 2766 .use_clustering = ENABLE_CLUSTERING,
2511 .change_queue_depth = megasas_change_queue_depth, 2767 .change_queue_depth = megasas_change_queue_depth,
@@ -2688,7 +2944,8 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
2688 "failed, status = 0x%x.\n", 2944 "failed, status = 0x%x.\n",
2689 cmd->frame->hdr.cmd_status); 2945 cmd->frame->hdr.cmd_status);
2690 else { 2946 else {
2691 megasas_return_cmd(instance, cmd); 2947 megasas_return_mfi_mpt_pthr(instance,
2948 cmd, cmd->mpt_pthr_cmd_blocked);
2692 spin_unlock_irqrestore( 2949 spin_unlock_irqrestore(
2693 instance->host->host_lock, 2950 instance->host->host_lock,
2694 flags); 2951 flags);
@@ -2696,7 +2953,8 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
2696 } 2953 }
2697 } else 2954 } else
2698 instance->map_id++; 2955 instance->map_id++;
2699 megasas_return_cmd(instance, cmd); 2956 megasas_return_mfi_mpt_pthr(instance, cmd,
2957 cmd->mpt_pthr_cmd_blocked);
2700 2958
2701 /* 2959 /*
2702 * Set fast path IO to ZERO. 2960 * Set fast path IO to ZERO.
@@ -2852,7 +3110,7 @@ megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
2852 unsigned long flags; 3110 unsigned long flags;
2853 3111
2854 defer_index = 0; 3112 defer_index = 0;
2855 spin_lock_irqsave(&instance->cmd_pool_lock, flags); 3113 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
2856 for (i = 0; i < max_cmd; i++) { 3114 for (i = 0; i < max_cmd; i++) {
2857 cmd = instance->cmd_list[i]; 3115 cmd = instance->cmd_list[i];
2858 if (cmd->sync_cmd == 1 || cmd->scmd) { 3116 if (cmd->sync_cmd == 1 || cmd->scmd) {
@@ -2873,7 +3131,7 @@ megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
2873 &instance->internal_reset_pending_q); 3131 &instance->internal_reset_pending_q);
2874 } 3132 }
2875 } 3133 }
2876 spin_unlock_irqrestore(&instance->cmd_pool_lock, flags); 3134 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
2877} 3135}
2878 3136
2879 3137
@@ -3438,7 +3696,9 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
3438 int j; 3696 int j;
3439 u32 max_cmd; 3697 u32 max_cmd;
3440 struct megasas_cmd *cmd; 3698 struct megasas_cmd *cmd;
3699 struct fusion_context *fusion;
3441 3700
3701 fusion = instance->ctrl_context;
3442 max_cmd = instance->max_mfi_cmds; 3702 max_cmd = instance->max_mfi_cmds;
3443 3703
3444 /* 3704 /*
@@ -3471,13 +3731,11 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
3471 } 3731 }
3472 } 3732 }
3473 3733
3474 /*
3475 * Add all the commands to command pool (instance->cmd_pool)
3476 */
3477 for (i = 0; i < max_cmd; i++) { 3734 for (i = 0; i < max_cmd; i++) {
3478 cmd = instance->cmd_list[i]; 3735 cmd = instance->cmd_list[i];
3479 memset(cmd, 0, sizeof(struct megasas_cmd)); 3736 memset(cmd, 0, sizeof(struct megasas_cmd));
3480 cmd->index = i; 3737 cmd->index = i;
3738 atomic_set(&cmd->mfi_mpt_pthr, MFI_LIST_ADDED);
3481 cmd->scmd = NULL; 3739 cmd->scmd = NULL;
3482 cmd->instance = instance; 3740 cmd->instance = instance;
3483 3741
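The atomic_set() added here seeds the mfi_mpt_pthr lifecycle used throughout this patch: commands start as MFI_LIST_ADDED in the free pool, become MFI_MPT_DETACHED when megasas_get_cmd() hands them out, and are MFI_MPT_ATTACHED while a fusion MPT frame is bound to them. The guard that keeps __megasas_return_cmd() safe, restated as a sketch (inferred from the patch, not literal driver code):

#include <linux/atomic.h>
#include <linux/types.h>

static bool demo_mfi_frame_can_be_freed(const struct megasas_cmd *cmd)
{
	/* Any state other than detached means an MPT frame still owns
	 * this MFI frame, so it must not rejoin the free list yet. */
	return atomic_read(&cmd->mfi_mpt_pthr) == MFI_MPT_DETACHED;
}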
@@ -3548,11 +3806,11 @@ megasas_get_pd_list(struct megasas_instance *instance)
3548 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 3806 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
3549 dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); 3807 dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
3550 3808
3551 if (!megasas_issue_polled(instance, cmd)) { 3809 if (instance->ctrl_context && !instance->mask_interrupts)
3552 ret = 0; 3810 ret = megasas_issue_blocked_cmd(instance, cmd,
3553 } else { 3811 MEGASAS_BLOCKED_CMD_TIMEOUT);
3554 ret = -1; 3812 else
3555 } 3813 ret = megasas_issue_polled(instance, cmd);
3556 3814
3557 /* 3815 /*
3558 * the following function will get the instance PD LIST. 3816 * the following function will get the instance PD LIST.
@@ -3584,7 +3842,12 @@ megasas_get_pd_list(struct megasas_instance *instance)
3584 pci_free_consistent(instance->pdev, 3842 pci_free_consistent(instance->pdev,
3585 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), 3843 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
3586 ci, ci_h); 3844 ci, ci_h);
3587 megasas_return_cmd(instance, cmd); 3845
3846 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
3847 megasas_return_mfi_mpt_pthr(instance, cmd,
3848 cmd->mpt_pthr_cmd_blocked);
3849 else
3850 megasas_return_cmd(instance, cmd);
3588 3851
3589 return ret; 3852 return ret;
3590} 3853}
@@ -3630,6 +3893,8 @@ megasas_get_ld_list(struct megasas_instance *instance)
3630 memset(ci, 0, sizeof(*ci)); 3893 memset(ci, 0, sizeof(*ci));
3631 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3894 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3632 3895
3896 if (instance->supportmax256vd)
3897 dcmd->mbox.b[0] = 1;
3633 dcmd->cmd = MFI_CMD_DCMD; 3898 dcmd->cmd = MFI_CMD_DCMD;
3634 dcmd->cmd_status = 0xFF; 3899 dcmd->cmd_status = 0xFF;
3635 dcmd->sge_count = 1; 3900 dcmd->sge_count = 1;
@@ -3641,18 +3906,19 @@ megasas_get_ld_list(struct megasas_instance *instance)
3641 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST)); 3906 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
3642 dcmd->pad_0 = 0; 3907 dcmd->pad_0 = 0;
3643 3908
3644 if (!megasas_issue_polled(instance, cmd)) { 3909 if (instance->ctrl_context && !instance->mask_interrupts)
3645 ret = 0; 3910 ret = megasas_issue_blocked_cmd(instance, cmd,
3646 } else { 3911 MEGASAS_BLOCKED_CMD_TIMEOUT);
3647 ret = -1; 3912 else
3648 } 3913 ret = megasas_issue_polled(instance, cmd);
3914
3649 3915
3650 ld_count = le32_to_cpu(ci->ldCount); 3916 ld_count = le32_to_cpu(ci->ldCount);
3651 3917
3652 /* the following function will get the instance PD LIST */ 3918 /* the following function will get the instance PD LIST */
3653 3919
3654 if ((ret == 0) && (ld_count <= MAX_LOGICAL_DRIVES)) { 3920 if ((ret == 0) && (ld_count <= instance->fw_supported_vd_count)) {
3655 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 3921 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
3656 3922
3657 for (ld_index = 0; ld_index < ld_count; ld_index++) { 3923 for (ld_index = 0; ld_index < ld_count; ld_index++) {
3658 if (ci->ldList[ld_index].state != 0) { 3924 if (ci->ldList[ld_index].state != 0) {
@@ -3668,7 +3934,11 @@ megasas_get_ld_list(struct megasas_instance *instance)
3668 ci, 3934 ci,
3669 ci_h); 3935 ci_h);
3670 3936
3671 megasas_return_cmd(instance, cmd); 3937 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
3938 megasas_return_mfi_mpt_pthr(instance, cmd,
3939 cmd->mpt_pthr_cmd_blocked);
3940 else
3941 megasas_return_cmd(instance, cmd);
3672 return ret; 3942 return ret;
3673} 3943}
3674 3944
@@ -3715,6 +3985,8 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
3715 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 3985 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
3716 3986
3717 dcmd->mbox.b[0] = query_type; 3987 dcmd->mbox.b[0] = query_type;
3988 if (instance->supportmax256vd)
3989 dcmd->mbox.b[2] = 1;
3718 3990
3719 dcmd->cmd = MFI_CMD_DCMD; 3991 dcmd->cmd = MFI_CMD_DCMD;
3720 dcmd->cmd_status = 0xFF; 3992 dcmd->cmd_status = 0xFF;
@@ -3727,16 +3999,15 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
3727 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); 3999 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
3728 dcmd->pad_0 = 0; 4000 dcmd->pad_0 = 0;
3729 4001
3730 if (!megasas_issue_polled(instance, cmd) && !dcmd->cmd_status) { 4002 if (instance->ctrl_context && !instance->mask_interrupts)
3731 ret = 0; 4003 ret = megasas_issue_blocked_cmd(instance, cmd,
3732 } else { 4004 MEGASAS_BLOCKED_CMD_TIMEOUT);
3733 /* On failure, call older LD list DCMD */ 4005 else
3734 ret = 1; 4006 ret = megasas_issue_polled(instance, cmd);
3735 }
3736 4007
3737 tgtid_count = le32_to_cpu(ci->count); 4008 tgtid_count = le32_to_cpu(ci->count);
3738 4009
3739 if ((ret == 0) && (tgtid_count <= (MAX_LOGICAL_DRIVES))) { 4010 if ((ret == 0) && (tgtid_count <= (instance->fw_supported_vd_count))) {
3740 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); 4011 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
3741 for (ld_index = 0; ld_index < tgtid_count; ld_index++) { 4012 for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
3742 ids = ci->targetId[ld_index]; 4013 ids = ci->targetId[ld_index];
@@ -3748,7 +4019,11 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
3748 pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST), 4019 pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
3749 ci, ci_h); 4020 ci, ci_h);
3750 4021
3751 megasas_return_cmd(instance, cmd); 4022 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
4023 megasas_return_mfi_mpt_pthr(instance, cmd,
4024 cmd->mpt_pthr_cmd_blocked);
4025 else
4026 megasas_return_cmd(instance, cmd);
3752 4027
3753 return ret; 4028 return ret;
3754} 4029}
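The same dispatch now appears in several DCMD issuers: fusion adapters (ctrl_context set) with interrupts enabled take the blocking wait_event path capped at MEGASAS_BLOCKED_CMD_TIMEOUT (60 s), while MFI adapters and masked-interrupt windows keep polling. Factored out as a sketch (no such helper exists in the patch; it only names the pattern):

static int demo_issue_dcmd(struct megasas_instance *instance,
			   struct megasas_cmd *cmd)
{
	if (instance->ctrl_context && !instance->mask_interrupts)
		return megasas_issue_blocked_cmd(instance, cmd,
						 MEGASAS_BLOCKED_CMD_TIMEOUT);
	return megasas_issue_polled(instance, cmd);
}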
@@ -3762,7 +4037,7 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
3762 * This information is mainly used to find out the maximum IO transfer per 4037 * This information is mainly used to find out the maximum IO transfer per
3763 * command supported by the FW. 4038 * command supported by the FW.
3764 */ 4039 */
3765static int 4040int
3766megasas_get_ctrl_info(struct megasas_instance *instance, 4041megasas_get_ctrl_info(struct megasas_instance *instance,
3767 struct megasas_ctrl_info *ctrl_info) 4042 struct megasas_ctrl_info *ctrl_info)
3768{ 4043{
@@ -3803,18 +4078,84 @@ megasas_get_ctrl_info(struct megasas_instance *instance,
3803 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO); 4078 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
3804 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 4079 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
3805 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info)); 4080 dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
4081 dcmd->mbox.b[0] = 1;
3806 4082
3807 if (!megasas_issue_polled(instance, cmd)) { 4083 if (instance->ctrl_context && !instance->mask_interrupts)
3808 ret = 0; 4084 ret = megasas_issue_blocked_cmd(instance, cmd,
4085 MEGASAS_BLOCKED_CMD_TIMEOUT);
4086 else
4087 ret = megasas_issue_polled(instance, cmd);
4088
4089 if (!ret)
3809 memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info)); 4090 memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
3810 } else {
3811 ret = -1;
3812 }
3813 4091
3814 pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info), 4092 pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
3815 ci, ci_h); 4093 ci, ci_h);
3816 4094
3817 megasas_return_cmd(instance, cmd); 4095 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
4096 megasas_return_mfi_mpt_pthr(instance, cmd,
4097 cmd->mpt_pthr_cmd_blocked);
4098 else
4099 megasas_return_cmd(instance, cmd);
4100 return ret;
4101}
4102
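megasas_ld_list_query() and megasas_get_ctrl_info() above both switch to the same issue rule: once a fusion controller has interrupts unmasked, an internal DCMD blocks on its completion with a timeout rather than polling. A sketch of that dispatch, assuming only the helpers visible in this patch; the wrapper name is hypothetical:

static int megasas_issue_internal(struct megasas_instance *instance,
				  struct megasas_cmd *cmd)
{
	/* Interrupt-driven wait once the fusion controller is live. */
	if (instance->ctrl_context && !instance->mask_interrupts)
		return megasas_issue_blocked_cmd(instance, cmd,
						 MEGASAS_BLOCKED_CMD_TIMEOUT);
	/* Polled completion during early init, kdump, or on MFI parts. */
	return megasas_issue_polled(instance, cmd);
}
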
4103/*
4104 * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer
4105 * to firmware
4106 *
4107 * @instance: Adapter soft state
4108 * @crash_buf_state: tell FW to turn the crash dump feature ON/OFF
4109 *	MR_CRASH_BUF_TURN_OFF = 0
4110 *	MR_CRASH_BUF_TURN_ON = 1
4111 * @return: 0 on success, non-zero on failure.
4112 * Issues an internal command (DCMD) to set parameters for the crash dump
4113 * feature. The driver sends the address of the crash dump DMA buffer and
4114 * sets the mbox to tell the FW that it supports crash dump. This DCMD is
4115 * sent only if the crash dump feature is supported by the FW.
4116 *
4117 */
4118int megasas_set_crash_dump_params(struct megasas_instance *instance,
4119 u8 crash_buf_state)
4120{
4121 int ret = 0;
4122 struct megasas_cmd *cmd;
4123 struct megasas_dcmd_frame *dcmd;
4124
4125 cmd = megasas_get_cmd(instance);
4126
4127 if (!cmd) {
4128 dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
4129 return -ENOMEM;
4130 }
4131
4132
4133 dcmd = &cmd->frame->dcmd;
4134
4135 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4136 dcmd->mbox.b[0] = crash_buf_state;
4137 dcmd->cmd = MFI_CMD_DCMD;
4138 dcmd->cmd_status = 0xFF;
4139 dcmd->sge_count = 1;
4140 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
4141 dcmd->timeout = 0;
4142 dcmd->pad_0 = 0;
4143 dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
4144 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
4145 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h);
4146 dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE);
4147
4148 if (instance->ctrl_context && !instance->mask_interrupts)
4149 ret = megasas_issue_blocked_cmd(instance, cmd,
4150 MEGASAS_BLOCKED_CMD_TIMEOUT);
4151 else
4152 ret = megasas_issue_polled(instance, cmd);
4153
4154 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
4155 megasas_return_mfi_mpt_pthr(instance, cmd,
4156 cmd->mpt_pthr_cmd_blocked);
4157 else
4158 megasas_return_cmd(instance, cmd);
3818 return ret; 4159 return ret;
3819} 4160}
3820 4161
@@ -3948,6 +4289,13 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
3948 if (megasas_issue_init_mfi(instance)) 4289 if (megasas_issue_init_mfi(instance))
3949 goto fail_fw_init; 4290 goto fail_fw_init;
3950 4291
4292 if (megasas_get_ctrl_info(instance, instance->ctrl_info)) {
4293 dev_err(&instance->pdev->dev, "(%d): Could not get controller info. "
4294 "Failed from %s %d\n", instance->unique_id,
4295 __func__, __LINE__);
4296 goto fail_fw_init;
4297 }
4298
3951 instance->fw_support_ieee = 0; 4299 instance->fw_support_ieee = 0;
3952 instance->fw_support_ieee = 4300 instance->fw_support_ieee =
3953 (instance->instancet->read_fw_status_reg(reg_set) & 4301 (instance->instancet->read_fw_status_reg(reg_set) &
@@ -3986,7 +4334,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
3986 u32 tmp_sectors, msix_enable, scratch_pad_2; 4334 u32 tmp_sectors, msix_enable, scratch_pad_2;
3987 resource_size_t base_addr; 4335 resource_size_t base_addr;
3988 struct megasas_register_set __iomem *reg_set; 4336 struct megasas_register_set __iomem *reg_set;
3989 struct megasas_ctrl_info *ctrl_info; 4337 struct megasas_ctrl_info *ctrl_info = NULL;
3990 unsigned long bar_list; 4338 unsigned long bar_list;
3991 int i, loop, fw_msix_count = 0; 4339 int i, loop, fw_msix_count = 0;
3992 struct IOV_111 *iovPtr; 4340 struct IOV_111 *iovPtr;
@@ -4103,17 +4451,11 @@ static int megasas_init_fw(struct megasas_instance *instance)
4103 (unsigned int)num_online_cpus()); 4451 (unsigned int)num_online_cpus());
4104 for (i = 0; i < instance->msix_vectors; i++) 4452 for (i = 0; i < instance->msix_vectors; i++)
4105 instance->msixentry[i].entry = i; 4453 instance->msixentry[i].entry = i;
4106 i = pci_enable_msix(instance->pdev, instance->msixentry, 4454 i = pci_enable_msix_range(instance->pdev, instance->msixentry,
4107 instance->msix_vectors); 4455 1, instance->msix_vectors);
4108 if (i >= 0) { 4456 if (i)
4109 if (i) { 4457 instance->msix_vectors = i;
4110 if (!pci_enable_msix(instance->pdev, 4458 else
4111 instance->msixentry, i))
4112 instance->msix_vectors = i;
4113 else
4114 instance->msix_vectors = 0;
4115 }
4116 } else
4117 instance->msix_vectors = 0; 4459 instance->msix_vectors = 0;
4118 4460
4119 dev_info(&instance->pdev->dev, "[scsi%d]: FW supports" 4461 dev_info(&instance->pdev->dev, "[scsi%d]: FW supports"
@@ -4123,6 +4465,17 @@ static int megasas_init_fw(struct megasas_instance *instance)
4123 instance->msix_vectors); 4465 instance->msix_vectors);
4124 } 4466 }
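The hunk above drops the old two-call pci_enable_msix() retry in favour of pci_enable_msix_range(), which either allocates between minvec and maxvec vectors and returns the count, or fails with a negative errno. (The resume path later in this patch makes the matching switch to pci_enable_msix_exact(), which succeeds only when the full count is granted.) A minimal sketch of the range contract; the wrapper name is hypothetical, and note the negative-errno case:

static int megasas_setup_msix(struct megasas_instance *instance)
{
	int got = pci_enable_msix_range(instance->pdev, instance->msixentry,
					1, instance->msix_vectors);

	if (got < 0) {
		instance->msix_vectors = 0;	/* fall back to legacy IRQ */
		return got;
	}
	instance->msix_vectors = got;	/* may be fewer than requested */
	return 0;
}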
4125 4467
4468 instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
4469 GFP_KERNEL);
4470 if (instance->ctrl_info == NULL)
4471 goto fail_init_adapter;
4472
4473 /*
4474 * Below are the default values for legacy firmware
4475 * (non-fusion based controllers).
4476 */
4477 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
4478 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
4126 /* Get operational params, sge flags, send init cmd to controller */ 4479 /* Get operational params, sge flags, send init cmd to controller */
4127 if (instance->instancet->init_adapter(instance)) 4480 if (instance->instancet->init_adapter(instance))
4128 goto fail_init_adapter; 4481 goto fail_init_adapter;
@@ -4145,8 +4498,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
4145 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) 4498 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
4146 megasas_get_ld_list(instance); 4499 megasas_get_ld_list(instance);
4147 4500
4148 ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
4149
4150 /* 4501 /*
4151 * Compute the max allowed sectors per IO: The controller info has two 4502 * Compute the max allowed sectors per IO: The controller info has two
4152 * limits on max sectors. Driver should use the minimum of these two. 4503 * limits on max sectors. Driver should use the minimum of these two.
@@ -4157,58 +4508,79 @@ static int megasas_init_fw(struct megasas_instance *instance)
4157 * to calculate max_sectors_1. So the number ended up as zero always. 4508 * to calculate max_sectors_1. So the number ended up as zero always.
4158 */ 4509 */
4159 tmp_sectors = 0; 4510 tmp_sectors = 0;
4160 if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) { 4511 ctrl_info = instance->ctrl_info;
4161 4512
4162 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * 4513 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
4163 le16_to_cpu(ctrl_info->max_strips_per_io); 4514 le16_to_cpu(ctrl_info->max_strips_per_io);
4164 max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size); 4515 max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
4165 4516
4166 tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2); 4517 tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
4167 4518
4168 /*Check whether controller is iMR or MR */ 4519 /*Check whether controller is iMR or MR */
4169 if (ctrl_info->memory_size) { 4520 if (ctrl_info->memory_size) {
4170 instance->is_imr = 0; 4521 instance->is_imr = 0;
4171 dev_info(&instance->pdev->dev, "Controller type: MR," 4522 dev_info(&instance->pdev->dev, "Controller type: MR,"
4172 "Memory size is: %dMB\n", 4523 "Memory size is: %dMB\n",
4173 le16_to_cpu(ctrl_info->memory_size)); 4524 le16_to_cpu(ctrl_info->memory_size));
4174 } else { 4525 } else {
4175 instance->is_imr = 1; 4526 instance->is_imr = 1;
4176 dev_info(&instance->pdev->dev, 4527 dev_info(&instance->pdev->dev,
4177 "Controller type: iMR\n"); 4528 "Controller type: iMR\n");
4178 } 4529 }
4179 /* OnOffProperties are converted into CPU arch*/ 4530 /* OnOffProperties are converted into CPU arch*/
4180 le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties); 4531 le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
4181 instance->disableOnlineCtrlReset = 4532 instance->disableOnlineCtrlReset =
4182 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; 4533 ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
4183 /* adapterOperations2 are converted into CPU arch*/ 4534 /* adapterOperations2 are converted into CPU arch*/
4184 le32_to_cpus((u32 *)&ctrl_info->adapterOperations2); 4535 le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
4185 instance->mpio = ctrl_info->adapterOperations2.mpio; 4536 instance->mpio = ctrl_info->adapterOperations2.mpio;
4186 instance->UnevenSpanSupport = 4537 instance->UnevenSpanSupport =
4187 ctrl_info->adapterOperations2.supportUnevenSpans; 4538 ctrl_info->adapterOperations2.supportUnevenSpans;
4188 if (instance->UnevenSpanSupport) { 4539 if (instance->UnevenSpanSupport) {
4189 struct fusion_context *fusion = instance->ctrl_context; 4540 struct fusion_context *fusion = instance->ctrl_context;
4190 dev_info(&instance->pdev->dev, "FW supports: " 4541
4191 "UnevenSpanSupport=%x\n", instance->UnevenSpanSupport); 4542 dev_info(&instance->pdev->dev, "FW supports: "
4192 if (MR_ValidateMapInfo(instance)) 4543 "UnevenSpanSupport=%x\n", instance->UnevenSpanSupport);
4193 fusion->fast_path_io = 1; 4544 if (MR_ValidateMapInfo(instance))
4194 else 4545 fusion->fast_path_io = 1;
4195 fusion->fast_path_io = 0; 4546 else
4547 fusion->fast_path_io = 0;
4196 4548
4549 }
4550 if (ctrl_info->host_interface.SRIOV) {
4551 if (!ctrl_info->adapterOperations2.activePassive)
4552 instance->PlasmaFW111 = 1;
4553
4554 if (!instance->PlasmaFW111)
4555 instance->requestorId =
4556 ctrl_info->iov.requestorId;
4557 else {
4558 iovPtr = (struct IOV_111 *)((unsigned char *)ctrl_info + IOV_111_OFFSET);
4559 instance->requestorId = iovPtr->requestorId;
4197 } 4560 }
4198 if (ctrl_info->host_interface.SRIOV) { 4561 dev_warn(&instance->pdev->dev, "I am VF "
4199 if (!ctrl_info->adapterOperations2.activePassive) 4562 "requestorId %d\n", instance->requestorId);
4200 instance->PlasmaFW111 = 1; 4563 }
4201 4564
4202 if (!instance->PlasmaFW111) 4565 le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
4203 instance->requestorId = 4566 instance->crash_dump_fw_support =
4204 ctrl_info->iov.requestorId; 4567 ctrl_info->adapterOperations3.supportCrashDump;
4205 else { 4568 instance->crash_dump_drv_support =
4206 iovPtr = (struct IOV_111 *)((unsigned char *)ctrl_info + IOV_111_OFFSET); 4569 (instance->crash_dump_fw_support &&
4207 instance->requestorId = iovPtr->requestorId; 4570 instance->crash_dump_buf);
4208 } 4571 if (instance->crash_dump_drv_support) {
4209 printk(KERN_WARNING "megaraid_sas: I am VF " 4572 dev_info(&instance->pdev->dev, "Firmware Crash dump "
4210 "requestorId %d\n", instance->requestorId); 4573 "feature is supported\n");
4211 } 4574 megasas_set_crash_dump_params(instance,
4575 MR_CRASH_BUF_TURN_OFF);
4576
4577 } else {
4578 if (instance->crash_dump_buf)
4579 pci_free_consistent(instance->pdev,
4580 CRASH_DMA_BUF_SIZE,
4581 instance->crash_dump_buf,
4582 instance->crash_dump_h);
4583 instance->crash_dump_buf = NULL;
4212 } 4584 }
4213 instance->max_sectors_per_req = instance->max_num_sge * 4585 instance->max_sectors_per_req = instance->max_num_sge *
4214 PAGE_SIZE / 512; 4586 PAGE_SIZE / 512;
@@ -4256,6 +4628,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
4256 4628
4257fail_init_adapter: 4629fail_init_adapter:
4258fail_ready_state: 4630fail_ready_state:
4631 kfree(instance->ctrl_info);
4632 instance->ctrl_info = NULL;
4259 iounmap(instance->reg_set); 4633 iounmap(instance->reg_set);
4260 4634
4261 fail_ioremap: 4635 fail_ioremap:
@@ -4351,7 +4725,11 @@ megasas_get_seq_num(struct megasas_instance *instance,
4351 pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info), 4725 pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
4352 el_info, el_info_h); 4726 el_info, el_info_h);
4353 4727
4354 megasas_return_cmd(instance, cmd); 4728 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
4729 megasas_return_mfi_mpt_pthr(instance, cmd,
4730 cmd->mpt_pthr_cmd_blocked);
4731 else
4732 megasas_return_cmd(instance, cmd);
4355 4733
4356 return 0; 4734 return 0;
4357} 4735}
@@ -4634,6 +5012,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
4634 struct Scsi_Host *host; 5012 struct Scsi_Host *host;
4635 struct megasas_instance *instance; 5013 struct megasas_instance *instance;
4636 u16 control = 0; 5014 u16 control = 0;
5015 struct fusion_context *fusion = NULL;
4637 5016
4638 /* Reset MSI-X in the kdump kernel */ 5017 /* Reset MSI-X in the kdump kernel */
4639 if (reset_devices) { 5018 if (reset_devices) {
@@ -4694,10 +5073,10 @@ static int megasas_probe_one(struct pci_dev *pdev,
4694 case PCI_DEVICE_ID_LSI_INVADER: 5073 case PCI_DEVICE_ID_LSI_INVADER:
4695 case PCI_DEVICE_ID_LSI_FURY: 5074 case PCI_DEVICE_ID_LSI_FURY:
4696 { 5075 {
4697 struct fusion_context *fusion; 5076 instance->ctrl_context_pages =
4698 5077 get_order(sizeof(struct fusion_context));
4699 instance->ctrl_context = 5078 instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL,
4700 kzalloc(sizeof(struct fusion_context), GFP_KERNEL); 5079 instance->ctrl_context_pages);
4701 if (!instance->ctrl_context) { 5080 if (!instance->ctrl_context) {
4702 printk(KERN_DEBUG "megasas: Failed to allocate " 5081 printk(KERN_DEBUG "megasas: Failed to allocate "
4703 "memory for Fusion context info\n"); 5082 "memory for Fusion context info\n");
@@ -4705,7 +5084,9 @@ static int megasas_probe_one(struct pci_dev *pdev,
4705 } 5084 }
4706 fusion = instance->ctrl_context; 5085 fusion = instance->ctrl_context;
4707 INIT_LIST_HEAD(&fusion->cmd_pool); 5086 INIT_LIST_HEAD(&fusion->cmd_pool);
4708 spin_lock_init(&fusion->cmd_pool_lock); 5087 spin_lock_init(&fusion->mpt_pool_lock);
5088 memset(fusion->load_balance_info, 0,
5089 sizeof(struct LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
4709 } 5090 }
4710 break; 5091 break;
4711 default: /* For all other supported controllers */ 5092 default: /* For all other supported controllers */
@@ -4728,13 +5109,29 @@ static int megasas_probe_one(struct pci_dev *pdev,
4728 break; 5109 break;
4729 } 5110 }
4730 5111
5112 /* Crash dump feature related initialisation */
5113 instance->drv_buf_index = 0;
5114 instance->drv_buf_alloc = 0;
5115 instance->crash_dump_fw_support = 0;
5116 instance->crash_dump_app_support = 0;
5117 instance->fw_crash_state = UNAVAILABLE;
5118 spin_lock_init(&instance->crashdump_lock);
5119 instance->crash_dump_buf = NULL;
5120
5121 if (!reset_devices)
5122 instance->crash_dump_buf = pci_alloc_consistent(pdev,
5123 CRASH_DMA_BUF_SIZE,
5124 &instance->crash_dump_h);
5125 if (!instance->crash_dump_buf)
5126 dev_err(&instance->pdev->dev, "Can't allocate Firmware "
5127 "crash dump DMA buffer\n");
5128
4731 megasas_poll_wait_aen = 0; 5129 megasas_poll_wait_aen = 0;
4732 instance->flag_ieee = 0; 5130 instance->flag_ieee = 0;
4733 instance->ev = NULL; 5131 instance->ev = NULL;
4734 instance->issuepend_done = 1; 5132 instance->issuepend_done = 1;
4735 instance->adprecovery = MEGASAS_HBA_OPERATIONAL; 5133 instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
4736 instance->is_imr = 0; 5134 instance->is_imr = 0;
4737 megasas_poll_wait_aen = 0;
4738 5135
4739 instance->evt_detail = pci_alloc_consistent(pdev, 5136 instance->evt_detail = pci_alloc_consistent(pdev,
4740 sizeof(struct 5137 sizeof(struct
@@ -4758,7 +5155,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
4758 init_waitqueue_head(&instance->int_cmd_wait_q); 5155 init_waitqueue_head(&instance->int_cmd_wait_q);
4759 init_waitqueue_head(&instance->abort_cmd_wait_q); 5156 init_waitqueue_head(&instance->abort_cmd_wait_q);
4760 5157
4761 spin_lock_init(&instance->cmd_pool_lock); 5158 spin_lock_init(&instance->mfi_pool_lock);
4762 spin_lock_init(&instance->hba_lock); 5159 spin_lock_init(&instance->hba_lock);
4763 spin_lock_init(&instance->completion_lock); 5160 spin_lock_init(&instance->completion_lock);
4764 5161
@@ -4771,13 +5168,14 @@ static int megasas_probe_one(struct pci_dev *pdev,
4771 instance->host = host; 5168 instance->host = host;
4772 instance->unique_id = pdev->bus->number << 8 | pdev->devfn; 5169 instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
4773 instance->init_id = MEGASAS_DEFAULT_INIT_ID; 5170 instance->init_id = MEGASAS_DEFAULT_INIT_ID;
5171 instance->ctrl_info = NULL;
4774 5172
4775 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 5173 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4776 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) { 5174 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
4777 instance->flag_ieee = 1; 5175 instance->flag_ieee = 1;
4778 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS); 5176 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
4779 } else 5177 } else
4780 sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS); 5178 sema_init(&instance->ioctl_sem, (MEGASAS_INT_CMDS - 5));
4781 5179
4782 megasas_dbg_lvl = 0; 5180 megasas_dbg_lvl = 0;
4783 instance->flag = 0; 5181 instance->flag = 0;
@@ -4789,9 +5187,10 @@ static int megasas_probe_one(struct pci_dev *pdev,
4789 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || 5187 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
4790 (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) || 5188 (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
4791 (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 5189 (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
4792 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) 5190 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
4793 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); 5191 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
4794 else 5192 INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
5193 } else
4795 INIT_WORK(&instance->work_init, process_fw_state_change_wq); 5194 INIT_WORK(&instance->work_init, process_fw_state_change_wq);
4796 5195
4797 /* 5196 /*
@@ -4836,8 +5235,9 @@ retry_irq_register:
4836 printk(KERN_DEBUG "megasas: Failed to " 5235 printk(KERN_DEBUG "megasas: Failed to "
4837 "register IRQ for vector %d.\n", i); 5236 "register IRQ for vector %d.\n", i);
4838 for (j = 0; j < i; j++) { 5237 for (j = 0; j < i; j++) {
4839 irq_set_affinity_hint( 5238 if (smp_affinity_enable)
4840 instance->msixentry[j].vector, NULL); 5239 irq_set_affinity_hint(
5240 instance->msixentry[j].vector, NULL);
4841 free_irq( 5241 free_irq(
4842 instance->msixentry[j].vector, 5242 instance->msixentry[j].vector,
4843 &instance->irq_context[j]); 5243 &instance->irq_context[j]);
@@ -4846,11 +5246,14 @@ retry_irq_register:
4846 instance->msix_vectors = 0; 5246 instance->msix_vectors = 0;
4847 goto retry_irq_register; 5247 goto retry_irq_register;
4848 } 5248 }
4849 if (irq_set_affinity_hint(instance->msixentry[i].vector, 5249 if (smp_affinity_enable) {
4850 get_cpu_mask(cpu))) 5250 if (irq_set_affinity_hint(instance->msixentry[i].vector,
4851 dev_err(&instance->pdev->dev, "Error setting" 5251 get_cpu_mask(cpu)))
4852 "affinity hint for cpu %d\n", cpu); 5252 dev_err(&instance->pdev->dev,
4853 cpu = cpumask_next(cpu, cpu_online_mask); 5253 "Error setting affinity hint "
5254 "for cpu %d\n", cpu);
5255 cpu = cpumask_next(cpu, cpu_online_mask);
5256 }
4854 } 5257 }
4855 } else { 5258 } else {
4856 instance->irq_context[0].instance = instance; 5259 instance->irq_context[0].instance = instance;
@@ -4894,6 +5297,10 @@ retry_irq_register:
4894 goto fail_start_aen; 5297 goto fail_start_aen;
4895 } 5298 }
4896 5299
5300 /* Get current SR-IOV LD/VF affiliation */
5301 if (instance->requestorId)
5302 megasas_get_ld_vf_affiliation(instance, 1);
5303
4897 return 0; 5304 return 0;
4898 5305
4899 fail_start_aen: 5306 fail_start_aen:
@@ -4905,8 +5312,9 @@ retry_irq_register:
4905 instance->instancet->disable_intr(instance); 5312 instance->instancet->disable_intr(instance);
4906 if (instance->msix_vectors) 5313 if (instance->msix_vectors)
4907 for (i = 0; i < instance->msix_vectors; i++) { 5314 for (i = 0; i < instance->msix_vectors; i++) {
4908 irq_set_affinity_hint( 5315 if (smp_affinity_enable)
4909 instance->msixentry[i].vector, NULL); 5316 irq_set_affinity_hint(
5317 instance->msixentry[i].vector, NULL);
4910 free_irq(instance->msixentry[i].vector, 5318 free_irq(instance->msixentry[i].vector,
4911 &instance->irq_context[i]); 5319 &instance->irq_context[i]);
4912 } 5320 }
@@ -4979,7 +5387,11 @@ static void megasas_flush_cache(struct megasas_instance *instance)
4979 dev_err(&instance->pdev->dev, "Command timed out" 5387 dev_err(&instance->pdev->dev, "Command timed out"
4980 " from %s\n", __func__); 5388 " from %s\n", __func__);
4981 5389
4982 megasas_return_cmd(instance, cmd); 5390 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
5391 megasas_return_mfi_mpt_pthr(instance, cmd,
5392 cmd->mpt_pthr_cmd_blocked);
5393 else
5394 megasas_return_cmd(instance, cmd);
4983 5395
4984 return; 5396 return;
4985} 5397}
@@ -5026,7 +5438,11 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
5026 dev_err(&instance->pdev->dev, "Command timed out" 5438 dev_err(&instance->pdev->dev, "Command timed out"
5027 " from %s\n", __func__); 5439 " from %s\n", __func__);
5028 5440
5029 megasas_return_cmd(instance, cmd); 5441 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
5442 megasas_return_mfi_mpt_pthr(instance, cmd,
5443 cmd->mpt_pthr_cmd_blocked);
5444 else
5445 megasas_return_cmd(instance, cmd);
5030 5446
5031 return; 5447 return;
5032} 5448}
@@ -5069,8 +5485,9 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
5069 5485
5070 if (instance->msix_vectors) 5486 if (instance->msix_vectors)
5071 for (i = 0; i < instance->msix_vectors; i++) { 5487 for (i = 0; i < instance->msix_vectors; i++) {
5072 irq_set_affinity_hint( 5488 if (smp_affinity_enable)
5073 instance->msixentry[i].vector, NULL); 5489 irq_set_affinity_hint(
5490 instance->msixentry[i].vector, NULL);
5074 free_irq(instance->msixentry[i].vector, 5491 free_irq(instance->msixentry[i].vector,
5075 &instance->irq_context[i]); 5492 &instance->irq_context[i]);
5076 } 5493 }
@@ -5132,9 +5549,10 @@ megasas_resume(struct pci_dev *pdev)
5132 goto fail_ready_state; 5549 goto fail_ready_state;
5133 5550
5134 /* Now re-enable MSI-X */ 5551 /* Now re-enable MSI-X */
5135 if (instance->msix_vectors) 5552 if (instance->msix_vectors &&
5136 pci_enable_msix(instance->pdev, instance->msixentry, 5553 pci_enable_msix_exact(instance->pdev, instance->msixentry,
5137 instance->msix_vectors); 5554 instance->msix_vectors))
5555 goto fail_reenable_msix;
5138 5556
5139 switch (instance->pdev->device) { 5557 switch (instance->pdev->device) {
5140 case PCI_DEVICE_ID_LSI_FUSION: 5558 case PCI_DEVICE_ID_LSI_FUSION:
@@ -5178,8 +5596,9 @@ megasas_resume(struct pci_dev *pdev)
5178 printk(KERN_DEBUG "megasas: Failed to " 5596 printk(KERN_DEBUG "megasas: Failed to "
5179 "register IRQ for vector %d.\n", i); 5597 "register IRQ for vector %d.\n", i);
5180 for (j = 0; j < i; j++) { 5598 for (j = 0; j < i; j++) {
5181 irq_set_affinity_hint( 5599 if (smp_affinity_enable)
5182 instance->msixentry[j].vector, NULL); 5600 irq_set_affinity_hint(
5601 instance->msixentry[j].vector, NULL);
5183 free_irq( 5602 free_irq(
5184 instance->msixentry[j].vector, 5603 instance->msixentry[j].vector,
5185 &instance->irq_context[j]); 5604 &instance->irq_context[j]);
@@ -5187,11 +5606,14 @@ megasas_resume(struct pci_dev *pdev)
5187 goto fail_irq; 5606 goto fail_irq;
5188 } 5607 }
5189 5608
5190 if (irq_set_affinity_hint(instance->msixentry[i].vector, 5609 if (smp_affinity_enable) {
5191 get_cpu_mask(cpu))) 5610 if (irq_set_affinity_hint(instance->msixentry[i].vector,
5192 dev_err(&instance->pdev->dev, "Error setting" 5611 get_cpu_mask(cpu)))
5193 "affinity hint for cpu %d\n", cpu); 5612 dev_err(&instance->pdev->dev, "Error "
5194 cpu = cpumask_next(cpu, cpu_online_mask); 5613 "setting affinity hint for cpu "
5614 "%d\n", cpu);
5615 cpu = cpumask_next(cpu, cpu_online_mask);
5616 }
5195 } 5617 }
5196 } else { 5618 } else {
5197 instance->irq_context[0].instance = instance; 5619 instance->irq_context[0].instance = instance;
@@ -5243,6 +5665,7 @@ fail_init_mfi:
5243 5665
5244fail_set_dma_mask: 5666fail_set_dma_mask:
5245fail_ready_state: 5667fail_ready_state:
5668fail_reenable_msix:
5246 5669
5247 pci_disable_device(pdev); 5670 pci_disable_device(pdev);
5248 5671
@@ -5273,6 +5696,8 @@ static void megasas_detach_one(struct pci_dev *pdev)
5273 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 5696 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
5274 del_timer_sync(&instance->sriov_heartbeat_timer); 5697 del_timer_sync(&instance->sriov_heartbeat_timer);
5275 5698
5699 if (instance->fw_crash_state != UNAVAILABLE)
5700 megasas_free_host_crash_buffer(instance);
5276 scsi_remove_host(instance->host); 5701 scsi_remove_host(instance->host);
5277 megasas_flush_cache(instance); 5702 megasas_flush_cache(instance);
5278 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 5703 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
@@ -5306,8 +5731,9 @@ static void megasas_detach_one(struct pci_dev *pdev)
5306 5731
5307 if (instance->msix_vectors) 5732 if (instance->msix_vectors)
5308 for (i = 0; i < instance->msix_vectors; i++) { 5733 for (i = 0; i < instance->msix_vectors; i++) {
5309 irq_set_affinity_hint( 5734 if (smp_affinity_enable)
5310 instance->msixentry[i].vector, NULL); 5735 irq_set_affinity_hint(
5736 instance->msixentry[i].vector, NULL);
5311 free_irq(instance->msixentry[i].vector, 5737 free_irq(instance->msixentry[i].vector,
5312 &instance->irq_context[i]); 5738 &instance->irq_context[i]);
5313 } 5739 }
@@ -5322,14 +5748,18 @@ static void megasas_detach_one(struct pci_dev *pdev)
5322 case PCI_DEVICE_ID_LSI_INVADER: 5748 case PCI_DEVICE_ID_LSI_INVADER:
5323 case PCI_DEVICE_ID_LSI_FURY: 5749 case PCI_DEVICE_ID_LSI_FURY:
5324 megasas_release_fusion(instance); 5750 megasas_release_fusion(instance);
5325 for (i = 0; i < 2 ; i++) 5751 for (i = 0; i < 2 ; i++) {
5326 if (fusion->ld_map[i]) 5752 if (fusion->ld_map[i])
5327 dma_free_coherent(&instance->pdev->dev, 5753 dma_free_coherent(&instance->pdev->dev,
5328 fusion->map_sz, 5754 fusion->max_map_sz,
5329 fusion->ld_map[i], 5755 fusion->ld_map[i],
5330 fusion-> 5756 fusion->ld_map_phys[i]);
5331 ld_map_phys[i]); 5757 if (fusion->ld_drv_map[i])
5332 kfree(instance->ctrl_context); 5758 free_pages((ulong)fusion->ld_drv_map[i],
5759 fusion->drv_map_pages);
5760 }
5761 free_pages((ulong)instance->ctrl_context,
5762 instance->ctrl_context_pages);
5333 break; 5763 break;
5334 default: 5764 default:
5335 megasas_release_mfi(instance); 5765 megasas_release_mfi(instance);
@@ -5342,6 +5772,8 @@ static void megasas_detach_one(struct pci_dev *pdev)
5342 break; 5772 break;
5343 } 5773 }
5344 5774
5775 kfree(instance->ctrl_info);
5776
5345 if (instance->evt_detail) 5777 if (instance->evt_detail)
5346 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), 5778 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
5347 instance->evt_detail, instance->evt_detail_h); 5779 instance->evt_detail, instance->evt_detail_h);
@@ -5363,6 +5795,10 @@ static void megasas_detach_one(struct pci_dev *pdev)
5363 instance->hb_host_mem, 5795 instance->hb_host_mem,
5364 instance->hb_host_mem_h); 5796 instance->hb_host_mem_h);
5365 5797
5798 if (instance->crash_dump_buf)
5799 pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
5800 instance->crash_dump_buf, instance->crash_dump_h);
5801
5366 scsi_host_put(host); 5802 scsi_host_put(host);
5367 5803
5368 pci_disable_device(pdev); 5804 pci_disable_device(pdev);
@@ -5385,8 +5821,9 @@ static void megasas_shutdown(struct pci_dev *pdev)
5385 instance->instancet->disable_intr(instance); 5821 instance->instancet->disable_intr(instance);
5386 if (instance->msix_vectors) 5822 if (instance->msix_vectors)
5387 for (i = 0; i < instance->msix_vectors; i++) { 5823 for (i = 0; i < instance->msix_vectors; i++) {
5388 irq_set_affinity_hint( 5824 if (smp_affinity_enable)
5389 instance->msixentry[i].vector, NULL); 5825 irq_set_affinity_hint(
5826 instance->msixentry[i].vector, NULL);
5390 free_irq(instance->msixentry[i].vector, 5827 free_irq(instance->msixentry[i].vector,
5391 &instance->irq_context[i]); 5828 &instance->irq_context[i]);
5392 } 5829 }
@@ -5448,12 +5885,53 @@ static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
5448 spin_lock_irqsave(&poll_aen_lock, flags); 5885 spin_lock_irqsave(&poll_aen_lock, flags);
5449 if (megasas_poll_wait_aen) 5886 if (megasas_poll_wait_aen)
5450 mask = (POLLIN | POLLRDNORM); 5887 mask = (POLLIN | POLLRDNORM);
5888
5451 else 5889 else
5452 mask = 0; 5890 mask = 0;
5891 megasas_poll_wait_aen = 0;
5453 spin_unlock_irqrestore(&poll_aen_lock, flags); 5892 spin_unlock_irqrestore(&poll_aen_lock, flags);
5454 return mask; 5893 return mask;
5455} 5894}
5456 5895
5896/*
5897 * megasas_set_crash_dump_params_ioctl:
5898 * Send CRASH_DUMP_MODE DCMD to all controllers
5899 * @cmd: MFI command frame
5900 */
5901
5902static int megasas_set_crash_dump_params_ioctl(
5903 struct megasas_cmd *cmd)
5904{
5905 struct megasas_instance *local_instance;
5906 int i, error = 0;
5907 int crash_support;
5908
5909 crash_support = cmd->frame->dcmd.mbox.w[0];
5910
5911 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
5912 local_instance = megasas_mgmt_info.instance[i];
5913 if (local_instance && local_instance->crash_dump_drv_support) {
5914 if ((local_instance->adprecovery ==
5915 MEGASAS_HBA_OPERATIONAL) &&
5916 !megasas_set_crash_dump_params(local_instance,
5917 crash_support)) {
5918 local_instance->crash_dump_app_support =
5919 crash_support;
5920 dev_info(&local_instance->pdev->dev,
5921 "Application firmware crash "
5922 "dump mode set success\n");
5923 error = 0;
5924 } else {
5925 dev_info(&local_instance->pdev->dev,
5926 "Application firmware crash "
5927 "dump mode set failed\n");
5928 error = -1;
5929 }
5930 }
5931 }
5932 return error;
5933}
5934
5457/** 5935/**
5458 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW 5936 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW
5459 * @instance: Adapter soft state 5937 * @instance: Adapter soft state
@@ -5500,6 +5978,12 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
5500 MFI_FRAME_SGL64 | 5978 MFI_FRAME_SGL64 |
5501 MFI_FRAME_SENSE64)); 5979 MFI_FRAME_SENSE64));
5502 5980
5981 if (cmd->frame->dcmd.opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
5982 error = megasas_set_crash_dump_params_ioctl(cmd);
5983 megasas_return_cmd(instance, cmd);
5984 return error;
5985 }
5986
5503 /* 5987 /*
5504 * The management interface between applications and the fw uses 5988 * The management interface between applications and the fw uses
5505 * MFI frames. E.g, RAID configuration changes, LD property changes 5989 * MFI frames. E.g, RAID configuration changes, LD property changes
@@ -5619,9 +6103,14 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
5619 le32_to_cpu(kern_sge32[i].length), 6103 le32_to_cpu(kern_sge32[i].length),
5620 kbuff_arr[i], 6104 kbuff_arr[i],
5621 le32_to_cpu(kern_sge32[i].phys_addr)); 6105 le32_to_cpu(kern_sge32[i].phys_addr));
6106 kbuff_arr[i] = NULL;
5622 } 6107 }
5623 6108
5624 megasas_return_cmd(instance, cmd); 6109 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
6110 megasas_return_mfi_mpt_pthr(instance, cmd,
6111 cmd->mpt_pthr_cmd_blocked);
6112 else
6113 megasas_return_cmd(instance, cmd);
5625 return error; 6114 return error;
5626} 6115}
5627 6116
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 081bfff12d00..685e6f391fe4 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -55,6 +55,13 @@
55#include "megaraid_sas.h" 55#include "megaraid_sas.h"
56#include <asm/div64.h> 56#include <asm/div64.h>
57 57
58#define LB_PENDING_CMDS_DEFAULT 4
59static unsigned int lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
60module_param(lb_pending_cmds, int, S_IRUGO);
61MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding "
62 "threshold. Valid Values are 1-128. Default: 4");
63
64
58#define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a))) 65#define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
59#define MR_LD_STATE_OPTIMAL 3 66#define MR_LD_STATE_OPTIMAL 3
60#define FALSE 0 67#define FALSE 0
@@ -66,16 +73,13 @@
66#define SPAN_INVALID 0xff 73#define SPAN_INVALID 0xff
67 74
68/* Prototypes */ 75/* Prototypes */
69void mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map, 76static void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
70 struct LD_LOAD_BALANCE_INFO *lbInfo);
71
72static void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
73 PLD_SPAN_INFO ldSpanInfo); 77 PLD_SPAN_INFO ldSpanInfo);
74static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, 78static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
75 u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info, 79 u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
76 struct RAID_CONTEXT *pRAID_Context, struct MR_FW_RAID_MAP_ALL *map); 80 struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map);
77static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld, 81static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld,
78 u64 strip, struct MR_FW_RAID_MAP_ALL *map); 82 u64 strip, struct MR_DRV_RAID_MAP_ALL *map);
79 83
80u32 mega_mod64(u64 dividend, u32 divisor) 84u32 mega_mod64(u64 dividend, u32 divisor)
81{ 85{
@@ -109,94 +113,183 @@ u64 mega_div64_32(uint64_t dividend, uint32_t divisor)
109 return d; 113 return d;
110} 114}
111 115
112struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map) 116struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
113{ 117{
114 return &map->raidMap.ldSpanMap[ld].ldRaid; 118 return &map->raidMap.ldSpanMap[ld].ldRaid;
115} 119}
116 120
117static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld, 121static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld,
118 struct MR_FW_RAID_MAP_ALL 122 struct MR_DRV_RAID_MAP_ALL
119 *map) 123 *map)
120{ 124{
121 return &map->raidMap.ldSpanMap[ld].spanBlock[0]; 125 return &map->raidMap.ldSpanMap[ld].spanBlock[0];
122} 126}
123 127
124static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_FW_RAID_MAP_ALL *map) 128static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_DRV_RAID_MAP_ALL *map)
125{ 129{
126 return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx]; 130 return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
127} 131}
128 132
129u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map) 133u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map)
130{ 134{
131 return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]); 135 return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]);
132} 136}
133 137
134u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map) 138u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map)
135{ 139{
136 return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef); 140 return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
137} 141}
138 142
139u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map) 143u16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
140{ 144{
141 return map->raidMap.devHndlInfo[pd].curDevHdl; 145 return map->raidMap.devHndlInfo[pd].curDevHdl;
142} 146}
143 147
144u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map) 148u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
145{ 149{
146 return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId); 150 return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
147} 151}
148 152
149u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map) 153u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map)
150{ 154{
151 return map->raidMap.ldTgtIdToLd[ldTgtId]; 155 return map->raidMap.ldTgtIdToLd[ldTgtId];
152} 156}
153 157
154static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span, 158static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
155 struct MR_FW_RAID_MAP_ALL *map) 159 struct MR_DRV_RAID_MAP_ALL *map)
156{ 160{
157 return &map->raidMap.ldSpanMap[ld].spanBlock[span].span; 161 return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
158} 162}
159 163
160/* 164/*
165 * This function will Populate Driver Map using firmware raid map
166 */
167void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
168{
169 struct fusion_context *fusion = instance->ctrl_context;
170 struct MR_FW_RAID_MAP_ALL *fw_map_old = NULL;
171 struct MR_FW_RAID_MAP *pFwRaidMap = NULL;
172 int i;
173
174
175 struct MR_DRV_RAID_MAP_ALL *drv_map =
176 fusion->ld_drv_map[(instance->map_id & 1)];
177 struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
178
179 if (instance->supportmax256vd) {
180 memcpy(fusion->ld_drv_map[instance->map_id & 1],
181 fusion->ld_map[instance->map_id & 1],
182 fusion->current_map_sz);
183 /* The new RAID map does not set totalSize, so keep the value
184 * that legacy code in ValidateMapInfo expects
185 */
186 pDrvRaidMap->totalSize = sizeof(struct MR_FW_RAID_MAP_EXT);
187 } else {
188 fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
189 fusion->ld_map[(instance->map_id & 1)];
190 pFwRaidMap = &fw_map_old->raidMap;
191
192#if VD_EXT_DEBUG
193 for (i = 0; i < pFwRaidMap->ldCount; i++) {
194 dev_dbg(&instance->pdev->dev, "(%d): Index 0x%x "
195 "Target Id 0x%x Seq Num 0x%x Size 0x%llx\n",
196 instance->unique_id, i,
197 fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId,
198 fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum,
199 fw_map_old->raidMap.ldSpanMap[i].ldRaid.size);
200 }
201#endif
202
203 memset(drv_map, 0, fusion->drv_map_sz);
204 pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
205 pDrvRaidMap->ldCount = pFwRaidMap->ldCount;
206 pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
207 for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
208 pDrvRaidMap->ldTgtIdToLd[i] =
209 (u8)pFwRaidMap->ldTgtIdToLd[i];
210 for (i = 0; i < pDrvRaidMap->ldCount; i++) {
211 pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
212#if VD_EXT_DEBUG
213 dev_dbg(&instance->pdev->dev,
214 "pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
215 "pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
216 "size 0x%x\n", i, i,
217 pFwRaidMap->ldSpanMap[i].ldRaid.targetId,
218 pFwRaidMap->ldSpanMap[i].ldRaid.seqNum,
219 (u32)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize);
220 dev_dbg(&instance->pdev->dev,
221 "pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
222 "pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
223 "size 0x%x\n", i, i,
224 pDrvRaidMap->ldSpanMap[i].ldRaid.targetId,
225 pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
226 (u32)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
227 dev_dbg(&instance->pdev->dev, "Driver raid map all %p "
228 "raid map %p LD RAID MAP %p/%p\n", drv_map,
229 pDrvRaidMap, &pFwRaidMap->ldSpanMap[i].ldRaid,
230 &pDrvRaidMap->ldSpanMap[i].ldRaid);
231#endif
232 }
233 memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
234 sizeof(struct MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
235 memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo,
236 sizeof(struct MR_DEV_HANDLE_INFO) *
237 MAX_RAIDMAP_PHYSICAL_DEVICES);
238 }
239}
240
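A toy model of the two-slot scheme implied by the ld_drv_map[instance->map_id & 1] indexing above, on the assumption that map_id advances with each firmware map update: the low bit selects the active map while the other slot keeps the previous generation, so a rebuild never rewrites the map that I/O is still reading. Plain C, illustrative only:

#include <stdio.h>

int main(void)
{
	const char *slot[2] = { "buffer A", "buffer B" };
	unsigned int map_id;

	/* Successive map updates alternate between the two buffers. */
	for (map_id = 0; map_id < 4; map_id++)
		printf("map_id %u: active %s, previous %s\n", map_id,
		       slot[map_id & 1], slot[(map_id & 1) ^ 1]);
	return 0;
}
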
241/*
161 * This function will validate Map info data provided by FW 242 * This function will validate Map info data provided by FW
162 */ 243 */
163u8 MR_ValidateMapInfo(struct megasas_instance *instance) 244u8 MR_ValidateMapInfo(struct megasas_instance *instance)
164{ 245{
165 struct fusion_context *fusion = instance->ctrl_context; 246 struct fusion_context *fusion;
166 struct MR_FW_RAID_MAP_ALL *map = fusion->ld_map[(instance->map_id & 1)]; 247 struct MR_DRV_RAID_MAP_ALL *drv_map;
167 struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info; 248 struct MR_DRV_RAID_MAP *pDrvRaidMap;
168 PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span; 249 struct LD_LOAD_BALANCE_INFO *lbInfo;
169 struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap; 250 PLD_SPAN_INFO ldSpanInfo;
170 struct MR_LD_RAID *raid; 251 struct MR_LD_RAID *raid;
171 int ldCount, num_lds; 252 int ldCount, num_lds;
172 u16 ld; 253 u16 ld;
254 u32 expected_size;
255
256
257 MR_PopulateDrvRaidMap(instance);
258
259 fusion = instance->ctrl_context;
260 drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
261 pDrvRaidMap = &drv_map->raidMap;
173 262
263 lbInfo = fusion->load_balance_info;
264 ldSpanInfo = fusion->log_to_span;
174 265
175 if (le32_to_cpu(pFwRaidMap->totalSize) != 266 if (instance->supportmax256vd)
176 (sizeof(struct MR_FW_RAID_MAP) -sizeof(struct MR_LD_SPAN_MAP) + 267 expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
177 (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pFwRaidMap->ldCount)))) { 268 else
178 printk(KERN_ERR "megasas: map info structure size 0x%x is not matching with ld count\n", 269 expected_size =
179 (unsigned int)((sizeof(struct MR_FW_RAID_MAP) - 270 (sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) +
180 sizeof(struct MR_LD_SPAN_MAP)) + 271 (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pDrvRaidMap->ldCount)));
181 (sizeof(struct MR_LD_SPAN_MAP) * 272
182 le32_to_cpu(pFwRaidMap->ldCount)))); 273 if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
183 printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize " 274 dev_err(&instance->pdev->dev, "map info structure size 0x%x is not matching with ld count\n",
184 ": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP), 275 (unsigned int) expected_size);
185 le32_to_cpu(pFwRaidMap->totalSize)); 276 dev_err(&instance->pdev->dev, "megasas: span map %x, pDrvRaidMap->totalSize : %x\n",
277 (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
278 le32_to_cpu(pDrvRaidMap->totalSize));
186 return 0; 279 return 0;
187 } 280 }
188 281
189 if (instance->UnevenSpanSupport) 282 if (instance->UnevenSpanSupport)
190 mr_update_span_set(map, ldSpanInfo); 283 mr_update_span_set(drv_map, ldSpanInfo);
191 284
192 mr_update_load_balance_params(map, lbInfo); 285 mr_update_load_balance_params(drv_map, lbInfo);
193 286
194 num_lds = le32_to_cpu(map->raidMap.ldCount); 287 num_lds = le32_to_cpu(drv_map->raidMap.ldCount);
195 288
196 /*Convert Raid capability values to CPU arch */ 289 /*Convert Raid capability values to CPU arch */
197 for (ldCount = 0; ldCount < num_lds; ldCount++) { 290 for (ldCount = 0; ldCount < num_lds; ldCount++) {
198 ld = MR_TargetIdToLdGet(ldCount, map); 291 ld = MR_TargetIdToLdGet(ldCount, drv_map);
199 raid = MR_LdRaidGet(ld, map); 292 raid = MR_LdRaidGet(ld, drv_map);
200 le32_to_cpus((u32 *)&raid->capability); 293 le32_to_cpus((u32 *)&raid->capability);
201 } 294 }
202 295
@@ -204,7 +297,7 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance)
204} 297}
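
The legacy firmware map declares ldSpanMap[] as a one-element tail, so its wire size is the base struct minus one span map plus one span map per logical drive -- exactly the expected_size expression checked above. The same computation as a helper, for reference only (the function name is illustrative):

static size_t legacy_map_size(u32 ld_count)
{
	/* Base struct carries one MR_LD_SPAN_MAP; swap it for ld_count. */
	return sizeof(struct MR_FW_RAID_MAP) -
	       sizeof(struct MR_LD_SPAN_MAP) +
	       (size_t)ld_count * sizeof(struct MR_LD_SPAN_MAP);
}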
205 298
206u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk, 299u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
207 struct MR_FW_RAID_MAP_ALL *map) 300 struct MR_DRV_RAID_MAP_ALL *map)
208{ 301{
209 struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map); 302 struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
210 struct MR_QUAD_ELEMENT *quad; 303 struct MR_QUAD_ELEMENT *quad;
@@ -246,7 +339,8 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
246* ldSpanInfo - ldSpanInfo per HBA instance 339* ldSpanInfo - ldSpanInfo per HBA instance
247*/ 340*/
248#if SPAN_DEBUG 341#if SPAN_DEBUG
249static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo) 342static int getSpanInfo(struct MR_DRV_RAID_MAP_ALL *map,
343 PLD_SPAN_INFO ldSpanInfo)
250{ 344{
251 345
252 u8 span; 346 u8 span;
@@ -257,9 +351,9 @@ static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
257 int ldCount; 351 int ldCount;
258 u16 ld; 352 u16 ld;
259 353
260 for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) { 354 for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
261 ld = MR_TargetIdToLdGet(ldCount, map); 355 ld = MR_TargetIdToLdGet(ldCount, map);
262 if (ld >= MAX_LOGICAL_DRIVES) 356 if (ld >= MAX_LOGICAL_DRIVES_EXT)
263 continue; 357 continue;
264 raid = MR_LdRaidGet(ld, map); 358 raid = MR_LdRaidGet(ld, map);
265 dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n", 359 dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n",
@@ -339,7 +433,7 @@ static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
339*/ 433*/
340 434
341u32 mr_spanset_get_span_block(struct megasas_instance *instance, 435u32 mr_spanset_get_span_block(struct megasas_instance *instance,
342 u32 ld, u64 row, u64 *span_blk, struct MR_FW_RAID_MAP_ALL *map) 436 u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map)
343{ 437{
344 struct fusion_context *fusion = instance->ctrl_context; 438 struct fusion_context *fusion = instance->ctrl_context;
345 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); 439 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
@@ -402,7 +496,7 @@ u32 mr_spanset_get_span_block(struct megasas_instance *instance,
402*/ 496*/
403 497
404static u64 get_row_from_strip(struct megasas_instance *instance, 498static u64 get_row_from_strip(struct megasas_instance *instance,
405 u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map) 499 u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
406{ 500{
407 struct fusion_context *fusion = instance->ctrl_context; 501 struct fusion_context *fusion = instance->ctrl_context;
408 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); 502 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
@@ -471,7 +565,7 @@ static u64 get_row_from_strip(struct megasas_instance *instance,
471*/ 565*/
472 566
473static u64 get_strip_from_row(struct megasas_instance *instance, 567static u64 get_strip_from_row(struct megasas_instance *instance,
474 u32 ld, u64 row, struct MR_FW_RAID_MAP_ALL *map) 568 u32 ld, u64 row, struct MR_DRV_RAID_MAP_ALL *map)
475{ 569{
476 struct fusion_context *fusion = instance->ctrl_context; 570 struct fusion_context *fusion = instance->ctrl_context;
477 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); 571 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
@@ -532,7 +626,7 @@ static u64 get_strip_from_row(struct megasas_instance *instance,
532*/ 626*/
533 627
534static u32 get_arm_from_strip(struct megasas_instance *instance, 628static u32 get_arm_from_strip(struct megasas_instance *instance,
535 u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map) 629 u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
536{ 630{
537 struct fusion_context *fusion = instance->ctrl_context; 631 struct fusion_context *fusion = instance->ctrl_context;
538 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); 632 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
@@ -580,7 +674,7 @@ static u32 get_arm_from_strip(struct megasas_instance *instance,
580 674
581/* This Function will return Phys arm */ 675/* This Function will return Phys arm */
582u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe, 676u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
583 struct MR_FW_RAID_MAP_ALL *map) 677 struct MR_DRV_RAID_MAP_ALL *map)
584{ 678{
585 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); 679 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
586 /* Need to check correct default value */ 680 /* Need to check correct default value */
@@ -624,7 +718,7 @@ u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
624static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, 718static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
625 u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info, 719 u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
626 struct RAID_CONTEXT *pRAID_Context, 720 struct RAID_CONTEXT *pRAID_Context,
627 struct MR_FW_RAID_MAP_ALL *map) 721 struct MR_DRV_RAID_MAP_ALL *map)
628{ 722{
629 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); 723 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
630 u32 pd, arRef; 724 u32 pd, arRef;
@@ -682,6 +776,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
682 *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); 776 *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
683 pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | 777 pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
684 physArm; 778 physArm;
779 io_info->span_arm = pRAID_Context->spanArm;
685 return retval; 780 return retval;
686} 781}
687 782
@@ -705,7 +800,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
705u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, 800u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
706 u16 stripRef, struct IO_REQUEST_INFO *io_info, 801 u16 stripRef, struct IO_REQUEST_INFO *io_info,
707 struct RAID_CONTEXT *pRAID_Context, 802 struct RAID_CONTEXT *pRAID_Context,
708 struct MR_FW_RAID_MAP_ALL *map) 803 struct MR_DRV_RAID_MAP_ALL *map)
709{ 804{
710 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); 805 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
711 u32 pd, arRef; 806 u32 pd, arRef;
@@ -778,6 +873,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
778 *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); 873 *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
779 pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | 874 pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
780 physArm; 875 physArm;
876 io_info->span_arm = pRAID_Context->spanArm;
781 return retval; 877 return retval;
782} 878}
783 879
@@ -794,7 +890,7 @@ u8
794MR_BuildRaidContext(struct megasas_instance *instance, 890MR_BuildRaidContext(struct megasas_instance *instance,
795 struct IO_REQUEST_INFO *io_info, 891 struct IO_REQUEST_INFO *io_info,
796 struct RAID_CONTEXT *pRAID_Context, 892 struct RAID_CONTEXT *pRAID_Context,
797 struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN) 893 struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN)
798{ 894{
799 struct MR_LD_RAID *raid; 895 struct MR_LD_RAID *raid;
800 u32 ld, stripSize, stripe_mask; 896 u32 ld, stripSize, stripe_mask;
@@ -1043,8 +1139,8 @@ MR_BuildRaidContext(struct megasas_instance *instance,
1043* ldSpanInfo - ldSpanInfo per HBA instance 1139* ldSpanInfo - ldSpanInfo per HBA instance
1044* 1140*
1045*/ 1141*/
1046void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map, 1142void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
1047 PLD_SPAN_INFO ldSpanInfo) 1143 PLD_SPAN_INFO ldSpanInfo)
1048{ 1144{
1049 u8 span, count; 1145 u8 span, count;
1050 u32 element, span_row_width; 1146 u32 element, span_row_width;
@@ -1056,9 +1152,9 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
1056 u16 ld; 1152 u16 ld;
1057 1153
1058 1154
1059 for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) { 1155 for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
1060 ld = MR_TargetIdToLdGet(ldCount, map); 1156 ld = MR_TargetIdToLdGet(ldCount, map);
1061 if (ld >= MAX_LOGICAL_DRIVES) 1157 if (ld >= MAX_LOGICAL_DRIVES_EXT)
1062 continue; 1158 continue;
1063 raid = MR_LdRaidGet(ld, map); 1159 raid = MR_LdRaidGet(ld, map);
1064 for (element = 0; element < MAX_QUAD_DEPTH; element++) { 1160 for (element = 0; element < MAX_QUAD_DEPTH; element++) {
@@ -1152,90 +1248,105 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
1152 1248
1153} 1249}
1154 1250
1155void 1251void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
1156mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map, 1252 struct LD_LOAD_BALANCE_INFO *lbInfo)
1157 struct LD_LOAD_BALANCE_INFO *lbInfo)
1158{ 1253{
1159 int ldCount; 1254 int ldCount;
1160 u16 ld; 1255 u16 ld;
1161 struct MR_LD_RAID *raid; 1256 struct MR_LD_RAID *raid;
1162 1257
1163 for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) { 1258 if (lb_pending_cmds > 128 || lb_pending_cmds < 1)
1164 ld = MR_TargetIdToLdGet(ldCount, map); 1259 lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
1165 if (ld >= MAX_LOGICAL_DRIVES) { 1260
1261 for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
1262 ld = MR_TargetIdToLdGet(ldCount, drv_map);
1263 if (ld >= MAX_LOGICAL_DRIVES_EXT) {
1166 lbInfo[ldCount].loadBalanceFlag = 0; 1264 lbInfo[ldCount].loadBalanceFlag = 0;
1167 continue; 1265 continue;
1168 } 1266 }
1169 1267
1170 raid = MR_LdRaidGet(ld, map); 1268 raid = MR_LdRaidGet(ld, drv_map);
1171 1269 if ((raid->level != 1) ||
1172 /* Two drive Optimal RAID 1 */ 1270 (raid->ldState != MR_LD_STATE_OPTIMAL)) {
1173 if ((raid->level == 1) && (raid->rowSize == 2) &&
1174 (raid->spanDepth == 1) && raid->ldState ==
1175 MR_LD_STATE_OPTIMAL) {
1176 u32 pd, arRef;
1177
1178 lbInfo[ldCount].loadBalanceFlag = 1;
1179
1180 /* Get the array on which this span is present */
1181 arRef = MR_LdSpanArrayGet(ld, 0, map);
1182
1183 /* Get the Pd */
1184 pd = MR_ArPdGet(arRef, 0, map);
1185 /* Get dev handle from Pd */
1186 lbInfo[ldCount].raid1DevHandle[0] =
1187 MR_PdDevHandleGet(pd, map);
1188 /* Get the Pd */
1189 pd = MR_ArPdGet(arRef, 1, map);
1190
1191 /* Get the dev handle from Pd */
1192 lbInfo[ldCount].raid1DevHandle[1] =
1193 MR_PdDevHandleGet(pd, map);
1194 } else
1195 lbInfo[ldCount].loadBalanceFlag = 0; 1271 lbInfo[ldCount].loadBalanceFlag = 0;
1272 continue;
1273 }
1274 lbInfo[ldCount].loadBalanceFlag = 1;
1196 } 1275 }
1197} 1276}
1198 1277
1199u8 megasas_get_best_arm(struct LD_LOAD_BALANCE_INFO *lbInfo, u8 arm, u64 block, 1278u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
1200 u32 count) 1279 struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info)
1201{ 1280{
1202 u16 pend0, pend1; 1281 struct fusion_context *fusion;
1282 struct MR_LD_RAID *raid;
1283 struct MR_DRV_RAID_MAP_ALL *drv_map;
1284 u16 pend0, pend1, ld;
1203 u64 diff0, diff1; 1285 u64 diff0, diff1;
1204 u8 bestArm; 1286 u8 bestArm, pd0, pd1, span, arm;
1287 u32 arRef, span_row_size;
1288
1289 u64 block = io_info->ldStartBlock;
1290 u32 count = io_info->numBlocks;
1291
1292 span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK)
1293 >> RAID_CTX_SPANARM_SPAN_SHIFT);
1294 arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK);
1295
1296
1297 fusion = instance->ctrl_context;
1298 drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
1299 ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map);
1300 raid = MR_LdRaidGet(ld, drv_map);
1301 span_row_size = instance->UnevenSpanSupport ?
1302 SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize;
1303
1304 arRef = MR_LdSpanArrayGet(ld, span, drv_map);
1305 pd0 = MR_ArPdGet(arRef, arm, drv_map);
1306 pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
1307 (arm + 1 - span_row_size) : arm + 1, drv_map);
1205 1308
1206 /* get the pending cmds for the data and mirror arms */ 1309 /* get the pending cmds for the data and mirror arms */
1207 pend0 = atomic_read(&lbInfo->scsi_pending_cmds[0]); 1310 pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
1208 pend1 = atomic_read(&lbInfo->scsi_pending_cmds[1]); 1311 pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);
1209 1312
1210 /* Determine the disk whose head is nearer to the req. block */ 1313 /* Determine the disk whose head is nearer to the req. block */
1211 diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]); 1314 diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
1212 diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]); 1315 diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
1213 bestArm = (diff0 <= diff1 ? 0 : 1); 1316 bestArm = (diff0 <= diff1 ? arm : arm ^ 1);
1214 1317
1215 /*Make balance count from 16 to 4 to keep driver in sync with Firmware*/ 1318 if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds) ||
1216 if ((bestArm == arm && pend0 > pend1 + 4) || 1319 (bestArm != arm && pend1 > pend0 + lb_pending_cmds))
1217 (bestArm != arm && pend1 > pend0 + 4))
1218 bestArm ^= 1; 1320 bestArm ^= 1;
1219 1321
1220 /* Update the last accessed block on the correct pd */ 1322 /* Update the last accessed block on the correct pd */
1221 lbInfo->last_accessed_block[bestArm] = block + count - 1; 1323 io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
1222 1324 lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1;
1223 return bestArm; 1325 io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
1326#if SPAN_DEBUG
1327 if (arm != bestArm)
1328 dev_dbg(&instance->pdev->dev, "LSI Debug R1 Load balance "
1329 "occur - span 0x%x arm 0x%x bestArm 0x%x "
1330 "io_info->span_arm 0x%x\n",
1331 span, arm, bestArm, io_info->span_arm);
1332#endif
1333 return io_info->pd_after_lb;
1224} 1334}
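
megasas_get_best_arm_pd() above prefers the mirror arm whose head sits nearer the requested block, then overrides that choice when the other arm's queue is shorter by more than lb_pending_cmds. A stand-alone model of just that arithmetic, simplified to two fixed arms -- this is not driver code:

#include <stdint.h>
#include <stdio.h>

#define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))

static unsigned int lb_pending_cmds = 4;	/* module-param default */

static int best_arm(uint64_t block, uint64_t last0, uint64_t last1,
		    unsigned int pend0, unsigned int pend1)
{
	/* Nearest head wins first... */
	int best = ABS_DIFF(block, last0) <= ABS_DIFF(block, last1) ? 0 : 1;

	/* ...unless the queue-depth imbalance exceeds the threshold. */
	if ((best == 0 && pend0 > pend1 + lb_pending_cmds) ||
	    (best == 1 && pend1 > pend0 + lb_pending_cmds))
		best ^= 1;
	return best;
}

int main(void)
{
	/* Arm 0 is nearer (10 vs 4000 blocks) but 12 commands deep,
	 * so the 2-deep arm 1 is chosen. */
	printf("chosen arm: %d\n", best_arm(1000, 990, 5000, 12, 2));
	return 0;
}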
1225 1335
1226u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo, 1336u16 get_updated_dev_handle(struct megasas_instance *instance,
1227 struct IO_REQUEST_INFO *io_info) 1337 struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info)
1228{ 1338{
1229 u8 arm, old_arm; 1339 u8 arm_pd;
1230 u16 devHandle; 1340 u16 devHandle;
1341 struct fusion_context *fusion;
1342 struct MR_DRV_RAID_MAP_ALL *drv_map;
1231 1343
1232 old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 0 : 1; 1344 fusion = instance->ctrl_context;
1233 1345 drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
1234 /* get best new arm */
1235 arm = megasas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock,
1236 io_info->numBlocks);
1237 devHandle = lbInfo->raid1DevHandle[arm];
1238 atomic_inc(&lbInfo->scsi_pending_cmds[arm]);
1239 1346
1347 /* get best new arm (PD ID) */
1348 arm_pd = megasas_get_best_arm_pd(instance, lbInfo, io_info);
1349 devHandle = MR_PdDevHandleGet(arm_pd, drv_map);
1350 atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]);
1240 return devHandle; 1351 return devHandle;
1241} 1352}
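A minimal userspace sketch of the decision rule implemented by megasas_get_best_arm_pd above: pick the arm whose head is nearest the requested block, then switch arms if the chosen one is busier by more than a threshold. The threshold of 4 mirrors the constant in the replaced code (lb_pending_cmds in the new code); everything else here is illustrative, not the driver's real helper.

#include <stdint.h>
#include <stdio.h>

#define LB_PENDING_CMDS 4       /* assumed default for lb_pending_cmds */

static unsigned int best_arm(uint64_t block,
                             uint64_t last_blk0, uint64_t last_blk1,
                             unsigned int pend0, unsigned int pend1)
{
        /* seek distance from each arm's last accessed block */
        uint64_t diff0 = block > last_blk0 ? block - last_blk0 : last_blk0 - block;
        uint64_t diff1 = block > last_blk1 ? block - last_blk1 : last_blk1 - block;
        unsigned int arm = diff0 <= diff1 ? 0 : 1;

        /* override seek distance when the chosen arm is much busier */
        if ((arm == 0 && pend0 > pend1 + LB_PENDING_CMDS) ||
            (arm == 1 && pend1 > pend0 + LB_PENDING_CMDS))
                arm ^= 1;
        return arm;
}

int main(void)
{
        /* nearest arm is 0, but it has far more pending I/O, so pick arm 1 */
        printf("arm=%u\n", best_arm(1000, 990, 500000, 12, 2));
        return 0;
}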
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 3ed03dfab76c..f37eed682c75 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -50,6 +50,7 @@
50#include <scsi/scsi_cmnd.h> 50#include <scsi/scsi_cmnd.h>
51#include <scsi/scsi_device.h> 51#include <scsi/scsi_device.h>
52#include <scsi/scsi_host.h> 52#include <scsi/scsi_host.h>
53#include <scsi/scsi_dbg.h>
53 54
54#include "megaraid_sas_fusion.h" 55#include "megaraid_sas_fusion.h"
55#include "megaraid_sas.h" 56#include "megaraid_sas.h"
@@ -76,8 +77,6 @@ megasas_issue_polled(struct megasas_instance *instance,
76void 77void
77megasas_check_and_restore_queue_depth(struct megasas_instance *instance); 78megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
78 79
79u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo,
80 struct IO_REQUEST_INFO *in_info);
81int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); 80int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
82void megaraid_sas_kill_hba(struct megasas_instance *instance); 81void megaraid_sas_kill_hba(struct megasas_instance *instance);
83 82
@@ -91,6 +90,8 @@ void megasas_start_timer(struct megasas_instance *instance,
91extern struct megasas_mgmt_info megasas_mgmt_info; 90extern struct megasas_mgmt_info megasas_mgmt_info;
92extern int resetwaittime; 91extern int resetwaittime;
93 92
93
94
94/** 95/**
95 * megasas_enable_intr_fusion - Enables interrupts 96 * megasas_enable_intr_fusion - Enables interrupts
96 * @regs: MFI register set 97 * @regs: MFI register set
@@ -163,7 +164,7 @@ struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
163 (struct fusion_context *)instance->ctrl_context; 164 (struct fusion_context *)instance->ctrl_context;
164 struct megasas_cmd_fusion *cmd = NULL; 165 struct megasas_cmd_fusion *cmd = NULL;
165 166
166 spin_lock_irqsave(&fusion->cmd_pool_lock, flags); 167 spin_lock_irqsave(&fusion->mpt_pool_lock, flags);
167 168
168 if (!list_empty(&fusion->cmd_pool)) { 169 if (!list_empty(&fusion->cmd_pool)) {
169 cmd = list_entry((&fusion->cmd_pool)->next, 170 cmd = list_entry((&fusion->cmd_pool)->next,
@@ -173,7 +174,7 @@ struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
173 printk(KERN_ERR "megasas: Command pool (fusion) empty!\n"); 174 printk(KERN_ERR "megasas: Command pool (fusion) empty!\n");
174 } 175 }
175 176
176 spin_unlock_irqrestore(&fusion->cmd_pool_lock, flags); 177 spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags);
177 return cmd; 178 return cmd;
178} 179}
179 180
@@ -182,21 +183,47 @@ struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
182 * @instance: Adapter soft state 183 * @instance: Adapter soft state
183 * @cmd: Command packet to be returned to free command pool 184 * @cmd: Command packet to be returned to free command pool
184 */ 185 */
185static inline void 186inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
186megasas_return_cmd_fusion(struct megasas_instance *instance, 187 struct megasas_cmd_fusion *cmd)
187 struct megasas_cmd_fusion *cmd)
188{ 188{
189 unsigned long flags; 189 unsigned long flags;
190 struct fusion_context *fusion = 190 struct fusion_context *fusion =
191 (struct fusion_context *)instance->ctrl_context; 191 (struct fusion_context *)instance->ctrl_context;
192 192
193 spin_lock_irqsave(&fusion->cmd_pool_lock, flags); 193 spin_lock_irqsave(&fusion->mpt_pool_lock, flags);
194 194
195 cmd->scmd = NULL; 195 cmd->scmd = NULL;
196 cmd->sync_cmd_idx = (u32)ULONG_MAX; 196 cmd->sync_cmd_idx = (u32)ULONG_MAX;
197 list_add_tail(&cmd->list, &fusion->cmd_pool); 197 list_add(&cmd->list, (&fusion->cmd_pool)->next);
198 198
199 spin_unlock_irqrestore(&fusion->cmd_pool_lock, flags); 199 spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags);
200}
201
202/**
203 * megasas_return_mfi_mpt_pthr - Return a mfi and mpt to free command pool
204 * @instance: Adapter soft state
205 * @cmd_mfi: MFI Command packet to be returned to free command pool
206 * @cmd_mpt: MPT Command packet to be returned to free command pool
207 */
208inline void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance,
209 struct megasas_cmd *cmd_mfi,
210 struct megasas_cmd_fusion *cmd_fusion)
211{
212 unsigned long flags;
213
214 /*
 215 * TO DO: optimize this code to use only one lock instead of the
 216 * two locks used currently - mpt_pool_lock is acquired
217 * inside mfi_pool_lock
218 */
219 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
220 megasas_return_cmd_fusion(instance, cmd_fusion);
221 if (atomic_read(&cmd_mfi->mfi_mpt_pthr) != MFI_MPT_ATTACHED)
222 dev_err(&instance->pdev->dev, "Possible bug from %s %d\n",
223 __func__, __LINE__);
224 atomic_set(&cmd_mfi->mfi_mpt_pthr, MFI_MPT_DETACHED);
225 __megasas_return_cmd(instance, cmd_mfi);
226 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
200} 227}
201 228
202/** 229/**
@@ -562,9 +589,11 @@ wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
562{ 589{
563 int i; 590 int i;
564 struct megasas_header *frame_hdr = &cmd->frame->hdr; 591 struct megasas_header *frame_hdr = &cmd->frame->hdr;
592 struct fusion_context *fusion;
565 593
566 u32 msecs = seconds * 1000; 594 u32 msecs = seconds * 1000;
567 595
596 fusion = instance->ctrl_context;
568 /* 597 /*
569 * Wait for cmd_status to change 598 * Wait for cmd_status to change
570 */ 599 */
@@ -573,8 +602,12 @@ wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
573 msleep(20); 602 msleep(20);
574 } 603 }
575 604
576 if (frame_hdr->cmd_status == 0xff) 605 if (frame_hdr->cmd_status == 0xff) {
606 if (fusion)
607 megasas_return_mfi_mpt_pthr(instance, cmd,
608 cmd->mpt_pthr_cmd_blocked);
577 return -ETIME; 609 return -ETIME;
610 }
578 611
579 return 0; 612 return 0;
580} 613}
@@ -650,6 +683,10 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
650 /* driver supports HA / Remote LUN over Fast Path interface */ 683 /* driver supports HA / Remote LUN over Fast Path interface */
651 init_frame->driver_operations.mfi_capabilities.support_fp_remote_lun 684 init_frame->driver_operations.mfi_capabilities.support_fp_remote_lun
652 = 1; 685 = 1;
686 init_frame->driver_operations.mfi_capabilities.support_max_255lds
687 = 1;
688 init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb
689 = 1;
653 /* Convert capability to LE32 */ 690 /* Convert capability to LE32 */
654 cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); 691 cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
655 692
@@ -709,6 +746,13 @@ fail_get_cmd:
709 * Issues an internal command (DCMD) to get the FW's controller PD 746 * Issues an internal command (DCMD) to get the FW's controller PD
710 * list structure. This information is mainly used to find out SYSTEM 747 * list structure. This information is mainly used to find out SYSTEM
711 * supported by the FW. 748 * supported by the FW.
749 * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO
750 * dcmd.mbox.b[0] - number of LDs being sync'd
751 * dcmd.mbox.b[1] - 0 - complete command immediately.
752 * - 1 - pend till config change
753 * dcmd.mbox.b[2] - 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP
754 * - 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and
755 * uses extended struct MR_FW_RAID_MAP_EXT
712 */ 756 */
713static int 757static int
714megasas_get_ld_map_info(struct megasas_instance *instance) 758megasas_get_ld_map_info(struct megasas_instance *instance)
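The new comment block above documents the MR_DCMD_LD_MAP_GET_INFO mailbox bytes. A hedged sketch of filling them follows, using a stand-in mailbox type and a hypothetical helper name (MFI_MBOX_SIZE is 12 bytes in megaraid_sas.h):

#include <stdint.h>
#include <string.h>

struct dcmd_mbox { uint8_t b[12]; };    /* stand-in for the driver's mbox */

static void setup_map_get_info_mbox(struct dcmd_mbox *mbox, uint8_t num_lds,
                                    int pend_until_change, int use_ext_map)
{
        memset(mbox->b, 0, sizeof(mbox->b));
        mbox->b[0] = num_lds;                   /* number of LDs being sync'd */
        mbox->b[1] = pend_until_change ? 1 : 0; /* 1: pend till config change */
        mbox->b[2] = use_ext_map ? 1 : 0;       /* 1: use MR_FW_RAID_MAP_EXT */
}

int main(void)
{
        struct dcmd_mbox mbox;

        setup_map_get_info_mbox(&mbox, 4, 1, 1);
        return mbox.b[1] == 1 ? 0 : 1;
}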
@@ -716,7 +760,7 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
716 int ret = 0; 760 int ret = 0;
717 struct megasas_cmd *cmd; 761 struct megasas_cmd *cmd;
718 struct megasas_dcmd_frame *dcmd; 762 struct megasas_dcmd_frame *dcmd;
719 struct MR_FW_RAID_MAP_ALL *ci; 763 void *ci;
720 dma_addr_t ci_h = 0; 764 dma_addr_t ci_h = 0;
721 u32 size_map_info; 765 u32 size_map_info;
722 struct fusion_context *fusion; 766 struct fusion_context *fusion;
@@ -737,10 +781,9 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
737 781
738 dcmd = &cmd->frame->dcmd; 782 dcmd = &cmd->frame->dcmd;
739 783
740 size_map_info = sizeof(struct MR_FW_RAID_MAP) + 784 size_map_info = fusion->current_map_sz;
741 (sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1));
742 785
743 ci = fusion->ld_map[(instance->map_id & 1)]; 786 ci = (void *) fusion->ld_map[(instance->map_id & 1)];
744 ci_h = fusion->ld_map_phys[(instance->map_id & 1)]; 787 ci_h = fusion->ld_map_phys[(instance->map_id & 1)];
745 788
746 if (!ci) { 789 if (!ci) {
@@ -749,9 +792,13 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
749 return -ENOMEM; 792 return -ENOMEM;
750 } 793 }
751 794
752 memset(ci, 0, sizeof(*ci)); 795 memset(ci, 0, fusion->max_map_sz);
753 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 796 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
754 797#if VD_EXT_DEBUG
798 dev_dbg(&instance->pdev->dev,
799 "%s sending MR_DCMD_LD_MAP_GET_INFO with size %d\n",
800 __func__, cpu_to_le32(size_map_info));
801#endif
755 dcmd->cmd = MFI_CMD_DCMD; 802 dcmd->cmd = MFI_CMD_DCMD;
756 dcmd->cmd_status = 0xFF; 803 dcmd->cmd_status = 0xFF;
757 dcmd->sge_count = 1; 804 dcmd->sge_count = 1;
@@ -763,14 +810,17 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
763 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); 810 dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
764 dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info); 811 dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
765 812
766 if (!megasas_issue_polled(instance, cmd)) 813 if (instance->ctrl_context && !instance->mask_interrupts)
767 ret = 0; 814 ret = megasas_issue_blocked_cmd(instance, cmd,
768 else { 815 MEGASAS_BLOCKED_CMD_TIMEOUT);
769 printk(KERN_ERR "megasas: Get LD Map Info Failed\n"); 816 else
770 ret = -1; 817 ret = megasas_issue_polled(instance, cmd);
771 }
772 818
773 megasas_return_cmd(instance, cmd); 819 if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
820 megasas_return_mfi_mpt_pthr(instance, cmd,
821 cmd->mpt_pthr_cmd_blocked);
822 else
823 megasas_return_cmd(instance, cmd);
774 824
775 return ret; 825 return ret;
776} 826}
@@ -807,7 +857,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
807 u32 size_sync_info, num_lds; 857 u32 size_sync_info, num_lds;
808 struct fusion_context *fusion; 858 struct fusion_context *fusion;
809 struct MR_LD_TARGET_SYNC *ci = NULL; 859 struct MR_LD_TARGET_SYNC *ci = NULL;
810 struct MR_FW_RAID_MAP_ALL *map; 860 struct MR_DRV_RAID_MAP_ALL *map;
811 struct MR_LD_RAID *raid; 861 struct MR_LD_RAID *raid;
812 struct MR_LD_TARGET_SYNC *ld_sync; 862 struct MR_LD_TARGET_SYNC *ld_sync;
813 dma_addr_t ci_h = 0; 863 dma_addr_t ci_h = 0;
@@ -828,7 +878,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
828 return 1; 878 return 1;
829 } 879 }
830 880
831 map = fusion->ld_map[instance->map_id & 1]; 881 map = fusion->ld_drv_map[instance->map_id & 1];
832 882
833 num_lds = le32_to_cpu(map->raidMap.ldCount); 883 num_lds = le32_to_cpu(map->raidMap.ldCount);
834 884
@@ -840,7 +890,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
840 890
841 ci = (struct MR_LD_TARGET_SYNC *) 891 ci = (struct MR_LD_TARGET_SYNC *)
842 fusion->ld_map[(instance->map_id - 1) & 1]; 892 fusion->ld_map[(instance->map_id - 1) & 1];
843 memset(ci, 0, sizeof(struct MR_FW_RAID_MAP_ALL)); 893 memset(ci, 0, fusion->max_map_sz);
844 894
845 ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1]; 895 ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1];
846 896
@@ -852,8 +902,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
852 ld_sync->seqNum = raid->seqNum; 902 ld_sync->seqNum = raid->seqNum;
853 } 903 }
854 904
855 size_map_info = sizeof(struct MR_FW_RAID_MAP) + 905 size_map_info = fusion->current_map_sz;
856 (sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1));
857 906
858 dcmd->cmd = MFI_CMD_DCMD; 907 dcmd->cmd = MFI_CMD_DCMD;
859 dcmd->cmd_status = 0xFF; 908 dcmd->cmd_status = 0xFF;
@@ -971,7 +1020,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
971 1020
972 max_cmd = instance->max_fw_cmds; 1021 max_cmd = instance->max_fw_cmds;
973 1022
974 fusion->reply_q_depth = ((max_cmd + 1 + 15)/16)*16; 1023 fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16);
975 1024
976 fusion->request_alloc_sz = 1025 fusion->request_alloc_sz =
977 sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd; 1026 sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd;
@@ -988,8 +1037,8 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
988 fusion->max_sge_in_chain = 1037 fusion->max_sge_in_chain =
989 MEGASAS_MAX_SZ_CHAIN_FRAME / sizeof(union MPI2_SGE_IO_UNION); 1038 MEGASAS_MAX_SZ_CHAIN_FRAME / sizeof(union MPI2_SGE_IO_UNION);
990 1039
991 instance->max_num_sge = fusion->max_sge_in_main_msg + 1040 instance->max_num_sge = rounddown_pow_of_two(
992 fusion->max_sge_in_chain - 2; 1041 fusion->max_sge_in_main_msg + fusion->max_sge_in_chain - 2);
993 1042
994 /* Used for pass thru MFI frame (DCMD) */ 1043 /* Used for pass thru MFI frame (DCMD) */
995 fusion->chain_offset_mfi_pthru = 1044 fusion->chain_offset_mfi_pthru =
@@ -1016,17 +1065,75 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
1016 goto fail_ioc_init; 1065 goto fail_ioc_init;
1017 1066
1018 megasas_display_intel_branding(instance); 1067 megasas_display_intel_branding(instance);
1068 if (megasas_get_ctrl_info(instance, instance->ctrl_info)) {
1069 dev_err(&instance->pdev->dev,
1070 "Could not get controller info. Fail from %s %d\n",
1071 __func__, __LINE__);
1072 goto fail_ioc_init;
1073 }
1074
1075 instance->supportmax256vd =
1076 instance->ctrl_info->adapterOperations3.supportMaxExtLDs;
1077 /* Below is additional check to address future FW enhancement */
1078 if (instance->ctrl_info->max_lds > 64)
1079 instance->supportmax256vd = 1;
1080 instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
1081 * MEGASAS_MAX_DEV_PER_CHANNEL;
1082 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
1083 * MEGASAS_MAX_DEV_PER_CHANNEL;
1084 if (instance->supportmax256vd) {
1085 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
1086 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
1087 } else {
1088 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
1089 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
1090 }
1091 dev_info(&instance->pdev->dev, "Firmware supports %d VDs %d PDs\n"
1092 "Driver supports %d VDs %d PDs\n",
1093 instance->fw_supported_vd_count,
1094 instance->fw_supported_pd_count,
1095 instance->drv_supported_vd_count,
1096 instance->drv_supported_pd_count);
1019 1097
1020 instance->flag_ieee = 1; 1098 instance->flag_ieee = 1;
1099 fusion->fast_path_io = 0;
1021 1100
1022 fusion->map_sz = sizeof(struct MR_FW_RAID_MAP) + 1101 fusion->old_map_sz =
1023 (sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1)); 1102 sizeof(struct MR_FW_RAID_MAP) + (sizeof(struct MR_LD_SPAN_MAP) *
1103 (instance->fw_supported_vd_count - 1));
1104 fusion->new_map_sz =
1105 sizeof(struct MR_FW_RAID_MAP_EXT);
1106 fusion->drv_map_sz =
1107 sizeof(struct MR_DRV_RAID_MAP) + (sizeof(struct MR_LD_SPAN_MAP) *
1108 (instance->drv_supported_vd_count - 1));
1109
1110 fusion->drv_map_pages = get_order(fusion->drv_map_sz);
1111 for (i = 0; i < 2; i++) {
1112 fusion->ld_map[i] = NULL;
1113 fusion->ld_drv_map[i] = (void *)__get_free_pages(GFP_KERNEL,
1114 fusion->drv_map_pages);
1115 if (!fusion->ld_drv_map[i]) {
1116 dev_err(&instance->pdev->dev, "Could not allocate "
1117 "memory for local map info for %d pages\n",
1118 fusion->drv_map_pages);
1119 if (i == 1)
1120 free_pages((ulong)fusion->ld_drv_map[0],
1121 fusion->drv_map_pages);
1122 goto fail_ioc_init;
1123 }
1124 }
1125
1126 fusion->max_map_sz = max(fusion->old_map_sz, fusion->new_map_sz);
1127
1128 if (instance->supportmax256vd)
1129 fusion->current_map_sz = fusion->new_map_sz;
1130 else
1131 fusion->current_map_sz = fusion->old_map_sz;
1024 1132
1025 fusion->fast_path_io = 0;
1026 1133
1027 for (i = 0; i < 2; i++) { 1134 for (i = 0; i < 2; i++) {
1028 fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev, 1135 fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
1029 fusion->map_sz, 1136 fusion->max_map_sz,
1030 &fusion->ld_map_phys[i], 1137 &fusion->ld_map_phys[i],
1031 GFP_KERNEL); 1138 GFP_KERNEL);
1032 if (!fusion->ld_map[i]) { 1139 if (!fusion->ld_map[i]) {
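The map sizes computed above all follow one pattern: a fixed header that already embeds one MR_LD_SPAN_MAP, plus (vd_count - 1) additional span maps. A small model of that arithmetic with placeholder struct sizes; only the 64 and 256 LD counts come from the patch, the byte values are assumptions for illustration.

#include <stdio.h>

#define MAX_LOGICAL_DRIVES     64       /* legacy FW limit (from the patch) */
#define MAX_LOGICAL_DRIVES_EXT 256      /* extended limit (from the patch) */

static unsigned long map_sz(unsigned long hdr_sz, unsigned long span_map_sz,
                            unsigned long vd_count)
{
        /* the header struct already embeds one span map, hence "- 1" */
        return hdr_sz + span_map_sz * (vd_count - 1);
}

int main(void)
{
        unsigned long hdr = 1024, span = 256;   /* placeholder sizes */

        printf("old_map_sz=%lu drv_map_sz=%lu\n",
               map_sz(hdr, span, MAX_LOGICAL_DRIVES),
               map_sz(hdr, span, MAX_LOGICAL_DRIVES_EXT));
        return 0;
}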
@@ -1043,7 +1150,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
1043 1150
1044fail_map_info: 1151fail_map_info:
1045 if (i == 1) 1152 if (i == 1)
1046 dma_free_coherent(&instance->pdev->dev, fusion->map_sz, 1153 dma_free_coherent(&instance->pdev->dev, fusion->max_map_sz,
1047 fusion->ld_map[0], fusion->ld_map_phys[0]); 1154 fusion->ld_map[0], fusion->ld_map_phys[0]);
1048fail_ioc_init: 1155fail_ioc_init:
1049 megasas_free_cmds_fusion(instance); 1156 megasas_free_cmds_fusion(instance);
@@ -1065,6 +1172,11 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
1065 u32 req_desc_hi, 1172 u32 req_desc_hi,
1066 struct megasas_register_set __iomem *regs) 1173 struct megasas_register_set __iomem *regs)
1067{ 1174{
1175#if defined(writeq) && defined(CONFIG_64BIT)
1176 u64 req_data = (((u64)req_desc_hi << 32) | (u32)req_desc_lo);
1177
1178 writeq(le64_to_cpu(req_data), &(regs)->inbound_low_queue_port);
1179#else
1068 unsigned long flags; 1180 unsigned long flags;
1069 1181
1070 spin_lock_irqsave(&instance->hba_lock, flags); 1182 spin_lock_irqsave(&instance->hba_lock, flags);
@@ -1072,6 +1184,7 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance,
1072 writel(le32_to_cpu(req_desc_lo), &(regs)->inbound_low_queue_port); 1184 writel(le32_to_cpu(req_desc_lo), &(regs)->inbound_low_queue_port);
1073 writel(le32_to_cpu(req_desc_hi), &(regs)->inbound_high_queue_port); 1185 writel(le32_to_cpu(req_desc_hi), &(regs)->inbound_high_queue_port);
1074 spin_unlock_irqrestore(&instance->hba_lock, flags); 1186 spin_unlock_irqrestore(&instance->hba_lock, flags);
1187#endif
1075} 1188}
1076 1189
1077/** 1190/**
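megasas_fire_cmd_fusion above now posts the request descriptor with a single writeq on 64-bit builds, so the low and high halves reach the inbound queue port atomically and the hba_lock spinlock can be skipped; 32-bit builds keep the two locked writel calls. A sketch of composing that 64-bit value (the helper is illustrative):

#include <stdint.h>
#include <stdio.h>

static uint64_t make_req_desc(uint32_t lo, uint32_t hi)
{
        /* one 64-bit store keeps both halves atomic w.r.t. other CPUs */
        return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
        uint64_t d = make_req_desc(0x00010040, 0x0000abcd);

        printf("descriptor = 0x%016llx\n", (unsigned long long)d);
        return 0;
}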
@@ -1224,7 +1337,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
1224void 1337void
1225megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, 1338megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
1226 struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp, 1339 struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
1227 struct MR_FW_RAID_MAP_ALL *local_map_ptr, u32 ref_tag) 1340 struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
1228{ 1341{
1229 struct MR_LD_RAID *raid; 1342 struct MR_LD_RAID *raid;
1230 u32 ld; 1343 u32 ld;
@@ -1409,7 +1522,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1409 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 1522 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
1410 struct IO_REQUEST_INFO io_info; 1523 struct IO_REQUEST_INFO io_info;
1411 struct fusion_context *fusion; 1524 struct fusion_context *fusion;
1412 struct MR_FW_RAID_MAP_ALL *local_map_ptr; 1525 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1413 u8 *raidLUN; 1526 u8 *raidLUN;
1414 1527
1415 device_id = MEGASAS_DEV_INDEX(instance, scp); 1528 device_id = MEGASAS_DEV_INDEX(instance, scp);
@@ -1486,10 +1599,10 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1486 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) 1599 if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
1487 io_info.isRead = 1; 1600 io_info.isRead = 1;
1488 1601
1489 local_map_ptr = fusion->ld_map[(instance->map_id & 1)]; 1602 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1490 1603
1491 if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >= 1604 if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >=
1492 MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io)) { 1605 instance->fw_supported_vd_count) || (!fusion->fast_path_io)) {
1493 io_request->RaidContext.regLockFlags = 0; 1606 io_request->RaidContext.regLockFlags = 0;
1494 fp_possible = 0; 1607 fp_possible = 0;
1495 } else { 1608 } else {
@@ -1529,10 +1642,11 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1529 if ((fusion->load_balance_info[device_id].loadBalanceFlag) && 1642 if ((fusion->load_balance_info[device_id].loadBalanceFlag) &&
1530 (io_info.isRead)) { 1643 (io_info.isRead)) {
1531 io_info.devHandle = 1644 io_info.devHandle =
1532 get_updated_dev_handle( 1645 get_updated_dev_handle(instance,
1533 &fusion->load_balance_info[device_id], 1646 &fusion->load_balance_info[device_id],
1534 &io_info); 1647 &io_info);
1535 scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG; 1648 scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
1649 cmd->pd_r1_lb = io_info.pd_after_lb;
1536 } else 1650 } else
1537 scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; 1651 scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
1538 cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle; 1652 cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
@@ -1579,7 +1693,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
1579 u32 device_id; 1693 u32 device_id;
1580 struct MPI2_RAID_SCSI_IO_REQUEST *io_request; 1694 struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
1581 u16 pd_index = 0; 1695 u16 pd_index = 0;
1582 struct MR_FW_RAID_MAP_ALL *local_map_ptr; 1696 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1583 struct fusion_context *fusion = instance->ctrl_context; 1697 struct fusion_context *fusion = instance->ctrl_context;
1584 u8 span, physArm; 1698 u8 span, physArm;
1585 u16 devHandle; 1699 u16 devHandle;
@@ -1591,7 +1705,7 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
1591 device_id = MEGASAS_DEV_INDEX(instance, scmd); 1705 device_id = MEGASAS_DEV_INDEX(instance, scmd);
1592 pd_index = (scmd->device->channel * MEGASAS_MAX_DEV_PER_CHANNEL) 1706 pd_index = (scmd->device->channel * MEGASAS_MAX_DEV_PER_CHANNEL)
1593 +scmd->device->id; 1707 +scmd->device->id;
1594 local_map_ptr = fusion->ld_map[(instance->map_id & 1)]; 1708 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1595 1709
1596 io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); 1710 io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
1597 1711
@@ -1639,7 +1753,8 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
1639 goto NonFastPath; 1753 goto NonFastPath;
1640 1754
1641 ld = MR_TargetIdToLdGet(device_id, local_map_ptr); 1755 ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1642 if ((ld >= MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io)) 1756 if ((ld >= instance->fw_supported_vd_count) ||
1757 (!fusion->fast_path_io))
1643 goto NonFastPath; 1758 goto NonFastPath;
1644 1759
1645 raid = MR_LdRaidGet(ld, local_map_ptr); 1760 raid = MR_LdRaidGet(ld, local_map_ptr);
@@ -1864,10 +1979,11 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
1864 struct megasas_cmd *cmd_mfi; 1979 struct megasas_cmd *cmd_mfi;
1865 struct megasas_cmd_fusion *cmd_fusion; 1980 struct megasas_cmd_fusion *cmd_fusion;
1866 u16 smid, num_completed; 1981 u16 smid, num_completed;
1867 u8 reply_descript_type, arm; 1982 u8 reply_descript_type;
1868 u32 status, extStatus, device_id; 1983 u32 status, extStatus, device_id;
1869 union desc_value d_val; 1984 union desc_value d_val;
1870 struct LD_LOAD_BALANCE_INFO *lbinfo; 1985 struct LD_LOAD_BALANCE_INFO *lbinfo;
1986 int threshold_reply_count = 0;
1871 1987
1872 fusion = instance->ctrl_context; 1988 fusion = instance->ctrl_context;
1873 1989
@@ -1914,10 +2030,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
1914 lbinfo = &fusion->load_balance_info[device_id]; 2030 lbinfo = &fusion->load_balance_info[device_id];
1915 if (cmd_fusion->scmd->SCp.Status & 2031 if (cmd_fusion->scmd->SCp.Status &
1916 MEGASAS_LOAD_BALANCE_FLAG) { 2032 MEGASAS_LOAD_BALANCE_FLAG) {
1917 arm = lbinfo->raid1DevHandle[0] == 2033 atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
1918 cmd_fusion->io_request->DevHandle ? 0 :
1919 1;
1920 atomic_dec(&lbinfo->scsi_pending_cmds[arm]);
1921 cmd_fusion->scmd->SCp.Status &= 2034 cmd_fusion->scmd->SCp.Status &=
1922 ~MEGASAS_LOAD_BALANCE_FLAG; 2035 ~MEGASAS_LOAD_BALANCE_FLAG;
1923 } 2036 }
@@ -1941,10 +2054,19 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
1941 break; 2054 break;
1942 case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */ 2055 case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
1943 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; 2056 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2057
2058 if (!cmd_mfi->mpt_pthr_cmd_blocked) {
2059 if (megasas_dbg_lvl == 5)
2060 dev_info(&instance->pdev->dev,
2061 "freeing mfi/mpt pass-through "
2062 "from %s %d\n",
2063 __func__, __LINE__);
2064 megasas_return_mfi_mpt_pthr(instance, cmd_mfi,
2065 cmd_fusion);
2066 }
2067
1944 megasas_complete_cmd(instance, cmd_mfi, DID_OK); 2068 megasas_complete_cmd(instance, cmd_mfi, DID_OK);
1945 cmd_fusion->flags = 0; 2069 cmd_fusion->flags = 0;
1946 megasas_return_cmd_fusion(instance, cmd_fusion);
1947
1948 break; 2070 break;
1949 } 2071 }
1950 2072
@@ -1955,6 +2077,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
1955 2077
1956 desc->Words = ULLONG_MAX; 2078 desc->Words = ULLONG_MAX;
1957 num_completed++; 2079 num_completed++;
2080 threshold_reply_count++;
1958 2081
1959 /* Get the next reply descriptor */ 2082 /* Get the next reply descriptor */
1960 if (!fusion->last_reply_idx[MSIxIndex]) 2083 if (!fusion->last_reply_idx[MSIxIndex])
@@ -1974,6 +2097,25 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
1974 2097
1975 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) 2098 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1976 break; 2099 break;
2100 /*
2101 * Write to the reply post host index register after completing the
2102 * threshold number of replies while more replies are still pending
2103 * in the reply queue, so FW can start reclaiming descriptors early
2104 */
2105 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
2106 if ((instance->pdev->device ==
2107 PCI_DEVICE_ID_LSI_INVADER) ||
2108 (instance->pdev->device ==
2109 PCI_DEVICE_ID_LSI_FURY))
2110 writel(((MSIxIndex & 0x7) << 24) |
2111 fusion->last_reply_idx[MSIxIndex],
2112 instance->reply_post_host_index_addr[MSIxIndex/8]);
2113 else
2114 writel((MSIxIndex << 24) |
2115 fusion->last_reply_idx[MSIxIndex],
2116 instance->reply_post_host_index_addr[0]);
2117 threshold_reply_count = 0;
2118 }
1977 } 2119 }
1978 2120
1979 if (!num_completed) 2121 if (!num_completed)
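The new block above rings the reply post host index register every THRESHOLD_REPLY_COUNT (50) completions so firmware can start reusing reply descriptors during long completion bursts instead of waiting for the loop to exit. The batching shape, modelled in plain C with the register write replaced by a stub:

#include <stdio.h>

#define THRESHOLD 50    /* mirrors THRESHOLD_REPLY_COUNT */

static void ring_doorbell(unsigned int msix, unsigned int last_idx)
{
        /* stands in for the writel() to reply_post_host_index_addr */
        printf("doorbell: msix=%u last_reply_idx=%u\n", msix, last_idx);
}

static void drain_replies(unsigned int nreplies, unsigned int msix)
{
        unsigned int i, since_ring = 0;

        for (i = 0; i < nreplies; i++) {
                /* ... complete reply i ... */
                if (++since_ring >= THRESHOLD) {
                        ring_doorbell(msix, i);
                        since_ring = 0;
                }
        }
        ring_doorbell(msix, nreplies ? nreplies - 1 : 0); /* final update */
}

int main(void)
{
        drain_replies(120, 0);
        return 0;
}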
@@ -2028,7 +2170,7 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
2028{ 2170{
2029 struct megasas_irq_context *irq_context = devp; 2171 struct megasas_irq_context *irq_context = devp;
2030 struct megasas_instance *instance = irq_context->instance; 2172 struct megasas_instance *instance = irq_context->instance;
2031 u32 mfiStatus, fw_state; 2173 u32 mfiStatus, fw_state, dma_state;
2032 2174
2033 if (instance->mask_interrupts) 2175 if (instance->mask_interrupts)
2034 return IRQ_NONE; 2176 return IRQ_NONE;
@@ -2050,7 +2192,16 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
2050 /* If we didn't complete any commands, check for FW fault */ 2192 /* If we didn't complete any commands, check for FW fault */
2051 fw_state = instance->instancet->read_fw_status_reg( 2193 fw_state = instance->instancet->read_fw_status_reg(
2052 instance->reg_set) & MFI_STATE_MASK; 2194 instance->reg_set) & MFI_STATE_MASK;
2053 if (fw_state == MFI_STATE_FAULT) { 2195 dma_state = instance->instancet->read_fw_status_reg
2196 (instance->reg_set) & MFI_STATE_DMADONE;
2197 if (instance->crash_dump_drv_support &&
2198 instance->crash_dump_app_support) {
2199 /* Start collecting crash, if DMA bit is done */
2200 if ((fw_state == MFI_STATE_FAULT) && dma_state)
2201 schedule_work(&instance->crash_init);
2202 else if (fw_state == MFI_STATE_FAULT)
2203 schedule_work(&instance->work_init);
2204 } else if (fw_state == MFI_STATE_FAULT) {
2054 printk(KERN_WARNING "megaraid_sas: Iop2SysDoorbellInt" 2205 printk(KERN_WARNING "megaraid_sas: Iop2SysDoorbellInt"
2055 "for scsi%d\n", instance->host->host_no); 2206 "for scsi%d\n", instance->host->host_no);
2056 schedule_work(&instance->work_init); 2207 schedule_work(&instance->work_init);
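The ISR change above splits the fault path: when the crash dump handshake is enabled and the scratchpad's DMA-done bit is set alongside the fault state, crash collection is scheduled instead of a plain OCR. A userspace model of that dispatch; the mask values are stand-ins, not the real MFI_STATE_* constants.

#include <stdint.h>
#include <stdio.h>

#define STATE_MASK  0xF0000000u /* assumed, models MFI_STATE_MASK */
#define STATE_FAULT 0xF0000000u /* assumed, models MFI_STATE_FAULT */
#define DMA_DONE    0x00000008u /* assumed, models MFI_STATE_DMADONE */

static const char *fault_action(uint32_t scratchpad, int crash_dump_enabled)
{
        uint32_t fw_state = scratchpad & STATE_MASK;
        uint32_t dma_done = scratchpad & DMA_DONE;

        if (fw_state != STATE_FAULT)
                return "none";
        if (crash_dump_enabled && dma_done)
                return "collect crash dump";     /* schedule_work(crash_init) */
        return "reset adapter";                  /* schedule_work(work_init) */
}

int main(void)
{
        printf("%s\n", fault_action(STATE_FAULT | DMA_DONE, 1));
        return 0;
}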
@@ -2075,6 +2226,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
2075 struct megasas_cmd_fusion *cmd; 2226 struct megasas_cmd_fusion *cmd;
2076 struct fusion_context *fusion; 2227 struct fusion_context *fusion;
2077 struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr; 2228 struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;
2229 u32 opcode;
2078 2230
2079 cmd = megasas_get_cmd_fusion(instance); 2231 cmd = megasas_get_cmd_fusion(instance);
2080 if (!cmd) 2232 if (!cmd)
@@ -2082,9 +2234,20 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
2082 2234
2083 /* Save the smid. To be used for returning the cmd */ 2235 /* Save the smid. To be used for returning the cmd */
2084 mfi_cmd->context.smid = cmd->index; 2236 mfi_cmd->context.smid = cmd->index;
2085
2086 cmd->sync_cmd_idx = mfi_cmd->index; 2237 cmd->sync_cmd_idx = mfi_cmd->index;
2087 2238
2239 /* Set this only for Blocked commands */
2240 opcode = le32_to_cpu(mfi_cmd->frame->dcmd.opcode);
2241 if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
2242 && (mfi_cmd->frame->dcmd.mbox.b[1] == 1))
2243 mfi_cmd->is_wait_event = 1;
2244
2245 if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
2246 mfi_cmd->is_wait_event = 1;
2247
2248 if (mfi_cmd->is_wait_event)
2249 mfi_cmd->mpt_pthr_cmd_blocked = cmd;
2250
2088 /* 2251 /*
2089 * For cmds where the flag is set, store the flag and check 2252 * For cmds where the flag is set, store the flag and check
2090 * on completion. For cmds with this flag, don't call 2253 * on completion. For cmds with this flag, don't call
@@ -2173,6 +2336,7 @@ megasas_issue_dcmd_fusion(struct megasas_instance *instance,
2173 printk(KERN_ERR "Couldn't issue MFI pass thru cmd\n"); 2336 printk(KERN_ERR "Couldn't issue MFI pass thru cmd\n");
2174 return; 2337 return;
2175 } 2338 }
2339 atomic_set(&cmd->mfi_mpt_pthr, MFI_MPT_ATTACHED);
2176 instance->instancet->fire_cmd(instance, req_desc->u.low, 2340 instance->instancet->fire_cmd(instance, req_desc->u.low,
2177 req_desc->u.high, instance->reg_set); 2341 req_desc->u.high, instance->reg_set);
2178} 2342}
@@ -2203,6 +2367,49 @@ megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs)
2203} 2367}
2204 2368
2205/** 2369/**
2370 * megasas_alloc_host_crash_buffer - Allocate host buffers for FW crash dump collection
2371 * @instance: Controller's soft instance
2372 * The number of buffers actually allocated is stored in instance->drv_buf_alloc.
2373 */
2374static void
2375megasas_alloc_host_crash_buffer(struct megasas_instance *instance)
2376{
2377 unsigned int i;
2378
2379 instance->crash_buf_pages = get_order(CRASH_DMA_BUF_SIZE);
2380 for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) {
2381 instance->crash_buf[i] = (void *)__get_free_pages(GFP_KERNEL,
2382 instance->crash_buf_pages);
2383 if (!instance->crash_buf[i]) {
2384 dev_info(&instance->pdev->dev, "Firmware crash dump "
2385 "memory allocation failed at index %d\n", i);
2386 break;
2387 }
2388 }
2389 instance->drv_buf_alloc = i;
2390}
2391
2392/**
2393 * megasas_free_host_crash_buffer - Free the host buffers allocated for FW crash dump collection
2394 * @instance: Controller's soft instance
2395 */
2396void
2397megasas_free_host_crash_buffer(struct megasas_instance *instance)
2398{
2399 unsigned int i;
2401 for (i = 0; i < instance->drv_buf_alloc; i++) {
2402 if (instance->crash_buf[i])
2403 free_pages((ulong)instance->crash_buf[i],
2404 instance->crash_buf_pages);
2405 }
2406 instance->drv_buf_index = 0;
2407 instance->drv_buf_alloc = 0;
2408 instance->fw_crash_state = UNAVAILABLE;
2409 instance->fw_crash_buffer_size = 0;
2410}
2411
2412/**
2206 * megasas_adp_reset_fusion - For controller reset 2413 * megasas_adp_reset_fusion - For controller reset
2207 * @regs: MFI register set 2414 * @regs: MFI register set
2208 */ 2415 */
@@ -2345,6 +2552,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2345 struct megasas_cmd *cmd_mfi; 2552 struct megasas_cmd *cmd_mfi;
2346 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 2553 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2347 u32 host_diag, abs_state, status_reg, reset_adapter; 2554 u32 host_diag, abs_state, status_reg, reset_adapter;
2555 u32 io_timeout_in_crash_mode = 0;
2348 2556
2349 instance = (struct megasas_instance *)shost->hostdata; 2557 instance = (struct megasas_instance *)shost->hostdata;
2350 fusion = instance->ctrl_context; 2558 fusion = instance->ctrl_context;
@@ -2355,8 +2563,45 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2355 printk(KERN_WARNING "megaraid_sas: Hardware critical error, " 2563 printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
2356 "returning FAILED for scsi%d.\n", 2564 "returning FAILED for scsi%d.\n",
2357 instance->host->host_no); 2565 instance->host->host_no);
2566 mutex_unlock(&instance->reset_mutex);
2358 return FAILED; 2567 return FAILED;
2359 } 2568 }
2569 status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
2570 abs_state = status_reg & MFI_STATE_MASK;
2571
2572 /* IO timeout detected, forcibly put FW in FAULT state */
2573 if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf &&
2574 instance->crash_dump_app_support && iotimeout) {
2575 dev_info(&instance->pdev->dev, "IO timeout is detected, "
2576 "forcibly FAULT Firmware\n");
2577 instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
2578 status_reg = readl(&instance->reg_set->doorbell);
2579 writel(status_reg | MFI_STATE_FORCE_OCR,
2580 &instance->reg_set->doorbell);
2581 readl(&instance->reg_set->doorbell);
2582 mutex_unlock(&instance->reset_mutex);
2583 do {
2584 ssleep(3);
2585 io_timeout_in_crash_mode++;
2586 dev_dbg(&instance->pdev->dev, "waiting for [%d] "
2587 "seconds for crash dump collection and OCR "
2588 "to be done\n", (io_timeout_in_crash_mode * 3));
2589 } while ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
2590 (io_timeout_in_crash_mode < 80));
2591
2592 if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
2593 dev_info(&instance->pdev->dev, "OCR done for IO "
2594 "timeout case\n");
2595 retval = SUCCESS;
2596 } else {
2597 dev_info(&instance->pdev->dev, "Controller is not "
2598 "operational after 240 seconds wait for IO "
2599 "timeout case in FW crash dump mode\n do "
2600 "OCR/kill adapter\n");
2601 retval = megasas_reset_fusion(shost, 0);
2602 }
2603 return retval;
2604 }
2360 2605
2361 if (instance->requestorId && !instance->skip_heartbeat_timer_del) 2606 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
2362 del_timer_sync(&instance->sriov_heartbeat_timer); 2607 del_timer_sync(&instance->sriov_heartbeat_timer);
@@ -2563,10 +2808,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2563 cmd_list[cmd_fusion->sync_cmd_idx]; 2808 cmd_list[cmd_fusion->sync_cmd_idx];
2564 if (cmd_mfi->frame->dcmd.opcode == 2809 if (cmd_mfi->frame->dcmd.opcode ==
2565 cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) { 2810 cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) {
2566 megasas_return_cmd(instance, 2811 megasas_return_mfi_mpt_pthr(instance, cmd_mfi, cmd_fusion);
2567 cmd_mfi);
2568 megasas_return_cmd_fusion(
2569 instance, cmd_fusion);
2570 } else { 2812 } else {
2571 req_desc = 2813 req_desc =
2572 megasas_get_request_descriptor( 2814 megasas_get_request_descriptor(
@@ -2603,7 +2845,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2603 /* Reset load balance info */ 2845 /* Reset load balance info */
2604 memset(fusion->load_balance_info, 0, 2846 memset(fusion->load_balance_info, 0,
2605 sizeof(struct LD_LOAD_BALANCE_INFO) 2847 sizeof(struct LD_LOAD_BALANCE_INFO)
2606 *MAX_LOGICAL_DRIVES); 2848 *MAX_LOGICAL_DRIVES_EXT);
2607 2849
2608 if (!megasas_get_map_info(instance)) 2850 if (!megasas_get_map_info(instance))
2609 megasas_sync_map_info(instance); 2851 megasas_sync_map_info(instance);
@@ -2623,6 +2865,15 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2623 printk(KERN_WARNING "megaraid_sas: Reset " 2865 printk(KERN_WARNING "megaraid_sas: Reset "
2624 "successful for scsi%d.\n", 2866 "successful for scsi%d.\n",
2625 instance->host->host_no); 2867 instance->host->host_no);
2868
2869 if (instance->crash_dump_drv_support) {
2870 if (instance->crash_dump_app_support)
2871 megasas_set_crash_dump_params(instance,
2872 MR_CRASH_BUF_TURN_ON);
2873 else
2874 megasas_set_crash_dump_params(instance,
2875 MR_CRASH_BUF_TURN_OFF);
2876 }
2626 retval = SUCCESS; 2877 retval = SUCCESS;
2627 goto out; 2878 goto out;
2628 } 2879 }
@@ -2651,6 +2902,74 @@ out:
2651 return retval; 2902 return retval;
2652} 2903}
2653 2904
2905/* Fusion Crash dump collection work queue */
2906void megasas_fusion_crash_dump_wq(struct work_struct *work)
2907{
2908 struct megasas_instance *instance =
2909 container_of(work, struct megasas_instance, crash_init);
2910 u32 status_reg;
2911 u8 partial_copy = 0;
2912
2913
2914 status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
2915
2916 /*
2917 * Allocate host crash buffers to copy data from 1 MB DMA crash buffer
2918 * to host crash buffers
2919 */
2920 if (instance->drv_buf_index == 0) {
2921 /* Buffer is already allocated for old Crash dump.
2922 * Do OCR and do not wait for crash dump collection
2923 */
2924 if (instance->drv_buf_alloc) {
2925 dev_info(&instance->pdev->dev, "earlier crash dump is "
2926 "not yet copied by application, ignoring this "
2927 "crash dump and initiating OCR\n");
2928 status_reg |= MFI_STATE_CRASH_DUMP_DONE;
2929 writel(status_reg,
2930 &instance->reg_set->outbound_scratch_pad);
2931 readl(&instance->reg_set->outbound_scratch_pad);
2932 return;
2933 }
2934 megasas_alloc_host_crash_buffer(instance);
2935 dev_info(&instance->pdev->dev, "Number of host crash buffers "
2936 "allocated: %d\n", instance->drv_buf_alloc);
2937 }
2938
2939 /*
2940 * If the driver has already allocated the maximum number of
2941 * buffers that can be allocated and FW still has more crash
2942 * dump data, the driver will ignore the extra data.
2943 */
2944 if (instance->drv_buf_index >= (instance->drv_buf_alloc)) {
2945 dev_info(&instance->pdev->dev, "Driver is done copying "
2946 "the buffer: %d\n", instance->drv_buf_alloc);
2947 status_reg |= MFI_STATE_CRASH_DUMP_DONE;
2948 partial_copy = 1;
2949 } else {
2950 memcpy(instance->crash_buf[instance->drv_buf_index],
2951 instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
2952 instance->drv_buf_index++;
2953 status_reg &= ~MFI_STATE_DMADONE;
2954 }
2955
2956 if (status_reg & MFI_STATE_CRASH_DUMP_DONE) {
2957 dev_info(&instance->pdev->dev, "Crash Dump is available,number "
2958 "of copied buffers: %d\n", instance->drv_buf_index);
2959 instance->fw_crash_buffer_size = instance->drv_buf_index;
2960 instance->fw_crash_state = AVAILABLE;
2961 instance->drv_buf_index = 0;
2962 writel(status_reg, &instance->reg_set->outbound_scratch_pad);
2963 readl(&instance->reg_set->outbound_scratch_pad);
2964 if (!partial_copy)
2965 megasas_reset_fusion(instance->host, 0);
2966 } else {
2967 writel(status_reg, &instance->reg_set->outbound_scratch_pad);
2968 readl(&instance->reg_set->outbound_scratch_pad);
2969 }
2970}
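megasas_fusion_crash_dump_wq above copies one CRASH_DMA_BUF_SIZE chunk per work invocation into the next host buffer until FW raises MFI_STATE_CRASH_DUMP_DONE or the driver runs out of buffers. A condensed sketch that folds those per-invocation copies into a single loop; the buffer count and the "more data" signal are assumptions for illustration.

#include <stdio.h>
#include <string.h>

#define CHUNK_SZ (1024 * 1024)  /* mirrors the 1 MB CRASH_DMA_BUF_SIZE */
#define MAX_BUFS 4              /* assumed host-buffer allocation limit */

static char host_buf[MAX_BUFS][CHUNK_SZ];
static char dma_buf[CHUNK_SZ];  /* stands in for the coherent DMA buffer */

static int chunks_left = 3;     /* simulates FW producing three chunks */
static int fw_has_more_data(void) { return chunks_left-- > 0; }

static int copy_crash_dump(void)
{
        int idx = 0;    /* mirrors instance->drv_buf_index */

        while (idx < MAX_BUFS && fw_has_more_data()) {
                memcpy(host_buf[idx], dma_buf, CHUNK_SZ);
                idx++;
        }
        return idx;
}

int main(void)
{
        printf("copied %d chunk(s)\n", copy_crash_dump());
        return 0;
}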
2971
2972
2654/* Fusion OCR work queue */ 2973/* Fusion OCR work queue */
2655void megasas_fusion_ocr_wq(struct work_struct *work) 2974void megasas_fusion_ocr_wq(struct work_struct *work)
2656{ 2975{
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index e76af5459a09..0d183d521bdd 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -86,6 +86,7 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
86 86
87#define MEGASAS_FP_CMD_LEN 16 87#define MEGASAS_FP_CMD_LEN 16
88#define MEGASAS_FUSION_IN_RESET 0 88#define MEGASAS_FUSION_IN_RESET 0
89#define THRESHOLD_REPLY_COUNT 50
89 90
90/* 91/*
91 * Raid Context structure which describes MegaRAID specific IO Parameters 92 * Raid Context structure which describes MegaRAID specific IO Parameters
@@ -478,10 +479,13 @@ struct MPI2_IOC_INIT_REQUEST {
478#define MAX_ROW_SIZE 32 479#define MAX_ROW_SIZE 32
479#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE) 480#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
480#define MAX_LOGICAL_DRIVES 64 481#define MAX_LOGICAL_DRIVES 64
482#define MAX_LOGICAL_DRIVES_EXT 256
481#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES) 483#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
482#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES) 484#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
483#define MAX_ARRAYS 128 485#define MAX_ARRAYS 128
484#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS) 486#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
487#define MAX_ARRAYS_EXT 256
488#define MAX_API_ARRAYS_EXT (MAX_ARRAYS_EXT)
485#define MAX_PHYSICAL_DEVICES 256 489#define MAX_PHYSICAL_DEVICES 256
486#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES) 490#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
487#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101 491#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
@@ -601,7 +605,6 @@ struct MR_FW_RAID_MAP {
601 u32 maxArrays; 605 u32 maxArrays;
602 } validationInfo; 606 } validationInfo;
603 u32 version[5]; 607 u32 version[5];
604 u32 reserved1[5];
605 }; 608 };
606 609
607 u32 ldCount; 610 u32 ldCount;
@@ -627,6 +630,8 @@ struct IO_REQUEST_INFO {
627 u8 start_span; 630 u8 start_span;
628 u8 reserved; 631 u8 reserved;
629 u64 start_row; 632 u64 start_row;
633 u8 span_arm; /* span[7:5], arm[4:0] */
634 u8 pd_after_lb;
630}; 635};
631 636
632struct MR_LD_TARGET_SYNC { 637struct MR_LD_TARGET_SYNC {
@@ -678,14 +683,14 @@ struct megasas_cmd_fusion {
678 u32 sync_cmd_idx; 683 u32 sync_cmd_idx;
679 u32 index; 684 u32 index;
680 u8 flags; 685 u8 flags;
686 u8 pd_r1_lb;
681}; 687};
682 688
683struct LD_LOAD_BALANCE_INFO { 689struct LD_LOAD_BALANCE_INFO {
684 u8 loadBalanceFlag; 690 u8 loadBalanceFlag;
685 u8 reserved1; 691 u8 reserved1;
686 u16 raid1DevHandle[2]; 692 atomic_t scsi_pending_cmds[MAX_PHYSICAL_DEVICES];
687 atomic_t scsi_pending_cmds[2]; 693 u64 last_accessed_block[MAX_PHYSICAL_DEVICES];
688 u64 last_accessed_block[2];
689}; 694};
690 695
691/* SPAN_SET is info calculated from span info from Raid map per LD */ 696/* SPAN_SET is info calculated from span info from Raid map per LD */
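With raid1DevHandle[2] gone, the load-balance state above is indexed by physical device ID rather than by a two-entry arm pair. A userspace model of the widened bookkeeping using C11 atomics; the names are illustrative, only the MAX_PHYSICAL_DEVICES count of 256 comes from the patch.

#include <stdatomic.h>
#include <stdint.h>

#define MAX_PDS 256     /* mirrors MAX_PHYSICAL_DEVICES */

struct lb_info {
        atomic_uint pending[MAX_PDS];   /* pending I/O per physical device */
        uint64_t last_block[MAX_PDS];   /* last accessed block per device */
};

static void io_issued(struct lb_info *lb, unsigned int pd, uint64_t last_lba)
{
        atomic_fetch_add(&lb->pending[pd], 1);
        lb->last_block[pd] = last_lba;
}

static void io_completed(struct lb_info *lb, unsigned int pd)
{
        /* mirrors the atomic_dec() on scsi_pending_cmds[pd_r1_lb] */
        atomic_fetch_sub(&lb->pending[pd], 1);
}

int main(void)
{
        static struct lb_info lb;       /* zero-initialized counters */

        io_issued(&lb, 42, 4096);
        io_completed(&lb, 42);
        return 0;
}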
@@ -713,11 +718,86 @@ struct MR_FW_RAID_MAP_ALL {
713 struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1]; 718 struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1];
714} __attribute__ ((packed)); 719} __attribute__ ((packed));
715 720
721struct MR_DRV_RAID_MAP {
722 /* total size of this structure, including this field.
723 * This feild will be manupulated by driver for ext raid map,
724 * else pick the value from firmware raid map.
725 */
726 u32 totalSize;
727
728 union {
729 struct {
730 u32 maxLd;
731 u32 maxSpanDepth;
732 u32 maxRowSize;
733 u32 maxPdCount;
734 u32 maxArrays;
735 } validationInfo;
736 u32 version[5];
737 };
738
739 /* timeout value used by driver in FP IOs*/
740 u8 fpPdIoTimeoutSec;
741 u8 reserved2[7];
742
743 u16 ldCount;
744 u16 arCount;
745 u16 spanCount;
746 u16 reserve3;
747
748 struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
749 u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
750 struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT];
751 struct MR_LD_SPAN_MAP ldSpanMap[1];
752
753};
754
755/* The driver raid map size is the same as the ext raid map.
756 * MR_DRV_RAID_MAP_ALL is defined to stay in sync with the old raid
757 * map layout, and exists mainly for code reuse.
758 */
759struct MR_DRV_RAID_MAP_ALL {
760
761 struct MR_DRV_RAID_MAP raidMap;
762 struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT - 1];
763} __packed;
764
765
766
767struct MR_FW_RAID_MAP_EXT {
768 /* Not used in the new map */
769 u32 reserved;
770
771 union {
772 struct {
773 u32 maxLd;
774 u32 maxSpanDepth;
775 u32 maxRowSize;
776 u32 maxPdCount;
777 u32 maxArrays;
778 } validationInfo;
779 u32 version[5];
780 };
781
782 u8 fpPdIoTimeoutSec;
783 u8 reserved2[7];
784
785 u16 ldCount;
786 u16 arCount;
787 u16 spanCount;
788 u16 reserve3;
789
790 struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
791 u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
792 struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT];
793 struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT];
794};
795
716struct fusion_context { 796struct fusion_context {
717 struct megasas_cmd_fusion **cmd_list; 797 struct megasas_cmd_fusion **cmd_list;
718 struct list_head cmd_pool; 798 struct list_head cmd_pool;
719 799
720 spinlock_t cmd_pool_lock; 800 spinlock_t mpt_pool_lock;
721 801
722 dma_addr_t req_frames_desc_phys; 802 dma_addr_t req_frames_desc_phys;
723 u8 *req_frames_desc; 803 u8 *req_frames_desc;
@@ -749,10 +829,18 @@ struct fusion_context {
749 struct MR_FW_RAID_MAP_ALL *ld_map[2]; 829 struct MR_FW_RAID_MAP_ALL *ld_map[2];
750 dma_addr_t ld_map_phys[2]; 830 dma_addr_t ld_map_phys[2];
751 831
752 u32 map_sz; 832 /* Non-DMA-able memory; driver's local copy. */
833 struct MR_DRV_RAID_MAP_ALL *ld_drv_map[2];
834
835 u32 max_map_sz;
836 u32 current_map_sz;
837 u32 old_map_sz;
838 u32 new_map_sz;
839 u32 drv_map_sz;
840 u32 drv_map_pages;
753 u8 fast_path_io; 841 u8 fast_path_io;
754 struct LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES]; 842 struct LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES_EXT];
755 LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES]; 843 LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT];
756}; 844};
757 845
758union desc_value { 846union desc_value {
@@ -763,4 +851,5 @@ union desc_value {
763 } u; 851 } u;
764}; 852};
765 853
854
766#endif /* _MEGARAID_SAS_FUSION_H_ */ 855#endif /* _MEGARAID_SAS_FUSION_H_ */
diff --git a/drivers/scsi/mpt2sas/Kconfig b/drivers/scsi/mpt2sas/Kconfig
index 39f08dd20556..657b45ca04c5 100644
--- a/drivers/scsi/mpt2sas/Kconfig
+++ b/drivers/scsi/mpt2sas/Kconfig
@@ -2,7 +2,7 @@
2# Kernel configuration file for the MPT2SAS 2# Kernel configuration file for the MPT2SAS
3# 3#
4# This code is based on drivers/scsi/mpt2sas/Kconfig 4# This code is based on drivers/scsi/mpt2sas/Kconfig
5# Copyright (C) 2007-2012 LSI Corporation 5# Copyright (C) 2007-2014 LSI Corporation
6# (mailto:DL-MPTFusionLinux@lsi.com) 6# (mailto:DL-MPTFusionLinux@lsi.com)
7 7
8# This program is free software; you can redistribute it and/or 8# This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 7b14a015c903..088eefa67da8 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2013 LSI Corporation. 2 * Copyright (c) 2000-2014 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2.h 5 * Name: mpi2.h
@@ -8,7 +8,7 @@
8 * scatter/gather formats. 8 * scatter/gather formats.
9 * Creation Date: June 21, 2006 9 * Creation Date: June 21, 2006
10 * 10 *
11 * mpi2.h Version: 02.00.28 11 * mpi2.h Version: 02.00.32
12 * 12 *
13 * Version History 13 * Version History
14 * --------------- 14 * ---------------
@@ -78,6 +78,11 @@
78 * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT. 78 * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT.
79 * 07-26-12 02.00.27 Bumped MPI2_HEADER_VERSION_UNIT. 79 * 07-26-12 02.00.27 Bumped MPI2_HEADER_VERSION_UNIT.
80 * 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT. 80 * 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT.
81 * 12-20-12 02.00.29 Bumped MPI2_HEADER_VERSION_UNIT.
82 * Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET.
83 * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT.
84 * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT.
85 * 08-19-13 02.00.32 Bumped MPI2_HEADER_VERSION_UNIT.
81 * -------------------------------------------------------------------------- 86 * --------------------------------------------------------------------------
82 */ 87 */
83 88
@@ -103,7 +108,7 @@
103#define MPI2_VERSION_02_00 (0x0200) 108#define MPI2_VERSION_02_00 (0x0200)
104 109
105/* versioning for this MPI header set */ 110/* versioning for this MPI header set */
106#define MPI2_HEADER_VERSION_UNIT (0x1C) 111#define MPI2_HEADER_VERSION_UNIT (0x20)
107#define MPI2_HEADER_VERSION_DEV (0x00) 112#define MPI2_HEADER_VERSION_DEV (0x00)
108#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) 113#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
109#define MPI2_HEADER_VERSION_UNIT_SHIFT (8) 114#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -263,6 +268,7 @@ typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS
263#define MPI2_REPLY_POST_HOST_INDEX_MASK (0x00FFFFFF) 268#define MPI2_REPLY_POST_HOST_INDEX_MASK (0x00FFFFFF)
264#define MPI2_RPHI_MSIX_INDEX_MASK (0xFF000000) 269#define MPI2_RPHI_MSIX_INDEX_MASK (0xFF000000)
265#define MPI2_RPHI_MSIX_INDEX_SHIFT (24) 270#define MPI2_RPHI_MSIX_INDEX_SHIFT (24)
271#define MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C) /* MPI v2.5 only */
266 272
267/* 273/*
268 * Defines for the HCBSize and address 274 * Defines for the HCBSize and address
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index 88cb7f828bbd..510ef0dc8d7b 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -1,12 +1,12 @@
1/* 1/*
2 * Copyright (c) 2000-2013 LSI Corporation. 2 * Copyright (c) 2000-2014 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2_cnfg.h 5 * Name: mpi2_cnfg.h
6 * Title: MPI Configuration messages and pages 6 * Title: MPI Configuration messages and pages
7 * Creation Date: November 10, 2006 7 * Creation Date: November 10, 2006
8 * 8 *
9 * mpi2_cnfg.h Version: 02.00.23 9 * mpi2_cnfg.h Version: 02.00.26
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -150,7 +150,13 @@
150 * Added UEFIVersion field to BIOS Page 1 and defined new 150 * Added UEFIVersion field to BIOS Page 1 and defined new
151 * BiosOptions bits. 151 * BiosOptions bits.
152 * 11-27-12 02.00.23 Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER. 152 * 11-27-12 02.00.23 Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER.
153 * Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID. 153 * Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID.
154 * 12-20-12 02.00.24 Marked MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION as
155 * obsolete for MPI v2.5 and later.
156 * Added some defines for 12G SAS speeds.
157 * 04-09-13 02.00.25 Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK.
158 * Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to
159 * match the specification.
154 * -------------------------------------------------------------------------- 160 * --------------------------------------------------------------------------
155 */ 161 */
156 162
@@ -773,6 +779,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1
773#define MPI2_IOUNITPAGE1_PAGEVERSION (0x04) 779#define MPI2_IOUNITPAGE1_PAGEVERSION (0x04)
774 780
775/* IO Unit Page 1 Flags defines */ 781/* IO Unit Page 1 Flags defines */
782#define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK (0x00004000)
776#define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800) 783#define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800)
777#define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600) 784#define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600)
778#define MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT (9) 785#define MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT (9)
@@ -844,7 +851,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_5 {
844#define MPI2_IOUNITPAGE5_PAGEVERSION (0x00) 851#define MPI2_IOUNITPAGE5_PAGEVERSION (0x00)
845 852
846/* defines for IO Unit Page 5 DmaEngineCapabilities field */ 853/* defines for IO Unit Page 5 DmaEngineCapabilities field */
847#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS (0xFF00) 854#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS (0xFFFF0000)
848#define MPI2_IOUNITPAGE5_DMA_CAP_SHIFT_MAX_REQUESTS (16) 855#define MPI2_IOUNITPAGE5_DMA_CAP_SHIFT_MAX_REQUESTS (16)
849 856
850#define MPI2_IOUNITPAGE5_DMA_CAP_EEDP (0x0008) 857#define MPI2_IOUNITPAGE5_DMA_CAP_EEDP (0x0008)
@@ -885,13 +892,17 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
885 U16 IOCTemperature; /* 0x10 */ 892 U16 IOCTemperature; /* 0x10 */
886 U8 IOCTemperatureUnits; /* 0x12 */ 893 U8 IOCTemperatureUnits; /* 0x12 */
887 U8 IOCSpeed; /* 0x13 */ 894 U8 IOCSpeed; /* 0x13 */
888 U16 BoardTemperature; /* 0x14 */ 895 U16 BoardTemperature; /* 0x14 */
889 U8 BoardTemperatureUnits; /* 0x16 */ 896 U8 BoardTemperatureUnits; /* 0x16 */
890 U8 Reserved3; /* 0x17 */ 897 U8 Reserved3; /* 0x17 */
898 U32 Reserved4; /* 0x18 */
899 U32 Reserved5; /* 0x1C */
900 U32 Reserved6; /* 0x20 */
901 U32 Reserved7; /* 0x24 */
891} MPI2_CONFIG_PAGE_IO_UNIT_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_7, 902} MPI2_CONFIG_PAGE_IO_UNIT_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_7,
892 Mpi2IOUnitPage7_t, MPI2_POINTER pMpi2IOUnitPage7_t; 903 Mpi2IOUnitPage7_t, MPI2_POINTER pMpi2IOUnitPage7_t;
893 904
894#define MPI2_IOUNITPAGE7_PAGEVERSION (0x02) 905#define MPI2_IOUNITPAGE7_PAGEVERSION (0x04)
895 906
896/* defines for IO Unit Page 7 PCIeWidth field */ 907/* defines for IO Unit Page 7 PCIeWidth field */
897#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01) 908#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01)
@@ -1801,6 +1812,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1
1801#define MPI2_SAS_PRATE_MAX_RATE_1_5 (0x80) 1812#define MPI2_SAS_PRATE_MAX_RATE_1_5 (0x80)
1802#define MPI2_SAS_PRATE_MAX_RATE_3_0 (0x90) 1813#define MPI2_SAS_PRATE_MAX_RATE_3_0 (0x90)
1803#define MPI2_SAS_PRATE_MAX_RATE_6_0 (0xA0) 1814#define MPI2_SAS_PRATE_MAX_RATE_6_0 (0xA0)
1815#define MPI25_SAS_PRATE_MAX_RATE_12_0 (0xB0)
1804#define MPI2_SAS_PRATE_MIN_RATE_MASK (0x0F) 1816#define MPI2_SAS_PRATE_MIN_RATE_MASK (0x0F)
1805#define MPI2_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00) 1817#define MPI2_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00)
1806#define MPI2_SAS_PRATE_MIN_RATE_1_5 (0x08) 1818#define MPI2_SAS_PRATE_MIN_RATE_1_5 (0x08)
@@ -1813,6 +1825,7 @@ typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1
1813#define MPI2_SAS_HWRATE_MAX_RATE_1_5 (0x80) 1825#define MPI2_SAS_HWRATE_MAX_RATE_1_5 (0x80)
1814#define MPI2_SAS_HWRATE_MAX_RATE_3_0 (0x90) 1826#define MPI2_SAS_HWRATE_MAX_RATE_3_0 (0x90)
1815#define MPI2_SAS_HWRATE_MAX_RATE_6_0 (0xA0) 1827#define MPI2_SAS_HWRATE_MAX_RATE_6_0 (0xA0)
1828#define MPI25_SAS_HWRATE_MAX_RATE_12_0 (0xB0)
1816#define MPI2_SAS_HWRATE_MIN_RATE_MASK (0x0F) 1829#define MPI2_SAS_HWRATE_MIN_RATE_MASK (0x0F)
1817#define MPI2_SAS_HWRATE_MIN_RATE_1_5 (0x08) 1830#define MPI2_SAS_HWRATE_MIN_RATE_1_5 (0x08)
1818#define MPI2_SAS_HWRATE_MIN_RATE_3_0 (0x09) 1831#define MPI2_SAS_HWRATE_MIN_RATE_3_0 (0x09)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
index 9d284dae6553..eea1a16b13ec 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
@@ -1,12 +1,12 @@
1/* 1/*
2 * Copyright (c) 2000-2013 LSI Corporation. 2 * Copyright (c) 2000-2014 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2_init.h 5 * Name: mpi2_init.h
6 * Title: MPI SCSI initiator mode messages and structures 6 * Title: MPI SCSI initiator mode messages and structures
7 * Creation Date: June 23, 2006 7 * Creation Date: June 23, 2006
8 * 8 *
9 * mpi2_init.h Version: 02.00.14 9 * mpi2_init.h Version: 02.00.15
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -37,6 +37,8 @@
37 * 02-06-12 02.00.13 Added alternate defines for Task Priority / Command 37 * 02-06-12 02.00.13 Added alternate defines for Task Priority / Command
38 * Priority to match SAM-4. 38 * Priority to match SAM-4.
39 * 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION. 39 * 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION.
40 * 04-09-13 02.00.15 Added SCSIStatusQualifier field to MPI2_SCSI_IO_REPLY,
41 * replacing the Reserved4 field.
40 * -------------------------------------------------------------------------- 42 * --------------------------------------------------------------------------
41 */ 43 */
42 44
@@ -234,7 +236,7 @@ typedef struct _MPI2_SCSI_IO_REPLY
234 U32 SenseCount; /* 0x18 */ 236 U32 SenseCount; /* 0x18 */
235 U32 ResponseInfo; /* 0x1C */ 237 U32 ResponseInfo; /* 0x1C */
236 U16 TaskTag; /* 0x20 */ 238 U16 TaskTag; /* 0x20 */
237 U16 Reserved4; /* 0x22 */ 239 U16 SCSIStatusQualifier; /* 0x22 */
238 U32 BidirectionalTransferCount; /* 0x24 */ 240 U32 BidirectionalTransferCount; /* 0x24 */
239 U32 Reserved5; /* 0x28 */ 241 U32 Reserved5; /* 0x28 */
240 U32 Reserved6; /* 0x2C */ 242 U32 Reserved6; /* 0x2C */
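
The former Reserved4 word of the SCSI IO reply now carries the SCSI status qualifier. Like the other multi-byte reply fields it is little-endian on the wire, so a consumer would fetch it as below; the helper is an illustrative sketch, not part of the patch:

/* Illustrative only: the qualifier is little-endian, like every
 * other multi-byte field of the reply frame. */
static u16 example_scsi_status_qualifier(Mpi2SCSIIOReply_t *mpi_reply)
{
	return le16_to_cpu(mpi_reply->SCSIStatusQualifier);
}
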
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index d159c5f24aab..2c3b0f28576b 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -1,12 +1,12 @@
1/* 1/*
2 * Copyright (c) 2000-2013 LSI Corporation. 2 * Copyright (c) 2000-2014 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2_ioc.h 5 * Name: mpi2_ioc.h
6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages 6 * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
7 * Creation Date: October 11, 2006 7 * Creation Date: October 11, 2006
8 * 8 *
9 * mpi2_ioc.h Version: 02.00.22 9 * mpi2_ioc.h Version: 02.00.23
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -121,6 +121,11 @@
121 * 07-26-12 02.00.22 Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE. 121 * 07-26-12 02.00.22 Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE.
122 * Added ElapsedSeconds field to 122 * Added ElapsedSeconds field to
123 * MPI2_EVENT_DATA_IR_OPERATION_STATUS. 123 * MPI2_EVENT_DATA_IR_OPERATION_STATUS.
124 * 08-19-13 02.00.23 For IOCInit, added MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE
125 * and MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY.
126 * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE.
127 * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY.
128 * Added Encrypted Hash Extended Image.
124 * -------------------------------------------------------------------------- 129 * --------------------------------------------------------------------------
125 */ 130 */
126 131
@@ -177,6 +182,9 @@ typedef struct _MPI2_IOC_INIT_REQUEST
177#define MPI2_WHOINIT_HOST_DRIVER (0x04) 182#define MPI2_WHOINIT_HOST_DRIVER (0x04)
178#define MPI2_WHOINIT_MANUFACTURER (0x05) 183#define MPI2_WHOINIT_MANUFACTURER (0x05)
179 184
185/* MsgFlags */
186#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01)
187
180/* MsgVersion */ 188/* MsgVersion */
181#define MPI2_IOCINIT_MSGVERSION_MAJOR_MASK (0xFF00) 189#define MPI2_IOCINIT_MSGVERSION_MAJOR_MASK (0xFF00)
182#define MPI2_IOCINIT_MSGVERSION_MAJOR_SHIFT (8) 190#define MPI2_IOCINIT_MSGVERSION_MAJOR_SHIFT (8)
@@ -189,9 +197,17 @@ typedef struct _MPI2_IOC_INIT_REQUEST
189#define MPI2_IOCINIT_HDRVERSION_DEV_MASK (0x00FF) 197#define MPI2_IOCINIT_HDRVERSION_DEV_MASK (0x00FF)
190#define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT (0) 198#define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT (0)
191 199
192/* minimum depth for the Reply Descriptor Post Queue */ 200/* minimum depth for a Reply Descriptor Post Queue */
193#define MPI2_RDPQ_DEPTH_MIN (16) 201#define MPI2_RDPQ_DEPTH_MIN (16)
194 202
203/* Reply Descriptor Post Queue Array Entry */
204typedef struct _MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY {
205 U64 RDPQBaseAddress; /* 0x00 */
206 U32 Reserved1; /* 0x08 */
207 U32 Reserved2; /* 0x0C */
208} MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY,
209MPI2_POINTER PTR_MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY,
210Mpi2IOCInitRDPQArrayEntry, MPI2_POINTER pMpi2IOCInitRDPQArrayEntry;
195 211
196/* IOCInit Reply message */ 212/* IOCInit Reply message */
197typedef struct _MPI2_IOC_INIT_REPLY 213typedef struct _MPI2_IOC_INIT_REPLY
@@ -307,6 +323,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY
307/* ProductID field uses MPI2_FW_HEADER_PID_ */ 323/* ProductID field uses MPI2_FW_HEADER_PID_ */
308 324
309/* IOCCapabilities */ 325/* IOCCapabilities */
326#define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000)
310#define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY (0x00010000) 327#define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY (0x00010000)
311#define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000) 328#define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000)
312#define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR (0x00004000) 329#define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR (0x00004000)
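
A host discovers RDPQ array support by testing the new capability bit in the IOCFacts reply, which is exactly what the mpt2sas_base.c hunk later in this patch does. Sketch for reference:

/* Illustrative only: mirrors the capability test that
 * _base_get_ioc_facts() performs later in this patch. */
static int example_rdpq_array_capable(Mpi2IOCFactsReply_t *mpi_reply)
{
	u32 caps = le32_to_cpu(mpi_reply->IOCCapabilities);

	return (caps & MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) ? 1 : 0;
}
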
@@ -1153,6 +1170,7 @@ typedef struct _MPI2_FW_DOWNLOAD_REQUEST
1153#define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09) 1170#define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09)
1154#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A) 1171#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A)
1155#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B) 1172#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
1173#define MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY (0x0C)
1156#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0) 1174#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
1157 1175
1158/* FWDownload TransactionContext Element */ 1176/* FWDownload TransactionContext Element */
@@ -1379,14 +1397,15 @@ typedef struct _MPI2_EXT_IMAGE_HEADER
1379#define MPI2_EXT_IMAGE_HEADER_SIZE (0x40) 1397#define MPI2_EXT_IMAGE_HEADER_SIZE (0x40)
1380 1398
1381/* defines for the ImageType field */ 1399/* defines for the ImageType field */
1382#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00) 1400#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
1383#define MPI2_EXT_IMAGE_TYPE_FW (0x01) 1401#define MPI2_EXT_IMAGE_TYPE_FW (0x01)
1384#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03) 1402#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03)
1385#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04) 1403#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04)
1386#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05) 1404#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05)
1387#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06) 1405#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06)
1388#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07) 1406#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
1389#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08) 1407#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08)
1408#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09)
1390#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80) 1409#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80)
1391#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF) 1410#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF)
1392#define MPI2_EXT_IMAGE_TYPE_MAX \ 1411#define MPI2_EXT_IMAGE_TYPE_MAX \
@@ -1555,6 +1574,39 @@ typedef struct _MPI2_INIT_IMAGE_FOOTER
1555#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14) 1574#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14)
1556 1575
1557 1576
1577/* Encrypted Hash Extended Image Data */
1578
1579typedef struct _MPI25_ENCRYPTED_HASH_ENTRY {
1580 U8 HashImageType; /* 0x00 */
1581 U8 HashAlgorithm; /* 0x01 */
1582 U8 EncryptionAlgorithm; /* 0x02 */
1583 U8 Reserved1; /* 0x03 */
1584 U32 Reserved2; /* 0x04 */
1585 U32 EncryptedHash[1]; /* 0x08 */
1586} MPI25_ENCRYPTED_HASH_ENTRY, MPI2_POINTER PTR_MPI25_ENCRYPTED_HASH_ENTRY,
1587Mpi25EncryptedHashEntry_t, MPI2_POINTER pMpi25EncryptedHashEntry_t;
1588
1589/* values for HashImageType */
1590#define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00)
1591#define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01)
1592
1593/* values for HashAlgorithm */
1594#define MPI25_HASH_ALGORITHM_UNUSED (0x00)
1595#define MPI25_HASH_ALGORITHM_SHA256 (0x01)
1596
1597/* values for EncryptionAlgorithm */
1598#define MPI25_ENCRYPTION_ALG_UNUSED (0x00)
1599#define MPI25_ENCRYPTION_ALG_RSA256 (0x01)
1600
1601typedef struct _MPI25_ENCRYPTED_HASH_DATA {
1602 U8 ImageVersion; /* 0x00 */
1603 U8 NumHash; /* 0x01 */
1604 U16 Reserved1; /* 0x02 */
1605 U32 Reserved2; /* 0x04 */
1606 MPI25_ENCRYPTED_HASH_ENTRY EncryptedHashEntry[1]; /* 0x08 */
1607} MPI25_ENCRYPTED_HASH_DATA, MPI2_POINTER PTR_MPI25_ENCRYPTED_HASH_DATA,
1608Mpi25EncryptedHashData_t, MPI2_POINTER pMpi25EncryptedHashData_t;
1609
1558/**************************************************************************** 1610/****************************************************************************
1559* PowerManagementControl message 1611* PowerManagementControl message
1560****************************************************************************/ 1612****************************************************************************/
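
Both new structures end in a one-element array, the usual MPI idiom for variable-length data, so walking an encrypted-hash image takes explicit size arithmetic. A sketch under the assumption that every entry carries the same hash length (32 bytes for SHA-256); the helper is illustrative:

/* Illustrative only: total byte size of an encrypted-hash image,
 * assuming a uniform hash length per entry. offsetof() skips the
 * trailing one-element flexible arrays. */
static size_t example_hash_data_size(const Mpi25EncryptedHashData_t *data,
	size_t hash_bytes)
{
	size_t entry_sz = offsetof(MPI25_ENCRYPTED_HASH_ENTRY, EncryptedHash)
	    + hash_bytes;

	return offsetof(MPI25_ENCRYPTED_HASH_DATA, EncryptedHashEntry)
	    + (size_t)data->NumHash * entry_sz;
}
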
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
index 0d202a2c6db7..7efa58ff0d34 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
@@ -1,12 +1,12 @@
1/* 1/*
2 * Copyright (c) 2000-2013 LSI Corporation. 2 * Copyright (c) 2000-2014 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2_raid.h 5 * Name: mpi2_raid.h
6 * Title: MPI Integrated RAID messages and structures 6 * Title: MPI Integrated RAID messages and structures
7 * Creation Date: April 26, 2007 7 * Creation Date: April 26, 2007
8 * 8 *
9 * mpi2_raid.h Version: 02.00.09 9 * mpi2_raid.h Version: 02.00.10
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -29,6 +29,7 @@
29 * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN. 29 * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN.
30 * 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR. 30 * 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR.
31 * Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define. 31 * Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define.
32 * 04-17-13 02.00.10 Added MPI25_RAID_ACTION_ADATA_ALLOW_PI.
32 * -------------------------------------------------------------------------- 33 * --------------------------------------------------------------------------
33 */ 34 */
34 35
@@ -45,6 +46,9 @@
45* RAID Action messages 46* RAID Action messages
46****************************************************************************/ 47****************************************************************************/
47 48
49/* ActionDataWord defines for use with MPI2_RAID_ACTION_CREATE_VOLUME action */
50#define MPI25_RAID_ACTION_ADATA_ALLOW_PI (0x80000000)
51
48/* ActionDataWord defines for use with MPI2_RAID_ACTION_DELETE_VOLUME action */ 52/* ActionDataWord defines for use with MPI2_RAID_ACTION_DELETE_VOLUME action */
49#define MPI2_RAID_ACTION_ADATA_KEEP_LBA0 (0x00000000) 53#define MPI2_RAID_ACTION_ADATA_KEEP_LBA0 (0x00000000)
50#define MPI2_RAID_ACTION_ADATA_ZERO_LBA0 (0x00000001) 54#define MPI2_RAID_ACTION_ADATA_ZERO_LBA0 (0x00000001)
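
The new create-volume flag is set in the ActionDataWord of a RAID action request. An illustrative fragment, with the surrounding request setup (handles, SGE, dispatch) elided:

/* Illustrative only: ask the firmware to preserve protection
 * information when creating a volume. */
static void example_create_volume_with_pi(Mpi2RaidActionRequest_t *mpi_request)
{
	mpi_request->Action = MPI2_RAID_ACTION_CREATE_VOLUME;
	mpi_request->ActionDataWord =
	    cpu_to_le32(MPI25_RAID_ACTION_ADATA_ALLOW_PI);
}
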
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
index 50b39ccd526a..45b6fa10b803 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2013 LSI Corporation. 2 * Copyright (c) 2000-2014 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2_sas.h 5 * Name: mpi2_sas.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
index 11b2ac4e7c6e..9be03ed46180 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -1,12 +1,12 @@
1/* 1/*
2 * Copyright (c) 2000-2013 LSI Corporation. 2 * Copyright (c) 2000-2014 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2_tool.h 5 * Name: mpi2_tool.h
6 * Title: MPI diagnostic tool structures and definitions 6 * Title: MPI diagnostic tool structures and definitions
7 * Creation Date: March 26, 2007 7 * Creation Date: March 26, 2007
8 * 8 *
9 * mpi2_tool.h Version: 02.00.10 9 * mpi2_tool.h Version: 02.00.11
10 * 10 *
11 * Version History 11 * Version History
12 * --------------- 12 * ---------------
@@ -29,6 +29,7 @@
29 * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST. 29 * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST.
30 * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that 30 * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that
31 * it uses MPI Chain SGE as well as MPI Simple SGE. 31 * it uses MPI Chain SGE as well as MPI Simple SGE.
32 * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info.
32 * -------------------------------------------------------------------------- 33 * --------------------------------------------------------------------------
33 */ 34 */
34 35
@@ -48,6 +49,7 @@
48#define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03) 49#define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03)
49#define MPI2_TOOLBOX_BEACON_TOOL (0x05) 50#define MPI2_TOOLBOX_BEACON_TOOL (0x05)
50#define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06) 51#define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06)
52#define MPI2_TOOLBOX_TEXT_DISPLAY_TOOL (0x07)
51 53
52 54
53/**************************************************************************** 55/****************************************************************************
@@ -321,6 +323,44 @@ typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY {
321 MPI2_POINTER pMpi2ToolboxDiagnosticCliReply_t; 323 MPI2_POINTER pMpi2ToolboxDiagnosticCliReply_t;
322 324
323 325
326/****************************************************************************
327* Toolbox Console Text Display Tool
328****************************************************************************/
329
330/* Toolbox Console Text Display Tool request message */
331typedef struct _MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST {
332 U8 Tool; /* 0x00 */
333 U8 Reserved1; /* 0x01 */
334 U8 ChainOffset; /* 0x02 */
335 U8 Function; /* 0x03 */
336 U16 Reserved2; /* 0x04 */
337 U8 Reserved3; /* 0x06 */
338 U8 MsgFlags; /* 0x07 */
339 U8 VP_ID; /* 0x08 */
340 U8 VF_ID; /* 0x09 */
341 U16 Reserved4; /* 0x0A */
342 U8 Console; /* 0x0C */
343 U8 Flags; /* 0x0D */
344 U16 Reserved6; /* 0x0E */
345 U8 TextToDisplay[4]; /* 0x10 */
346} MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST,
347MPI2_POINTER PTR_MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST,
348Mpi2ToolboxTextDisplayRequest_t,
349MPI2_POINTER pMpi2ToolboxTextDisplayRequest_t;
350
351/* defines for the Console field */
352#define MPI2_TOOLBOX_CONSOLE_TYPE_MASK (0xF0)
353#define MPI2_TOOLBOX_CONSOLE_TYPE_DEFAULT (0x00)
354#define MPI2_TOOLBOX_CONSOLE_TYPE_UART (0x10)
355#define MPI2_TOOLBOX_CONSOLE_TYPE_ETHERNET (0x20)
356
357#define MPI2_TOOLBOX_CONSOLE_NUMBER_MASK (0x0F)
358
359/* defines for the Flags field */
360#define MPI2_TOOLBOX_CONSOLE_FLAG_TIMESTAMP (0x01)
361
362
363
324/***************************************************************************** 364/*****************************************************************************
325* 365*
326* Diagnostic Buffer Messages 366* Diagnostic Buffer Messages
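
The text display tool writes a string to one of the IOC's consoles; the Console byte selects console type and number, and the Flags byte can request a timestamp. A hedged sketch of filling the request, with frame sizing and dispatch elided (TextToDisplay[4] is only the start of the text buffer, so a real frame must be sized for the whole NUL-terminated string):

/* Illustrative only: fill a console text-display request for UART
 * console 0, timestamp enabled. */
static void example_fill_text_display(Mpi2ToolboxTextDisplayRequest_t *request,
	const char *text)
{
	memset(request, 0, sizeof(*request));
	request->Function = MPI2_FUNCTION_TOOLBOX;
	request->Tool = MPI2_TOOLBOX_TEXT_DISPLAY_TOOL;
	request->Console = MPI2_TOOLBOX_CONSOLE_TYPE_UART;	/* console 0 */
	request->Flags = MPI2_TOOLBOX_CONSOLE_FLAG_TIMESTAMP;
	strncpy((char *)request->TextToDisplay, text,
	    sizeof(request->TextToDisplay));
}
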
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_type.h b/drivers/scsi/mpt2sas/mpi/mpi2_type.h
index 0b128b68a5ea..6b0dcdd02f68 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_type.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_type.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2000-2013 LSI Corporation. 2 * Copyright (c) 2000-2014 LSI Corporation.
3 * 3 *
4 * 4 *
5 * Name: mpi2_type.h 5 * Name: mpi2_type.h
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 2f262be890c5..58e45216d1ec 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -3,7 +3,7 @@
3 * for access to MPT (Message Passing Technology) firmware. 3 * for access to MPT (Message Passing Technology) firmware.
4 * 4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c 5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
6 * Copyright (C) 2007-2013 LSI Corporation 6 * Copyright (C) 2007-2014 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com) 7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
@@ -80,6 +80,10 @@ static int msix_disable = -1;
80module_param(msix_disable, int, 0); 80module_param(msix_disable, int, 0);
81MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)"); 81MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
82 82
83static int max_msix_vectors = -1;
84module_param(max_msix_vectors, int, 0);
85MODULE_PARM_DESC(max_msix_vectors, " max msix vectors ");
86
83static int mpt2sas_fwfault_debug; 87static int mpt2sas_fwfault_debug;
84MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault " 88MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
85 "and halt firmware - (default=0)"); 89 "and halt firmware - (default=0)");
@@ -88,6 +92,12 @@ static int disable_discovery = -1;
88module_param(disable_discovery, int, 0); 92module_param(disable_discovery, int, 0);
89MODULE_PARM_DESC(disable_discovery, " disable discovery "); 93MODULE_PARM_DESC(disable_discovery, " disable discovery ");
90 94
95static int
96_base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag);
97
98static int
99_base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag);
100
91/** 101/**
92 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug. 102 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
93 * 103 *
@@ -1175,17 +1185,22 @@ static int
1175_base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev) 1185_base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
1176{ 1186{
1177 struct sysinfo s; 1187 struct sysinfo s;
1178 char *desc = NULL; 1188 u64 consistent_dma_mask;
1189
1190 if (ioc->dma_mask)
1191 consistent_dma_mask = DMA_BIT_MASK(64);
1192 else
1193 consistent_dma_mask = DMA_BIT_MASK(32);
1179 1194
1180 if (sizeof(dma_addr_t) > 4) { 1195 if (sizeof(dma_addr_t) > 4) {
1181 const uint64_t required_mask = 1196 const uint64_t required_mask =
1182 dma_get_required_mask(&pdev->dev); 1197 dma_get_required_mask(&pdev->dev);
1183 if ((required_mask > DMA_BIT_MASK(32)) && !pci_set_dma_mask(pdev, 1198 if ((required_mask > DMA_BIT_MASK(32)) &&
1184 DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(pdev, 1199 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1185 DMA_BIT_MASK(64))) { 1200 !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
1186 ioc->base_add_sg_single = &_base_add_sg_single_64; 1201 ioc->base_add_sg_single = &_base_add_sg_single_64;
1187 ioc->sge_size = sizeof(Mpi2SGESimple64_t); 1202 ioc->sge_size = sizeof(Mpi2SGESimple64_t);
1188 desc = "64"; 1203 ioc->dma_mask = 64;
1189 goto out; 1204 goto out;
1190 } 1205 }
1191 } 1206 }
@@ -1194,18 +1209,29 @@ _base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
1194 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { 1209 && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1195 ioc->base_add_sg_single = &_base_add_sg_single_32; 1210 ioc->base_add_sg_single = &_base_add_sg_single_32;
1196 ioc->sge_size = sizeof(Mpi2SGESimple32_t); 1211 ioc->sge_size = sizeof(Mpi2SGESimple32_t);
1197 desc = "32"; 1212 ioc->dma_mask = 32;
1198 } else 1213 } else
1199 return -ENODEV; 1214 return -ENODEV;
1200 1215
1201 out: 1216 out:
1202 si_meminfo(&s); 1217 si_meminfo(&s);
1203 printk(MPT2SAS_INFO_FMT "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, " 1218 printk(MPT2SAS_INFO_FMT
1204 "total mem (%ld kB)\n", ioc->name, desc, convert_to_kb(s.totalram)); 1219 "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
1220 ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));
1205 1221
1206 return 0; 1222 return 0;
1207} 1223}
1208 1224
1225static int
1226_base_change_consistent_dma_mask(struct MPT2SAS_ADAPTER *ioc,
1227 struct pci_dev *pdev)
1228{
1229 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
1230 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1231 return -ENODEV;
1232 }
1233 return 0;
1234}
1209/** 1235/**
1210 * _base_check_enable_msix - checks MSIX capability. 1236 * _base_check_enable_msix - checks MSIX capability.
1211 * @ioc: per adapter object 1237 * @ioc: per adapter object
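
The DMA rework is two-staged: the streaming mask goes to 64 bit up front, but the coherent (consistent) mask starts at 32 bit so that coherent allocations made next, including the RDPQ rings, land below 4 GB; _base_change_consistent_dma_mask() then widens it back to 64 bit once IOCInit has advertised the ring addresses. The same negotiation with the modern DMA API would look roughly like this (a sketch; current kernels use dma_set_mask()/dma_set_coherent_mask() rather than the pci_* wrappers):

#include <linux/dma-mapping.h>

/* Illustrative only: 64-bit streaming mask, 32-bit coherent mask
 * while RDPQ rings are being allocated, widened afterwards. */
static int example_config_dma(struct device *dev, bool rdpq_possible)
{
	u64 coherent = rdpq_possible ? DMA_BIT_MASK(32) : DMA_BIT_MASK(64);

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(dev, coherent))
		return -ENODEV;
	return 0;
}
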
@@ -1402,6 +1428,20 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
1402 ioc->reply_queue_count = min_t(int, ioc->cpu_count, 1428 ioc->reply_queue_count = min_t(int, ioc->cpu_count,
1403 ioc->msix_vector_count); 1429 ioc->msix_vector_count);
1404 1430
1431 if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
1432 max_msix_vectors = 8;
1433
1434 if (max_msix_vectors > 0) {
1435 ioc->reply_queue_count = min_t(int, max_msix_vectors,
1436 ioc->reply_queue_count);
1437 ioc->msix_vector_count = ioc->reply_queue_count;
1438 } else if (max_msix_vectors == 0)
1439 goto try_ioapic;
1440
1441 printk(MPT2SAS_INFO_FMT
1442 "MSI-X vectors supported: %d, no of cores: %d, max_msix_vectors: %d\n",
1443 ioc->name, ioc->msix_vector_count, ioc->cpu_count, max_msix_vectors);
1444
1405 entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry), 1445 entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
1406 GFP_KERNEL); 1446 GFP_KERNEL);
1407 if (!entries) { 1447 if (!entries) {
@@ -1414,10 +1454,10 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
1414 for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) 1454 for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
1415 a->entry = i; 1455 a->entry = i;
1416 1456
1417 r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count); 1457 r = pci_enable_msix_exact(ioc->pdev, entries, ioc->reply_queue_count);
1418 if (r) { 1458 if (r) {
1419 dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "pci_enable_msix " 1459 dfailprintk(ioc, printk(MPT2SAS_INFO_FMT
1420 "failed (r=%d) !!!\n", ioc->name, r)); 1460 "pci_enable_msix_exact failed (r=%d) !!!\n", ioc->name, r));
1421 kfree(entries); 1461 kfree(entries);
1422 goto try_ioapic; 1462 goto try_ioapic;
1423 } 1463 }
@@ -1439,6 +1479,7 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
1439/* fall back to io_apic interrupt routing */ 1479/* fall back to io_apic interrupt routing */
1440 try_ioapic: 1480 try_ioapic:
1441 1481
1482 ioc->reply_queue_count = 1;
1442 r = _base_request_irq(ioc, 0, ioc->pdev->irq); 1483 r = _base_request_irq(ioc, 0, ioc->pdev->irq);
1443 1484
1444 return r; 1485 return r;
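
pci_enable_msix() could return fewer vectors than requested; pci_enable_msix_exact() either grants exactly the requested count or fails, so reply_queue_count always matches what was actually allocated. On current kernels the equivalent exact-or-fail request, with a try_ioapic style fallback, is a sketch like:

/* Illustrative only: exact-count MSI-X allocation with a single
 * legacy-vector fallback, using pci_alloc_irq_vectors(). */
static int example_alloc_reply_queue_vectors(struct pci_dev *pdev, int want)
{
	int got = pci_alloc_irq_vectors(pdev, want, want, PCI_IRQ_MSIX);

	if (got < 0)
		got = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
	return got;	/* number of vectors, or negative errno */
}
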
@@ -1520,6 +1561,16 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1520 } 1561 }
1521 1562
1522 _base_mask_interrupts(ioc); 1563 _base_mask_interrupts(ioc);
1564
1565 r = _base_get_ioc_facts(ioc, CAN_SLEEP);
1566 if (r)
1567 goto out_fail;
1568
1569 if (!ioc->rdpq_array_enable_assigned) {
1570 ioc->rdpq_array_enable = ioc->rdpq_array_capable;
1571 ioc->rdpq_array_enable_assigned = 1;
1572 }
1573
1523 r = _base_enable_msix(ioc); 1574 r = _base_enable_msix(ioc);
1524 if (r) 1575 if (r)
1525 goto out_fail; 1576 goto out_fail;
@@ -2317,7 +2368,8 @@ _base_static_config_pages(struct MPT2SAS_ADAPTER *ioc)
2317static void 2368static void
2318_base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc) 2369_base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
2319{ 2370{
2320 int i; 2371 int i = 0;
2372 struct reply_post_struct *rps;
2321 2373
2322 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 2374 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
2323 __func__)); 2375 __func__));
@@ -2358,15 +2410,25 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
2358 ioc->reply_free = NULL; 2410 ioc->reply_free = NULL;
2359 } 2411 }
2360 2412
2361 if (ioc->reply_post_free) { 2413 if (ioc->reply_post) {
2362 pci_pool_free(ioc->reply_post_free_dma_pool, 2414 do {
2363 ioc->reply_post_free, ioc->reply_post_free_dma); 2415 rps = &ioc->reply_post[i];
2416 if (rps->reply_post_free) {
2417 pci_pool_free(
2418 ioc->reply_post_free_dma_pool,
2419 rps->reply_post_free,
2420 rps->reply_post_free_dma);
2421 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT
2422 "reply_post_free_pool(0x%p): free\n",
2423 ioc->name, rps->reply_post_free));
2424 rps->reply_post_free = NULL;
2425 }
2426 } while (ioc->rdpq_array_enable &&
2427 (++i < ioc->reply_queue_count));
2428
2364 if (ioc->reply_post_free_dma_pool) 2429 if (ioc->reply_post_free_dma_pool)
2365 pci_pool_destroy(ioc->reply_post_free_dma_pool); 2430 pci_pool_destroy(ioc->reply_post_free_dma_pool);
2366 dexitprintk(ioc, printk(MPT2SAS_INFO_FMT 2431 kfree(ioc->reply_post);
2367 "reply_post_free_pool(0x%p): free\n", ioc->name,
2368 ioc->reply_post_free));
2369 ioc->reply_post_free = NULL;
2370 } 2432 }
2371 2433
2372 if (ioc->config_page) { 2434 if (ioc->config_page) {
@@ -2509,6 +2571,65 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2509 ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize, 2571 ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
2510 ioc->chains_needed_per_io)); 2572 ioc->chains_needed_per_io));
2511 2573
2574 /* reply post queue, 16 byte align */
2575 reply_post_free_sz = ioc->reply_post_queue_depth *
2576 sizeof(Mpi2DefaultReplyDescriptor_t);
2577
2578 sz = reply_post_free_sz;
2579 if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
2580 sz *= ioc->reply_queue_count;
2581
2582 ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
2583 (ioc->reply_queue_count):1,
2584 sizeof(struct reply_post_struct), GFP_KERNEL);
2585
2586 if (!ioc->reply_post) {
2587 printk(MPT2SAS_ERR_FMT "reply_post_free pool: kcalloc failed\n",
2588 ioc->name);
2589 goto out;
2590 }
2591 ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
2592 ioc->pdev, sz, 16, 0);
2593 if (!ioc->reply_post_free_dma_pool) {
2594 printk(MPT2SAS_ERR_FMT
2595 "reply_post_free pool: pci_pool_create failed\n",
2596 ioc->name);
2597 goto out;
2598 }
2599 i = 0;
2600 do {
2601 ioc->reply_post[i].reply_post_free =
2602 pci_pool_alloc(ioc->reply_post_free_dma_pool,
2603 GFP_KERNEL,
2604 &ioc->reply_post[i].reply_post_free_dma);
2605 if (!ioc->reply_post[i].reply_post_free) {
2606 printk(MPT2SAS_ERR_FMT
2607 "reply_post_free pool: pci_pool_alloc failed\n",
2608 ioc->name);
2609 goto out;
2610 }
2611 memset(ioc->reply_post[i].reply_post_free, 0, sz);
2612 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
2613 "reply post free pool (0x%p): depth(%d),"
2614 "element_size(%d), pool_size(%d kB)\n", ioc->name,
2615 ioc->reply_post[i].reply_post_free,
2616 ioc->reply_post_queue_depth, 8, sz/1024));
2617 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
2618 "reply_post_free_dma = (0x%llx)\n", ioc->name,
2619 (unsigned long long)
2620 ioc->reply_post[i].reply_post_free_dma));
2621 total_sz += sz;
2622 } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
2623
2624 if (ioc->dma_mask == 64) {
2625 if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
2626 printk(MPT2SAS_WARN_FMT
2627 "no suitable consistent DMA mask for %s\n",
2628 ioc->name, pci_name(ioc->pdev));
2629 goto out;
2630 }
2631 }
2632
2512 ioc->scsiio_depth = ioc->hba_queue_depth - 2633 ioc->scsiio_depth = ioc->hba_queue_depth -
2513 ioc->hi_priority_depth - ioc->internal_depth; 2634 ioc->hi_priority_depth - ioc->internal_depth;
2514 2635
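
With RDPQ enabled, the pool element is a single ring and one element is allocated per reply queue; without it, one contiguous region holds all rings, as before. The sizing rule restated as a sketch:

/* Illustrative only: per-queue rings under RDPQ, one contiguous
 * region (depth * queues) otherwise. */
static size_t example_reply_post_element_size(u16 depth, u8 queue_count,
	bool msix_enabled, bool rdpq_enabled)
{
	size_t ring_sz = (size_t)depth * sizeof(Mpi2DefaultReplyDescriptor_t);

	if (msix_enabled && !rdpq_enabled)
		return ring_sz * queue_count;	/* one contiguous region */
	return ring_sz;				/* one element per queue */
}
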
@@ -2720,37 +2841,6 @@ chain_done:
2720 "(0x%llx)\n", ioc->name, (unsigned long long)ioc->reply_free_dma)); 2841 "(0x%llx)\n", ioc->name, (unsigned long long)ioc->reply_free_dma));
2721 total_sz += sz; 2842 total_sz += sz;
2722 2843
2723 /* reply post queue, 16 byte align */
2724 reply_post_free_sz = ioc->reply_post_queue_depth *
2725 sizeof(Mpi2DefaultReplyDescriptor_t);
2726 if (_base_is_controller_msix_enabled(ioc))
2727 sz = reply_post_free_sz * ioc->reply_queue_count;
2728 else
2729 sz = reply_post_free_sz;
2730 ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
2731 ioc->pdev, sz, 16, 0);
2732 if (!ioc->reply_post_free_dma_pool) {
2733 printk(MPT2SAS_ERR_FMT "reply_post_free pool: pci_pool_create "
2734 "failed\n", ioc->name);
2735 goto out;
2736 }
2737 ioc->reply_post_free = pci_pool_alloc(ioc->reply_post_free_dma_pool ,
2738 GFP_KERNEL, &ioc->reply_post_free_dma);
2739 if (!ioc->reply_post_free) {
2740 printk(MPT2SAS_ERR_FMT "reply_post_free pool: pci_pool_alloc "
2741 "failed\n", ioc->name);
2742 goto out;
2743 }
2744 memset(ioc->reply_post_free, 0, sz);
2745 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply post free pool"
2746 "(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
2747 ioc->name, ioc->reply_post_free, ioc->reply_post_queue_depth, 8,
2748 sz/1024));
2749 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_post_free_dma = "
2750 "(0x%llx)\n", ioc->name, (unsigned long long)
2751 ioc->reply_post_free_dma));
2752 total_sz += sz;
2753
2754 ioc->config_page_sz = 512; 2844 ioc->config_page_sz = 512;
2755 ioc->config_page = pci_alloc_consistent(ioc->pdev, 2845 ioc->config_page = pci_alloc_consistent(ioc->pdev,
2756 ioc->config_page_sz, &ioc->config_page_dma); 2846 ioc->config_page_sz, &ioc->config_page_dma);
@@ -3374,6 +3464,64 @@ _base_get_port_facts(struct MPT2SAS_ADAPTER *ioc, int port, int sleep_flag)
3374} 3464}
3375 3465
3376/** 3466/**
3467 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
3468 * @ioc: per adapter object
 3469 * @timeout: timeout in seconds
3470 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3471 *
3472 * Returns 0 for success, non-zero for failure.
3473 */
3474static int
3475_base_wait_for_iocstate(struct MPT2SAS_ADAPTER *ioc, int timeout,
3476 int sleep_flag)
3477{
3478 u32 ioc_state, doorbell;
3479 int rc;
3480
3481 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3482 __func__));
3483
3484 if (ioc->pci_error_recovery)
3485 return 0;
3486
3487 doorbell = mpt2sas_base_get_iocstate(ioc, 0);
3488 ioc_state = doorbell & MPI2_IOC_STATE_MASK;
3489 dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: ioc_state(0x%08x)\n",
3490 ioc->name, __func__, ioc_state));
3491
3492 switch (ioc_state) {
3493 case MPI2_IOC_STATE_READY:
3494 case MPI2_IOC_STATE_OPERATIONAL:
3495 return 0;
3496 }
3497
3498 if (doorbell & MPI2_DOORBELL_USED) {
3499 dhsprintk(ioc, printk(MPT2SAS_INFO_FMT
3500 "unexpected doorbell activ!e\n", ioc->name));
3501 goto issue_diag_reset;
3502 }
3503
3504 if (ioc_state == MPI2_IOC_STATE_FAULT) {
3505 mpt2sas_base_fault_info(ioc, doorbell &
3506 MPI2_DOORBELL_DATA_MASK);
3507 goto issue_diag_reset;
3508 }
3509
3510 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
3511 timeout, sleep_flag);
3512 if (ioc_state) {
3513 printk(MPT2SAS_ERR_FMT
3514 "%s: failed going to ready state (ioc_state=0x%x)\n",
3515 ioc->name, __func__, ioc_state);
3516 return -EFAULT;
3517 }
3518
3519 issue_diag_reset:
3520 rc = _base_diag_reset(ioc, sleep_flag);
3521 return rc;
3522}
3523
3524/**
3377 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc 3525 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
3378 * @ioc: per adapter object 3526 * @ioc: per adapter object
3379 * @sleep_flag: CAN_SLEEP or NO_SLEEP 3527 * @sleep_flag: CAN_SLEEP or NO_SLEEP
@@ -3391,6 +3539,13 @@ _base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3391 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 3539 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3392 __func__)); 3540 __func__));
3393 3541
3542 r = _base_wait_for_iocstate(ioc, 10, sleep_flag);
3543 if (r) {
3544 printk(MPT2SAS_ERR_FMT "%s: failed getting to correct state\n",
3545 ioc->name, __func__);
3546 return r;
3547 }
3548
3394 mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t); 3549 mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
3395 mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t); 3550 mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
3396 memset(&mpi_request, 0, mpi_request_sz); 3551 memset(&mpi_request, 0, mpi_request_sz);
@@ -3422,6 +3577,9 @@ _base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3422 facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities); 3577 facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
3423 if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)) 3578 if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
3424 ioc->ir_firmware = 1; 3579 ioc->ir_firmware = 1;
3580 if ((facts->IOCCapabilities &
3581 MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE))
3582 ioc->rdpq_array_capable = 1;
3425 facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word); 3583 facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
3426 facts->IOCRequestFrameSize = 3584 facts->IOCRequestFrameSize =
3427 le16_to_cpu(mpi_reply.IOCRequestFrameSize); 3585 le16_to_cpu(mpi_reply.IOCRequestFrameSize);
@@ -3457,9 +3615,12 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3457{ 3615{
3458 Mpi2IOCInitRequest_t mpi_request; 3616 Mpi2IOCInitRequest_t mpi_request;
3459 Mpi2IOCInitReply_t mpi_reply; 3617 Mpi2IOCInitReply_t mpi_reply;
3460 int r; 3618 int i, r = 0;
3461 struct timeval current_time; 3619 struct timeval current_time;
3462 u16 ioc_status; 3620 u16 ioc_status;
3621 u32 reply_post_free_array_sz = 0;
3622 Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL;
3623 dma_addr_t reply_post_free_array_dma;
3463 3624
3464 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 3625 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3465 __func__)); 3626 __func__));
@@ -3488,9 +3649,31 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3488 cpu_to_le64((u64)ioc->request_dma); 3649 cpu_to_le64((u64)ioc->request_dma);
3489 mpi_request.ReplyFreeQueueAddress = 3650 mpi_request.ReplyFreeQueueAddress =
3490 cpu_to_le64((u64)ioc->reply_free_dma); 3651 cpu_to_le64((u64)ioc->reply_free_dma);
3491 mpi_request.ReplyDescriptorPostQueueAddress =
3492 cpu_to_le64((u64)ioc->reply_post_free_dma);
3493 3652
3653 if (ioc->rdpq_array_enable) {
3654 reply_post_free_array_sz = ioc->reply_queue_count *
3655 sizeof(Mpi2IOCInitRDPQArrayEntry);
3656 reply_post_free_array = pci_alloc_consistent(ioc->pdev,
3657 reply_post_free_array_sz, &reply_post_free_array_dma);
3658 if (!reply_post_free_array) {
3659 printk(MPT2SAS_ERR_FMT
3660 "reply_post_free_array: pci_alloc_consistent failed\n",
3661 ioc->name);
3662 r = -ENOMEM;
3663 goto out;
3664 }
3665 memset(reply_post_free_array, 0, reply_post_free_array_sz);
3666 for (i = 0; i < ioc->reply_queue_count; i++)
3667 reply_post_free_array[i].RDPQBaseAddress =
3668 cpu_to_le64(
3669 (u64)ioc->reply_post[i].reply_post_free_dma);
3670 mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
3671 mpi_request.ReplyDescriptorPostQueueAddress =
3672 cpu_to_le64((u64)reply_post_free_array_dma);
3673 } else {
3674 mpi_request.ReplyDescriptorPostQueueAddress =
3675 cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
3676 }
3494 3677
3495 /* This time stamp specifies number of milliseconds 3678 /* This time stamp specifies number of milliseconds
3496 * since epoch ~ midnight January 1, 1970. 3679 * since epoch ~ midnight January 1, 1970.
@@ -3518,7 +3701,7 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3518 if (r != 0) { 3701 if (r != 0) {
3519 printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n", 3702 printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
3520 ioc->name, __func__, r); 3703 ioc->name, __func__, r);
3521 return r; 3704 goto out;
3522 } 3705 }
3523 3706
3524 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; 3707 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
@@ -3528,7 +3711,12 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3528 r = -EIO; 3711 r = -EIO;
3529 } 3712 }
3530 3713
3531 return 0; 3714out:
3715 if (reply_post_free_array)
3716 pci_free_consistent(ioc->pdev, reply_post_free_array_sz,
3717 reply_post_free_array,
3718 reply_post_free_array_dma);
3719 return r;
3532} 3720}
3533 3721
3534/** 3722/**
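
RDPQ array mode replaces the single queue base address in IOCInit with a DMA-able table of Mpi2IOCInitRDPQArrayEntry, one entry per reply queue, flagged through MsgFlags; the table is only read during the handshake, hence the pci_free_consistent() on the way out. Condensed sketch of the population step shown above:

/* Illustrative only: condensed from _base_send_ioc_init() above;
 * allocation of the array and its teardown are elided. */
static void example_fill_rdpq_array(Mpi2IOCInitRequest_t *mpi_request,
	Mpi2IOCInitRDPQArrayEntry *array, dma_addr_t array_dma,
	struct reply_post_struct *reply_post, u8 queue_count)
{
	u8 i;

	for (i = 0; i < queue_count; i++)
		array[i].RDPQBaseAddress =
		    cpu_to_le64((u64)reply_post[i].reply_post_free_dma);
	mpi_request->MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
	mpi_request->ReplyDescriptorPostQueueAddress =
	    cpu_to_le64((u64)array_dma);
}
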
@@ -4061,7 +4249,7 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
4061 u8 hide_flag; 4249 u8 hide_flag;
4062 struct adapter_reply_queue *reply_q; 4250 struct adapter_reply_queue *reply_q;
4063 long reply_post_free; 4251 long reply_post_free;
4064 u32 reply_post_free_sz; 4252 u32 reply_post_free_sz, index = 0;
4065 4253
4066 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 4254 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
4067 __func__)); 4255 __func__));
@@ -4132,19 +4320,27 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
4132 _base_assign_reply_queues(ioc); 4320 _base_assign_reply_queues(ioc);
4133 4321
4134 /* initialize Reply Post Free Queue */ 4322 /* initialize Reply Post Free Queue */
4135 reply_post_free = (long)ioc->reply_post_free;
4136 reply_post_free_sz = ioc->reply_post_queue_depth * 4323 reply_post_free_sz = ioc->reply_post_queue_depth *
4137 sizeof(Mpi2DefaultReplyDescriptor_t); 4324 sizeof(Mpi2DefaultReplyDescriptor_t);
4325 reply_post_free = (long)ioc->reply_post[index].reply_post_free;
4138 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { 4326 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
4139 reply_q->reply_post_host_index = 0; 4327 reply_q->reply_post_host_index = 0;
4140 reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *) 4328 reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
4141 reply_post_free; 4329 reply_post_free;
4142 for (i = 0; i < ioc->reply_post_queue_depth; i++) 4330 for (i = 0; i < ioc->reply_post_queue_depth; i++)
4143 reply_q->reply_post_free[i].Words = 4331 reply_q->reply_post_free[i].Words =
4144 cpu_to_le64(ULLONG_MAX); 4332 cpu_to_le64(ULLONG_MAX);
4145 if (!_base_is_controller_msix_enabled(ioc)) 4333 if (!_base_is_controller_msix_enabled(ioc))
4146 goto skip_init_reply_post_free_queue; 4334 goto skip_init_reply_post_free_queue;
4147 reply_post_free += reply_post_free_sz; 4335 /*
4336 * If RDPQ is enabled, switch to the next allocation.
4337 * Otherwise advance within the contiguous region.
4338 */
4339 if (ioc->rdpq_array_enable)
4340 reply_post_free = (long)
4341 ioc->reply_post[++index].reply_post_free;
4342 else
4343 reply_post_free += reply_post_free_sz;
4148 } 4344 }
4149 skip_init_reply_post_free_queue: 4345 skip_init_reply_post_free_queue:
4150 4346
@@ -4272,6 +4468,8 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
4272 } 4468 }
4273 } 4469 }
4274 4470
4471 ioc->rdpq_array_enable_assigned = 0;
4472 ioc->dma_mask = 0;
4275 r = mpt2sas_base_map_resources(ioc); 4473 r = mpt2sas_base_map_resources(ioc);
4276 if (r) 4474 if (r)
4277 goto out_free_resources; 4475 goto out_free_resources;
@@ -4633,6 +4831,16 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
4633 r = -EFAULT; 4831 r = -EFAULT;
4634 goto out; 4832 goto out;
4635 } 4833 }
4834
4835 r = _base_get_ioc_facts(ioc, CAN_SLEEP);
4836 if (r)
4837 goto out;
4838
4839 if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
4840 panic("%s: Issue occurred with flashing controller firmware."
4841 "Please reboot the system and ensure that the correct"
4842 " firmware version is running\n", ioc->name);
4843
4636 r = _base_make_ioc_operational(ioc, sleep_flag); 4844 r = _base_make_ioc_operational(ioc, sleep_flag);
4637 if (!r) 4845 if (!r)
4638 _base_reset_handler(ioc, MPT2_IOC_DONE_RESET); 4846 _base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 0ac5815a7f91..239f169b0673 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -3,7 +3,7 @@
3 * for access to MPT (Message Passing Technology) firmware. 3 * for access to MPT (Message Passing Technology) firmware.
4 * 4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.h 5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.h
6 * Copyright (C) 2007-2013 LSI Corporation 6 * Copyright (C) 2007-2014 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com) 7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
@@ -69,8 +69,8 @@
69#define MPT2SAS_DRIVER_NAME "mpt2sas" 69#define MPT2SAS_DRIVER_NAME "mpt2sas"
70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" 70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" 71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
72#define MPT2SAS_DRIVER_VERSION "16.100.00.00" 72#define MPT2SAS_DRIVER_VERSION "18.100.00.00"
73#define MPT2SAS_MAJOR_VERSION 16 73#define MPT2SAS_MAJOR_VERSION 18
74#define MPT2SAS_MINOR_VERSION 100 74#define MPT2SAS_MINOR_VERSION 100
75#define MPT2SAS_BUILD_VERSION 00 75#define MPT2SAS_BUILD_VERSION 00
76#define MPT2SAS_RELEASE_VERSION 00 76#define MPT2SAS_RELEASE_VERSION 00
@@ -355,6 +355,7 @@ struct _internal_cmd {
355 * @slot: slot number 355 * @slot: slot number
356 * @phy: phy identifier provided in sas device page 0 356 * @phy: phy identifier provided in sas device page 0
357 * @responding: used in _scsih_sas_device_mark_responding 357 * @responding: used in _scsih_sas_device_mark_responding
358 * @pfa_led_on: flag for PFA LED status
358 */ 359 */
359struct _sas_device { 360struct _sas_device {
360 struct list_head list; 361 struct list_head list;
@@ -373,6 +374,7 @@ struct _sas_device {
373 u16 slot; 374 u16 slot;
374 u8 phy; 375 u8 phy;
375 u8 responding; 376 u8 responding;
377 u8 pfa_led_on;
376}; 378};
377 379
378/** 380/**
@@ -634,6 +636,11 @@ struct mpt2sas_port_facts {
634 u16 MaxPostedCmdBuffers; 636 u16 MaxPostedCmdBuffers;
635}; 637};
636 638
639struct reply_post_struct {
640 Mpi2ReplyDescriptorsUnion_t *reply_post_free;
641 dma_addr_t reply_post_free_dma;
642};
643
637/** 644/**
638 * enum mutex_type - task management mutex type 645 * enum mutex_type - task management mutex type
639 * @TM_MUTEX_OFF: mutex is not required because calling function is acquiring it 646 * @TM_MUTEX_OFF: mutex is not required because calling function is acquiring it
@@ -661,6 +668,7 @@ typedef void (*MPT2SAS_FLUSH_RUNNING_CMDS)(struct MPT2SAS_ADAPTER *ioc);
661 * @ir_firmware: IR firmware present 668 * @ir_firmware: IR firmware present
662 * @bars: bitmask of BAR's that must be configured 669 * @bars: bitmask of BAR's that must be configured
663 * @mask_interrupts: ignore interrupt 670 * @mask_interrupts: ignore interrupt
671 * @dma_mask: used to set the consistent dma mask
664 * @fault_reset_work_q_name: fw fault work queue 672 * @fault_reset_work_q_name: fw fault work queue
665 * @fault_reset_work_q: "" 673 * @fault_reset_work_q: ""
666 * @fault_reset_work: "" 674 * @fault_reset_work: ""
@@ -777,8 +785,11 @@ typedef void (*MPT2SAS_FLUSH_RUNNING_CMDS)(struct MPT2SAS_ADAPTER *ioc);
777 * @reply_free_dma_pool: 785 * @reply_free_dma_pool:
778 * @reply_free_host_index: tail index in pool to insert free replies 786 * @reply_free_host_index: tail index in pool to insert free replies
779 * @reply_post_queue_depth: reply post queue depth 787 * @reply_post_queue_depth: reply post queue depth
780 * @reply_post_free: pool for reply post (64bit descriptor) 788 * @reply_post_struct: struct for reply_post_free physical & virt address
781 * @reply_post_free_dma: 789 * @rdpq_array_capable: FW supports multiple reply queue addresses in ioc_init
790 * @rdpq_array_enable: rdpq_array support is enabled in the driver
791 * @rdpq_array_enable_assigned: this ensures that rdpq_array_enable flag
 792 * is assigned only once
782 * @reply_queue_count: number of reply queues 793 * @reply_queue_count: number of reply queues
783 * @reply_queue_list: link list containing the reply queue info 794 * @reply_queue_list: link list containing the reply queue info
784 * @reply_post_host_index: head index in the pool where FW completes IO 795 * @reply_post_host_index: head index in the pool where FW completes IO
@@ -800,6 +811,7 @@ struct MPT2SAS_ADAPTER {
800 u8 ir_firmware; 811 u8 ir_firmware;
801 int bars; 812 int bars;
802 u8 mask_interrupts; 813 u8 mask_interrupts;
814 int dma_mask;
803 815
804 /* fw fault handler */ 816 /* fw fault handler */
805 char fault_reset_work_q_name[20]; 817 char fault_reset_work_q_name[20];
@@ -970,8 +982,10 @@ struct MPT2SAS_ADAPTER {
970 982
971 /* reply post queue */ 983 /* reply post queue */
972 u16 reply_post_queue_depth; 984 u16 reply_post_queue_depth;
973 Mpi2ReplyDescriptorsUnion_t *reply_post_free; 985 struct reply_post_struct *reply_post;
974 dma_addr_t reply_post_free_dma; 986 u8 rdpq_array_capable;
987 u8 rdpq_array_enable;
988 u8 rdpq_array_enable_assigned;
975 struct dma_pool *reply_post_free_dma_pool; 989 struct dma_pool *reply_post_free_dma_pool;
976 u8 reply_queue_count; 990 u8 reply_queue_count;
977 struct list_head reply_queue_list; 991 struct list_head reply_queue_list;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index 0c47425c73f2..c72a2fff5dbb 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -2,7 +2,7 @@
2 * This module provides common API for accessing firmware configuration pages 2 * This module provides common API for accessing firmware configuration pages
3 * 3 *
4 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c 4 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
5 * Copyright (C) 2007-2013 LSI Corporation 5 * Copyright (C) 2007-2014 LSI Corporation
6 * (mailto:DL-MPTFusionLinux@lsi.com) 6 * (mailto:DL-MPTFusionLinux@lsi.com)
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 62df8f9d4271..ca4e563c01dd 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -3,7 +3,7 @@
3 * controllers 3 * controllers
4 * 4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c 5 * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c
6 * Copyright (C) 2007-2013 LSI Corporation 6 * Copyright (C) 2007-2014 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com) 7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
index 8b2ac1869dcc..fa0567c96050 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
@@ -3,7 +3,7 @@
3 * controllers 3 * controllers
4 * 4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h 5 * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h
6 * Copyright (C) 2007-2013 LSI Corporation 6 * Copyright (C) 2007-2014 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com) 7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_debug.h b/drivers/scsi/mpt2sas/mpt2sas_debug.h
index a9021cbd6628..cc57ef31d0fe 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_debug.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_debug.h
@@ -2,7 +2,7 @@
2 * Logging Support for MPT (Message Passing Technology) based controllers 2 * Logging Support for MPT (Message Passing Technology) based controllers
3 * 3 *
4 * This code is based on drivers/scsi/mpt2sas/mpt2_debug.c 4 * This code is based on drivers/scsi/mpt2sas/mpt2_debug.c
5 * Copyright (C) 2007-2013 LSI Corporation 5 * Copyright (C) 2007-2014 LSI Corporation
6 * (mailto:DL-MPTFusionLinux@lsi.com) 6 * (mailto:DL-MPTFusionLinux@lsi.com)
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index dd461015813f..c80ed0482649 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2,7 +2,7 @@
2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers 2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers
3 * 3 *
4 * This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c 4 * This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c
5 * Copyright (C) 2007-2013 LSI Corporation 5 * Copyright (C) 2007-2014 LSI Corporation
6 * (mailto:DL-MPTFusionLinux@lsi.com) 6 * (mailto:DL-MPTFusionLinux@lsi.com)
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
@@ -55,6 +55,8 @@
55#include <linux/raid_class.h> 55#include <linux/raid_class.h>
56#include <linux/slab.h> 56#include <linux/slab.h>
57 57
58#include <asm/unaligned.h>
59
58#include "mpt2sas_base.h" 60#include "mpt2sas_base.h"
59 61
60MODULE_AUTHOR(MPT2SAS_AUTHOR); 62MODULE_AUTHOR(MPT2SAS_AUTHOR);
@@ -145,7 +147,7 @@ struct sense_info {
145}; 147};
146 148
147 149
148#define MPT2SAS_TURN_ON_FAULT_LED (0xFFFC) 150#define MPT2SAS_TURN_ON_PFA_LED (0xFFFC)
149#define MPT2SAS_PORT_ENABLE_COMPLETE (0xFFFD) 151#define MPT2SAS_PORT_ENABLE_COMPLETE (0xFFFD)
150#define MPT2SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF) 152#define MPT2SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
151/** 153/**
@@ -3858,85 +3860,46 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
3858 struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request, 3860 struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request,
3859 u16 smid) 3861 u16 smid)
3860{ 3862{
3861 u32 v_lba, p_lba, stripe_off, stripe_unit, column, io_size; 3863 sector_t v_lba, p_lba, stripe_off, column, io_size;
3862 u32 stripe_sz, stripe_exp; 3864 u32 stripe_sz, stripe_exp;
3863 u8 num_pds, *cdb_ptr, i; 3865 u8 num_pds, cmd = scmd->cmnd[0];
3864 u8 cdb0 = scmd->cmnd[0];
3865 u64 v_llba;
3866 3866
3867 /* 3867 if (cmd != READ_10 && cmd != WRITE_10 &&
3868 * Try Direct I/O to RAID memeber disks 3868 cmd != READ_16 && cmd != WRITE_16)
3869 */ 3869 return;
3870 if (cdb0 == READ_16 || cdb0 == READ_10 || 3870
3871 cdb0 == WRITE_16 || cdb0 == WRITE_10) { 3871 if (cmd == READ_10 || cmd == WRITE_10)
3872 cdb_ptr = mpi_request->CDB.CDB32; 3872 v_lba = get_unaligned_be32(&mpi_request->CDB.CDB32[2]);
3873 3873 else
3874 if ((cdb0 < READ_16) || !(cdb_ptr[2] | cdb_ptr[3] | cdb_ptr[4] 3874 v_lba = get_unaligned_be64(&mpi_request->CDB.CDB32[2]);
3875 | cdb_ptr[5])) { 3875
3876 io_size = scsi_bufflen(scmd) >> 3876 io_size = scsi_bufflen(scmd) >> raid_device->block_exponent;
3877 raid_device->block_exponent; 3877
3878 i = (cdb0 < READ_16) ? 2 : 6; 3878 if (v_lba + io_size - 1 > raid_device->max_lba)
3879 /* get virtual lba */ 3879 return;
3880 v_lba = be32_to_cpu(*(__be32 *)(&cdb_ptr[i])); 3880
3881 3881 stripe_sz = raid_device->stripe_sz;
3882 if (((u64)v_lba + (u64)io_size - 1) <= 3882 stripe_exp = raid_device->stripe_exponent;
3883 (u32)raid_device->max_lba) { 3883 stripe_off = v_lba & (stripe_sz - 1);
3884 stripe_sz = raid_device->stripe_sz; 3884
3885 stripe_exp = raid_device->stripe_exponent; 3885 /* Return unless IO falls within a stripe */
3886 stripe_off = v_lba & (stripe_sz - 1); 3886 if (stripe_off + io_size > stripe_sz)
3887 3887 return;
3888 /* Check whether IO falls within a stripe */ 3888
3889 if ((stripe_off + io_size) <= stripe_sz) { 3889 num_pds = raid_device->num_pds;
3890 num_pds = raid_device->num_pds; 3890 p_lba = v_lba >> stripe_exp;
3891 p_lba = v_lba >> stripe_exp; 3891 column = sector_div(p_lba, num_pds);
3892 stripe_unit = p_lba / num_pds; 3892 p_lba = (p_lba << stripe_exp) + stripe_off;
3893 column = p_lba % num_pds; 3893
3894 p_lba = (stripe_unit << stripe_exp) + 3894 mpi_request->DevHandle = cpu_to_le16(raid_device->pd_handle[column]);
3895 stripe_off; 3895
3896 mpi_request->DevHandle = 3896 if (cmd == READ_10 || cmd == WRITE_10)
3897 cpu_to_le16(raid_device-> 3897 put_unaligned_be32(lower_32_bits(p_lba),
3898 pd_handle[column]); 3898 &mpi_request->CDB.CDB32[2]);
3899 (*(__be32 *)(&cdb_ptr[i])) = 3899 else
3900 cpu_to_be32(p_lba); 3900 put_unaligned_be64(p_lba, &mpi_request->CDB.CDB32[2]);
3901 /* 3901
3902 * WD: To indicate this I/O is directI/O 3902 _scsih_scsi_direct_io_set(ioc, smid, 1);
3903 */
3904 _scsih_scsi_direct_io_set(ioc, smid, 1);
3905 }
3906 }
3907 } else {
3908 io_size = scsi_bufflen(scmd) >>
3909 raid_device->block_exponent;
3910 /* get virtual lba */
3911 v_llba = be64_to_cpu(*(__be64 *)(&cdb_ptr[2]));
3912
3913 if ((v_llba + (u64)io_size - 1) <=
3914 raid_device->max_lba) {
3915 stripe_sz = raid_device->stripe_sz;
3916 stripe_exp = raid_device->stripe_exponent;
3917 stripe_off = (u32) (v_llba & (stripe_sz - 1));
3918
3919 /* Check whether IO falls within a stripe */
3920 if ((stripe_off + io_size) <= stripe_sz) {
3921 num_pds = raid_device->num_pds;
3922 p_lba = (u32)(v_llba >> stripe_exp);
3923 stripe_unit = p_lba / num_pds;
3924 column = p_lba % num_pds;
3925 p_lba = (stripe_unit << stripe_exp) +
3926 stripe_off;
3927 mpi_request->DevHandle =
3928 cpu_to_le16(raid_device->
3929 pd_handle[column]);
3930 (*(__be64 *)(&cdb_ptr[2])) =
3931 cpu_to_be64((u64)p_lba);
3932 /*
3933 * WD: To indicate this I/O is directI/O
3934 */
3935 _scsih_scsi_direct_io_set(ioc, smid, 1);
3936 }
3937 }
3938 }
3939 }
3940} 3903}
3941 3904
3942/** 3905/**
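
The rewrite folds the 10-byte and 16-byte CDB branches into one path and switches the physical-LBA arithmetic to sector_div(), which divides a sector_t in place and returns the remainder (plain 64-bit '/' and '%' are not available on 32-bit kernels). A standalone restatement with one worked example; the geometry values are assumptions:

/* Illustrative only: the virtual-to-physical mapping above, without
 * sector_t. Worked example with stripe_exp = 6 (64-sector stripes),
 * num_pds = 4, v_lba = 1000:
 *   stripe_off = 1000 & 63          = 40
 *   stripe_no  = 1000 >> 6          = 15
 *   column     = 15 % 4             = 3 (member disk)
 *   p_lba      = ((15 / 4) << 6) + 40 = 232 on that disk.
 * sector_div(x, d) performs the / and % in one step on a sector_t. */
static unsigned long long example_map_lba(unsigned long long v_lba,
	unsigned int stripe_exp, unsigned int num_pds, unsigned int *column)
{
	unsigned long long stripe_off = v_lba & ((1ULL << stripe_exp) - 1);
	unsigned long long stripe_no = v_lba >> stripe_exp;

	*column = stripe_no % num_pds;
	return ((stripe_no / num_pds) << stripe_exp) + stripe_off;
}
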
@@ -4308,7 +4271,7 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4308#endif 4271#endif
4309 4272
4310/** 4273/**
4311 * _scsih_turn_on_fault_led - illuminate Fault LED 4274 * _scsih_turn_on_pfa_led - illuminate PFA LED
4312 * @ioc: per adapter object 4275 * @ioc: per adapter object
4313 * @handle: device handle 4276 * @handle: device handle
4314 * Context: process 4277 * Context: process
@@ -4316,10 +4279,15 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4316 * Return nothing. 4279 * Return nothing.
4317 */ 4280 */
4318static void 4281static void
4319_scsih_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle) 4282_scsih_turn_on_pfa_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
4320{ 4283{
4321 Mpi2SepReply_t mpi_reply; 4284 Mpi2SepReply_t mpi_reply;
4322 Mpi2SepRequest_t mpi_request; 4285 Mpi2SepRequest_t mpi_request;
4286 struct _sas_device *sas_device;
4287
4288 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
4289 if (!sas_device)
4290 return;
4323 4291
4324 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t)); 4292 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
4325 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR; 4293 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
@@ -4334,6 +4302,47 @@ _scsih_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
4334 __FILE__, __LINE__, __func__); 4302 __FILE__, __LINE__, __func__);
4335 return; 4303 return;
4336 } 4304 }
4305 sas_device->pfa_led_on = 1;
4306
4307
4308 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
4309 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
4310 "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
4311 ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
4312 le32_to_cpu(mpi_reply.IOCLogInfo)));
4313 return;
4314 }
4315}
4316
4317/**
4318 * _scsih_turn_off_pfa_led - turn off PFA LED
4319 * @ioc: per adapter object
 4320 * @sas_device: sas device whose PFA LED has to be turned off
4321 * Context: process
4322 *
4323 * Return nothing.
4324 */
4325static void
4326_scsih_turn_off_pfa_led(struct MPT2SAS_ADAPTER *ioc,
4327 struct _sas_device *sas_device)
4328{
4329 Mpi2SepReply_t mpi_reply;
4330 Mpi2SepRequest_t mpi_request;
4331
4332 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
4333 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
4334 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
4335 mpi_request.SlotStatus = 0;
4336 mpi_request.Slot = cpu_to_le16(sas_device->slot);
4337 mpi_request.DevHandle = 0;
4338 mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
4339 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
4340 if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
4341 &mpi_request)) != 0) {
4342 printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name,
4343 __FILE__, __LINE__, __func__);
4344 return;
4345 }
4337 4346
4338 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { 4347 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
4339 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "enclosure_processor: " 4348 dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "enclosure_processor: "
@@ -4345,7 +4354,7 @@ _scsih_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
4345} 4354}
4346 4355
4347/** 4356/**
4348 * _scsih_send_event_to_turn_on_fault_led - fire delayed event 4357 * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
4349 * @ioc: per adapter object 4358 * @ioc: per adapter object
4350 * @handle: device handle 4359 * @handle: device handle
4351 * Context: interrupt. 4360 * Context: interrupt.
@@ -4353,14 +4362,14 @@ _scsih_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
  * Return nothing.
  */
 static void
-_scsih_send_event_to_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+_scsih_send_event_to_turn_on_pfa_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
 {
     struct fw_event_work *fw_event;
 
     fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
     if (!fw_event)
         return;
-    fw_event->event = MPT2SAS_TURN_ON_FAULT_LED;
+    fw_event->event = MPT2SAS_TURN_ON_PFA_LED;
     fw_event->device_handle = handle;
     fw_event->ioc = ioc;
     _scsih_fw_event_add(ioc, fw_event);
@@ -4404,7 +4413,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
     spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
     if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
-        _scsih_send_event_to_turn_on_fault_led(ioc, handle);
+        _scsih_send_event_to_turn_on_pfa_led(ioc, handle);
 
     /* insert into event log */
     sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
@@ -5325,6 +5334,12 @@ _scsih_remove_device(struct MPT2SAS_ADAPTER *ioc,
 {
     struct MPT2SAS_TARGET *sas_target_priv_data;
 
+    if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
+        (sas_device->pfa_led_on)) {
+        _scsih_turn_off_pfa_led(ioc, sas_device);
+        sas_device->pfa_led_on = 0;
+    }
+
     dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: "
         "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
         sas_device->handle, (unsigned long long)
@@ -7441,8 +7456,8 @@ _firmware_event_work(struct work_struct *work)
         dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "port enable: complete "
             "from worker thread\n", ioc->name));
         break;
-    case MPT2SAS_TURN_ON_FAULT_LED:
-        _scsih_turn_on_fault_led(ioc, fw_event->device_handle);
+    case MPT2SAS_TURN_ON_PFA_LED:
+        _scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
         break;
     case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
         _scsih_sas_topology_change_event(ioc, fw_event);
@@ -8132,6 +8147,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
     struct MPT2SAS_ADAPTER *ioc;
     struct Scsi_Host *shost;
+    int rv;
 
     shost = scsi_host_alloc(&scsih_driver_template,
         sizeof(struct MPT2SAS_ADAPTER));
@@ -8227,6 +8243,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
     if (!ioc->firmware_event_thread) {
         printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
             ioc->name, __FILE__, __LINE__, __func__);
+        rv = -ENODEV;
         goto out_thread_fail;
     }
 
@@ -8234,6 +8251,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
     if ((mpt2sas_base_attach(ioc))) {
         printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
             ioc->name, __FILE__, __LINE__, __func__);
+        rv = -ENODEV;
         goto out_attach_fail;
     }
 
@@ -8251,7 +8269,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
     } else
         ioc->hide_drives = 0;
 
-    if ((scsi_add_host(shost, &pdev->dev))) {
+    rv = scsi_add_host(shost, &pdev->dev);
+    if (rv) {
         printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
             ioc->name, __FILE__, __LINE__, __func__);
         goto out_add_shost_fail;
@@ -8268,7 +8287,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  out_thread_fail:
     list_del(&ioc->list);
     scsi_host_put(shost);
-    return -ENODEV;
+    return rv;
 }
 
 #ifdef CONFIG_PM
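
The mpt2sas_scsih.c hunks above do three related things: they rename the IBM-specific "fault LED" handling to PFA (Predicted Failure Analysis) LED handling, record the LED state in the new sas_device->pfa_led_on flag so that _scsih_remove_device() can switch the LED off again when the drive is removed, and make _scsih_probe() propagate the real error code (rv) instead of a blanket -ENODEV. A minimal userspace sketch of the LED bookkeeping follows; the struct and the send_sep_write_status() helper are simplified stand-ins for the driver's SCSI Enclosure Processor request, not the real kernel API.

#include <stdio.h>

/* Simplified stand-ins for the driver's structures; not the real API. */
struct sas_device {
    unsigned short slot;
    unsigned short enclosure_handle;
    unsigned char pfa_led_on;   /* mirrors the new _sas_device field */
};

/* Models mpt2sas_base_scsi_enclosure_processor(): returns 0 on success. */
static int send_sep_write_status(struct sas_device *dev, int led_on)
{
    printf("SEP WRITE_STATUS slot=%u enclosure=0x%04x led=%d\n",
           (unsigned)dev->slot, (unsigned)dev->enclosure_handle, led_on);
    return 0;
}

static void turn_on_pfa_led(struct sas_device *dev)
{
    if (send_sep_write_status(dev, 1) != 0)
        return;            /* request failed: leave the flag clear */
    dev->pfa_led_on = 1;   /* remember state so removal can undo it */
}

static void remove_device(struct sas_device *dev)
{
    /* The patch's key point: clear the LED before tearing down. */
    if (dev->pfa_led_on) {
        send_sep_write_status(dev, 0);
        dev->pfa_led_on = 0;
    }
}

int main(void)
{
    struct sas_device dev = { .slot = 3, .enclosure_handle = 0x0011 };

    turn_on_pfa_led(&dev);  /* SMART predicted-fault event path */
    remove_device(&dev);    /* hot removal turns the LED back off */
    return 0;
}
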
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 410f4a3e8888..0d1d06488a28 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -2,7 +2,7 @@
  * SAS Transport Layer for MPT (Message Passing Technology) based controllers
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_transport.c
- * Copyright (C) 2007-2013 LSI Corporation
+ * Copyright (C) 2007-2014 LSI Corporation
  * (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt3sas/Kconfig b/drivers/scsi/mpt3sas/Kconfig
index d53e1b02e893..4d235dd741bf 100644
--- a/drivers/scsi/mpt3sas/Kconfig
+++ b/drivers/scsi/mpt3sas/Kconfig
@@ -2,7 +2,7 @@
 # Kernel configuration file for the MPT3SAS
 #
 # This code is based on drivers/scsi/mpt3sas/Kconfig
-# Copyright (C) 2012-2013 LSI Corporation
+# Copyright (C) 2012-2014 LSI Corporation
 # (mailto:DL-MPTFusionLinux@lsi.com)
 
 # This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index 20da8f907c00..c34c1157907b 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
  *
  *
  * Name: mpi2.h
@@ -8,7 +8,7 @@
  * scatter/gather formats.
  * Creation Date: June 21, 2006
  *
- * mpi2.h Version: 02.00.29
+ * mpi2.h Version: 02.00.31
  *
  * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
  * prefix are for use only on MPI v2.5 products, and must not be used
@@ -86,6 +86,8 @@
  * 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT.
  * 12-20-12 02.00.29 Bumped MPI2_HEADER_VERSION_UNIT.
  * Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET.
+ * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT.
  * --------------------------------------------------------------------------
  */
 
@@ -119,7 +121,7 @@
 #define MPI2_VERSION_02_05 (0x0205)
 
 /*Unit and Dev versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x1D)
+#define MPI2_HEADER_VERSION_UNIT (0x1F)
 #define MPI2_HEADER_VERSION_DEV (0x00)
 #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
 #define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 889aa7067899..e261a3153bb3 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -1,12 +1,12 @@
 /*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
  *
  *
  * Name: mpi2_cnfg.h
  * Title: MPI Configuration messages and pages
  * Creation Date: November 10, 2006
  *
- * mpi2_cnfg.h Version: 02.00.24
+ * mpi2_cnfg.h Version: 02.00.26
  *
  * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
  * prefix are for use only on MPI v2.5 products, and must not be used
@@ -160,6 +160,11 @@
  * 12-20-12 02.00.24 Marked MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION as
  * obsolete for MPI v2.5 and later.
  * Added some defines for 12G SAS speeds.
+ * 04-09-13 02.00.25 Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK.
+ * Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to
+ * match the specification.
+ * 08-19-13 02.00.26 Added reserved words to MPI2_CONFIG_PAGE_IO_UNIT_7 for
+ * future use.
  * --------------------------------------------------------------------------
  */
 
@@ -792,6 +797,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 {
 #define MPI2_IOUNITPAGE1_PAGEVERSION (0x04)
 
 /*IO Unit Page 1 Flags defines */
+#define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK (0x00004000)
 #define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE (0x00002000)
 #define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH (0x00001000)
 #define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800)
@@ -870,7 +876,7 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_5 {
 #define MPI2_IOUNITPAGE5_PAGEVERSION (0x00)
 
 /*defines for IO Unit Page 5 DmaEngineCapabilities field */
-#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS (0xFF00)
+#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS (0xFFFF0000)
 #define MPI2_IOUNITPAGE5_DMA_CAP_SHIFT_MAX_REQUESTS (16)
 
 #define MPI2_IOUNITPAGE5_DMA_CAP_EEDP (0x0008)
@@ -920,11 +926,15 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
     U8
         BoardTemperatureUnits; /*0x16 */
     U8 Reserved3; /*0x17 */
+    U32 Reserved4; /* 0x18 */
+    U32 Reserved5; /* 0x1C */
+    U32 Reserved6; /* 0x20 */
+    U32 Reserved7; /* 0x24 */
 } MPI2_CONFIG_PAGE_IO_UNIT_7,
     *PTR_MPI2_CONFIG_PAGE_IO_UNIT_7,
     Mpi2IOUnitPage7_t, *pMpi2IOUnitPage7_t;
 
-#define MPI2_IOUNITPAGE7_PAGEVERSION (0x02)
+#define MPI2_IOUNITPAGE7_PAGEVERSION (0x04)
 
 /*defines for IO Unit Page 7 CurrentPowerMode and PreviousPowerMode fields */
 #define MPI25_IOUNITPAGE7_PM_INIT_MASK (0xC0)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
index f7928bf66478..068c98efd742 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
@@ -1,12 +1,12 @@
 /*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
  *
  *
  * Name: mpi2_init.h
  * Title: MPI SCSI initiator mode messages and structures
  * Creation Date: June 23, 2006
  *
- * mpi2_init.h Version: 02.00.14
+ * mpi2_init.h Version: 02.00.15
  *
  * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
  * prefix are for use only on MPI v2.5 products, and must not be used
@@ -44,6 +44,8 @@
  * Priority to match SAM-4.
  * Added EEDPErrorOffset to MPI2_SCSI_IO_REPLY.
  * 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION.
+ * 04-09-13 02.00.15 Added SCSIStatusQualifier field to MPI2_SCSI_IO_REPLY,
+ * replacing the Reserved4 field.
  * --------------------------------------------------------------------------
  */
 
49 51
@@ -347,7 +349,7 @@ typedef struct _MPI2_SCSI_IO_REPLY {
347 U32 SenseCount; /*0x18 */ 349 U32 SenseCount; /*0x18 */
348 U32 ResponseInfo; /*0x1C */ 350 U32 ResponseInfo; /*0x1C */
349 U16 TaskTag; /*0x20 */ 351 U16 TaskTag; /*0x20 */
350 U16 Reserved4; /*0x22 */ 352 U16 SCSIStatusQualifier; /* 0x22 */
351 U32 BidirectionalTransferCount; /*0x24 */ 353 U32 BidirectionalTransferCount; /*0x24 */
352 U32 EEDPErrorOffset; /*0x28 *//*MPI 2.5 only; Reserved in MPI 2.0*/ 354 U32 EEDPErrorOffset; /*0x28 *//*MPI 2.5 only; Reserved in MPI 2.0*/
353 U32 Reserved6; /*0x2C */ 355 U32 Reserved6; /*0x2C */
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index e2bb82143720..490830957806 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -1,12 +1,12 @@
 /*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
  *
  *
  * Name: mpi2_ioc.h
  * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
  * Creation Date: October 11, 2006
  *
- * mpi2_ioc.h Version: 02.00.22
+ * mpi2_ioc.h Version: 02.00.23
  *
  * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
  * prefix are for use only on MPI v2.5 products, and must not be used
@@ -127,6 +127,11 @@
  * 07-26-12 02.00.22 Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE.
  * Added ElapsedSeconds field to
  * MPI2_EVENT_DATA_IR_OPERATION_STATUS.
+ * 08-19-13 02.00.23 For IOCInit, added MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE
+ * and MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY.
+ * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE.
+ * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY.
+ * Added Encrypted Hash Extended Image.
  * --------------------------------------------------------------------------
  */
 
@@ -182,6 +187,10 @@ typedef struct _MPI2_IOC_INIT_REQUEST {
 #define MPI2_WHOINIT_HOST_DRIVER (0x04)
 #define MPI2_WHOINIT_MANUFACTURER (0x05)
 
+/* MsgFlags */
+#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01)
+
+
 /*MsgVersion */
 #define MPI2_IOCINIT_MSGVERSION_MAJOR_MASK (0xFF00)
 #define MPI2_IOCINIT_MSGVERSION_MAJOR_SHIFT (8)
@@ -194,9 +203,19 @@ typedef struct _MPI2_IOC_INIT_REQUEST {
 #define MPI2_IOCINIT_HDRVERSION_DEV_MASK (0x00FF)
 #define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT (0)
 
-/*minimum depth for the Reply Descriptor Post Queue */
+/*minimum depth for a Reply Descriptor Post Queue */
 #define MPI2_RDPQ_DEPTH_MIN (16)
 
+/* Reply Descriptor Post Queue Array Entry */
+typedef struct _MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY {
+    U64 RDPQBaseAddress; /* 0x00 */
+    U32 Reserved1; /* 0x08 */
+    U32 Reserved2; /* 0x0C */
+} MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY,
+*PTR_MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY,
+Mpi2IOCInitRDPQArrayEntry, *pMpi2IOCInitRDPQArrayEntry;
+
+
 /*IOCInit Reply message */
 typedef struct _MPI2_IOC_INIT_REPLY {
     U8 WhoInit; /*0x00 */
@@ -306,6 +325,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY {
 /*ProductID field uses MPI2_FW_HEADER_PID_ */
 
 /*IOCCapabilities */
+#define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000)
 #define MPI25_IOCFACTS_CAPABILITY_FAST_PATH_CAPABLE (0x00020000)
 #define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY (0x00010000)
 #define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000)
@@ -1140,6 +1160,7 @@ typedef struct _MPI2_FW_DOWNLOAD_REQUEST {
 #define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09)
 #define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A)
 #define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
+#define MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY (0x0C)
 #define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
 
 /*MPI v2.0 FWDownload TransactionContext Element */
@@ -1404,6 +1425,7 @@ typedef struct _MPI2_EXT_IMAGE_HEADER {
 #define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06)
 #define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
 #define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08)
+#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09)
 #define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80)
 #define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF)
 
@@ -1560,6 +1582,42 @@ typedef struct _MPI2_INIT_IMAGE_FOOTER {
 /*defines for the ResetVector field */
 #define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14)
 
+
+/* Encrypted Hash Extended Image Data */
+
+typedef struct _MPI25_ENCRYPTED_HASH_ENTRY {
+    U8 HashImageType; /* 0x00 */
+    U8 HashAlgorithm; /* 0x01 */
+    U8 EncryptionAlgorithm; /* 0x02 */
+    U8 Reserved1; /* 0x03 */
+    U32 Reserved2; /* 0x04 */
+    U32 EncryptedHash[1]; /* 0x08 */ /* variable length */
+} MPI25_ENCRYPTED_HASH_ENTRY, *PTR_MPI25_ENCRYPTED_HASH_ENTRY,
+Mpi25EncryptedHashEntry_t, *pMpi25EncryptedHashEntry_t;
+
+/* values for HashImageType */
+#define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00)
+#define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01)
+
+/* values for HashAlgorithm */
+#define MPI25_HASH_ALGORITHM_UNUSED (0x00)
+#define MPI25_HASH_ALGORITHM_SHA256 (0x01)
+
+/* values for EncryptionAlgorithm */
+#define MPI25_ENCRYPTION_ALG_UNUSED (0x00)
+#define MPI25_ENCRYPTION_ALG_RSA256 (0x01)
+
+typedef struct _MPI25_ENCRYPTED_HASH_DATA {
+    U8 ImageVersion; /* 0x00 */
+    U8 NumHash; /* 0x01 */
+    U16 Reserved1; /* 0x02 */
+    U32 Reserved2; /* 0x04 */
+    MPI25_ENCRYPTED_HASH_ENTRY EncryptedHashEntry[1]; /* 0x08 */
+} MPI25_ENCRYPTED_HASH_DATA, *PTR_MPI25_ENCRYPTED_HASH_DATA,
+Mpi25EncryptedHashData_t, *pMpi25EncryptedHashData_t;
+
+
+
 /****************************************************************************
 * PowerManagementControl message
 ****************************************************************************/
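
The mpi2_ioc.h additions above define RDPQ (Reply Descriptor Post Queue) array mode: when the host sets MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE in IOCInit, ReplyDescriptorPostQueueAddress no longer points at one contiguous queue region but at a table of 16-byte Mpi2IOCInitRDPQArrayEntry elements, one 64-bit queue base address per reply queue. A host-endian sketch of building such a table follows (the real driver stores each address with cpu_to_le64 into a DMA-coherent buffer; the plain malloc'd array and fake addresses here are illustration only):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Mirrors Mpi2IOCInitRDPQArrayEntry: 16 bytes per reply queue. */
struct rdpq_array_entry {
    uint64_t rdpq_base_address;   /* DMA address of one queue */
    uint32_t reserved1;
    uint32_t reserved2;
};

int main(void)
{
    const int reply_queue_count = 4;
    uint64_t queue_dma[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };
    int i;

    struct rdpq_array_entry *arr =
        calloc(reply_queue_count, sizeof(*arr));
    if (!arr)
        return 1;

    /* One entry per queue; the IOC walks this table at IOCInit. */
    for (i = 0; i < reply_queue_count; i++)
        arr[i].rdpq_base_address = queue_dma[i];

    for (i = 0; i < reply_queue_count; i++)
        printf("queue %d base 0x%llx\n", i,
               (unsigned long long)arr[i].rdpq_base_address);
    free(arr);
    return 0;
}
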
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
index 71765236afef..13d93ca029d5 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
@@ -1,12 +1,12 @@
 /*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
  *
  *
  * Name: mpi2_raid.h
  * Title: MPI Integrated RAID messages and structures
  * Creation Date: April 26, 2007
  *
- * mpi2_raid.h Version: 02.00.09
+ * mpi2_raid.h Version: 02.00.10
  *
  * Version History
  * ---------------
@@ -30,6 +30,7 @@
  * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN.
  * 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR.
  * Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define.
+ * 04-17-13 02.00.10 Added MPI25_RAID_ACTION_ADATA_ALLOW_PI.
  * --------------------------------------------------------------------------
  */
 
@@ -46,6 +47,9 @@
 * RAID Action messages
 ****************************************************************************/
 
+/* ActionDataWord defines for use with MPI2_RAID_ACTION_CREATE_VOLUME action */
+#define MPI25_RAID_ACTION_ADATA_ALLOW_PI (0x80000000)
+
 /*ActionDataWord defines for use with MPI2_RAID_ACTION_DELETE_VOLUME action */
 #define MPI2_RAID_ACTION_ADATA_KEEP_LBA0 (0x00000000)
 #define MPI2_RAID_ACTION_ADATA_ZERO_LBA0 (0x00000001)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
index cba046f6a4b4..156e30543a2f 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
@@ -1,12 +1,12 @@
 /*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
  *
  *
  * Name: mpi2_sas.h
  * Title: MPI Serial Attached SCSI structures and definitions
  * Creation Date: February 9, 2007
  *
- * mpi2_sas.h Version: 02.00.07
+ * mpi2_sas.h Version: 02.00.08
  *
  * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
  * prefix are for use only on MPI v2.5 products, and must not be used
@@ -30,6 +30,8 @@
  * 11-18-11 02.00.06 Incorporating additions for MPI v2.5.
  * 07-10-12 02.00.07 Added MPI2_SATA_PT_SGE_UNION for use in the SATA
  * Passthrough Request message.
+ * 08-19-13 02.00.08 Made MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL obsolete
+ * for anything newer than MPI v2.0.
  * --------------------------------------------------------------------------
  */
 
@@ -251,7 +253,7 @@ typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST {
 #define MPI2_SAS_OP_PHY_CLEAR_ERROR_LOG (0x08)
 #define MPI2_SAS_OP_SEND_PRIMITIVE (0x0A)
 #define MPI2_SAS_OP_FORCE_FULL_DISCOVERY (0x0B)
-#define MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C)
+#define MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C) /* MPI v2.0 only */
 #define MPI2_SAS_OP_REMOVE_DEVICE (0x0D)
 #define MPI2_SAS_OP_LOOKUP_MAPPING (0x0E)
 #define MPI2_SAS_OP_SET_IOC_PARAMETER (0x0F)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
index 34e9a7ba76b0..904910d8a737 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
@@ -1,12 +1,12 @@
 /*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
  *
  *
  * Name: mpi2_tool.h
  * Title: MPI diagnostic tool structures and definitions
  * Creation Date: March 26, 2007
  *
- * mpi2_tool.h Version: 02.00.10
+ * mpi2_tool.h Version: 02.00.11
  *
  * Version History
  * ---------------
@@ -32,6 +32,7 @@
  * message.
  * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that
  * it uses MPI Chain SGE as well as MPI Simple SGE.
+ * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info.
  * --------------------------------------------------------------------------
  */
 
@@ -51,6 +52,7 @@
 #define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03)
 #define MPI2_TOOLBOX_BEACON_TOOL (0x05)
 #define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06)
+#define MPI2_TOOLBOX_TEXT_DISPLAY_TOOL (0x07)
 
 /****************************************************************************
 * Toolbox reply
@@ -331,6 +333,45 @@ typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY {
     Mpi2ToolboxDiagnosticCliReply_t,
     *pMpi2ToolboxDiagnosticCliReply_t;
 
+
+/****************************************************************************
+* Toolbox Console Text Display Tool
+****************************************************************************/
+
+/* Toolbox Console Text Display Tool request message */
+typedef struct _MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST {
+    U8 Tool; /* 0x00 */
+    U8 Reserved1; /* 0x01 */
+    U8 ChainOffset; /* 0x02 */
+    U8 Function; /* 0x03 */
+    U16 Reserved2; /* 0x04 */
+    U8 Reserved3; /* 0x06 */
+    U8 MsgFlags; /* 0x07 */
+    U8 VP_ID; /* 0x08 */
+    U8 VF_ID; /* 0x09 */
+    U16 Reserved4; /* 0x0A */
+    U8 Console; /* 0x0C */
+    U8 Flags; /* 0x0D */
+    U16 Reserved6; /* 0x0E */
+    U8 TextToDisplay[4]; /* 0x10 */
+} MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST,
+*PTR_MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST,
+Mpi2ToolboxTextDisplayRequest_t,
+*pMpi2ToolboxTextDisplayRequest_t;
+
+/* defines for the Console field */
+#define MPI2_TOOLBOX_CONSOLE_TYPE_MASK (0xF0)
+#define MPI2_TOOLBOX_CONSOLE_TYPE_DEFAULT (0x00)
+#define MPI2_TOOLBOX_CONSOLE_TYPE_UART (0x10)
+#define MPI2_TOOLBOX_CONSOLE_TYPE_ETHERNET (0x20)
+
+#define MPI2_TOOLBOX_CONSOLE_NUMBER_MASK (0x0F)
+
+/* defines for the Flags field */
+#define MPI2_TOOLBOX_CONSOLE_FLAG_TIMESTAMP (0x01)
+
+
+
 /*****************************************************************************
 *
 * Diagnostic Buffer Messages
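
The new Text Display Tool above lets the host print a short string on one of the controller's consoles; the Console byte packs the console type in the high nibble and the console number in the low nibble, and the Flags byte can request a timestamp. A small sketch of packing those fields (the values are copied from the defines above; the actual toolbox request/reply plumbing is omitted):

#include <stdint.h>
#include <stdio.h>

/* Values copied from the mpi2_tool.h additions above. */
#define CONSOLE_TYPE_MASK    0xF0
#define CONSOLE_TYPE_UART    0x10
#define CONSOLE_NUMBER_MASK  0x0F
#define FLAG_TIMESTAMP       0x01

int main(void)
{
    /* Select UART console 2, timestamped output. */
    uint8_t console = CONSOLE_TYPE_UART | (2 & CONSOLE_NUMBER_MASK);
    uint8_t flags = FLAG_TIMESTAMP;

    printf("Console byte 0x%02x (type 0x%02x, number %u), flags 0x%02x\n",
           (unsigned)console, (unsigned)(console & CONSOLE_TYPE_MASK),
           (unsigned)(console & CONSOLE_NUMBER_MASK), (unsigned)flags);
    return 0;
}
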
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_type.h b/drivers/scsi/mpt3sas/mpi/mpi2_type.h
index ba1fed50966e..99ab093602e8 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_type.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_type.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
  *
  *
  * Name: mpi2_type.h
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 93ce2b2baa41..1560115079c7 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -3,7 +3,7 @@
  * for access to MPT (Message Passing Technology) firmware.
  *
  * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
- * Copyright (C) 2012-2013 LSI Corporation
+ * Copyright (C) 2012-2014 LSI Corporation
  * (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -91,6 +91,8 @@ static int mpt3sas_fwfault_debug;
 MODULE_PARM_DESC(mpt3sas_fwfault_debug,
     " enable detection of firmware fault and halt firmware - (default=0)");
 
+static int
+_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag);
 
 /**
  * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
@@ -1482,17 +1484,22 @@ static int
 _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
 {
     struct sysinfo s;
-    char *desc = NULL;
+    u64 consistent_dma_mask;
+
+    if (ioc->dma_mask)
+        consistent_dma_mask = DMA_BIT_MASK(64);
+    else
+        consistent_dma_mask = DMA_BIT_MASK(32);
 
     if (sizeof(dma_addr_t) > 4) {
         const uint64_t required_mask =
             dma_get_required_mask(&pdev->dev);
         if ((required_mask > DMA_BIT_MASK(32)) &&
             !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
-            !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+            !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
             ioc->base_add_sg_single = &_base_add_sg_single_64;
             ioc->sge_size = sizeof(Mpi2SGESimple64_t);
-            desc = "64";
+            ioc->dma_mask = 64;
             goto out;
         }
     }
@@ -1501,19 +1508,30 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
     && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
         ioc->base_add_sg_single = &_base_add_sg_single_32;
         ioc->sge_size = sizeof(Mpi2SGESimple32_t);
-        desc = "32";
+        ioc->dma_mask = 32;
     } else
         return -ENODEV;
 
  out:
     si_meminfo(&s);
     pr_info(MPT3SAS_FMT
-        "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
-        ioc->name, desc, convert_to_kb(s.totalram));
+        "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
+        ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));
 
     return 0;
 }
 
+static int
+_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
+    struct pci_dev *pdev)
+{
+    if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+        if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+            return -ENODEV;
+    }
+    return 0;
+}
+
 /**
  * _base_check_enable_msix - checks MSIX capabable.
  * @ioc: per adapter object
@@ -1698,11 +1716,15 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
1698 ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count, 1716 ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
1699 ioc->cpu_count, max_msix_vectors); 1717 ioc->cpu_count, max_msix_vectors);
1700 1718
1719 if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
1720 max_msix_vectors = 8;
1721
1701 if (max_msix_vectors > 0) { 1722 if (max_msix_vectors > 0) {
1702 ioc->reply_queue_count = min_t(int, max_msix_vectors, 1723 ioc->reply_queue_count = min_t(int, max_msix_vectors,
1703 ioc->reply_queue_count); 1724 ioc->reply_queue_count);
1704 ioc->msix_vector_count = ioc->reply_queue_count; 1725 ioc->msix_vector_count = ioc->reply_queue_count;
1705 } 1726 } else if (max_msix_vectors == 0)
1727 goto try_ioapic;
1706 1728
1707 entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry), 1729 entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
1708 GFP_KERNEL); 1730 GFP_KERNEL);
@@ -1716,10 +1738,10 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
     for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
         a->entry = i;
 
-    r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count);
+    r = pci_enable_msix_exact(ioc->pdev, entries, ioc->reply_queue_count);
     if (r) {
         dfailprintk(ioc, pr_info(MPT3SAS_FMT
-            "pci_enable_msix failed (r=%d) !!!\n",
+            "pci_enable_msix_exact failed (r=%d) !!!\n",
             ioc->name, r));
         kfree(entries);
         goto try_ioapic;
@@ -1742,6 +1764,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
 /* failback to io_apic interrupt routing */
  try_ioapic:
 
+    ioc->reply_queue_count = 1;
     r = _base_request_irq(ioc, 0, ioc->pdev->irq);
 
     return r;
@@ -1821,6 +1844,16 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
     }
 
     _base_mask_interrupts(ioc);
+
+    r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+    if (r)
+        goto out_fail;
+
+    if (!ioc->rdpq_array_enable_assigned) {
+        ioc->rdpq_array_enable = ioc->rdpq_array_capable;
+        ioc->rdpq_array_enable_assigned = 1;
+    }
+
     r = _base_enable_msix(ioc);
     if (r)
         goto out_fail;
@@ -2185,6 +2218,53 @@ mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
     &ioc->scsi_lookup_lock);
 }
 
+/**
+ * _base_display_intel_branding - Display branding string
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_display_intel_branding(struct MPT3SAS_ADAPTER *ioc)
+{
+    if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
+        return;
+
+    switch (ioc->pdev->device) {
+    case MPI25_MFGPAGE_DEVID_SAS3008:
+        switch (ioc->pdev->subsystem_device) {
+        case MPT3SAS_INTEL_RMS3JC080_SSDID:
+            pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+                MPT3SAS_INTEL_RMS3JC080_BRANDING);
+            break;
+
+        case MPT3SAS_INTEL_RS3GC008_SSDID:
+            pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+                MPT3SAS_INTEL_RS3GC008_BRANDING);
+            break;
+        case MPT3SAS_INTEL_RS3FC044_SSDID:
+            pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+                MPT3SAS_INTEL_RS3FC044_BRANDING);
+            break;
+        case MPT3SAS_INTEL_RS3UC080_SSDID:
+            pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+                MPT3SAS_INTEL_RS3UC080_BRANDING);
+            break;
+        default:
+            pr_info(MPT3SAS_FMT
+                "Intel(R) Controller: Subsystem ID: 0x%X\n",
+                ioc->name, ioc->pdev->subsystem_device);
+            break;
+        }
+        break;
+    default:
+        pr_info(MPT3SAS_FMT
+            "Intel(R) Controller: Subsystem ID: 0x%X\n",
+            ioc->name, ioc->pdev->subsystem_device);
+        break;
+    }
+}
+
 
 
 /**
@@ -2216,6 +2296,8 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
     (bios_version & 0x0000FF00) >> 8,
     bios_version & 0x000000FF);
 
+    _base_display_intel_branding(ioc);
+
     pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
 
     if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
@@ -2447,7 +2529,8 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
 static void
 _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 {
-    int i;
+    int i = 0;
+    struct reply_post_struct *rps;
 
     dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
         __func__));
@@ -2492,15 +2575,25 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
         ioc->reply_free = NULL;
     }
 
-    if (ioc->reply_post_free) {
-        pci_pool_free(ioc->reply_post_free_dma_pool,
-            ioc->reply_post_free, ioc->reply_post_free_dma);
+    if (ioc->reply_post) {
+        do {
+            rps = &ioc->reply_post[i];
+            if (rps->reply_post_free) {
+                pci_pool_free(
+                    ioc->reply_post_free_dma_pool,
+                    rps->reply_post_free,
+                    rps->reply_post_free_dma);
+                dexitprintk(ioc, pr_info(MPT3SAS_FMT
+                    "reply_post_free_pool(0x%p): free\n",
+                    ioc->name, rps->reply_post_free));
+                rps->reply_post_free = NULL;
+            }
+        } while (ioc->rdpq_array_enable &&
+            (++i < ioc->reply_queue_count));
+
         if (ioc->reply_post_free_dma_pool)
             pci_pool_destroy(ioc->reply_post_free_dma_pool);
-        dexitprintk(ioc, pr_info(MPT3SAS_FMT
-            "reply_post_free_pool(0x%p): free\n", ioc->name,
-            ioc->reply_post_free));
-        ioc->reply_post_free = NULL;
+        kfree(ioc->reply_post);
     }
 
     if (ioc->config_page) {
@@ -2647,6 +2740,65 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
     ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
     ioc->chains_needed_per_io));
 
+    /* reply post queue, 16 byte align */
+    reply_post_free_sz = ioc->reply_post_queue_depth *
+        sizeof(Mpi2DefaultReplyDescriptor_t);
+
+    sz = reply_post_free_sz;
+    if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
+        sz *= ioc->reply_queue_count;
+
+    ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
+        (ioc->reply_queue_count):1,
+        sizeof(struct reply_post_struct), GFP_KERNEL);
+
+    if (!ioc->reply_post) {
+        pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n",
+            ioc->name);
+        goto out;
+    }
+    ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
+        ioc->pdev, sz, 16, 0);
+    if (!ioc->reply_post_free_dma_pool) {
+        pr_err(MPT3SAS_FMT
+            "reply_post_free pool: pci_pool_create failed\n",
+            ioc->name);
+        goto out;
+    }
+    i = 0;
+    do {
+        ioc->reply_post[i].reply_post_free =
+            pci_pool_alloc(ioc->reply_post_free_dma_pool,
+            GFP_KERNEL,
+            &ioc->reply_post[i].reply_post_free_dma);
+        if (!ioc->reply_post[i].reply_post_free) {
+            pr_err(MPT3SAS_FMT
+                "reply_post_free pool: pci_pool_alloc failed\n",
+                ioc->name);
+            goto out;
+        }
+        memset(ioc->reply_post[i].reply_post_free, 0, sz);
+        dinitprintk(ioc, pr_info(MPT3SAS_FMT
+            "reply post free pool (0x%p): depth(%d),"
+            "element_size(%d), pool_size(%d kB)\n", ioc->name,
+            ioc->reply_post[i].reply_post_free,
+            ioc->reply_post_queue_depth, 8, sz/1024));
+        dinitprintk(ioc, pr_info(MPT3SAS_FMT
+            "reply_post_free_dma = (0x%llx)\n", ioc->name,
+            (unsigned long long)
+            ioc->reply_post[i].reply_post_free_dma));
+        total_sz += sz;
+    } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
+
+    if (ioc->dma_mask == 64) {
+        if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
+            pr_warn(MPT3SAS_FMT
+                "no suitable consistent DMA mask for %s\n",
+                ioc->name, pci_name(ioc->pdev));
+            goto out;
+        }
+    }
+
     ioc->scsiio_depth = ioc->hba_queue_depth -
         ioc->hi_priority_depth - ioc->internal_depth;
 
@@ -2861,40 +3013,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
     ioc->name, (unsigned long long)ioc->reply_free_dma));
     total_sz += sz;
 
-    /* reply post queue, 16 byte align */
-    reply_post_free_sz = ioc->reply_post_queue_depth *
-        sizeof(Mpi2DefaultReplyDescriptor_t);
-    if (_base_is_controller_msix_enabled(ioc))
-        sz = reply_post_free_sz * ioc->reply_queue_count;
-    else
-        sz = reply_post_free_sz;
-    ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
-        ioc->pdev, sz, 16, 0);
-    if (!ioc->reply_post_free_dma_pool) {
-        pr_err(MPT3SAS_FMT
-            "reply_post_free pool: pci_pool_create failed\n",
-            ioc->name);
-        goto out;
-    }
-    ioc->reply_post_free = pci_pool_alloc(ioc->reply_post_free_dma_pool ,
-        GFP_KERNEL, &ioc->reply_post_free_dma);
-    if (!ioc->reply_post_free) {
-        pr_err(MPT3SAS_FMT
-            "reply_post_free pool: pci_pool_alloc failed\n",
-            ioc->name);
-        goto out;
-    }
-    memset(ioc->reply_post_free, 0, sz);
-    dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply post free pool" \
-        "(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
-        ioc->name, ioc->reply_post_free, ioc->reply_post_queue_depth, 8,
-        sz/1024));
-    dinitprintk(ioc, pr_info(MPT3SAS_FMT
-        "reply_post_free_dma = (0x%llx)\n",
-        ioc->name, (unsigned long long)
-        ioc->reply_post_free_dma));
-    total_sz += sz;
-
     ioc->config_page_sz = 512;
     ioc->config_page = pci_alloc_consistent(ioc->pdev,
         ioc->config_page_sz, &ioc->config_page_dma);
@@ -3577,6 +3695,9 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
     facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
     if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
         ioc->ir_firmware = 1;
+    if ((facts->IOCCapabilities &
+        MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE))
+        ioc->rdpq_array_capable = 1;
     facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
     facts->IOCRequestFrameSize =
         le16_to_cpu(mpi_reply.IOCRequestFrameSize);
@@ -3613,9 +3734,12 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
 {
     Mpi2IOCInitRequest_t mpi_request;
     Mpi2IOCInitReply_t mpi_reply;
-    int r;
+    int i, r = 0;
     struct timeval current_time;
     u16 ioc_status;
+    u32 reply_post_free_array_sz = 0;
+    Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL;
+    dma_addr_t reply_post_free_array_dma;
 
     dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
         __func__));
@@ -3644,9 +3768,31 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
     cpu_to_le64((u64)ioc->request_dma);
     mpi_request.ReplyFreeQueueAddress =
         cpu_to_le64((u64)ioc->reply_free_dma);
-    mpi_request.ReplyDescriptorPostQueueAddress =
-        cpu_to_le64((u64)ioc->reply_post_free_dma);
 
+    if (ioc->rdpq_array_enable) {
+        reply_post_free_array_sz = ioc->reply_queue_count *
+            sizeof(Mpi2IOCInitRDPQArrayEntry);
+        reply_post_free_array = pci_alloc_consistent(ioc->pdev,
+            reply_post_free_array_sz, &reply_post_free_array_dma);
+        if (!reply_post_free_array) {
+            pr_err(MPT3SAS_FMT
+                "reply_post_free_array: pci_alloc_consistent failed\n",
+                ioc->name);
+            r = -ENOMEM;
+            goto out;
+        }
+        memset(reply_post_free_array, 0, reply_post_free_array_sz);
+        for (i = 0; i < ioc->reply_queue_count; i++)
+            reply_post_free_array[i].RDPQBaseAddress =
+                cpu_to_le64(
+                (u64)ioc->reply_post[i].reply_post_free_dma);
+        mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
+        mpi_request.ReplyDescriptorPostQueueAddress =
+            cpu_to_le64((u64)reply_post_free_array_dma);
+    } else {
+        mpi_request.ReplyDescriptorPostQueueAddress =
+            cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
+    }
 
     /* This time stamp specifies number of milliseconds
     * since epoch ~ midnight January 1, 1970.
@@ -3674,7 +3820,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
     if (r != 0) {
         pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
             ioc->name, __func__, r);
-        return r;
+        goto out;
     }
 
     ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
@@ -3684,7 +3830,12 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
         r = -EIO;
     }
 
-    return 0;
+out:
+    if (reply_post_free_array)
+        pci_free_consistent(ioc->pdev, reply_post_free_array_sz,
+            reply_post_free_array,
+            reply_post_free_array_dma);
+    return r;
 }
 
 /**
@@ -4234,7 +4385,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
     struct _tr_list *delayed_tr, *delayed_tr_next;
     struct adapter_reply_queue *reply_q;
     long reply_post_free;
-    u32 reply_post_free_sz;
+    u32 reply_post_free_sz, index = 0;
 
     dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
         __func__));
@@ -4305,9 +4456,9 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
     _base_assign_reply_queues(ioc);
 
     /* initialize Reply Post Free Queue */
-    reply_post_free = (long)ioc->reply_post_free;
     reply_post_free_sz = ioc->reply_post_queue_depth *
         sizeof(Mpi2DefaultReplyDescriptor_t);
+    reply_post_free = (long)ioc->reply_post[index].reply_post_free;
     list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
         reply_q->reply_post_host_index = 0;
         reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
@@ -4317,7 +4468,15 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
         cpu_to_le64(ULLONG_MAX);
         if (!_base_is_controller_msix_enabled(ioc))
             goto skip_init_reply_post_free_queue;
-        reply_post_free += reply_post_free_sz;
+        /*
+         * If RDPQ is enabled, switch to the next allocation.
+         * Otherwise advance within the contiguous region.
+         */
+        if (ioc->rdpq_array_enable)
+            reply_post_free = (long)
+                ioc->reply_post[++index].reply_post_free;
+        else
+            reply_post_free += reply_post_free_sz;
     }
  skip_init_reply_post_free_queue:
 
@@ -4428,6 +4587,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
         goto out_free_resources;
     }
 
+    ioc->rdpq_array_enable_assigned = 0;
+    ioc->dma_mask = 0;
     r = mpt3sas_base_map_resources(ioc);
     if (r)
         goto out_free_resources;
@@ -4804,6 +4965,12 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
     r = _base_get_ioc_facts(ioc, CAN_SLEEP);
     if (r)
         goto out;
+
+    if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
+        panic("%s: Issue occurred with flashing controller firmware."
+            "Please reboot the system and ensure that the correct"
+            " firmware version is running\n", ioc->name);
+
     r = _base_make_ioc_operational(ioc, sleep_flag);
     if (!r)
         _base_reset_handler(ioc, MPT3_IOC_DONE_RESET);
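
The mpt3sas_base.c changes above are mostly RDPQ plumbing: one reply-post pool is allocated per reply queue when the IOC reports MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE, and _base_config_dma_addressing() now keeps the coherent DMA mask at 32 bit on the first attach (ioc->dma_mask == 0), apparently so the reply post pools are placed in low memory, before _base_change_consistent_dma_mask() widens it back to 64 bit once they exist. A sketch of that two-step mask policy under those assumptions; set_coherent_mask() is a stand-in for pci_set_consistent_dma_mask(), not the kernel API:

#include <stdio.h>

/* Stand-in for pci_set_consistent_dma_mask(); always succeeds here. */
static int set_coherent_mask(int bits)
{
    printf("coherent DMA mask -> %d bit\n", bits);
    return 0;
}

/*
 * Models the patch's policy: on first attach (dma_mask == 0) keep the
 * coherent mask at 32 bit so the reply post pools land below 4 GB,
 * then record 64 bit for subsequent resets.
 */
static int config_dma(int *dma_mask)
{
    int coherent_bits = *dma_mask ? 64 : 32;

    if (set_coherent_mask(coherent_bits))
        return -1;
    *dma_mask = 64;   /* the streaming mask stays 64 bit throughout */
    return 0;
}

int main(void)
{
    int dma_mask = 0;          /* ioc->dma_mask starts at 0 in attach */

    if (config_dma(&dma_mask)) /* pools then allocated under 32 bit */
        return 1;
    /* ... allocate reply post pools here ... */
    set_coherent_mask(64);     /* _base_change_consistent_dma_mask step */
    return 0;
}
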
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 9b90a6fef706..40926aa9b24d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -3,7 +3,7 @@
  * for access to MPT (Message Passing Technology) firmware.
  *
  * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h
- * Copyright (C) 2012-2013 LSI Corporation
+ * Copyright (C) 2012-2014 LSI Corporation
  * (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -70,8 +70,8 @@
 #define MPT3SAS_DRIVER_NAME "mpt3sas"
 #define MPT3SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
 #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "02.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 2
+#define MPT3SAS_DRIVER_VERSION "04.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 4
 #define MPT3SAS_MINOR_VERSION 100
 #define MPT3SAS_BUILD_VERSION 0
 #define MPT3SAS_RELEASE_VERSION 00
@@ -130,7 +130,25 @@
 #define MPT_TARGET_FLAGS_DELETED 0x04
 #define MPT_TARGET_FASTPATH_IO 0x08
 
+/*
+ * Intel HBA branding
+ */
+#define MPT3SAS_INTEL_RMS3JC080_BRANDING \
+    "Intel(R) Integrated RAID Module RMS3JC080"
+#define MPT3SAS_INTEL_RS3GC008_BRANDING \
+    "Intel(R) RAID Controller RS3GC008"
+#define MPT3SAS_INTEL_RS3FC044_BRANDING \
+    "Intel(R) RAID Controller RS3FC044"
+#define MPT3SAS_INTEL_RS3UC080_BRANDING \
+    "Intel(R) RAID Controller RS3UC080"
 
+/*
+ * Intel HBA SSDIDs
+ */
+#define MPT3SAS_INTEL_RMS3JC080_SSDID 0x3521
+#define MPT3SAS_INTEL_RS3GC008_SSDID 0x3522
+#define MPT3SAS_INTEL_RS3FC044_SSDID 0x3523
+#define MPT3SAS_INTEL_RS3UC080_SSDID 0x3524
 
 /*
  * status bits for ioc->diag_buffer_status
@@ -272,8 +290,10 @@ struct _internal_cmd {
  * @channel: target channel
  * @slot: number number
  * @phy: phy identifier provided in sas device page 0
- * @fast_path: fast path feature enable bit
  * @responding: used in _scsih_sas_device_mark_responding
+ * @fast_path: fast path feature enable bit
+ * @pfa_led_on: flag for PFA LED status
+ *
  */
 struct _sas_device {
     struct list_head list;
@@ -293,6 +313,7 @@ struct _sas_device {
     u8 phy;
     u8 responding;
     u8 fast_path;
+    u8 pfa_led_on;
 };
 
 /**
@@ -548,6 +569,11 @@ struct mpt3sas_port_facts {
     u16 MaxPostedCmdBuffers;
 };
 
+struct reply_post_struct {
+    Mpi2ReplyDescriptorsUnion_t *reply_post_free;
+    dma_addr_t reply_post_free_dma;
+};
+
 /**
  * enum mutex_type - task management mutex type
  * @TM_MUTEX_OFF: mutex is not required becuase calling function is acquiring it
@@ -576,6 +602,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
  * @ir_firmware: IR firmware present
  * @bars: bitmask of BAR's that must be configured
  * @mask_interrupts: ignore interrupt
+ * @dma_mask: used to set the consistent dma mask
  * @fault_reset_work_q_name: fw fault work queue
  * @fault_reset_work_q: ""
  * @fault_reset_work: ""
@@ -691,8 +718,11 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
691 * @reply_free_dma_pool: 718 * @reply_free_dma_pool:
692 * @reply_free_host_index: tail index in pool to insert free replies 719 * @reply_free_host_index: tail index in pool to insert free replies
693 * @reply_post_queue_depth: reply post queue depth 720 * @reply_post_queue_depth: reply post queue depth
694 * @reply_post_free: pool for reply post (64bit descriptor) 721 * @reply_post_struct: struct for reply_post_free physical & virt address
695 * @reply_post_free_dma: 722 * @rdpq_array_capable: FW supports multiple reply queue addresses in ioc_init
723 * @rdpq_array_enable: rdpq_array support is enabled in the driver
724 * @rdpq_array_enable_assigned: this ensures that rdpq_array_enable flag
725 * is assigned only once
696 * @reply_queue_count: number of reply queues 726 * @reply_queue_count: number of reply queues
697 * @reply_queue_list: linked list containing the reply queue info 727 * @reply_queue_list: linked list containing the reply queue info
698 * @reply_post_host_index: head index in the pool where FW completes IO 728 * @reply_post_host_index: head index in the pool where FW completes IO
@@ -714,6 +744,7 @@ struct MPT3SAS_ADAPTER {
714 u8 ir_firmware; 744 u8 ir_firmware;
715 int bars; 745 int bars;
716 u8 mask_interrupts; 746 u8 mask_interrupts;
747 int dma_mask;
717 748
718 /* fw fault handler */ 749 /* fw fault handler */
719 char fault_reset_work_q_name[20]; 750 char fault_reset_work_q_name[20];
@@ -893,8 +924,10 @@ struct MPT3SAS_ADAPTER {
893 924
894 /* reply post queue */ 925 /* reply post queue */
895 u16 reply_post_queue_depth; 926 u16 reply_post_queue_depth;
896 Mpi2ReplyDescriptorsUnion_t *reply_post_free; 927 struct reply_post_struct *reply_post;
897 dma_addr_t reply_post_free_dma; 928 u8 rdpq_array_capable;
929 u8 rdpq_array_enable;
930 u8 rdpq_array_enable_assigned;
898 struct dma_pool *reply_post_free_dma_pool; 931 struct dma_pool *reply_post_free_dma_pool;
899 u8 reply_queue_count; 932 u8 reply_queue_count;
900 struct list_head reply_queue_list; 933 struct list_head reply_queue_list;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 936ec0391990..4472c2af9255 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -2,7 +2,7 @@
2 * This module provides common API for accessing firmware configuration pages 2 * This module provides common API for accessing firmware configuration pages
3 * 3 *
4 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c 4 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
5 * Copyright (C) 2012-2013 LSI Corporation 5 * Copyright (C) 2012-2014 LSI Corporation
6 * (mailto:DL-MPTFusionLinux@lsi.com) 6 * (mailto:DL-MPTFusionLinux@lsi.com)
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index ba9cbe598a91..dca14877d5ab 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -3,7 +3,7 @@
3 * controllers 3 * controllers
4 * 4 *
5 * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c 5 * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c
6 * Copyright (C) 2012-2013 LSI Corporation 6 * Copyright (C) 2012-2014 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com) 7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
index 53b0c480d98f..5f3d7fd7c2f8 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -3,7 +3,7 @@
3 * controllers 3 * controllers
4 * 4 *
5 * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.h 5 * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.h
6 * Copyright (C) 2012-2013 LSI Corporation 6 * Copyright (C) 2012-2014 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com) 7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt3sas/mpt3sas_debug.h b/drivers/scsi/mpt3sas/mpt3sas_debug.h
index 545b22d2cbdf..4778e7dd98bd 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_debug.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_debug.h
@@ -2,7 +2,7 @@
2 * Logging Support for MPT (Message Passing Technology) based controllers 2 * Logging Support for MPT (Message Passing Technology) based controllers
3 * 3 *
4 * This code is based on drivers/scsi/mpt3sas/mpt3sas_debug.c 4 * This code is based on drivers/scsi/mpt3sas/mpt3sas_debug.c
5 * Copyright (C) 2012-2013 LSI Corporation 5 * Copyright (C) 2012-2014 LSI Corporation
6 * (mailto:DL-MPTFusionLinux@lsi.com) 6 * (mailto:DL-MPTFusionLinux@lsi.com)
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 135f12c20ecf..857276b8880f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2,7 +2,7 @@
2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers 2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers
3 * 3 *
4 * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c 4 * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
5 * Copyright (C) 2012-2013 LSI Corporation 5 * Copyright (C) 2012-2014 LSI Corporation
6 * (mailto:DL-MPTFusionLinux@lsi.com) 6 * (mailto:DL-MPTFusionLinux@lsi.com)
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
@@ -159,7 +159,7 @@ struct sense_info {
159}; 159};
160 160
161#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB) 161#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
162#define MPT3SAS_TURN_ON_FAULT_LED (0xFFFC) 162#define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
163#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD) 163#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
164#define MPT3SAS_ABRT_TASK_SET (0xFFFE) 164#define MPT3SAS_ABRT_TASK_SET (0xFFFE)
165#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF) 165#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
@@ -3885,7 +3885,7 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
3885#endif 3885#endif
3886 3886
3887/** 3887/**
3888 * _scsih_turn_on_fault_led - illuminate Fault LED 3888 * _scsih_turn_on_pfa_led - illuminate PFA LED
3889 * @ioc: per adapter object 3889 * @ioc: per adapter object
3890 * @handle: device handle 3890 * @handle: device handle
3891 * Context: process 3891 * Context: process
@@ -3893,10 +3893,15 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
3893 * Return nothing. 3893 * Return nothing.
3894 */ 3894 */
3895static void 3895static void
3896_scsih_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) 3896_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3897{ 3897{
3898 Mpi2SepReply_t mpi_reply; 3898 Mpi2SepReply_t mpi_reply;
3899 Mpi2SepRequest_t mpi_request; 3899 Mpi2SepRequest_t mpi_request;
3900 struct _sas_device *sas_device;
3901
3902 sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
3903 if (!sas_device)
3904 return;
3900 3905
3901 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t)); 3906 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
3902 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR; 3907 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
@@ -3911,6 +3916,7 @@ _scsih_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3911 __FILE__, __LINE__, __func__); 3916 __FILE__, __LINE__, __func__);
3912 return; 3917 return;
3913 } 3918 }
3919 sas_device->pfa_led_on = 1;
3914 3920
3915 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { 3921 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
3916 dewtprintk(ioc, pr_info(MPT3SAS_FMT 3922 dewtprintk(ioc, pr_info(MPT3SAS_FMT
@@ -3920,9 +3926,46 @@ _scsih_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3920 return; 3926 return;
3921 } 3927 }
3922} 3928}
3929/**
3930 * _scsih_turn_off_pfa_led - turn off PFA LED
3931 * @ioc: per adapter object
3932 * @sas_device: sas device whose PFA LED has to be turned off
3933 * Context: process
3934 *
3935 * Return nothing.
3936 */
3937static void
3938_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
3939 struct _sas_device *sas_device)
3940{
3941 Mpi2SepReply_t mpi_reply;
3942 Mpi2SepRequest_t mpi_request;
3923 3943
3944 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
3945 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
3946 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
3947 mpi_request.SlotStatus = 0;
3948 mpi_request.Slot = cpu_to_le16(sas_device->slot);
3949 mpi_request.DevHandle = 0;
3950 mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
3951 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
3952 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
3953 &mpi_request)) != 0) {
3954 printk(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
3955 __FILE__, __LINE__, __func__);
3956 return;
3957 }
3958
3959 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
3960 dewtprintk(ioc, printk(MPT3SAS_FMT
3961 "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
3962 ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
3963 le32_to_cpu(mpi_reply.IOCLogInfo)));
3964 return;
3965 }
3966}
3924/** 3967/**
3925 * _scsih_send_event_to_turn_on_fault_led - fire delayed event 3968 * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
3926 * @ioc: per adapter object 3969 * @ioc: per adapter object
3927 * @handle: device handle 3970 * @handle: device handle
3928 * Context: interrupt. 3971 * Context: interrupt.
@@ -3930,14 +3973,14 @@ _scsih_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3930 * Return nothing. 3973 * Return nothing.
3931 */ 3974 */
3932static void 3975static void
3933_scsih_send_event_to_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) 3976_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3934{ 3977{
3935 struct fw_event_work *fw_event; 3978 struct fw_event_work *fw_event;
3936 3979
3937 fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); 3980 fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
3938 if (!fw_event) 3981 if (!fw_event)
3939 return; 3982 return;
3940 fw_event->event = MPT3SAS_TURN_ON_FAULT_LED; 3983 fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
3941 fw_event->device_handle = handle; 3984 fw_event->device_handle = handle;
3942 fw_event->ioc = ioc; 3985 fw_event->ioc = ioc;
3943 _scsih_fw_event_add(ioc, fw_event); 3986 _scsih_fw_event_add(ioc, fw_event);
@@ -3981,7 +4024,7 @@ _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3981 spin_unlock_irqrestore(&ioc->sas_device_lock, flags); 4024 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3982 4025
3983 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) 4026 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
3984 _scsih_send_event_to_turn_on_fault_led(ioc, handle); 4027 _scsih_send_event_to_turn_on_pfa_led(ioc, handle);
3985 4028
3986 /* insert into event log */ 4029 /* insert into event log */
3987 sz = offsetof(Mpi2EventNotificationReply_t, EventData) + 4030 sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
@@ -4911,7 +4954,11 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
4911{ 4954{
4912 struct MPT3SAS_TARGET *sas_target_priv_data; 4955 struct MPT3SAS_TARGET *sas_target_priv_data;
4913 4956
4914 4957 if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
4958 (sas_device->pfa_led_on)) {
4959 _scsih_turn_off_pfa_led(ioc, sas_device);
4960 sas_device->pfa_led_on = 0;
4961 }
4915 dewtprintk(ioc, pr_info(MPT3SAS_FMT 4962 dewtprintk(ioc, pr_info(MPT3SAS_FMT
4916 "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n", 4963 "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
4917 ioc->name, __func__, 4964 ioc->name, __func__,
@@ -7065,8 +7112,8 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
7065 "port enable: complete from worker thread\n", 7112 "port enable: complete from worker thread\n",
7066 ioc->name)); 7113 ioc->name));
7067 break; 7114 break;
7068 case MPT3SAS_TURN_ON_FAULT_LED: 7115 case MPT3SAS_TURN_ON_PFA_LED:
7069 _scsih_turn_on_fault_led(ioc, fw_event->device_handle); 7116 _scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
7070 break; 7117 break;
7071 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: 7118 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7072 _scsih_sas_topology_change_event(ioc, fw_event); 7119 _scsih_sas_topology_change_event(ioc, fw_event);
@@ -7734,6 +7781,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
7734{ 7781{
7735 struct MPT3SAS_ADAPTER *ioc; 7782 struct MPT3SAS_ADAPTER *ioc;
7736 struct Scsi_Host *shost; 7783 struct Scsi_Host *shost;
7784 int rv;
7737 7785
7738 shost = scsi_host_alloc(&scsih_driver_template, 7786 shost = scsi_host_alloc(&scsih_driver_template,
7739 sizeof(struct MPT3SAS_ADAPTER)); 7787 sizeof(struct MPT3SAS_ADAPTER));
@@ -7826,6 +7874,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
7826 if (!ioc->firmware_event_thread) { 7874 if (!ioc->firmware_event_thread) {
7827 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 7875 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
7828 ioc->name, __FILE__, __LINE__, __func__); 7876 ioc->name, __FILE__, __LINE__, __func__);
7877 rv = -ENODEV;
7829 goto out_thread_fail; 7878 goto out_thread_fail;
7830 } 7879 }
7831 7880
@@ -7833,12 +7882,13 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
7833 if ((mpt3sas_base_attach(ioc))) { 7882 if ((mpt3sas_base_attach(ioc))) {
7834 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 7883 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
7835 ioc->name, __FILE__, __LINE__, __func__); 7884 ioc->name, __FILE__, __LINE__, __func__);
7885 rv = -ENODEV;
7836 goto out_attach_fail; 7886 goto out_attach_fail;
7837 } 7887 }
7838 if ((scsi_add_host(shost, &pdev->dev))) { 7888 rv = scsi_add_host(shost, &pdev->dev);
7889 if (rv) {
7839 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 7890 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
7840 ioc->name, __FILE__, __LINE__, __func__); 7891 ioc->name, __FILE__, __LINE__, __func__);
7841 list_del(&ioc->list);
7842 goto out_add_shost_fail; 7892 goto out_add_shost_fail;
7843 } 7893 }
7844 7894
@@ -7851,7 +7901,7 @@ out_add_shost_fail:
7851 out_thread_fail: 7901 out_thread_fail:
7852 list_del(&ioc->list); 7902 list_del(&ioc->list);
7853 scsi_host_put(shost); 7903 scsi_host_put(shost);
7854 return -ENODEV; 7904 return rv;
7855} 7905}
7856 7906
7857#ifdef CONFIG_PM 7907#ifdef CONFIG_PM
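The probe path now keeps the real errno in rv rather than collapsing every failure to -ENODEV; in particular a scsi_add_host() failure such as -ENOMEM reaches the PCI core unchanged. The resulting pattern, reduced to its essentials:

	rv = scsi_add_host(shost, &pdev->dev);
	if (rv)
		goto out_add_shost_fail;	/* rv carries the real error */
	...
 out_add_shost_fail:
	scsi_host_put(shost);
	return rv;				/* previously: return -ENODEV */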
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index 65170cb1a00f..d4bafaaebea9 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -2,7 +2,7 @@
2 * SAS Transport Layer for MPT (Message Passing Technology) based controllers 2 * SAS Transport Layer for MPT (Message Passing Technology) based controllers
3 * 3 *
4 * This code is based on drivers/scsi/mpt3sas/mpt3sas_transport.c 4 * This code is based on drivers/scsi/mpt3sas/mpt3sas_transport.c
5 * Copyright (C) 2012-2013 LSI Corporation 5 * Copyright (C) 2012-2014 LSI Corporation
6 * (mailto:DL-MPTFusionLinux@lsi.com) 6 * (mailto:DL-MPTFusionLinux@lsi.com)
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
index f6533ab20364..8a2dd113f401 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
@@ -3,7 +3,7 @@
3 * (Message Passing Technology) based controllers 3 * (Message Passing Technology) based controllers
4 * 4 *
5 * This code is based on drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c 5 * This code is based on drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
6 * Copyright (C) 2012-2013 LSI Corporation 6 * Copyright (C) 2012-2014 LSI Corporation
7 * (mailto:DL-MPTFusionLinux@lsi.com) 7 * (mailto:DL-MPTFusionLinux@lsi.com)
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h
index bb693923bef1..f681db56c53b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h
@@ -4,7 +4,7 @@
4 * controllers 4 * controllers
5 * 5 *
6 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h 6 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h
7 * Copyright (C) 2012-2013 LSI Corporation 7 * Copyright (C) 2012-2014 LSI Corporation
8 * (mailto:DL-MPTFusionLinux@lsi.com) 8 * (mailto:DL-MPTFusionLinux@lsi.com)
9 * 9 *
10 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 53284eb23a15..90abb03c9074 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -915,7 +915,7 @@ static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct s
915 int ret; 915 int ret;
916 916
917 nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, 917 nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
918 "enter. target: 0x%x LUN: 0x%llu cmnd: 0x%x cmndlen: 0x%x " 918 "enter. target: 0x%x LUN: 0x%llx cmnd: 0x%x cmndlen: 0x%x "
919 "use_sg: 0x%x reqbuf: 0x%lx reqlen: 0x%x", 919 "use_sg: 0x%x reqbuf: 0x%lx reqlen: 0x%x",
920 SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0], SCpnt->cmd_len, 920 SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0], SCpnt->cmd_len,
921 scsi_sg_count(SCpnt), scsi_sglist(SCpnt), scsi_bufflen(SCpnt)); 921 scsi_sg_count(SCpnt), scsi_sglist(SCpnt), scsi_bufflen(SCpnt));
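The nsp32 change fixes a mismatched format specifier: the LUN is a u64 printed after a "0x" prefix, so %llu rendered a decimal value that merely looked like hex. A two-line illustration:

	u64 lun = 10;
	pr_debug("LUN: 0x%llu\n", lun);	/* misleading: prints "0x10" (decimal) */
	pr_debug("LUN: 0x%llx\n", lun);	/* correct: prints "0xa" */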
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 7abbf284da1a..be8269c8d127 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -385,7 +385,6 @@ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
385 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); 385 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
386 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; 386 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
387 char *str = buf; 387 char *str = buf;
388 void *virt_addr;
389 int bios_index; 388 int bios_index;
390 DECLARE_COMPLETION_ONSTACK(completion); 389 DECLARE_COMPLETION_ONSTACK(completion);
391 struct pm8001_ioctl_payload payload; 390 struct pm8001_ioctl_payload payload;
@@ -402,11 +401,10 @@ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
402 return -ENOMEM; 401 return -ENOMEM;
403 } 402 }
404 wait_for_completion(&completion); 403 wait_for_completion(&completion);
405 virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
406 for (bios_index = BIOSOFFSET; bios_index < BIOS_OFFSET_LIMIT; 404 for (bios_index = BIOSOFFSET; bios_index < BIOS_OFFSET_LIMIT;
407 bios_index++) 405 bios_index++)
408 str += sprintf(str, "%c", 406 str += sprintf(str, "%c",
409 *((u8 *)((u8 *)virt_addr+bios_index))); 407 *(payload.func_specific+bios_index));
410 kfree(payload.func_specific); 408 kfree(payload.func_specific);
411 return str - buf; 409 return str - buf;
412} 410}
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index dd12c6fe57a6..933f21471951 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3132,6 +3132,7 @@ void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3132void 3132void
3133pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) 3133pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3134{ 3134{
3135 struct fw_control_ex *fw_control_context;
3135 struct get_nvm_data_resp *pPayload = 3136 struct get_nvm_data_resp *pPayload =
3136 (struct get_nvm_data_resp *)(piomb + 4); 3137 (struct get_nvm_data_resp *)(piomb + 4);
3137 u32 tag = le32_to_cpu(pPayload->tag); 3138 u32 tag = le32_to_cpu(pPayload->tag);
@@ -3140,6 +3141,7 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3140 u32 ir_tds_bn_dps_das_nvm = 3141 u32 ir_tds_bn_dps_das_nvm =
3141 le32_to_cpu(pPayload->ir_tda_bn_dps_das_nvm); 3142 le32_to_cpu(pPayload->ir_tda_bn_dps_das_nvm);
3142 void *virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr; 3143 void *virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
3144 fw_control_context = ccb->fw_control_context;
3143 3145
3144 PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Get nvm data complete!\n")); 3146 PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Get nvm data complete!\n"));
3145 if ((dlen_status & NVMD_STAT) != 0) { 3147 if ((dlen_status & NVMD_STAT) != 0) {
@@ -3180,6 +3182,12 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
3180 pm8001_printk("Get NVMD success, IR=0, dataLen=%d\n", 3182 pm8001_printk("Get NVMD success, IR=0, dataLen=%d\n",
3181 (dlen_status & NVMD_LEN) >> 24)); 3183 (dlen_status & NVMD_LEN) >> 24));
3182 } 3184 }
3185 /* Though fw_control_context is freed below, usrAddr still needs
3186 * to be updated as this holds the response to the requesting function
3187 */
3188 memcpy(fw_control_context->usrAddr,
3189 pm8001_ha->memoryMap.region[NVMD].virt_ptr,
3190 fw_control_context->len);
3183 kfree(ccb->fw_control_context); 3191 kfree(ccb->fw_control_context);
3184 ccb->task = NULL; 3192 ccb->task = NULL;
3185 ccb->ccb_tag = 0xFFFFFFFF; 3193 ccb->ccb_tag = 0xFFFFFFFF;
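These two pm8001 hunks are halves of one fix: the completion handler must copy the NVMD response out of the shared DMA region into the requester's buffer (usrAddr, which pm8001_ctl_bios_version_show supplied as payload.func_specific) before fw_control_context is freed; only then can the sysfs reader above safely walk its own buffer instead of the live NVMD region. The ordering constraint, in brief:

	/* producer (completion handler): copy out, then free */
	memcpy(fw_control_context->usrAddr,
	       pm8001_ha->memoryMap.region[NVMD].virt_ptr,
	       fw_control_context->len);
	kfree(ccb->fw_control_context);

	/* consumer (sysfs show): reads only its own func_specific buffer */
	wait_for_completion(&completion);
	str += sprintf(str, "%c", *(payload.func_specific + bios_index));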
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 6f3275d020a0..bcb64eb1387f 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -4698,19 +4698,10 @@ pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
4698 for (i = 0; i < PMCRAID_NUM_MSIX_VECTORS; i++) 4698 for (i = 0; i < PMCRAID_NUM_MSIX_VECTORS; i++)
4699 entries[i].entry = i; 4699 entries[i].entry = i;
4700 4700
4701 rc = pci_enable_msix(pdev, entries, num_hrrq); 4701 num_hrrq = pci_enable_msix_range(pdev, entries, 1, num_hrrq);
4702 if (rc < 0) 4702 if (num_hrrq < 0)
4703 goto pmcraid_isr_legacy; 4703 goto pmcraid_isr_legacy;
4704 4704
4705 /* Check how many MSIX vectors are allocated and register
4706 * msi-x handlers for each of them giving appropriate buffer
4707 */
4708 if (rc > 0) {
4709 num_hrrq = rc;
4710 if (pci_enable_msix(pdev, entries, num_hrrq))
4711 goto pmcraid_isr_legacy;
4712 }
4713
4714 for (i = 0; i < num_hrrq; i++) { 4705 for (i = 0; i < num_hrrq; i++) {
4715 pinstance->hrrq_vector[i].hrrq_id = i; 4706 pinstance->hrrq_vector[i].hrrq_id = i;
4716 pinstance->hrrq_vector[i].drv_inst = pinstance; 4707 pinstance->hrrq_vector[i].drv_inst = pinstance;
@@ -4746,7 +4737,6 @@ pmcraid_isr_legacy:
4746 pinstance->hrrq_vector[0].drv_inst = pinstance; 4737 pinstance->hrrq_vector[0].drv_inst = pinstance;
4747 pinstance->hrrq_vector[0].vector = pdev->irq; 4738 pinstance->hrrq_vector[0].vector = pdev->irq;
4748 pinstance->num_hrrq = 1; 4739 pinstance->num_hrrq = 1;
4749 rc = 0;
4750 4740
4751 rc = request_irq(pdev->irq, pmcraid_isr, IRQF_SHARED, 4741 rc = request_irq(pdev->irq, pmcraid_isr, IRQF_SHARED,
4752 PMCRAID_DRIVER_NAME, &pinstance->hrrq_vector[0]); 4742 PMCRAID_DRIVER_NAME, &pinstance->hrrq_vector[0]);
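pci_enable_msix() could succeed, fail, or return a positive count meaning "retry with this many vectors", which is what the deleted retry block handled by hand. pci_enable_msix_range() folds that loop into the API: it returns the number of vectors actually allocated (at least minvec) or a negative errno. The general calling pattern (the MSI-X handler name here is assumed for illustration):

	int nvec = pci_enable_msix_range(pdev, entries, 1, num_hrrq);

	if (nvec < 0)
		goto legacy_intx;	/* no MSI-X at all: fall back to INTx */
	num_hrrq = nvec;		/* may be fewer than first requested */
	for (i = 0; i < num_hrrq; i++)
		request_irq(entries[i].vector, pmcraid_isr_msix, 0,
		    PMCRAID_DRIVER_NAME, &pinstance->hrrq_vector[i]);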
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 16fe5196e6d9..82b92c414a9c 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -484,7 +484,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
484 start == (ha->flt_region_fw * 4)) 484 start == (ha->flt_region_fw * 4))
485 valid = 1; 485 valid = 1;
486 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) 486 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)
487 || IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) 487 || IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)
488 || IS_QLA27XX(ha))
488 valid = 1; 489 valid = 1;
489 if (!valid) { 490 if (!valid) {
490 ql_log(ql_log_warn, vha, 0x7065, 491 ql_log(ql_log_warn, vha, 0x7065,
@@ -987,6 +988,8 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
987 continue; 988 continue;
988 if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw))) 989 if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
989 continue; 990 continue;
991 if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw))
992 continue;
990 993
991 sysfs_remove_bin_file(&host->shost_gendev.kobj, 994 sysfs_remove_bin_file(&host->shost_gendev.kobj,
992 iter->attr); 995 iter->attr);
@@ -1014,7 +1017,7 @@ qla2x00_fw_version_show(struct device *dev,
1014 char fw_str[128]; 1017 char fw_str[128];
1015 1018
1016 return scnprintf(buf, PAGE_SIZE, "%s\n", 1019 return scnprintf(buf, PAGE_SIZE, "%s\n",
1017 ha->isp_ops->fw_version_str(vha, fw_str)); 1020 ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str)));
1018} 1021}
1019 1022
1020static ssize_t 1023static ssize_t
@@ -1440,7 +1443,7 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1440{ 1443{
1441 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1444 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1442 int rval = QLA_FUNCTION_FAILED; 1445 int rval = QLA_FUNCTION_FAILED;
1443 uint16_t state[5]; 1446 uint16_t state[6];
1444 uint32_t pstate; 1447 uint32_t pstate;
1445 1448
1446 if (IS_QLAFX00(vha->hw)) { 1449 if (IS_QLAFX00(vha->hw)) {
@@ -1456,8 +1459,8 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1456 if (rval != QLA_SUCCESS) 1459 if (rval != QLA_SUCCESS)
1457 memset(state, -1, sizeof(state)); 1460 memset(state, -1, sizeof(state));
1458 1461
1459 return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0], 1462 return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1460 state[1], state[2], state[3], state[4]); 1463 state[0], state[1], state[2], state[3], state[4], state[5]);
1461} 1464}
1462 1465
1463static ssize_t 1466static ssize_t
@@ -1924,7 +1927,8 @@ qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
1924{ 1927{
1925 scsi_qla_host_t *vha = shost_priv(shost); 1928 scsi_qla_host_t *vha = shost_priv(shost);
1926 1929
1927 qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost)); 1930 qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost),
1931 sizeof(fc_host_symbolic_name(shost)));
1928} 1932}
1929 1933
1930static void 1934static void
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 524f9eb7fcd1..2e2bb6f45ce6 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1390,7 +1390,7 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
1390 start == (ha->flt_region_fw * 4)) 1390 start == (ha->flt_region_fw * 4))
1391 valid = 1; 1391 valid = 1;
1392 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || 1392 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
1393 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) 1393 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha))
1394 valid = 1; 1394 valid = 1;
1395 if (!valid) { 1395 if (!valid) {
1396 ql_log(ql_log_warn, vha, 0x7058, 1396 ql_log(ql_log_warn, vha, 0x7058,
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index c72ee97bf3f7..d77fe43793b6 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,19 +11,15 @@
11 * ---------------------------------------------------------------------- 11 * ----------------------------------------------------------------------
12 * | Level | Last Value Used | Holes | 12 * | Level | Last Value Used | Holes |
13 * ---------------------------------------------------------------------- 13 * ----------------------------------------------------------------------
14 * | Module Init and Probe | 0x017d | 0x004b,0x0141 | 14 * | Module Init and Probe | 0x017d | 0x0144,0x0146 |
15 * | | | 0x0144,0x0146 |
16 * | | | 0x015b-0x0160 | 15 * | | | 0x015b-0x0160 |
17 * | | | 0x016e-0x0170 | 16 * | | | 0x016e-0x0170 |
18 * | Mailbox commands | 0x118d | 0x1018-0x1019 | 17 * | Mailbox commands | 0x118d | 0x1115-0x1116 |
19 * | | | 0x10ca | 18 * | | | 0x111a-0x111b |
20 * | | | 0x1115-0x1116 | 19 * | Device Discovery | 0x2016 | 0x2020-0x2022, |
21 * | | | 0x111a-0x111b |
22 * | | | 0x1155-0x1158 |
23 * | Device Discovery | 0x2095 | 0x2020-0x2022, |
24 * | | | 0x2011-0x2012, | 20 * | | | 0x2011-0x2012, |
25 * | | | 0x2016 | 21 * | | | 0x2099-0x20a4 |
26 * | Queue Command and IO tracing | 0x3059 | 0x3006-0x300b | 22 * | Queue Command and IO tracing | 0x3059 | 0x300b |
27 * | | | 0x3027-0x3028 | 23 * | | | 0x3027-0x3028 |
28 * | | | 0x303d-0x3041 | 24 * | | | 0x303d-0x3041 |
29 * | | | 0x302d,0x3033 | 25 * | | | 0x302d,0x3033 |
@@ -31,10 +27,10 @@
31 * | | | 0x303a | 27 * | | | 0x303a |
32 * | DPC Thread | 0x4023 | 0x4002,0x4013 | 28 * | DPC Thread | 0x4023 | 0x4002,0x4013 |
33 * | Async Events | 0x5087 | 0x502b-0x502f | 29 * | Async Events | 0x5087 | 0x502b-0x502f |
34 * | | | 0x5047,0x5052 | 30 * | | | 0x5047 |
35 * | | | 0x5084,0x5075 | 31 * | | | 0x5084,0x5075 |
36 * | | | 0x503d,0x5044 | 32 * | | | 0x503d,0x5044 |
37 * | | | 0x507b | 33 * | | | 0x507b,0x505f |
38 * | Timer Routines | 0x6012 | | 34 * | Timer Routines | 0x6012 | |
39 * | User Space Interactions | 0x70e2 | 0x7018,0x702e | 35 * | User Space Interactions | 0x70e2 | 0x7018,0x702e |
40 * | | | 0x7020,0x7024 | 36 * | | | 0x7020,0x7024 |
@@ -64,13 +60,15 @@
64 * | | | 0xb13c-0xb140 | 60 * | | | 0xb13c-0xb140 |
65 * | | | 0xb149 | 61 * | | | 0xb149 |
66 * | MultiQ | 0xc00c | | 62 * | MultiQ | 0xc00c | |
67 * | Misc | 0xd212 | 0xd017-0xd019 | 63 * | Misc | 0xd213 | 0xd011-0xd017 |
68 * | | | 0xd020 | 64 * | | | 0xd021,0xd024 |
69 * | | | 0xd030-0xd0ff | 65 * | | | 0xd025,0xd029 |
66 * | | | 0xd02a,0xd02e |
67 * | | | 0xd031-0xd0ff |
70 * | | | 0xd101-0xd1fe | 68 * | | | 0xd101-0xd1fe |
71 * | | | 0xd213-0xd2fe | 69 * | | | 0xd214-0xd2fe |
72 * | Target Mode | 0xe078 | | 70 * | Target Mode | 0xe079 | |
73 * | Target Mode Management | 0xf072 | 0xf002-0xf003 | 71 * | Target Mode Management | 0xf072 | 0xf002 |
74 * | | | 0xf046-0xf049 | 72 * | | | 0xf046-0xf049 |
75 * | Target Mode Task Management | 0x1000b | | 73 * | Target Mode Task Management | 0x1000b | |
76 * ---------------------------------------------------------------------- 74 * ----------------------------------------------------------------------
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index b64399153135..5f6b2960cccb 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -191,6 +191,11 @@
191 * reset-recovery completion is 191 * reset-recovery completion is
192 * second 192 * second
193 */ 193 */
194/* ISP2031: Values for laser on/off */
195#define PORT_0_2031 0x00201340
196#define PORT_1_2031 0x00201350
197#define LASER_ON_2031 0x01800100
198#define LASER_OFF_2031 0x01800180
194 199
195/* 200/*
196 * The ISP2312 v2 chip cannot access the FLASH/GPIO registers via MMIO in an 201 * The ISP2312 v2 chip cannot access the FLASH/GPIO registers via MMIO in an
@@ -261,6 +266,7 @@
261#define REQUEST_ENTRY_CNT_2100 128 /* Number of request entries. */ 266#define REQUEST_ENTRY_CNT_2100 128 /* Number of request entries. */
262#define REQUEST_ENTRY_CNT_2200 2048 /* Number of request entries. */ 267#define REQUEST_ENTRY_CNT_2200 2048 /* Number of request entries. */
263#define REQUEST_ENTRY_CNT_24XX 2048 /* Number of request entries. */ 268#define REQUEST_ENTRY_CNT_24XX 2048 /* Number of request entries. */
269#define REQUEST_ENTRY_CNT_83XX 8192 /* Number of request entries. */
264#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/ 270#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/
265#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/ 271#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
266#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/ 272#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/
@@ -803,6 +809,7 @@ struct mbx_cmd_32 {
803#define MBA_FW_RESTART_CMPLT 0x8060 /* Firmware restart complete */ 809#define MBA_FW_RESTART_CMPLT 0x8060 /* Firmware restart complete */
804#define MBA_INIT_REQUIRED 0x8061 /* Initialization required */ 810#define MBA_INIT_REQUIRED 0x8061 /* Initialization required */
805#define MBA_SHUTDOWN_REQUESTED 0x8062 /* Shutdown Requested */ 811#define MBA_SHUTDOWN_REQUESTED 0x8062 /* Shutdown Requested */
812#define MBA_DPORT_DIAGNOSTICS 0x8080 /* D-port Diagnostics */
806#define MBA_FW_INIT_FAILURE 0x8401 /* Firmware initialization failure */ 813#define MBA_FW_INIT_FAILURE 0x8401 /* Firmware initialization failure */
807#define MBA_MIRROR_LUN_CHANGE 0x8402 /* Mirror LUN State Change 814#define MBA_MIRROR_LUN_CHANGE 0x8402 /* Mirror LUN State Change
808 Notification */ 815 Notification */
@@ -948,6 +955,7 @@ struct mbx_cmd_32 {
948#define MBC_WRITE_SFP 0x30 /* Write SFP Data. */ 955#define MBC_WRITE_SFP 0x30 /* Write SFP Data. */
949#define MBC_READ_SFP 0x31 /* Read SFP Data. */ 956#define MBC_READ_SFP 0x31 /* Read SFP Data. */
950#define MBC_SET_TIMEOUT_PARAMS 0x32 /* Set FW timeouts. */ 957#define MBC_SET_TIMEOUT_PARAMS 0x32 /* Set FW timeouts. */
958#define MBC_DPORT_DIAGNOSTICS 0x47 /* D-Port Diagnostics */
951#define MBC_MID_INITIALIZE_FIRMWARE 0x48 /* MID Initialize firmware. */ 959#define MBC_MID_INITIALIZE_FIRMWARE 0x48 /* MID Initialize firmware. */
952#define MBC_MID_GET_VP_DATABASE 0x49 /* MID Get VP Database. */ 960#define MBC_MID_GET_VP_DATABASE 0x49 /* MID Get VP Database. */
953#define MBC_MID_GET_VP_ENTRY 0x4a /* MID Get VP Entry. */ 961#define MBC_MID_GET_VP_ENTRY 0x4a /* MID Get VP Entry. */
@@ -2016,6 +2024,8 @@ typedef struct fc_port {
2016 unsigned long last_ramp_up; 2024 unsigned long last_ramp_up;
2017 2025
2018 uint16_t port_id; 2026 uint16_t port_id;
2027
2028 unsigned long retry_delay_timestamp;
2019} fc_port_t; 2029} fc_port_t;
2020 2030
2021#include "qla_mr.h" 2031#include "qla_mr.h"
@@ -2056,10 +2066,21 @@ static const char * const port_state_str[] = {
2056 2066
2057#define CT_REJECT_RESPONSE 0x8001 2067#define CT_REJECT_RESPONSE 0x8001
2058#define CT_ACCEPT_RESPONSE 0x8002 2068#define CT_ACCEPT_RESPONSE 0x8002
2059#define CT_REASON_INVALID_COMMAND_CODE 0x01 2069#define CT_REASON_INVALID_COMMAND_CODE 0x01
2060#define CT_REASON_CANNOT_PERFORM 0x09 2070#define CT_REASON_CANNOT_PERFORM 0x09
2061#define CT_REASON_COMMAND_UNSUPPORTED 0x0b 2071#define CT_REASON_COMMAND_UNSUPPORTED 0x0b
2062#define CT_EXPL_ALREADY_REGISTERED 0x10 2072#define CT_EXPL_ALREADY_REGISTERED 0x10
2073#define CT_EXPL_HBA_ATTR_NOT_REGISTERED 0x11
2074#define CT_EXPL_MULTIPLE_HBA_ATTR 0x12
2075#define CT_EXPL_INVALID_HBA_BLOCK_LENGTH 0x13
2076#define CT_EXPL_MISSING_REQ_HBA_ATTR 0x14
2077#define CT_EXPL_PORT_NOT_REGISTERED_ 0x15
2078#define CT_EXPL_MISSING_HBA_ID_PORT_LIST 0x16
2079#define CT_EXPL_HBA_NOT_REGISTERED 0x17
2080#define CT_EXPL_PORT_ATTR_NOT_REGISTERED 0x20
2081#define CT_EXPL_PORT_NOT_REGISTERED 0x21
2082#define CT_EXPL_MULTIPLE_PORT_ATTR 0x22
2083#define CT_EXPL_INVALID_PORT_BLOCK_LENGTH 0x23
2063 2084
2064#define NS_N_PORT_TYPE 0x01 2085#define NS_N_PORT_TYPE 0x01
2065#define NS_NL_PORT_TYPE 0x02 2086#define NS_NL_PORT_TYPE 0x02
@@ -2116,33 +2137,40 @@ static const char * const port_state_str[] = {
2116 * HBA attribute types. 2137 * HBA attribute types.
2117 */ 2138 */
2118#define FDMI_HBA_ATTR_COUNT 9 2139#define FDMI_HBA_ATTR_COUNT 9
2119#define FDMI_HBA_NODE_NAME 1 2140#define FDMIV2_HBA_ATTR_COUNT 17
2120#define FDMI_HBA_MANUFACTURER 2 2141#define FDMI_HBA_NODE_NAME 0x1
2121#define FDMI_HBA_SERIAL_NUMBER 3 2142#define FDMI_HBA_MANUFACTURER 0x2
2122#define FDMI_HBA_MODEL 4 2143#define FDMI_HBA_SERIAL_NUMBER 0x3
2123#define FDMI_HBA_MODEL_DESCRIPTION 5 2144#define FDMI_HBA_MODEL 0x4
2124#define FDMI_HBA_HARDWARE_VERSION 6 2145#define FDMI_HBA_MODEL_DESCRIPTION 0x5
2125#define FDMI_HBA_DRIVER_VERSION 7 2146#define FDMI_HBA_HARDWARE_VERSION 0x6
2126#define FDMI_HBA_OPTION_ROM_VERSION 8 2147#define FDMI_HBA_DRIVER_VERSION 0x7
2127#define FDMI_HBA_FIRMWARE_VERSION 9 2148#define FDMI_HBA_OPTION_ROM_VERSION 0x8
2149#define FDMI_HBA_FIRMWARE_VERSION 0x9
2128#define FDMI_HBA_OS_NAME_AND_VERSION 0xa 2150#define FDMI_HBA_OS_NAME_AND_VERSION 0xa
2129#define FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH 0xb 2151#define FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH 0xb
2152#define FDMI_HBA_NODE_SYMBOLIC_NAME 0xc
2153#define FDMI_HBA_VENDOR_ID 0xd
2154#define FDMI_HBA_NUM_PORTS 0xe
2155#define FDMI_HBA_FABRIC_NAME 0xf
2156#define FDMI_HBA_BOOT_BIOS_NAME 0x10
2157#define FDMI_HBA_TYPE_VENDOR_IDENTIFIER 0xe0
2130 2158
2131struct ct_fdmi_hba_attr { 2159struct ct_fdmi_hba_attr {
2132 uint16_t type; 2160 uint16_t type;
2133 uint16_t len; 2161 uint16_t len;
2134 union { 2162 union {
2135 uint8_t node_name[WWN_SIZE]; 2163 uint8_t node_name[WWN_SIZE];
2136 uint8_t manufacturer[32]; 2164 uint8_t manufacturer[64];
2137 uint8_t serial_num[8]; 2165 uint8_t serial_num[32];
2138 uint8_t model[16]; 2166 uint8_t model[16];
2139 uint8_t model_desc[80]; 2167 uint8_t model_desc[80];
2140 uint8_t hw_version[16]; 2168 uint8_t hw_version[32];
2141 uint8_t driver_version[32]; 2169 uint8_t driver_version[32];
2142 uint8_t orom_version[16]; 2170 uint8_t orom_version[16];
2143 uint8_t fw_version[16]; 2171 uint8_t fw_version[32];
2144 uint8_t os_version[128]; 2172 uint8_t os_version[128];
2145 uint8_t max_ct_len[4]; 2173 uint32_t max_ct_len;
2146 } a; 2174 } a;
2147}; 2175};
2148 2176
@@ -2151,16 +2179,56 @@ struct ct_fdmi_hba_attributes {
2151 struct ct_fdmi_hba_attr entry[FDMI_HBA_ATTR_COUNT]; 2179 struct ct_fdmi_hba_attr entry[FDMI_HBA_ATTR_COUNT];
2152}; 2180};
2153 2181
2182struct ct_fdmiv2_hba_attr {
2183 uint16_t type;
2184 uint16_t len;
2185 union {
2186 uint8_t node_name[WWN_SIZE];
2187 uint8_t manufacturer[32];
2188 uint8_t serial_num[32];
2189 uint8_t model[16];
2190 uint8_t model_desc[80];
2191 uint8_t hw_version[16];
2192 uint8_t driver_version[32];
2193 uint8_t orom_version[16];
2194 uint8_t fw_version[32];
2195 uint8_t os_version[128];
2196 uint32_t max_ct_len;
2197 uint8_t sym_name[256];
2198 uint32_t vendor_id;
2199 uint32_t num_ports;
2200 uint8_t fabric_name[WWN_SIZE];
2201 uint8_t bios_name[32];
2202 uint8_t vendor_indentifer[8];
2203 } a;
2204};
2205
2206struct ct_fdmiv2_hba_attributes {
2207 uint32_t count;
2208 struct ct_fdmiv2_hba_attr entry[FDMIV2_HBA_ATTR_COUNT];
2209};
2210
2154/* 2211/*
2155 * Port attribute types. 2212 * Port attribute types.
2156 */ 2213 */
2157#define FDMI_PORT_ATTR_COUNT 6 2214#define FDMI_PORT_ATTR_COUNT 6
2158#define FDMI_PORT_FC4_TYPES 1 2215#define FDMIV2_PORT_ATTR_COUNT 16
2159#define FDMI_PORT_SUPPORT_SPEED 2 2216#define FDMI_PORT_FC4_TYPES 0x1
2160#define FDMI_PORT_CURRENT_SPEED 3 2217#define FDMI_PORT_SUPPORT_SPEED 0x2
2161#define FDMI_PORT_MAX_FRAME_SIZE 4 2218#define FDMI_PORT_CURRENT_SPEED 0x3
2162#define FDMI_PORT_OS_DEVICE_NAME 5 2219#define FDMI_PORT_MAX_FRAME_SIZE 0x4
2163#define FDMI_PORT_HOST_NAME 6 2220#define FDMI_PORT_OS_DEVICE_NAME 0x5
2221#define FDMI_PORT_HOST_NAME 0x6
2222#define FDMI_PORT_NODE_NAME 0x7
2223#define FDMI_PORT_NAME 0x8
2224#define FDMI_PORT_SYM_NAME 0x9
2225#define FDMI_PORT_TYPE 0xa
2226#define FDMI_PORT_SUPP_COS 0xb
2227#define FDMI_PORT_FABRIC_NAME 0xc
2228#define FDMI_PORT_FC4_TYPE 0xd
2229#define FDMI_PORT_STATE 0x101
2230#define FDMI_PORT_COUNT 0x102
2231#define FDMI_PORT_ID 0x103
2164 2232
2165#define FDMI_PORT_SPEED_1GB 0x1 2233#define FDMI_PORT_SPEED_1GB 0x1
2166#define FDMI_PORT_SPEED_2GB 0x2 2234#define FDMI_PORT_SPEED_2GB 0x2
@@ -2171,7 +2239,11 @@ struct ct_fdmi_hba_attributes {
2171#define FDMI_PORT_SPEED_32GB 0x40 2239#define FDMI_PORT_SPEED_32GB 0x40
2172#define FDMI_PORT_SPEED_UNKNOWN 0x8000 2240#define FDMI_PORT_SPEED_UNKNOWN 0x8000
2173 2241
2174struct ct_fdmi_port_attr { 2242#define FC_CLASS_2 0x04
2243#define FC_CLASS_3 0x08
2244#define FC_CLASS_2_3 0x0C
2245
2246struct ct_fdmiv2_port_attr {
2175 uint16_t type; 2247 uint16_t type;
2176 uint16_t len; 2248 uint16_t len;
2177 union { 2249 union {
@@ -2181,12 +2253,40 @@ struct ct_fdmi_port_attr {
2181 uint32_t max_frame_size; 2253 uint32_t max_frame_size;
2182 uint8_t os_dev_name[32]; 2254 uint8_t os_dev_name[32];
2183 uint8_t host_name[32]; 2255 uint8_t host_name[32];
2256 uint8_t node_name[WWN_SIZE];
2257 uint8_t port_name[WWN_SIZE];
2258 uint8_t port_sym_name[128];
2259 uint32_t port_type;
2260 uint32_t port_supported_cos;
2261 uint8_t fabric_name[WWN_SIZE];
2262 uint8_t port_fc4_type[32];
2263 uint32_t port_state;
2264 uint32_t num_ports;
2265 uint32_t port_id;
2184 } a; 2266 } a;
2185}; 2267};
2186 2268
2187/* 2269/*
2188 * Port Attribute Block. 2270 * Port Attribute Block.
2189 */ 2271 */
2272struct ct_fdmiv2_port_attributes {
2273 uint32_t count;
2274 struct ct_fdmiv2_port_attr entry[FDMIV2_PORT_ATTR_COUNT];
2275};
2276
2277struct ct_fdmi_port_attr {
2278 uint16_t type;
2279 uint16_t len;
2280 union {
2281 uint8_t fc4_types[32];
2282 uint32_t sup_speed;
2283 uint32_t cur_speed;
2284 uint32_t max_frame_size;
2285 uint8_t os_dev_name[32];
2286 uint8_t host_name[32];
2287 } a;
2288};
2289
2190struct ct_fdmi_port_attributes { 2290struct ct_fdmi_port_attributes {
2191 uint32_t count; 2291 uint32_t count;
2192 struct ct_fdmi_port_attr entry[FDMI_PORT_ATTR_COUNT]; 2292 struct ct_fdmi_port_attr entry[FDMI_PORT_ATTR_COUNT];
@@ -2286,6 +2386,13 @@ struct ct_sns_req {
2286 2386
2287 struct { 2387 struct {
2288 uint8_t hba_identifier[8]; 2388 uint8_t hba_identifier[8];
2389 uint32_t entry_count;
2390 uint8_t port_name[8];
2391 struct ct_fdmiv2_hba_attributes attrs;
2392 } rhba2;
2393
2394 struct {
2395 uint8_t hba_identifier[8];
2289 struct ct_fdmi_hba_attributes attrs; 2396 struct ct_fdmi_hba_attributes attrs;
2290 } rhat; 2397 } rhat;
2291 2398
@@ -2296,6 +2403,11 @@ struct ct_sns_req {
2296 2403
2297 struct { 2404 struct {
2298 uint8_t port_name[8]; 2405 uint8_t port_name[8];
2406 struct ct_fdmiv2_port_attributes attrs;
2407 } rpa2;
2408
2409 struct {
2410 uint8_t port_name[8];
2299 } dhba; 2411 } dhba;
2300 2412
2301 struct { 2413 struct {
@@ -2522,7 +2634,7 @@ struct isp_operations {
2522 int (*load_risc) (struct scsi_qla_host *, uint32_t *); 2634 int (*load_risc) (struct scsi_qla_host *, uint32_t *);
2523 2635
2524 char * (*pci_info_str) (struct scsi_qla_host *, char *); 2636 char * (*pci_info_str) (struct scsi_qla_host *, char *);
2525 char * (*fw_version_str) (struct scsi_qla_host *, char *); 2637 char * (*fw_version_str)(struct scsi_qla_host *, char *, size_t);
2526 2638
2527 irq_handler_t intr_handler; 2639 irq_handler_t intr_handler;
2528 void (*enable_intrs) (struct qla_hw_data *); 2640 void (*enable_intrs) (struct qla_hw_data *);
@@ -2664,6 +2776,9 @@ struct qla_statistics {
2664 uint32_t control_requests; 2776 uint32_t control_requests;
2665 2777
2666 uint64_t jiffies_at_last_reset; 2778 uint64_t jiffies_at_last_reset;
2779 uint32_t stat_max_pend_cmds;
2780 uint32_t stat_max_qfull_cmds_alloc;
2781 uint32_t stat_max_qfull_cmds_dropped;
2667}; 2782};
2668 2783
2669struct bidi_statistics { 2784struct bidi_statistics {
@@ -2786,8 +2901,22 @@ struct qlt_hw_data {
2786 uint8_t saved_add_firmware_options[2]; 2901 uint8_t saved_add_firmware_options[2];
2787 2902
2788 uint8_t tgt_node_name[WWN_SIZE]; 2903 uint8_t tgt_node_name[WWN_SIZE];
2904
2905 struct list_head q_full_list;
2906 uint32_t num_pend_cmds;
2907 uint32_t num_qfull_cmds_alloc;
2908 uint32_t num_qfull_cmds_dropped;
2909 spinlock_t q_full_lock;
2910 uint32_t leak_exchg_thresh_hold;
2789}; 2911};
2790 2912
2913#define MAX_QFULL_CMDS_ALLOC 8192
2914#define Q_FULL_THRESH_HOLD_PERCENT 90
2915#define Q_FULL_THRESH_HOLD(ha) \
2916 ((ha->fw_xcb_count/100) * Q_FULL_THRESH_HOLD_PERCENT)
2917
2918#define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */
2919
2791/* 2920/*
2792 * Qlogic host adapter specific data structure. 2921 * Qlogic host adapter specific data structure.
2793*/ 2922*/
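Q_FULL_THRESH_HOLD() works in integer arithmetic: the firmware exchange count is divided by 100 first, then scaled by the percentage. A worked example as a sketch:

	u32 fw_xcb_count = 2048;		/* example value */
	u32 thresh = (fw_xcb_count / 100) *
	    Q_FULL_THRESH_HOLD_PERCENT;		/* (2048/100)*90 = 20*90 = 1800 */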
@@ -2834,7 +2963,8 @@ struct qla_hw_data {
2834 2963
2835 uint32_t mr_reset_hdlr_active:1; 2964 uint32_t mr_reset_hdlr_active:1;
2836 uint32_t mr_intr_valid:1; 2965 uint32_t mr_intr_valid:1;
2837 /* 34 bits */ 2966 uint32_t fawwpn_enabled:1;
2967 /* 35 bits */
2838 } flags; 2968 } flags;
2839 2969
2840 /* This spinlock is used to protect "io transactions", you must 2970 /* This spinlock is used to protect "io transactions", you must
@@ -3032,6 +3162,7 @@ struct qla_hw_data {
3032#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha)) 3162#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha))
3033#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length) 3163#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length)
3034#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha)) 3164#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha))
3165#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3035 3166
3036 /* HBA serial number */ 3167 /* HBA serial number */
3037 uint8_t serial0; 3168 uint8_t serial0;
@@ -3333,6 +3464,7 @@ struct qla_hw_data {
3333 struct work_struct board_disable; 3464 struct work_struct board_disable;
3334 3465
3335 struct mr_data_fx00 mr; 3466 struct mr_data_fx00 mr;
3467 uint32_t chip_reset;
3336 3468
3337 struct qlt_hw_data tgt; 3469 struct qlt_hw_data tgt;
3338 int allow_cna_fw_dump; 3470 int allow_cna_fw_dump;
@@ -3402,6 +3534,11 @@ typedef struct scsi_qla_host {
3402#define FX00_CRITEMP_RECOVERY 25 3534#define FX00_CRITEMP_RECOVERY 25
3403#define FX00_HOST_INFO_RESEND 26 3535#define FX00_HOST_INFO_RESEND 26
3404 3536
3537 unsigned long pci_flags;
3538#define PFLG_DISCONNECTED 0 /* PCI device removed */
3539#define PFLG_DRIVER_REMOVING 1 /* PCI driver .remove */
3540#define PFLG_DRIVER_PROBING 2 /* PCI driver .probe */
3541
3405 uint32_t device_flags; 3542 uint32_t device_flags;
3406#define SWITCH_FOUND BIT_0 3543#define SWITCH_FOUND BIT_0
3407#define DFLG_NO_CABLE BIT_1 3544#define DFLG_NO_CABLE BIT_1
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index eb8f57249f1d..42bb357bf56b 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -91,7 +91,7 @@ struct nvram_24xx {
91 /* Firmware Initialization Control Block. */ 91 /* Firmware Initialization Control Block. */
92 uint16_t version; 92 uint16_t version;
93 uint16_t reserved_1; 93 uint16_t reserved_1;
94 uint16_t frame_payload_size; 94 __le16 frame_payload_size;
95 uint16_t execution_throttle; 95 uint16_t execution_throttle;
96 uint16_t exchange_count; 96 uint16_t exchange_count;
97 uint16_t hard_address; 97 uint16_t hard_address;
@@ -317,8 +317,8 @@ struct init_cb_24xx {
317 * BIT 3 = Reserved 317 * BIT 3 = Reserved
318 * BIT 4 = Enable Target Mode 318 * BIT 4 = Enable Target Mode
319 * BIT 5 = Disable Initiator Mode 319 * BIT 5 = Disable Initiator Mode
320 * BIT 6 = Reserved 320 * BIT 6 = Acquire FA-WWN
321 * BIT 7 = Reserved 321 * BIT 7 = Enable D-port Diagnostics
322 * 322 *
323 * BIT 8 = Reserved 323 * BIT 8 = Reserved
324 * BIT 9 = Non Participating LIP 324 * BIT 9 = Non Participating LIP
@@ -567,7 +567,7 @@ struct sts_entry_24xx {
567#define SF_TRANSFERRED_DATA BIT_11 567#define SF_TRANSFERRED_DATA BIT_11
568#define SF_FCP_RSP_DMA BIT_0 568#define SF_FCP_RSP_DMA BIT_0
569 569
570 uint16_t reserved_2; 570 uint16_t retry_delay;
571 uint16_t scsi_status; /* SCSI status. */ 571 uint16_t scsi_status; /* SCSI status. */
572#define SS_CONFIRMATION_REQ BIT_12 572#define SS_CONFIRMATION_REQ BIT_12
573 573
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index d646540db3ac..b1865a72ce59 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -72,6 +72,7 @@ extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
72extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *, 72extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *,
73 uint16_t *); 73 uint16_t *);
74extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *); 74extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
75extern void *qla2x00_alloc_iocbs_ready(struct scsi_qla_host *, srb_t *);
75extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *); 76extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
76 77
77extern fc_port_t * 78extern fc_port_t *
@@ -475,7 +476,8 @@ extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
475extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, 476extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
476 uint32_t); 477 uint32_t);
477extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t); 478extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t);
478bool qla2x00_check_reg_for_disconnect(scsi_qla_host_t *, uint32_t); 479bool qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *, uint32_t);
480bool qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *, uint16_t);
479 481
480extern int qla2x00_beacon_on(struct scsi_qla_host *); 482extern int qla2x00_beacon_on(struct scsi_qla_host *);
481extern int qla2x00_beacon_off(struct scsi_qla_host *); 483extern int qla2x00_beacon_off(struct scsi_qla_host *);
@@ -561,7 +563,7 @@ extern void *qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *, uint32_t, uint32_t);
561extern int qla2x00_fdmi_register(scsi_qla_host_t *); 563extern int qla2x00_fdmi_register(scsi_qla_host_t *);
562extern int qla2x00_gfpn_id(scsi_qla_host_t *, sw_info_t *); 564extern int qla2x00_gfpn_id(scsi_qla_host_t *, sw_info_t *);
563extern int qla2x00_gpsc(scsi_qla_host_t *, sw_info_t *); 565extern int qla2x00_gpsc(scsi_qla_host_t *, sw_info_t *);
564extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *); 566extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
565 567
566/* 568/*
567 * Global Function Prototypes in qla_attr.c source file. 569 * Global Function Prototypes in qla_attr.c source file.
@@ -613,7 +615,7 @@ extern void qlafx00_soft_reset(scsi_qla_host_t *);
613extern int qlafx00_chip_diag(scsi_qla_host_t *); 615extern int qlafx00_chip_diag(scsi_qla_host_t *);
614extern void qlafx00_config_rings(struct scsi_qla_host *); 616extern void qlafx00_config_rings(struct scsi_qla_host *);
615extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *); 617extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *);
616extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *); 618extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *, size_t);
617extern irqreturn_t qlafx00_intr_handler(int, void *); 619extern irqreturn_t qlafx00_intr_handler(int, void *);
618extern void qlafx00_enable_intrs(struct qla_hw_data *); 620extern void qlafx00_enable_intrs(struct qla_hw_data *);
619extern void qlafx00_disable_intrs(struct qla_hw_data *); 621extern void qlafx00_disable_intrs(struct qla_hw_data *);
@@ -765,4 +767,5 @@ extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t);
765extern int qla8044_abort_isp(scsi_qla_host_t *); 767extern int qla8044_abort_isp(scsi_qla_host_t *);
766extern int qla8044_check_fw_alive(struct scsi_qla_host *); 768extern int qla8044_check_fw_alive(struct scsi_qla_host *);
767 769
770extern void qlt_host_reset_handler(struct qla_hw_data *ha);
768#endif /* _QLA_GBL_H */ 771#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index a0df3b1b3823..dccc4dcc39c8 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -6,6 +6,7 @@
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8#include "qla_target.h" 8#include "qla_target.h"
9#include <linux/utsname.h>
9 10
10static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *); 11static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
11static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *); 12static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
@@ -143,10 +144,10 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
143 if (ct_rsp->header.response != 144 if (ct_rsp->header.response !=
144 __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) { 145 __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
145 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077, 146 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
146 "%s failed rejected request on port_id: " 147 "%s failed rejected request on port_id: %02x%02x%02x Compeltion status 0x%x, response 0x%x\n",
147 "%02x%02x%02x.\n", routine, 148 routine, vha->d_id.b.domain,
148 vha->d_id.b.domain, vha->d_id.b.area, 149 vha->d_id.b.area, vha->d_id.b.al_pa,
149 vha->d_id.b.al_pa); 150 comp_status, ct_rsp->header.response);
150 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 151 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
151 0x2078, (uint8_t *)&ct_rsp->header, 152 0x2078, (uint8_t *)&ct_rsp->header,
152 sizeof(struct ct_rsp_hdr)); 153 sizeof(struct ct_rsp_hdr));
@@ -622,15 +623,16 @@ qla2x00_rnn_id(scsi_qla_host_t *vha)
622} 623}
623 624
624void 625void
625qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn) 626qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
626{ 627{
627 struct qla_hw_data *ha = vha->hw; 628 struct qla_hw_data *ha = vha->hw;
628 629
629 if (IS_QLAFX00(ha)) 630 if (IS_QLAFX00(ha))
630 sprintf(snn, "%s FW:v%s DVR:v%s", ha->model_number, 631 snprintf(snn, size, "%s FW:v%s DVR:v%s", ha->model_number,
631 ha->mr.fw_version, qla2x00_version_str); 632 ha->mr.fw_version, qla2x00_version_str);
632 else 633 else
633 sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number, 634 snprintf(snn, size,
635 "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
634 ha->fw_major_version, ha->fw_minor_version, 636 ha->fw_major_version, ha->fw_minor_version,
635 ha->fw_subminor_version, qla2x00_version_str); 637 ha->fw_subminor_version, qla2x00_version_str);
636} 638}
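With the size parameter threaded through, qla2x00_get_sym_node_name() can no longer write past the caller's buffer; snprintf() truncates and NUL-terminates instead of overrunning as sprintf() could. A call site in the shape of the qla_attr.c hunk (buffer size illustrative):

	char snn[128];

	qla2x00_get_sym_node_name(vha, snn, sizeof(snn));
	/* worst case the symbolic name is truncated, never an overrun */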
@@ -670,7 +672,8 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
670 memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE); 672 memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
671 673
672 /* Prepare the Symbolic Node Name */ 674 /* Prepare the Symbolic Node Name */
673 qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name); 675 qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
676 sizeof(ct_req->req.rsnn_nn.sym_node_name));
674 677
675 /* Calculate SNN length */ 678 /* Calculate SNN length */
676 ct_req->req.rsnn_nn.name_len = 679 ct_req->req.rsnn_nn.name_len =
@@ -1263,7 +1266,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1263 ms_iocb_entry_t *ms_pkt; 1266 ms_iocb_entry_t *ms_pkt;
1264 struct ct_sns_req *ct_req; 1267 struct ct_sns_req *ct_req;
1265 struct ct_sns_rsp *ct_rsp; 1268 struct ct_sns_rsp *ct_rsp;
1266 uint8_t *entries; 1269 void *entries;
1267 struct ct_fdmi_hba_attr *eiter; 1270 struct ct_fdmi_hba_attr *eiter;
1268 struct qla_hw_data *ha = vha->hw; 1271 struct qla_hw_data *ha = vha->hw;
1269 1272
@@ -1288,7 +1291,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1288 entries = ct_req->req.rhba.hba_identifier; 1291 entries = ct_req->req.rhba.hba_identifier;
1289 1292
1290 /* Nodename. */ 1293 /* Nodename. */
1291 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1294 eiter = entries + size;
1292 eiter->type = __constant_cpu_to_be16(FDMI_HBA_NODE_NAME); 1295 eiter->type = __constant_cpu_to_be16(FDMI_HBA_NODE_NAME);
1293 eiter->len = __constant_cpu_to_be16(4 + WWN_SIZE); 1296 eiter->len = __constant_cpu_to_be16(4 + WWN_SIZE);
1294 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE); 1297 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
@@ -1298,11 +1301,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1298 "NodeName = %8phN.\n", eiter->a.node_name); 1301 "NodeName = %8phN.\n", eiter->a.node_name);
1299 1302
1300 /* Manufacturer. */ 1303 /* Manufacturer. */
1301 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1304 eiter = entries + size;
1302 eiter->type = __constant_cpu_to_be16(FDMI_HBA_MANUFACTURER); 1305 eiter->type = __constant_cpu_to_be16(FDMI_HBA_MANUFACTURER);
1303 alen = strlen(QLA2XXX_MANUFACTURER); 1306 alen = strlen(QLA2XXX_MANUFACTURER);
1304 strncpy(eiter->a.manufacturer, QLA2XXX_MANUFACTURER, alen + 1); 1307 snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
1305 alen += (alen & 3) ? (4 - (alen & 3)) : 4; 1308 "%s", "QLogic Corporation");
1309 alen += 4 - (alen & 3);
1306 eiter->len = cpu_to_be16(4 + alen); 1310 eiter->len = cpu_to_be16(4 + alen);
1307 size += 4 + alen; 1311 size += 4 + alen;
1308 1312
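
The padding rewrite in this and the following attribute blocks is behavior-preserving: 'alen += 4 - (alen & 3)' rounds the value length up to the next 4-byte boundary and, exactly like the old ternary, still adds a full 4 bytes when alen is already a multiple of 4 (leaving room for the terminating NUL). A quick standalone check of the equivalence:

#include <assert.h>

int main(void)
{
	for (int alen = 0; alen < 64; alen++) {
		int old = alen + ((alen & 3) ? (4 - (alen & 3)) : 4);
		int new = alen + (4 - (alen & 3));

		assert(old == new); /* identical for every residue mod 4 */
	}
	return 0;
}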
@@ -1310,12 +1314,19 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1310 "Manufacturer = %s.\n", eiter->a.manufacturer); 1314 "Manufacturer = %s.\n", eiter->a.manufacturer);
1311 1315
1312 /* Serial number. */ 1316 /* Serial number. */
1313 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1317 eiter = entries + size;
1314 eiter->type = __constant_cpu_to_be16(FDMI_HBA_SERIAL_NUMBER); 1318 eiter->type = __constant_cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
1315 sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1; 1319 if (IS_FWI2_CAPABLE(ha))
1316 sprintf(eiter->a.serial_num, "%c%05d", 'A' + sn / 100000, sn % 100000); 1320 qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
1321 sizeof(eiter->a.serial_num));
1322 else {
1323 sn = ((ha->serial0 & 0x1f) << 16) |
1324 (ha->serial2 << 8) | ha->serial1;
1325 snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
1326 "%c%05d", 'A' + sn / 100000, sn % 100000);
1327 }
1317 alen = strlen(eiter->a.serial_num); 1328 alen = strlen(eiter->a.serial_num);
1318 alen += (alen & 3) ? (4 - (alen & 3)) : 4; 1329 alen += 4 - (alen & 3);
1319 eiter->len = cpu_to_be16(4 + alen); 1330 eiter->len = cpu_to_be16(4 + alen);
1320 size += 4 + alen; 1331 size += 4 + alen;
1321 1332
@@ -1323,11 +1334,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1323 "Serial no. = %s.\n", eiter->a.serial_num); 1334 "Serial no. = %s.\n", eiter->a.serial_num);
1324 1335
1325 /* Model name. */ 1336 /* Model name. */
1326 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1337 eiter = entries + size;
1327 eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL); 1338 eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL);
1328 strcpy(eiter->a.model, ha->model_number); 1339 snprintf(eiter->a.model, sizeof(eiter->a.model),
1340 "%s", ha->model_number);
1329 alen = strlen(eiter->a.model); 1341 alen = strlen(eiter->a.model);
1330 alen += (alen & 3) ? (4 - (alen & 3)) : 4; 1342 alen += 4 - (alen & 3);
1331 eiter->len = cpu_to_be16(4 + alen); 1343 eiter->len = cpu_to_be16(4 + alen);
1332 size += 4 + alen; 1344 size += 4 + alen;
1333 1345
@@ -1335,11 +1347,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1335 "Model Name = %s.\n", eiter->a.model); 1347 "Model Name = %s.\n", eiter->a.model);
1336 1348
1337 /* Model description. */ 1349 /* Model description. */
1338 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1350 eiter = entries + size;
1339 eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION); 1351 eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
1340 strncpy(eiter->a.model_desc, ha->model_desc, 80); 1352 snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
1353 "%s", ha->model_desc);
1341 alen = strlen(eiter->a.model_desc); 1354 alen = strlen(eiter->a.model_desc);
1342 alen += (alen & 3) ? (4 - (alen & 3)) : 4; 1355 alen += 4 - (alen & 3);
1343 eiter->len = cpu_to_be16(4 + alen); 1356 eiter->len = cpu_to_be16(4 + alen);
1344 size += 4 + alen; 1357 size += 4 + alen;
1345 1358
@@ -1347,11 +1360,23 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1347 "Model Desc = %s.\n", eiter->a.model_desc); 1360 "Model Desc = %s.\n", eiter->a.model_desc);
1348 1361
1349 /* Hardware version. */ 1362 /* Hardware version. */
1350 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1363 eiter = entries + size;
1351 eiter->type = __constant_cpu_to_be16(FDMI_HBA_HARDWARE_VERSION); 1364 eiter->type = __constant_cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
1352 strcpy(eiter->a.hw_version, ha->adapter_id); 1365 if (!IS_FWI2_CAPABLE(ha)) {
1366 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
1367 "HW:%s", ha->adapter_id);
1368 } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
1369 sizeof(eiter->a.hw_version))) {
1370 ;
1371 } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
1372 sizeof(eiter->a.hw_version))) {
1373 ;
1374 } else {
1375 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
1376 "HW:%s", ha->adapter_id);
1377 }
1353 alen = strlen(eiter->a.hw_version); 1378 alen = strlen(eiter->a.hw_version);
1354 alen += (alen & 3) ? (4 - (alen & 3)) : 4; 1379 alen += 4 - (alen & 3);
1355 eiter->len = cpu_to_be16(4 + alen); 1380 eiter->len = cpu_to_be16(4 + alen);
1356 size += 4 + alen; 1381 size += 4 + alen;
1357 1382
@@ -1359,11 +1384,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1359 "Hardware ver = %s.\n", eiter->a.hw_version); 1384 "Hardware ver = %s.\n", eiter->a.hw_version);
1360 1385
1361 /* Driver version. */ 1386 /* Driver version. */
1362 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1387 eiter = entries + size;
1363 eiter->type = __constant_cpu_to_be16(FDMI_HBA_DRIVER_VERSION); 1388 eiter->type = __constant_cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
1364 strcpy(eiter->a.driver_version, qla2x00_version_str); 1389 snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
1390 "%s", qla2x00_version_str);
1365 alen = strlen(eiter->a.driver_version); 1391 alen = strlen(eiter->a.driver_version);
1366 alen += (alen & 3) ? (4 - (alen & 3)) : 4; 1392 alen += 4 - (alen & 3);
1367 eiter->len = cpu_to_be16(4 + alen); 1393 eiter->len = cpu_to_be16(4 + alen);
1368 size += 4 + alen; 1394 size += 4 + alen;
1369 1395
@@ -1371,11 +1397,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1371 "Driver ver = %s.\n", eiter->a.driver_version); 1397 "Driver ver = %s.\n", eiter->a.driver_version);
1372 1398
1373 /* Option ROM version. */ 1399 /* Option ROM version. */
1374 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1400 eiter = entries + size;
1375 eiter->type = __constant_cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION); 1401 eiter->type = __constant_cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
1376 strcpy(eiter->a.orom_version, "0.00"); 1402 snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
1403 "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
1377 alen = strlen(eiter->a.orom_version); 1404 alen = strlen(eiter->a.orom_version);
1378 alen += (alen & 3) ? (4 - (alen & 3)) : 4; 1405 alen += 4 - (alen & 3);
1379 eiter->len = cpu_to_be16(4 + alen); 1406 eiter->len = cpu_to_be16(4 + alen);
1380 size += 4 + alen; 1407 size += 4 + alen;
1381 1408
@@ -1383,11 +1410,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1383 "Optrom vers = %s.\n", eiter->a.orom_version); 1410 "Optrom vers = %s.\n", eiter->a.orom_version);
1384 1411
1385 /* Firmware version */ 1412 /* Firmware version */
1386 eiter = (struct ct_fdmi_hba_attr *) (entries + size); 1413 eiter = entries + size;
1387 eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION); 1414 eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
1388 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version); 1415 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
1416 sizeof(eiter->a.fw_version));
1389 alen = strlen(eiter->a.fw_version); 1417 alen = strlen(eiter->a.fw_version);
1390 alen += (alen & 3) ? (4 - (alen & 3)) : 4; 1418 alen += 4 - (alen & 3);
1391 eiter->len = cpu_to_be16(4 + alen); 1419 eiter->len = cpu_to_be16(4 + alen);
1392 size += 4 + alen; 1420 size += 4 + alen;
1393 1421
@@ -1419,6 +1447,11 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1419 ql_dbg(ql_dbg_disc, vha, 0x2034, 1447 ql_dbg(ql_dbg_disc, vha, 0x2034,
1420 "HBA already registered.\n"); 1448 "HBA already registered.\n");
1421 rval = QLA_ALREADY_REGISTERED; 1449 rval = QLA_ALREADY_REGISTERED;
1450 } else {
1451 ql_dbg(ql_dbg_disc, vha, 0x20ad,
1452 "RHBA FDMI registration failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
1453 ct_rsp->header.reason_code,
1454 ct_rsp->header.explanation_code);
1422 } 1455 }
1423 } else { 1456 } else {
1424 ql_dbg(ql_dbg_disc, vha, 0x2035, 1457 ql_dbg(ql_dbg_disc, vha, 0x2035,
@@ -1429,6 +1462,534 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1429} 1462}
1430 1463
1431/** 1464/**
1465 * qla2x00_fdmi_rpa() -
1466 * @ha: HA context
1467 *
1468 * Returns 0 on success.
1469 */
1470static int
1471qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1472{
1473 int rval, alen;
1474 uint32_t size;
1475 struct qla_hw_data *ha = vha->hw;
1476 ms_iocb_entry_t *ms_pkt;
1477 struct ct_sns_req *ct_req;
1478 struct ct_sns_rsp *ct_rsp;
1479 void *entries;
1480 struct ct_fdmi_port_attr *eiter;
1481 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
1482 struct new_utsname *p_sysid = NULL;
1483
1484 /* Issue RPA */
1485 /* Prepare common MS IOCB */
1486 /* Request size adjusted after CT preparation */
1487 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
1488
1489 /* Prepare CT request */
1490 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD,
1491 RPA_RSP_SIZE);
1492 ct_rsp = &ha->ct_sns->p.rsp;
1493
1494 /* Prepare FDMI command arguments -- attribute block, attributes. */
1495 memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
1496 size = WWN_SIZE + 4;
1497
1498 /* Attributes */
1499 ct_req->req.rpa.attrs.count = cpu_to_be32(FDMI_PORT_ATTR_COUNT);
1500 entries = ct_req->req.rpa.port_name;
1501
1502 /* FC4 types. */
1503 eiter = entries + size;
1504 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
1505 eiter->len = cpu_to_be16(4 + 32);
1506 eiter->a.fc4_types[2] = 0x01;
1507 size += 4 + 32;
1508
1509 ql_dbg(ql_dbg_disc, vha, 0x2039,
1510 "FC4_TYPES=%02x %02x.\n",
1511 eiter->a.fc4_types[2],
1512 eiter->a.fc4_types[1]);
1513
1514 /* Supported speed. */
1515 eiter = entries + size;
1516 eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
1517 eiter->len = cpu_to_be16(4 + 4);
1518 if (IS_CNA_CAPABLE(ha))
1519 eiter->a.sup_speed = cpu_to_be32(
1520 FDMI_PORT_SPEED_10GB);
1521 else if (IS_QLA27XX(ha))
1522 eiter->a.sup_speed = cpu_to_be32(
1523 FDMI_PORT_SPEED_32GB|
1524 FDMI_PORT_SPEED_16GB|
1525 FDMI_PORT_SPEED_8GB);
1526 else if (IS_QLA2031(ha))
1527 eiter->a.sup_speed = cpu_to_be32(
1528 FDMI_PORT_SPEED_16GB|
1529 FDMI_PORT_SPEED_8GB|
1530 FDMI_PORT_SPEED_4GB);
1531 else if (IS_QLA25XX(ha))
1532 eiter->a.sup_speed = cpu_to_be32(
1533 FDMI_PORT_SPEED_8GB|
1534 FDMI_PORT_SPEED_4GB|
1535 FDMI_PORT_SPEED_2GB|
1536 FDMI_PORT_SPEED_1GB);
1537 else if (IS_QLA24XX_TYPE(ha))
1538 eiter->a.sup_speed = cpu_to_be32(
1539 FDMI_PORT_SPEED_4GB|
1540 FDMI_PORT_SPEED_2GB|
1541 FDMI_PORT_SPEED_1GB);
1542 else if (IS_QLA23XX(ha))
1543 eiter->a.sup_speed = cpu_to_be32(
1544 FDMI_PORT_SPEED_2GB|
1545 FDMI_PORT_SPEED_1GB);
1546 else
1547 eiter->a.sup_speed = cpu_to_be32(
1548 FDMI_PORT_SPEED_1GB);
1549 size += 4 + 4;
1550
1551 ql_dbg(ql_dbg_disc, vha, 0x203a,
1552 "Supported_Speed=%x.\n", eiter->a.sup_speed);
1553
1554 /* Current speed. */
1555 eiter = entries + size;
1556 eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
1557 eiter->len = cpu_to_be16(4 + 4);
1558 switch (ha->link_data_rate) {
1559 case PORT_SPEED_1GB:
1560 eiter->a.cur_speed =
1561 cpu_to_be32(FDMI_PORT_SPEED_1GB);
1562 break;
1563 case PORT_SPEED_2GB:
1564 eiter->a.cur_speed =
1565 cpu_to_be32(FDMI_PORT_SPEED_2GB);
1566 break;
1567 case PORT_SPEED_4GB:
1568 eiter->a.cur_speed =
1569 cpu_to_be32(FDMI_PORT_SPEED_4GB);
1570 break;
1571 case PORT_SPEED_8GB:
1572 eiter->a.cur_speed =
1573 cpu_to_be32(FDMI_PORT_SPEED_8GB);
1574 break;
1575 case PORT_SPEED_10GB:
1576 eiter->a.cur_speed =
1577 cpu_to_be32(FDMI_PORT_SPEED_10GB);
1578 break;
1579 case PORT_SPEED_16GB:
1580 eiter->a.cur_speed =
1581 cpu_to_be32(FDMI_PORT_SPEED_16GB);
1582 break;
1583 case PORT_SPEED_32GB:
1584 eiter->a.cur_speed =
1585 cpu_to_be32(FDMI_PORT_SPEED_32GB);
1586 break;
1587 default:
1588 eiter->a.cur_speed =
1589 cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
1590 break;
1591 }
1592 size += 4 + 4;
1593
1594 ql_dbg(ql_dbg_disc, vha, 0x203b,
1595 "Current_Speed=%x.\n", eiter->a.cur_speed);
1596
1597 /* Max frame size. */
1598 eiter = entries + size;
1599 eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
1600 eiter->len = cpu_to_be16(4 + 4);
1601 eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
1602 le16_to_cpu(icb24->frame_payload_size) :
1603 le16_to_cpu(ha->init_cb->frame_payload_size);
1604 eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
1605 size += 4 + 4;
1606
1607 ql_dbg(ql_dbg_disc, vha, 0x203c,
1608 "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
1609
1610 /* OS device name. */
1611 eiter = entries + size;
1612 eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
1613 snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
1614 "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
1615 alen = strlen(eiter->a.os_dev_name);
1616 alen += 4 - (alen & 3);
1617 eiter->len = cpu_to_be16(4 + alen);
1618 size += 4 + alen;
1619
1620 ql_dbg(ql_dbg_disc, vha, 0x204b,
1621 "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
1622
1623 /* Hostname. */
1624 eiter = entries + size;
1625 eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
1626 p_sysid = utsname();
1627 if (p_sysid) {
1628 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
1629 "%s", p_sysid->nodename);
1630 } else {
1631 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
1632 "%s", fc_host_system_hostname(vha->host));
1633 }
1634 alen = strlen(eiter->a.host_name);
1635 alen += 4 - (alen & 3);
1636 eiter->len = cpu_to_be16(4 + alen);
1637 size += 4 + alen;
1638
1639 ql_dbg(ql_dbg_disc, vha, 0x203d, "HostName=%s.\n", eiter->a.host_name);
1640
1641 /* Update MS request size. */
1642 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1643
1644 ql_dbg(ql_dbg_disc, vha, 0x203e,
1645 "RPA portname %016llx, size = %d.\n",
1646 wwn_to_u64(ct_req->req.rpa.port_name), size);
1647 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
1648 entries, size);
1649
1650 /* Execute MS IOCB */
1651 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1652 sizeof(ms_iocb_entry_t));
1653 if (rval != QLA_SUCCESS) {
1654 /*EMPTY*/
1655 ql_dbg(ql_dbg_disc, vha, 0x2040,
1656 "RPA issue IOCB failed (%d).\n", rval);
1657 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
1658 QLA_SUCCESS) {
1659 rval = QLA_FUNCTION_FAILED;
1660 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1661 ct_rsp->header.explanation_code ==
1662 CT_EXPL_ALREADY_REGISTERED) {
1663 ql_dbg(ql_dbg_disc, vha, 0x20cd,
1664 "RPA already registered.\n");
1665 rval = QLA_ALREADY_REGISTERED;
1666 }
1667
1668 } else {
1669 ql_dbg(ql_dbg_disc, vha, 0x2041,
1670 "RPA exiting normally.\n");
1671 }
1672
1673 return rval;
1674}
1675
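
Note the two-step conversion for the max-frame-size attribute above: the init-control-block field is little-endian while the CT payload is big-endian, so the value is first brought into CPU order and then stored back big-endian. A userspace sketch of that round trip, with htonl() standing in for the kernel's cpu_to_be32():

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h> /* htonl(): CPU order -> big-endian */

/* Decode a little-endian 16-bit field, then re-encode it as the
 * 32-bit big-endian quantity the FDMI attribute carries. */
static uint32_t le16_field_to_be32(const uint8_t le[2])
{
	uint32_t cpu = (uint32_t)le[0] | ((uint32_t)le[1] << 8);

	return htonl(cpu);
}

int main(void)
{
	uint8_t fps[2] = { 0x00, 0x08 }; /* 2048, little-endian */

	printf("0x%08x\n", le16_field_to_be32(fps));
	return 0;
}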
1676/**
1677 * qla2x00_fdmiv2_rhba() -
1678 * @ha: HA context
1679 *
1680 * Returns 0 on success.
1681 */
1682static int
1683qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
1684{
1685 int rval, alen;
1686 uint32_t size, sn;
1687 ms_iocb_entry_t *ms_pkt;
1688 struct ct_sns_req *ct_req;
1689 struct ct_sns_rsp *ct_rsp;
1690 void *entries;
1691 struct ct_fdmiv2_hba_attr *eiter;
1692 struct qla_hw_data *ha = vha->hw;
1693 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
1694 struct new_utsname *p_sysid = NULL;
1695
1696 /* Issue RHBA */
1697 /* Prepare common MS IOCB */
1698 /* Request size adjusted after CT preparation */
1699 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
1700
1701 /* Prepare CT request */
1702 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD,
1703 RHBA_RSP_SIZE);
1704 ct_rsp = &ha->ct_sns->p.rsp;
1705
1706 /* Prepare FDMI command arguments -- attribute block, attributes. */
1707 memcpy(ct_req->req.rhba2.hba_identifier, vha->port_name, WWN_SIZE);
1708 ct_req->req.rhba2.entry_count = cpu_to_be32(1);
1709 memcpy(ct_req->req.rhba2.port_name, vha->port_name, WWN_SIZE);
1710 size = 2 * WWN_SIZE + 4 + 4;
1711
1712 /* Attributes */
1713 ct_req->req.rhba2.attrs.count = cpu_to_be32(FDMIV2_HBA_ATTR_COUNT);
1714 entries = ct_req->req.rhba2.hba_identifier;
1715
1716 /* Nodename. */
1717 eiter = entries + size;
1718 eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
1719 eiter->len = cpu_to_be16(4 + WWN_SIZE);
1720 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1721 size += 4 + WWN_SIZE;
1722
1723 ql_dbg(ql_dbg_disc, vha, 0x207d,
1724 "NodeName = %016llx.\n", wwn_to_u64(eiter->a.node_name));
1725
1726 /* Manufacturer. */
1727 eiter = entries + size;
1728 eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
1729 snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
1730 "%s", "QLogic Corporation");
1731 eiter->a.manufacturer[strlen("QLogic Corporation")] = '\0';
1732 alen = strlen(eiter->a.manufacturer);
1733 alen += 4 - (alen & 3);
1734 eiter->len = cpu_to_be16(4 + alen);
1735 size += 4 + alen;
1736
1737 ql_dbg(ql_dbg_disc, vha, 0x20a5,
1738 "Manufacturer = %s.\n", eiter->a.manufacturer);
1739
1740 /* Serial number. */
1741 eiter = entries + size;
1742 eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
1743 if (IS_FWI2_CAPABLE(ha))
1744 qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
1745 sizeof(eiter->a.serial_num));
1746 else {
1747 sn = ((ha->serial0 & 0x1f) << 16) |
1748 (ha->serial2 << 8) | ha->serial1;
1749 snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
1750 "%c%05d", 'A' + sn / 100000, sn % 100000);
1751 }
1752 alen = strlen(eiter->a.serial_num);
1753 alen += 4 - (alen & 3);
1754 eiter->len = cpu_to_be16(4 + alen);
1755 size += 4 + alen;
1756
1757 ql_dbg(ql_dbg_disc, vha, 0x20a6,
1758 "Serial no. = %s.\n", eiter->a.serial_num);
1759
1760 /* Model name. */
1761 eiter = entries + size;
1762 eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
1763 snprintf(eiter->a.model, sizeof(eiter->a.model),
1764 "%s", ha->model_number);
1765 alen = strlen(eiter->a.model);
1766 alen += 4 - (alen & 3);
1767 eiter->len = cpu_to_be16(4 + alen);
1768 size += 4 + alen;
1769
1770 ql_dbg(ql_dbg_disc, vha, 0x20a7,
1771 "Model Name = %s.\n", eiter->a.model);
1772
1773 /* Model description. */
1774 eiter = entries + size;
1775 eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
1776 snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
1777 "%s", ha->model_desc);
1778 alen = strlen(eiter->a.model_desc);
1779 alen += 4 - (alen & 3);
1780 eiter->len = cpu_to_be16(4 + alen);
1781 size += 4 + alen;
1782
1783 ql_dbg(ql_dbg_disc, vha, 0x20a8,
1784 "Model Desc = %s.\n", eiter->a.model_desc);
1785
1786 /* Hardware version. */
1787 eiter = entries + size;
1788 eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
1789 if (!IS_FWI2_CAPABLE(ha)) {
1790 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
1791 "HW:%s", ha->adapter_id);
1792 } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
1793 sizeof(eiter->a.hw_version))) {
1794 ;
1795 } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
1796 sizeof(eiter->a.hw_version))) {
1797 ;
1798 } else {
1799 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
1800 "HW:%s", ha->adapter_id);
1801 }
1802 alen = strlen(eiter->a.hw_version);
1803 alen += 4 - (alen & 3);
1804 eiter->len = cpu_to_be16(4 + alen);
1805 size += 4 + alen;
1806
1807 ql_dbg(ql_dbg_disc, vha, 0x20a9,
1808 "Hardware ver = %s.\n", eiter->a.hw_version);
1809
1810 /* Driver version. */
1811 eiter = entries + size;
1812 eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
1813 snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
1814 "%s", qla2x00_version_str);
1815 alen = strlen(eiter->a.driver_version);
1816 alen += 4 - (alen & 3);
1817 eiter->len = cpu_to_be16(4 + alen);
1818 size += 4 + alen;
1819
1820 ql_dbg(ql_dbg_disc, vha, 0x20aa,
1821 "Driver ver = %s.\n", eiter->a.driver_version);
1822
1823 /* Option ROM version. */
1824 eiter = entries + size;
1825 eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
1826 snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
1827 "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
1828 alen = strlen(eiter->a.orom_version);
1829 alen += 4 - (alen & 3);
1830 eiter->len = cpu_to_be16(4 + alen);
1831 size += 4 + alen;
1832
 1833 ql_dbg(ql_dbg_disc, vha, 0x20ab,
 1834 "Optrom version = %s.\n",
 1835 eiter->a.orom_version);
1836
1837 /* Firmware version */
1838 eiter = entries + size;
1839 eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
1840 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
1841 sizeof(eiter->a.fw_version));
1842 alen = strlen(eiter->a.fw_version);
1843 alen += 4 - (alen & 3);
1844 eiter->len = cpu_to_be16(4 + alen);
1845 size += 4 + alen;
1846
1847 ql_dbg(ql_dbg_disc, vha, 0x20ac,
1848 "Firmware vers = %s.\n", eiter->a.fw_version);
1849
1850 /* OS Name and Version */
1851 eiter = entries + size;
1852 eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
1853 p_sysid = utsname();
1854 if (p_sysid) {
1855 snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
1856 "%s %s %s",
1857 p_sysid->sysname, p_sysid->release, p_sysid->version);
1858 } else {
1859 snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
1860 "%s %s", "Linux", fc_host_system_hostname(vha->host));
1861 }
1862 alen = strlen(eiter->a.os_version);
1863 alen += 4 - (alen & 3);
1864 eiter->len = cpu_to_be16(4 + alen);
1865 size += 4 + alen;
1866
1867 ql_dbg(ql_dbg_disc, vha, 0x20ae,
1868 "OS Name and Version = %s.\n", eiter->a.os_version);
1869
1870 /* MAX CT Payload Length */
1871 eiter = entries + size;
1872 eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
1873 eiter->a.max_ct_len = IS_FWI2_CAPABLE(ha) ?
1874 le16_to_cpu(icb24->frame_payload_size) :
1875 le16_to_cpu(ha->init_cb->frame_payload_size);
1876 eiter->a.max_ct_len = cpu_to_be32(eiter->a.max_ct_len);
1877 eiter->len = cpu_to_be16(4 + 4);
1878 size += 4 + 4;
1879
1880 ql_dbg(ql_dbg_disc, vha, 0x20af,
1881 "CT Payload Length = 0x%x.\n", eiter->a.max_ct_len);
1882
 1883 /* Node Symbolic Name */
1884 eiter = entries + size;
1885 eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
1886 qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
1887 sizeof(eiter->a.sym_name));
1888 alen = strlen(eiter->a.sym_name);
1889 alen += 4 - (alen & 3);
1890 eiter->len = cpu_to_be16(4 + alen);
1891 size += 4 + alen;
1892
1893 ql_dbg(ql_dbg_disc, vha, 0x20b0,
1894 "Symbolic Name = %s.\n", eiter->a.sym_name);
1895
1896 /* Vendor Id */
1897 eiter = entries + size;
1898 eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_ID);
1899 eiter->a.vendor_id = cpu_to_be32(0x1077);
1900 eiter->len = cpu_to_be16(4 + 4);
1901 size += 4 + 4;
1902
1903 ql_dbg(ql_dbg_disc, vha, 0x20b1,
1904 "Vendor Id = %x.\n", eiter->a.vendor_id);
1905
1906 /* Num Ports */
1907 eiter = entries + size;
1908 eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
1909 eiter->a.num_ports = cpu_to_be32(1);
1910 eiter->len = cpu_to_be16(4 + 4);
1911 size += 4 + 4;
1912
1913 ql_dbg(ql_dbg_disc, vha, 0x20b2,
1914 "Port Num = %x.\n", eiter->a.num_ports);
1915
1916 /* Fabric Name */
1917 eiter = entries + size;
1918 eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
1919 memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
1920 eiter->len = cpu_to_be16(4 + WWN_SIZE);
1921 size += 4 + WWN_SIZE;
1922
1923 ql_dbg(ql_dbg_disc, vha, 0x20b3,
1924 "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
1925
1926 /* BIOS Version */
1927 eiter = entries + size;
1928 eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
1929 snprintf(eiter->a.bios_name, sizeof(eiter->a.bios_name),
1930 "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
1931 alen = strlen(eiter->a.bios_name);
1932 alen += 4 - (alen & 3);
1933 eiter->len = cpu_to_be16(4 + alen);
1934 size += 4 + alen;
1935
1936 ql_dbg(ql_dbg_disc, vha, 0x20b4,
1937 "BIOS Name = %s\n", eiter->a.bios_name);
1938
1939 /* Vendor Identifier */
1940 eiter = entries + size;
1941 eiter->type = cpu_to_be16(FDMI_HBA_TYPE_VENDOR_IDENTIFIER);
1942 snprintf(eiter->a.vendor_indentifer, sizeof(eiter->a.vendor_indentifer),
1943 "%s", "QLGC");
1944 alen = strlen(eiter->a.vendor_indentifer);
1945 alen += 4 - (alen & 3);
1946 eiter->len = cpu_to_be16(4 + alen);
1947 size += 4 + alen;
1948
1949 ql_dbg(ql_dbg_disc, vha, 0x20b1,
1950 "Vendor Identifier = %s.\n", eiter->a.vendor_indentifer);
1951
1952 /* Update MS request size. */
1953 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1954
1955 ql_dbg(ql_dbg_disc, vha, 0x20b5,
1956 "RHBA identifier = %016llx.\n",
1957 wwn_to_u64(ct_req->req.rhba2.hba_identifier));
1958 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20b6,
1959 entries, size);
1960
1961 /* Execute MS IOCB */
1962 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1963 sizeof(ms_iocb_entry_t));
1964 if (rval != QLA_SUCCESS) {
1965 /*EMPTY*/
1966 ql_dbg(ql_dbg_disc, vha, 0x20b7,
1967 "RHBA issue IOCB failed (%d).\n", rval);
1968 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
1969 QLA_SUCCESS) {
1970 rval = QLA_FUNCTION_FAILED;
1971
1972 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1973 ct_rsp->header.explanation_code ==
1974 CT_EXPL_ALREADY_REGISTERED) {
1975 ql_dbg(ql_dbg_disc, vha, 0x20b8,
1976 "HBA already registered.\n");
1977 rval = QLA_ALREADY_REGISTERED;
1978 } else {
1979 ql_dbg(ql_dbg_disc, vha, 0x2016,
1980 "RHBA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
1981 ct_rsp->header.reason_code,
1982 ct_rsp->header.explanation_code);
1983 }
1984 } else {
1985 ql_dbg(ql_dbg_disc, vha, 0x20b9,
1986 "RHBA FDMI V2 exiting normally.\n");
1987 }
1988
1989 return rval;
1990}
1991
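
Both RHBA paths above end with the same response triage: a CT reject whose reason/explanation pair means "already registered" is benign and mapped to QLA_ALREADY_REGISTERED, anything else is a real failure. A sketch of that check (constant values restated from the FC-GS convention the driver follows; treat them as illustrative):

#include <stdbool.h>
#include <stdint.h>

#define CT_REASON_CANNOT_PERFORM   0x09
#define CT_EXPL_ALREADY_REGISTERED 0x10

/* Registration refused only because the object is already known
 * to the fabric -- callers treat this as success-with-a-note. */
static bool ct_already_registered(uint8_t reason, uint8_t expl)
{
	return reason == CT_REASON_CANNOT_PERFORM &&
	       expl == CT_EXPL_ALREADY_REGISTERED;
}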
1992/**
1432 * qla2x00_fdmi_dhba() - 1993 * qla2x00_fdmi_dhba() -
1433 * @ha: HA context 1994 * @ha: HA context
1434 * 1995 *
@@ -1477,23 +2038,24 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
1477} 2038}
1478 2039
1479/** 2040/**
1480 * qla2x00_fdmi_rpa() - 2041 * qla2x00_fdmiv2_rpa() -
1481 * @ha: HA context 2042 * @ha: HA context
1482 * 2043 *
1483 * Returns 0 on success. 2044 * Returns 0 on success.
1484 */ 2045 */
1485static int 2046static int
1486qla2x00_fdmi_rpa(scsi_qla_host_t *vha) 2047qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
1487{ 2048{
1488 int rval, alen; 2049 int rval, alen;
1489 uint32_t size, max_frame_size; 2050 uint32_t size;
1490 struct qla_hw_data *ha = vha->hw; 2051 struct qla_hw_data *ha = vha->hw;
1491 ms_iocb_entry_t *ms_pkt; 2052 ms_iocb_entry_t *ms_pkt;
1492 struct ct_sns_req *ct_req; 2053 struct ct_sns_req *ct_req;
1493 struct ct_sns_rsp *ct_rsp; 2054 struct ct_sns_rsp *ct_rsp;
1494 uint8_t *entries; 2055 void *entries;
1495 struct ct_fdmi_port_attr *eiter; 2056 struct ct_fdmiv2_port_attr *eiter;
1496 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb; 2057 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
2058 struct new_utsname *p_sysid = NULL;
1497 2059
1498 /* Issue RPA */ 2060 /* Issue RPA */
1499 /* Prepare common MS IOCB */ 2061 /* Prepare common MS IOCB */
@@ -1505,147 +2067,258 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1505 ct_rsp = &ha->ct_sns->p.rsp; 2067 ct_rsp = &ha->ct_sns->p.rsp;
1506 2068
1507 /* Prepare FDMI command arguments -- attribute block, attributes. */ 2069 /* Prepare FDMI command arguments -- attribute block, attributes. */
1508 memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE); 2070 memcpy(ct_req->req.rpa2.port_name, vha->port_name, WWN_SIZE);
1509 size = WWN_SIZE + 4; 2071 size = WWN_SIZE + 4;
1510 2072
1511 /* Attributes */ 2073 /* Attributes */
1512 ct_req->req.rpa.attrs.count = 2074 ct_req->req.rpa2.attrs.count = cpu_to_be32(FDMIV2_PORT_ATTR_COUNT);
1513 __constant_cpu_to_be32(FDMI_PORT_ATTR_COUNT - 1); 2075 entries = ct_req->req.rpa2.port_name;
1514 entries = ct_req->req.rpa.port_name;
1515 2076
1516 /* FC4 types. */ 2077 /* FC4 types. */
1517 eiter = (struct ct_fdmi_port_attr *) (entries + size); 2078 eiter = entries + size;
1518 eiter->type = __constant_cpu_to_be16(FDMI_PORT_FC4_TYPES); 2079 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
1519 eiter->len = __constant_cpu_to_be16(4 + 32); 2080 eiter->len = cpu_to_be16(4 + 32);
1520 eiter->a.fc4_types[2] = 0x01; 2081 eiter->a.fc4_types[2] = 0x01;
1521 size += 4 + 32; 2082 size += 4 + 32;
1522 2083
1523 ql_dbg(ql_dbg_disc, vha, 0x2039, 2084 ql_dbg(ql_dbg_disc, vha, 0x20ba,
1524 "FC4_TYPES=%02x %02x.\n", 2085 "FC4_TYPES=%02x %02x.\n",
1525 eiter->a.fc4_types[2], 2086 eiter->a.fc4_types[2],
1526 eiter->a.fc4_types[1]); 2087 eiter->a.fc4_types[1]);
1527 2088
1528 /* Supported speed. */ 2089 /* Supported speed. */
1529 eiter = (struct ct_fdmi_port_attr *) (entries + size); 2090 eiter = entries + size;
1530 eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED); 2091 eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
1531 eiter->len = __constant_cpu_to_be16(4 + 4); 2092 eiter->len = cpu_to_be16(4 + 4);
1532 if (IS_CNA_CAPABLE(ha)) 2093 if (IS_CNA_CAPABLE(ha))
1533 eiter->a.sup_speed = __constant_cpu_to_be32( 2094 eiter->a.sup_speed = cpu_to_be32(
1534 FDMI_PORT_SPEED_10GB); 2095 FDMI_PORT_SPEED_10GB);
1535 else if (IS_QLA27XX(ha)) 2096 else if (IS_QLA27XX(ha))
1536 eiter->a.sup_speed = __constant_cpu_to_be32( 2097 eiter->a.sup_speed = cpu_to_be32(
1537 FDMI_PORT_SPEED_32GB|FDMI_PORT_SPEED_16GB| 2098 FDMI_PORT_SPEED_32GB|
2099 FDMI_PORT_SPEED_16GB|
1538 FDMI_PORT_SPEED_8GB); 2100 FDMI_PORT_SPEED_8GB);
2101 else if (IS_QLA2031(ha))
2102 eiter->a.sup_speed = cpu_to_be32(
2103 FDMI_PORT_SPEED_16GB|
2104 FDMI_PORT_SPEED_8GB|
2105 FDMI_PORT_SPEED_4GB);
1539 else if (IS_QLA25XX(ha)) 2106 else if (IS_QLA25XX(ha))
1540 eiter->a.sup_speed = __constant_cpu_to_be32( 2107 eiter->a.sup_speed = cpu_to_be32(
1541 FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB| 2108 FDMI_PORT_SPEED_8GB|
1542 FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_8GB); 2109 FDMI_PORT_SPEED_4GB|
2110 FDMI_PORT_SPEED_2GB|
2111 FDMI_PORT_SPEED_1GB);
1543 else if (IS_QLA24XX_TYPE(ha)) 2112 else if (IS_QLA24XX_TYPE(ha))
1544 eiter->a.sup_speed = __constant_cpu_to_be32( 2113 eiter->a.sup_speed = cpu_to_be32(
1545 FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB| 2114 FDMI_PORT_SPEED_4GB|
1546 FDMI_PORT_SPEED_4GB); 2115 FDMI_PORT_SPEED_2GB|
2116 FDMI_PORT_SPEED_1GB);
1547 else if (IS_QLA23XX(ha)) 2117 else if (IS_QLA23XX(ha))
1548 eiter->a.sup_speed =__constant_cpu_to_be32( 2118 eiter->a.sup_speed = cpu_to_be32(
1549 FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB); 2119 FDMI_PORT_SPEED_2GB|
2120 FDMI_PORT_SPEED_1GB);
1550 else 2121 else
1551 eiter->a.sup_speed = __constant_cpu_to_be32( 2122 eiter->a.sup_speed = cpu_to_be32(
1552 FDMI_PORT_SPEED_1GB); 2123 FDMI_PORT_SPEED_1GB);
1553 size += 4 + 4; 2124 size += 4 + 4;
1554 2125
1555 ql_dbg(ql_dbg_disc, vha, 0x203a, 2126 ql_dbg(ql_dbg_disc, vha, 0x20bb,
1556 "Supported_Speed=%x.\n", eiter->a.sup_speed); 2127 "Supported Port Speed = %x.\n", eiter->a.sup_speed);
1557 2128
1558 /* Current speed. */ 2129 /* Current speed. */
1559 eiter = (struct ct_fdmi_port_attr *) (entries + size); 2130 eiter = entries + size;
1560 eiter->type = __constant_cpu_to_be16(FDMI_PORT_CURRENT_SPEED); 2131 eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
1561 eiter->len = __constant_cpu_to_be16(4 + 4); 2132 eiter->len = cpu_to_be16(4 + 4);
1562 switch (ha->link_data_rate) { 2133 switch (ha->link_data_rate) {
1563 case PORT_SPEED_1GB: 2134 case PORT_SPEED_1GB:
1564 eiter->a.cur_speed = 2135 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_1GB);
1565 __constant_cpu_to_be32(FDMI_PORT_SPEED_1GB);
1566 break; 2136 break;
1567 case PORT_SPEED_2GB: 2137 case PORT_SPEED_2GB:
1568 eiter->a.cur_speed = 2138 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_2GB);
1569 __constant_cpu_to_be32(FDMI_PORT_SPEED_2GB);
1570 break; 2139 break;
1571 case PORT_SPEED_4GB: 2140 case PORT_SPEED_4GB:
1572 eiter->a.cur_speed = 2141 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_4GB);
1573 __constant_cpu_to_be32(FDMI_PORT_SPEED_4GB);
1574 break; 2142 break;
1575 case PORT_SPEED_8GB: 2143 case PORT_SPEED_8GB:
1576 eiter->a.cur_speed = 2144 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_8GB);
1577 __constant_cpu_to_be32(FDMI_PORT_SPEED_8GB);
1578 break; 2145 break;
1579 case PORT_SPEED_10GB: 2146 case PORT_SPEED_10GB:
1580 eiter->a.cur_speed = 2147 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_10GB);
1581 __constant_cpu_to_be32(FDMI_PORT_SPEED_10GB);
1582 break; 2148 break;
1583 case PORT_SPEED_16GB: 2149 case PORT_SPEED_16GB:
1584 eiter->a.cur_speed = 2150 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_16GB);
1585 __constant_cpu_to_be32(FDMI_PORT_SPEED_16GB);
1586 break; 2151 break;
1587 case PORT_SPEED_32GB: 2152 case PORT_SPEED_32GB:
1588 eiter->a.cur_speed = 2153 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_32GB);
1589 __constant_cpu_to_be32(FDMI_PORT_SPEED_32GB);
1590 break; 2154 break;
1591 default: 2155 default:
1592 eiter->a.cur_speed = 2156 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
1593 __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
1594 break; 2157 break;
1595 } 2158 }
1596 size += 4 + 4; 2159 size += 4 + 4;
1597 2160
1598 ql_dbg(ql_dbg_disc, vha, 0x203b, 2161 ql_dbg(ql_dbg_disc, vha, 0x20bc,
1599 "Current_Speed=%x.\n", eiter->a.cur_speed); 2162 "Current_Speed = %x.\n", eiter->a.cur_speed);
1600 2163
1601 /* Max frame size. */ 2164 /* Max frame size. */
1602 eiter = (struct ct_fdmi_port_attr *) (entries + size); 2165 eiter = entries + size;
1603 eiter->type = __constant_cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE); 2166 eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
1604 eiter->len = __constant_cpu_to_be16(4 + 4); 2167 eiter->len = cpu_to_be16(4 + 4);
1605 max_frame_size = IS_FWI2_CAPABLE(ha) ? 2168 eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
1606 le16_to_cpu(icb24->frame_payload_size): 2169 le16_to_cpu(icb24->frame_payload_size):
1607 le16_to_cpu(ha->init_cb->frame_payload_size); 2170 le16_to_cpu(ha->init_cb->frame_payload_size);
1608 eiter->a.max_frame_size = cpu_to_be32(max_frame_size); 2171 eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
1609 size += 4 + 4; 2172 size += 4 + 4;
1610 2173
1611 ql_dbg(ql_dbg_disc, vha, 0x203c, 2174 ql_dbg(ql_dbg_disc, vha, 0x20bc,
1612 "Max_Frame_Size=%x.\n", eiter->a.max_frame_size); 2175 "Max_Frame_Size = %x.\n", eiter->a.max_frame_size);
1613 2176
1614 /* OS device name. */ 2177 /* OS device name. */
1615 eiter = (struct ct_fdmi_port_attr *) (entries + size); 2178 eiter = entries + size;
1616 eiter->type = __constant_cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME); 2179 eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
1617 alen = strlen(QLA2XXX_DRIVER_NAME); 2180 alen = strlen(QLA2XXX_DRIVER_NAME);
1618 strncpy(eiter->a.os_dev_name, QLA2XXX_DRIVER_NAME, alen + 1); 2181 snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
1619 alen += (alen & 3) ? (4 - (alen & 3)) : 4; 2182 "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
2183 alen += 4 - (alen & 3);
1620 eiter->len = cpu_to_be16(4 + alen); 2184 eiter->len = cpu_to_be16(4 + alen);
1621 size += 4 + alen; 2185 size += 4 + alen;
1622 2186
1623 ql_dbg(ql_dbg_disc, vha, 0x204b, 2187 ql_dbg(ql_dbg_disc, vha, 0x20be,
1624 "OS_Device_Name=%s.\n", eiter->a.os_dev_name); 2188 "OS_Device_Name = %s.\n", eiter->a.os_dev_name);
1625 2189
1626 /* Hostname. */ 2190 /* Hostname. */
1627 if (strlen(fc_host_system_hostname(vha->host))) { 2191 eiter = entries + size;
1628 ct_req->req.rpa.attrs.count = 2192 eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
1629 __constant_cpu_to_be32(FDMI_PORT_ATTR_COUNT); 2193 p_sysid = utsname();
1630 eiter = (struct ct_fdmi_port_attr *) (entries + size); 2194 if (p_sysid) {
1631 eiter->type = __constant_cpu_to_be16(FDMI_PORT_HOST_NAME); 2195 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
2196 "%s", p_sysid->nodename);
2197 } else {
1632 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name), 2198 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
1633 "%s", fc_host_system_hostname(vha->host)); 2199 "%s", fc_host_system_hostname(vha->host));
1634 alen = strlen(eiter->a.host_name);
1635 alen += (alen & 3) ? (4 - (alen & 3)) : 4;
1636 eiter->len = cpu_to_be16(4 + alen);
1637 size += 4 + alen;
1638
1639 ql_dbg(ql_dbg_disc, vha, 0x203d,
1640 "HostName=%s.\n", eiter->a.host_name);
1641 } 2200 }
2201 alen = strlen(eiter->a.host_name);
2202 alen += 4 - (alen & 3);
2203 eiter->len = cpu_to_be16(4 + alen);
2204 size += 4 + alen;
2205
2206 ql_dbg(ql_dbg_disc, vha, 0x203d,
2207 "HostName=%s.\n", eiter->a.host_name);
2208
2209 /* Node Name */
2210 eiter = entries + size;
2211 eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
2212 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
2213 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2214 size += 4 + WWN_SIZE;
2215
2216 ql_dbg(ql_dbg_disc, vha, 0x20c0,
2217 "Node Name = %016llx.\n", wwn_to_u64(eiter->a.node_name));
2218
2219 /* Port Name */
2220 eiter = entries + size;
2221 eiter->type = cpu_to_be16(FDMI_PORT_NAME);
2222 memcpy(eiter->a.port_name, vha->port_name, WWN_SIZE);
2223 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2224 size += 4 + WWN_SIZE;
2225
2226 ql_dbg(ql_dbg_disc, vha, 0x20c1,
2227 "Port Name = %016llx.\n", wwn_to_u64(eiter->a.port_name));
2228
2229 /* Port Symbolic Name */
2230 eiter = entries + size;
2231 eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
2232 qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
2233 sizeof(eiter->a.port_sym_name));
2234 alen = strlen(eiter->a.port_sym_name);
2235 alen += 4 - (alen & 3);
2236 eiter->len = cpu_to_be16(4 + alen);
2237 size += 4 + alen;
2238
2239 ql_dbg(ql_dbg_disc, vha, 0x20c2,
2240 "port symbolic name = %s\n", eiter->a.port_sym_name);
2241
2242 /* Port Type */
2243 eiter = entries + size;
2244 eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
2245 eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
2246 eiter->len = cpu_to_be16(4 + 4);
2247 size += 4 + 4;
2248
2249 ql_dbg(ql_dbg_disc, vha, 0x20c3,
2250 "Port Type = %x.\n", eiter->a.port_type);
2251
2252 /* Class of Service */
2253 eiter = entries + size;
2254 eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
2255 eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
2256 eiter->len = cpu_to_be16(4 + 4);
2257 size += 4 + 4;
2258
2259 ql_dbg(ql_dbg_disc, vha, 0x20c4,
2260 "Supported COS = %08x\n", eiter->a.port_supported_cos);
2261
2262 /* Port Fabric Name */
2263 eiter = entries + size;
2264 eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
2265 memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
2266 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2267 size += 4 + WWN_SIZE;
2268
2269 ql_dbg(ql_dbg_disc, vha, 0x20c5,
2270 "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
2271
2272 /* FC4_type */
2273 eiter = entries + size;
2274 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
2275 eiter->a.port_fc4_type[0] = 0;
2276 eiter->a.port_fc4_type[1] = 0;
2277 eiter->a.port_fc4_type[2] = 1;
2278 eiter->a.port_fc4_type[3] = 0;
2279 eiter->len = cpu_to_be16(4 + 32);
2280 size += 4 + 32;
2281
2282 ql_dbg(ql_dbg_disc, vha, 0x20c6,
2283 "Port Active FC4 Type = %02x %02x.\n",
2284 eiter->a.port_fc4_type[2], eiter->a.port_fc4_type[1]);
2285
2286 /* Port State */
2287 eiter = entries + size;
2288 eiter->type = cpu_to_be16(FDMI_PORT_STATE);
2289 eiter->a.port_state = cpu_to_be32(1);
2290 eiter->len = cpu_to_be16(4 + 4);
2291 size += 4 + 4;
2292
2293 ql_dbg(ql_dbg_disc, vha, 0x20c7,
2294 "Port State = %x.\n", eiter->a.port_state);
2295
2296 /* Number of Ports */
2297 eiter = entries + size;
2298 eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
2299 eiter->a.num_ports = cpu_to_be32(1);
2300 eiter->len = cpu_to_be16(4 + 4);
2301 size += 4 + 4;
2302
2303 ql_dbg(ql_dbg_disc, vha, 0x20c8,
2304 "Number of ports = %x.\n", eiter->a.num_ports);
2305
2306 /* Port Id */
2307 eiter = entries + size;
2308 eiter->type = cpu_to_be16(FDMI_PORT_ID);
2309 eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
2310 eiter->len = cpu_to_be16(4 + 4);
2311 size += 4 + 4;
2312
2313 ql_dbg(ql_dbg_disc, vha, 0x20c8,
2314 "Port Id = %x.\n", eiter->a.port_id);
1642 2315
1643 /* Update MS request size. */ 2316 /* Update MS request size. */
1644 qla2x00_update_ms_fdmi_iocb(vha, size + 16); 2317 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1645 2318
1646 ql_dbg(ql_dbg_disc, vha, 0x203e, 2319 ql_dbg(ql_dbg_disc, vha, 0x203e,
1647 "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size); 2320 "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size);
1648 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079, 2321 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ca,
1649 entries, size); 2322 entries, size);
1650 2323
1651 /* Execute MS IOCB */ 2324 /* Execute MS IOCB */
@@ -1653,14 +2326,26 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1653 sizeof(ms_iocb_entry_t)); 2326 sizeof(ms_iocb_entry_t));
1654 if (rval != QLA_SUCCESS) { 2327 if (rval != QLA_SUCCESS) {
1655 /*EMPTY*/ 2328 /*EMPTY*/
1656 ql_dbg(ql_dbg_disc, vha, 0x2040, 2329 ql_dbg(ql_dbg_disc, vha, 0x20cb,
1657 "RPA issue IOCB failed (%d).\n", rval); 2330 "RPA FDMI v2 issue IOCB failed (%d).\n", rval);
1658 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") != 2331 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
1659 QLA_SUCCESS) { 2332 QLA_SUCCESS) {
1660 rval = QLA_FUNCTION_FAILED; 2333 rval = QLA_FUNCTION_FAILED;
2334 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2335 ct_rsp->header.explanation_code ==
2336 CT_EXPL_ALREADY_REGISTERED) {
2337 ql_dbg(ql_dbg_disc, vha, 0x20ce,
2338 "RPA FDMI v2 already registered\n");
2339 rval = QLA_ALREADY_REGISTERED;
2340 } else {
2341 ql_dbg(ql_dbg_disc, vha, 0x2020,
2342 "RPA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
2343 ct_rsp->header.reason_code,
2344 ct_rsp->header.explanation_code);
2345 }
1661 } else { 2346 } else {
1662 ql_dbg(ql_dbg_disc, vha, 0x2041, 2347 ql_dbg(ql_dbg_disc, vha, 0x20cc,
1663 "RPA exiting nornally.\n"); 2348 "RPA FDMI V2 exiting normally.\n");
1664 } 2349 }
1665 2350
1666 return rval; 2351 return rval;
@@ -1675,8 +2360,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1675int 2360int
1676qla2x00_fdmi_register(scsi_qla_host_t *vha) 2361qla2x00_fdmi_register(scsi_qla_host_t *vha)
1677{ 2362{
1678 int rval; 2363 int rval = QLA_FUNCTION_FAILED;
1679 struct qla_hw_data *ha = vha->hw; 2364 struct qla_hw_data *ha = vha->hw;
1680 2365
1681 if (IS_QLA2100(ha) || IS_QLA2200(ha) || 2366 if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
1682 IS_QLAFX00(ha)) 2367 IS_QLAFX00(ha))
@@ -1686,6 +2371,26 @@ qla2x00_fdmi_register(scsi_qla_host_t *vha)
1686 if (rval) 2371 if (rval)
1687 return rval; 2372 return rval;
1688 2373
2374 rval = qla2x00_fdmiv2_rhba(vha);
2375 if (rval) {
2376 if (rval != QLA_ALREADY_REGISTERED)
2377 goto try_fdmi;
2378
2379 rval = qla2x00_fdmi_dhba(vha);
2380 if (rval)
2381 goto try_fdmi;
2382
2383 rval = qla2x00_fdmiv2_rhba(vha);
2384 if (rval)
2385 goto try_fdmi;
2386 }
2387 rval = qla2x00_fdmiv2_rpa(vha);
2388 if (rval)
2389 goto try_fdmi;
2390
2391 goto out;
2392
2393try_fdmi:
1689 rval = qla2x00_fdmi_rhba(vha); 2394 rval = qla2x00_fdmi_rhba(vha);
1690 if (rval) { 2395 if (rval) {
1691 if (rval != QLA_ALREADY_REGISTERED) 2396 if (rval != QLA_ALREADY_REGISTERED)
@@ -1700,7 +2405,7 @@ qla2x00_fdmi_register(scsi_qla_host_t *vha)
1700 return rval; 2405 return rval;
1701 } 2406 }
1702 rval = qla2x00_fdmi_rpa(vha); 2407 rval = qla2x00_fdmi_rpa(vha);
1703 2408out:
1704 return rval; 2409 return rval;
1705} 2410}
1706 2411
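
The registration ladder above prefers FDMI v2 and keeps the original v1 commands as a fallback: a hard v2 RHBA failure drops straight to v1, an "already registered" answer triggers one deregister-and-retry cycle, and a v2 RPA failure also falls back. A self-contained sketch of that control flow, with stubs standing in for the CT commands and return codes reduced to three states:

#include <stdio.h>

enum { OK, ALREADY, FAILED };

/* Stubs standing in for the RHBA/DHBA/RPA CT commands. */
static int v2_rhba(void)     { return ALREADY; }
static int v2_rpa(void)      { return OK; }
static int dhba(void)        { return OK; }
static int v1_register(void) { return OK; }

static int fdmi_register(void)
{
	int rv = v2_rhba();

	if (rv != OK) {
		if (rv != ALREADY)
			return v1_register(); /* hard failure: fall back */
		/* Already registered: deregister, then retry v2 once. */
		if (dhba() != OK || v2_rhba() != OK)
			return v1_register();
	}
	if (v2_rpa() != OK)
		return v1_register();
	return OK;
}

int main(void)
{
	printf("fdmi_register() -> %d\n", fdmi_register());
	return 0;
}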
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 46990f4ceb40..a4dde7e80dbd 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1848,7 +1848,9 @@ enable_82xx_npiv:
1848 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1848 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1849 } 1849 }
1850 1850
1851 if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) { 1851 if (IS_QLA27XX(ha))
1852 ha->flags.fac_supported = 1;
1853 else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
1852 uint32_t size; 1854 uint32_t size;
1853 1855
1854 rval = qla81xx_fac_get_sector_size(vha, &size); 1856 rval = qla81xx_fac_get_sector_size(vha, &size);
@@ -2196,6 +2198,15 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
2196 mid_init_cb->options = __constant_cpu_to_le16(BIT_1); 2198 mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
2197 mid_init_cb->init_cb.execution_throttle = 2199 mid_init_cb->init_cb.execution_throttle =
2198 cpu_to_le16(ha->fw_xcb_count); 2200 cpu_to_le16(ha->fw_xcb_count);
2201 /* D-Port Status */
2202 if (IS_DPORT_CAPABLE(ha))
2203 mid_init_cb->init_cb.firmware_options_1 |=
2204 cpu_to_le16(BIT_7);
2205 /* Enable FA-WWPN */
2206 ha->flags.fawwpn_enabled =
2207 (mid_init_cb->init_cb.firmware_options_1 & BIT_6) ? 1 : 0;
2208 ql_dbg(ql_dbg_init, vha, 0x0141, "FA-WWPN Support: %s.\n",
2209 (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
2199 } 2210 }
2200 2211
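
A small sketch of the option-bit handling in the hunk above: D-Port diagnostics are requested by setting BIT_7 in firmware_options_1, while FA-WWPN support is read back from BIT_6 of the same field. The BIT_n masks are restated here for illustration:

#include <stdbool.h>
#include <stdint.h>

#define BIT_6 (1u << 6)
#define BIT_7 (1u << 7)

static void request_dport(uint16_t *fw_opts1)
{
	*fw_opts1 |= BIT_7;      /* ask firmware for D-Port status */
}

static bool fawwpn_enabled(uint16_t fw_opts1)
{
	return fw_opts1 & BIT_6; /* firmware reports FA-WWPN here */
}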
2201 rval = qla2x00_init_firmware(vha, ha->init_cb_size); 2212 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
@@ -2224,7 +2235,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
2224 unsigned long wtime, mtime, cs84xx_time; 2235 unsigned long wtime, mtime, cs84xx_time;
2225 uint16_t min_wait; /* Minimum wait time if loop is down */ 2236 uint16_t min_wait; /* Minimum wait time if loop is down */
2226 uint16_t wait_time; /* Wait time if loop is coming ready */ 2237 uint16_t wait_time; /* Wait time if loop is coming ready */
2227 uint16_t state[5]; 2238 uint16_t state[6];
2228 struct qla_hw_data *ha = vha->hw; 2239 struct qla_hw_data *ha = vha->hw;
2229 2240
2230 if (IS_QLAFX00(vha->hw)) 2241 if (IS_QLAFX00(vha->hw))
@@ -2329,8 +2340,8 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
2329 } while (1); 2340 } while (1);
2330 2341
2331 ql_dbg(ql_dbg_taskm, vha, 0x803a, 2342 ql_dbg(ql_dbg_taskm, vha, 0x803a,
2332 "fw_state=%x (%x, %x, %x, %x) " "curr time=%lx.\n", state[0], 2343 "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
2333 state[1], state[2], state[3], state[4], jiffies); 2344 state[1], state[2], state[3], state[4], state[5], jiffies);
2334 2345
2335 if (rval && !(vha->device_flags & DFLG_NO_CABLE)) { 2346 if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
2336 ql_log(ql_log_warn, vha, 0x803b, 2347 ql_log(ql_log_warn, vha, 0x803b,
@@ -2596,18 +2607,18 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2596 nv->firmware_options[1] = BIT_7 | BIT_5; 2607 nv->firmware_options[1] = BIT_7 | BIT_5;
2597 nv->add_firmware_options[0] = BIT_5; 2608 nv->add_firmware_options[0] = BIT_5;
2598 nv->add_firmware_options[1] = BIT_5 | BIT_4; 2609 nv->add_firmware_options[1] = BIT_5 | BIT_4;
2599 nv->frame_payload_size = __constant_cpu_to_le16(2048); 2610 nv->frame_payload_size = 2048;
2600 nv->special_options[1] = BIT_7; 2611 nv->special_options[1] = BIT_7;
2601 } else if (IS_QLA2200(ha)) { 2612 } else if (IS_QLA2200(ha)) {
2602 nv->firmware_options[0] = BIT_2 | BIT_1; 2613 nv->firmware_options[0] = BIT_2 | BIT_1;
2603 nv->firmware_options[1] = BIT_7 | BIT_5; 2614 nv->firmware_options[1] = BIT_7 | BIT_5;
2604 nv->add_firmware_options[0] = BIT_5; 2615 nv->add_firmware_options[0] = BIT_5;
2605 nv->add_firmware_options[1] = BIT_5 | BIT_4; 2616 nv->add_firmware_options[1] = BIT_5 | BIT_4;
2606 nv->frame_payload_size = __constant_cpu_to_le16(1024); 2617 nv->frame_payload_size = 1024;
2607 } else if (IS_QLA2100(ha)) { 2618 } else if (IS_QLA2100(ha)) {
2608 nv->firmware_options[0] = BIT_3 | BIT_1; 2619 nv->firmware_options[0] = BIT_3 | BIT_1;
2609 nv->firmware_options[1] = BIT_5; 2620 nv->firmware_options[1] = BIT_5;
2610 nv->frame_payload_size = __constant_cpu_to_le16(1024); 2621 nv->frame_payload_size = 1024;
2611 } 2622 }
2612 2623
2613 nv->max_iocb_allocation = __constant_cpu_to_le16(256); 2624 nv->max_iocb_allocation = __constant_cpu_to_le16(256);
@@ -2643,7 +2654,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2643 * are valid. 2654 * are valid.
2644 */ 2655 */
2645 if (ia64_platform_is("sn2")) { 2656 if (ia64_platform_is("sn2")) {
2646 nv->frame_payload_size = __constant_cpu_to_le16(2048); 2657 nv->frame_payload_size = 2048;
2647 if (IS_QLA23XX(ha)) 2658 if (IS_QLA23XX(ha))
2648 nv->special_options[1] = BIT_7; 2659 nv->special_options[1] = BIT_7;
2649 } 2660 }
@@ -3192,7 +3203,7 @@ static void
3192qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) 3203qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
3193{ 3204{
3194 int rval; 3205 int rval;
3195 uint16_t mb[4]; 3206 uint16_t mb[MAILBOX_REGISTER_COUNT];
3196 struct qla_hw_data *ha = vha->hw; 3207 struct qla_hw_data *ha = vha->hw;
3197 3208
3198 if (!IS_IIDMA_CAPABLE(ha)) 3209 if (!IS_IIDMA_CAPABLE(ha))
@@ -4564,6 +4575,10 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
4564 /* Requeue all commands in outstanding command list. */ 4575 /* Requeue all commands in outstanding command list. */
4565 qla2x00_abort_all_cmds(vha, DID_RESET << 16); 4576 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
4566 } 4577 }
4578
4579 ha->chip_reset++;
4580 /* memory barrier */
4581 wmb();
4567} 4582}
4568 4583
4569/* 4584/*
@@ -4958,7 +4973,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
4958 memset(nv, 0, ha->nvram_size); 4973 memset(nv, 0, ha->nvram_size);
4959 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION); 4974 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
4960 nv->version = __constant_cpu_to_le16(ICB_VERSION); 4975 nv->version = __constant_cpu_to_le16(ICB_VERSION);
4961 nv->frame_payload_size = __constant_cpu_to_le16(2048); 4976 nv->frame_payload_size = 2048;
4962 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); 4977 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
4963 nv->exchange_count = __constant_cpu_to_le16(0); 4978 nv->exchange_count = __constant_cpu_to_le16(0);
4964 nv->hard_address = __constant_cpu_to_le16(124); 4979 nv->hard_address = __constant_cpu_to_le16(124);
@@ -5225,7 +5240,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
5225 ql_log(ql_log_fatal, vha, 0x008f, 5240 ql_log(ql_log_fatal, vha, 0x008f,
5226 "Failed to load segment %d of firmware.\n", 5241 "Failed to load segment %d of firmware.\n",
5227 fragment); 5242 fragment);
5228 break; 5243 return QLA_FUNCTION_FAILED;
5229 } 5244 }
5230 5245
5231 faddr += dlen; 5246 faddr += dlen;
@@ -5528,7 +5543,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
5528 ql_log(ql_log_fatal, vha, 0x0098, 5543 ql_log(ql_log_fatal, vha, 0x0098,
5529 "Failed to load segment %d of firmware.\n", 5544 "Failed to load segment %d of firmware.\n",
5530 fragment); 5545 fragment);
5531 break; 5546 return QLA_FUNCTION_FAILED;
5532 } 5547 }
5533 5548
5534 fwcode += dlen; 5549 fwcode += dlen;
@@ -5905,7 +5920,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5905 memset(nv, 0, ha->nvram_size); 5920 memset(nv, 0, ha->nvram_size);
5906 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION); 5921 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
5907 nv->version = __constant_cpu_to_le16(ICB_VERSION); 5922 nv->version = __constant_cpu_to_le16(ICB_VERSION);
5908 nv->frame_payload_size = __constant_cpu_to_le16(2048); 5923 nv->frame_payload_size = 2048;
5909 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); 5924 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
5910 nv->exchange_count = __constant_cpu_to_le16(0); 5925 nv->exchange_count = __constant_cpu_to_le16(0);
5911 nv->port_name[0] = 0x21; 5926 nv->port_name[0] = 0x21;
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index b3b1d6fc2d6c..fee9eb7c8a60 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -279,3 +279,11 @@ qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
279 complete(&ha->mbx_intr_comp); 279 complete(&ha->mbx_intr_comp);
280 } 280 }
281} 281}
282
283static inline void
284qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t retry_delay)
285{
286 if (retry_delay)
287 fcport->retry_delay_timestamp = jiffies +
288 (retry_delay * HZ / 10);
289}
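
The helper stores an absolute deadline: the firmware reports the retry delay in tenths of a second (the SCSI retry-delay-timer unit), and 'retry_delay * HZ / 10' converts that into kernel ticks past the current jiffies. The same arithmetic as a userspace sketch, with HZ standing in for the kernel tick rate:

#include <stdio.h>

#define HZ 250 /* illustrative tick rate */

/* Convert a delay in tenths of a second into an absolute tick
 * deadline relative to 'now'. */
static unsigned long retry_deadline(unsigned long now,
    unsigned int retry_delay_tenths)
{
	return now + (unsigned long)retry_delay_tenths * HZ / 10;
}

int main(void)
{
	/* 15 tenths = 1.5 s = 375 ticks at HZ=250. */
	printf("%lu\n", retry_deadline(1000, 15));
	return 0;
}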
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 150529d98db4..f0edb07f3198 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1858,6 +1858,17 @@ static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1858} 1858}
1859 1859
1860/* Generic Control-SRB manipulation functions. */ 1860/* Generic Control-SRB manipulation functions. */
1861
1862/* hardware_lock assumed to be held. */
1863void *
1864qla2x00_alloc_iocbs_ready(scsi_qla_host_t *vha, srb_t *sp)
1865{
1866 if (qla2x00_reset_active(vha))
1867 return NULL;
1868
1869 return qla2x00_alloc_iocbs(vha, sp);
1870}
1871
1861void * 1872void *
1862qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp) 1873qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1863{ 1874{
@@ -1901,7 +1912,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1901 1912
1902skip_cmd_array: 1913skip_cmd_array:
1903 /* Check for room on request queue. */ 1914 /* Check for room on request queue. */
1904 if (req->cnt < req_cnt) { 1915 if (req->cnt < req_cnt + 2) {
1905 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) 1916 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
1906 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out); 1917 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1907 else if (IS_P3P_TYPE(ha)) 1918 else if (IS_P3P_TYPE(ha))
@@ -1920,7 +1931,7 @@ skip_cmd_array:
1920 req->cnt = req->length - 1931 req->cnt = req->length -
1921 (req->ring_index - cnt); 1932 (req->ring_index - cnt);
1922 } 1933 }
1923 if (req->cnt < req_cnt) 1934 if (req->cnt < req_cnt + 2)
1924 goto queuing_error; 1935 goto queuing_error;
1925 1936
1926 /* Prep packet */ 1937 /* Prep packet */
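
The change to 'req_cnt + 2' reserves two spare request-queue entries beyond what the IOCB itself needs, so the producer never closes completely on the consumer index read back from hardware. The free-space arithmetic from the surrounding function, restated as a standalone helper (a sketch; the driver caches the result in req->cnt):

#include <stdint.h>

/* Free entries on a ring of 'length' slots, given the producer
 * index (ring_index) and the consumer index read from hardware. */
static uint16_t ring_free(uint16_t length, uint16_t prod, uint16_t cons)
{
	if (prod < cons)
		return cons - prod;
	return length - (prod - cons);
}

An allocation of req_cnt entries then proceeds only while ring_free(...) >= req_cnt + 2.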
@@ -2648,7 +2659,7 @@ queuing_error:
2648 return QLA_FUNCTION_FAILED; 2659 return QLA_FUNCTION_FAILED;
2649} 2660}
2650 2661
2651void 2662static void
2652qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb) 2663qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
2653{ 2664{
2654 struct srb_iocb *aio = &sp->u.iocb_cmd; 2665 struct srb_iocb *aio = &sp->u.iocb_cmd;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 550a4a31f51a..a04a1b1f7f32 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -56,16 +56,8 @@ qla2100_intr_handler(int irq, void *dev_id)
56 vha = pci_get_drvdata(ha->pdev); 56 vha = pci_get_drvdata(ha->pdev);
57 for (iter = 50; iter--; ) { 57 for (iter = 50; iter--; ) {
58 hccr = RD_REG_WORD(&reg->hccr); 58 hccr = RD_REG_WORD(&reg->hccr);
59 /* Check for PCI disconnection */ 59 if (qla2x00_check_reg16_for_disconnect(vha, hccr))
60 if (hccr == 0xffff) {
61 /*
62 * Schedule this on the default system workqueue so that
63 * all the adapter workqueues and the DPC thread can be
64 * shutdown cleanly.
65 */
66 schedule_work(&ha->board_disable);
67 break; 60 break;
68 }
69 if (hccr & HCCR_RISC_PAUSE) { 61 if (hccr & HCCR_RISC_PAUSE) {
70 if (pci_channel_offline(ha->pdev)) 62 if (pci_channel_offline(ha->pdev))
71 break; 63 break;
@@ -121,21 +113,31 @@ qla2100_intr_handler(int irq, void *dev_id)
121} 113}
122 114
123bool 115bool
124qla2x00_check_reg_for_disconnect(scsi_qla_host_t *vha, uint32_t reg) 116qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
125{ 117{
126 /* Check for PCI disconnection */ 118 /* Check for PCI disconnection */
127 if (reg == 0xffffffff) { 119 if (reg == 0xffffffff) {
128 /* 120 if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
129 * Schedule this on the default system workqueue so that all the 121 !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
130 * adapter workqueues and the DPC thread can be shutdown 122 !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
131 * cleanly. 123 /*
132 */ 124 * Schedule this (only once) on the default system
133 schedule_work(&vha->hw->board_disable); 125 * workqueue so that all the adapter workqueues and the
126 * DPC thread can be shutdown cleanly.
127 */
128 schedule_work(&vha->hw->board_disable);
129 }
134 return true; 130 return true;
135 } else 131 } else
136 return false; 132 return false;
137} 133}
138 134
135bool
136qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
137{
138 return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
139}
140
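
The new 16-bit wrapper reuses the 32-bit check by widening the value with the top half forced to ones, so a PCI-disconnect read of 0xffff maps onto 0xffffffff. A standalone sketch of the trick:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool reg32_disconnected(uint32_t v)
{
	return v == 0xffffffff; /* all-ones: read from a vanished device */
}

/* Widen with the high half forced to ones so the 16-bit
 * disconnect pattern hits the 32-bit test. */
static bool reg16_disconnected(uint16_t v)
{
	return reg32_disconnected(0xffff0000u | v);
}

int main(void)
{
	assert(reg16_disconnected(0xffff));
	assert(!reg16_disconnected(0x1234));
	return 0;
}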
139/** 141/**
140 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. 142 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
141 * @irq: 143 * @irq:
@@ -174,7 +176,7 @@ qla2300_intr_handler(int irq, void *dev_id)
 	vha = pci_get_drvdata(ha->pdev);
 	for (iter = 50; iter--; ) {
 		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
-		if (qla2x00_check_reg_for_disconnect(vha, stat))
+		if (qla2x00_check_reg32_for_disconnect(vha, stat))
 			break;
 		if (stat & HSR_RISC_PAUSED) {
 			if (unlikely(pci_channel_offline(ha->pdev)))
@@ -573,8 +575,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
 	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
-	uint32_t rscn_entry, host_pid;
+	uint32_t rscn_entry, host_pid, tmp_pid;
 	unsigned long flags;
+	fc_port_t *fcport = NULL;
 
 	/* Setup to process RIO completion. */
 	handle_cnt = 0;
@@ -730,7 +733,7 @@ skip_rio:
 		else
 			ha->link_data_rate = mb[1];
 
-		ql_dbg(ql_dbg_async, vha, 0x500a,
+		ql_log(ql_log_info, vha, 0x500a,
 		    "LOOP UP detected (%s Gbps).\n",
 		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));
 
@@ -743,13 +746,23 @@ skip_rio:
 		    ? RD_REG_WORD(&reg24->mailbox4) : 0;
 		mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
 			: mbx;
-		ql_dbg(ql_dbg_async, vha, 0x500b,
+		ql_log(ql_log_info, vha, 0x500b,
 		    "LOOP DOWN detected (%x %x %x %x).\n",
 		    mb[1], mb[2], mb[3], mbx);
 
 		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
 			atomic_set(&vha->loop_state, LOOP_DOWN);
 			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+			/*
+			 * In case of loop down, restore WWPN from
+			 * NVRAM in case of FA-WWPN capable ISP
+			 */
+			if (ha->flags.fawwpn_enabled) {
+				void *wwpn = ha->init_cb->port_name;
+
+				memcpy(vha->port_name, wwpn, WWN_SIZE);
+			}
+
 			vha->device_flags |= DFLG_NO_CABLE;
 			qla2x00_mark_all_devices_lost(vha, 1);
 		}
@@ -908,7 +921,8 @@ skip_rio:
 		 * it. Otherwise ignore it and Wait for RSCN to come in.
 		 */
 		atomic_set(&vha->loop_down_timer, 0);
-		if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) {
+		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
+		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
 			ql_dbg(ql_dbg_async, vha, 0x5011,
 			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
 			    mb[1], mb[2], mb[3]);
@@ -920,9 +934,6 @@ skip_rio:
 		ql_dbg(ql_dbg_async, vha, 0x5012,
 		    "Port database changed %04x %04x %04x.\n",
 		    mb[1], mb[2], mb[3]);
-		ql_log(ql_log_warn, vha, 0x505f,
-		    "Link is operational (%s Gbps).\n",
-		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));
 
 		/*
 		 * Mark all devices as missing so we will login again.
@@ -969,6 +980,20 @@ skip_rio:
 		if (qla2x00_is_a_vp_did(vha, rscn_entry))
 			break;
 
+		/*
+		 * Search for the rport related to this RSCN entry and mark it
+		 * as lost.
+		 */
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
+			if (atomic_read(&fcport->state) != FCS_ONLINE)
+				continue;
+			tmp_pid = fcport->d_id.b24;
+			if (fcport->d_id.b24 == rscn_entry) {
+				qla2x00_mark_device_lost(vha, fcport, 0, 0);
+				break;
+			}
+		}
+
 		atomic_set(&vha->loop_down_timer, 0);
 		vha->flags.management_server_logged_in = 0;
 
@@ -1086,6 +1111,14 @@ skip_rio:
 		qla83xx_handle_8200_aen(vha, mb);
 		break;
 
+	case MBA_DPORT_DIAGNOSTICS:
+		ql_dbg(ql_dbg_async, vha, 0x5052,
+		    "D-Port Diagnostics: %04x %04x=%s\n", mb[0], mb[1],
+		    mb[1] == 0 ? "start" :
+		    mb[1] == 1 ? "done (ok)" :
+		    mb[1] == 2 ? "done (error)" : "other");
+		break;
+
 	default:
 		ql_dbg(ql_dbg_async, vha, 0x5057,
 		    "Unknown AEN:%04x %04x %04x %04x\n",
@@ -1975,6 +2008,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 	int logit = 1;
 	int res = 0;
 	uint16_t state_flags = 0;
+	uint16_t retry_delay = 0;
 
 	sts = (sts_entry_t *) pkt;
 	sts24 = (struct sts_entry_24xx *) pkt;
@@ -2068,6 +2102,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
 		ox_id = le16_to_cpu(sts24->ox_id);
 		par_sense_len = sizeof(sts24->data);
+		/* Valid values of the retry delay timer are 0x1-0xffef */
+		if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1)
+			retry_delay = sts24->retry_delay;
 	} else {
 		if (scsi_status & SS_SENSE_LEN_VALID)
 			sense_len = le16_to_cpu(sts->req_sense_length);
@@ -2102,6 +2139,14 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 		comp_status = CS_DATA_OVERRUN;
 
 	/*
+	 * Check retry_delay_timer value if we receive a busy or
+	 * queue full.
+	 */
+	if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
+	    lscsi_status == SAM_STAT_BUSY)
+		qla2x00_set_retry_delay_timestamp(fcport, retry_delay);
+
+	/*
 	 * Based on Host and scsi status generate status code for Linux
 	 */
 	switch (comp_status) {
@@ -2633,7 +2678,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
 	vha = pci_get_drvdata(ha->pdev);
 	for (iter = 50; iter--; ) {
 		stat = RD_REG_DWORD(&reg->host_status);
-		if (qla2x00_check_reg_for_disconnect(vha, stat))
+		if (qla2x00_check_reg32_for_disconnect(vha, stat))
 			break;
 		if (stat & HSRX_RISC_PAUSED) {
 			if (unlikely(pci_channel_offline(ha->pdev)))
@@ -2723,7 +2768,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
 	 * we process the response queue.
 	 */
 	stat = RD_REG_DWORD(&reg->host_status);
-	if (qla2x00_check_reg_for_disconnect(vha, stat))
+	if (qla2x00_check_reg32_for_disconnect(vha, stat))
 		goto out;
 	qla24xx_process_response_queue(vha, rsp);
 	if (!ha->flags.disable_msix_handshake) {
@@ -2763,7 +2808,7 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
 		hccr = RD_REG_DWORD_RELAXED(&reg->hccr);
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	}
-	if (qla2x00_check_reg_for_disconnect(vha, hccr))
+	if (qla2x00_check_reg32_for_disconnect(vha, hccr))
 		goto out;
 	queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
 
@@ -2798,7 +2843,7 @@ qla24xx_msix_default(int irq, void *dev_id)
 	vha = pci_get_drvdata(ha->pdev);
 	do {
 		stat = RD_REG_DWORD(&reg->host_status);
-		if (qla2x00_check_reg_for_disconnect(vha, stat))
+		if (qla2x00_check_reg32_for_disconnect(vha, stat))
 			break;
 		if (stat & HSRX_RISC_PAUSED) {
 			if (unlikely(pci_channel_offline(ha->pdev)))
@@ -2923,27 +2968,22 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
 	for (i = 0; i < ha->msix_count; i++)
 		entries[i].entry = i;
 
-	ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
-	if (ret) {
-		if (ret < MIN_MSIX_COUNT)
-			goto msix_failed;
-
+	ret = pci_enable_msix_range(ha->pdev,
+	    entries, MIN_MSIX_COUNT, ha->msix_count);
+	if (ret < 0) {
+		ql_log(ql_log_fatal, vha, 0x00c7,
+		    "MSI-X: Failed to enable support, "
+		    "giving up -- %d/%d.\n",
+		    ha->msix_count, ret);
+		goto msix_out;
+	} else if (ret < ha->msix_count) {
 		ql_log(ql_log_warn, vha, 0x00c6,
 		    "MSI-X: Failed to enable support "
 		    "-- %d/%d\n Retry with %d vectors.\n",
 		    ha->msix_count, ret, ret);
-		ha->msix_count = ret;
-		ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
-		if (ret) {
-msix_failed:
-			ql_log(ql_log_fatal, vha, 0x00c7,
-			    "MSI-X: Failed to enable support, "
-			    "giving up -- %d/%d.\n",
-			    ha->msix_count, ret);
-			goto msix_out;
-		}
-		ha->max_rsp_queues = ha->msix_count - 1;
 	}
+	ha->msix_count = ret;
+	ha->max_rsp_queues = ha->msix_count - 1;
 	ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
 	    ha->msix_count, GFP_KERNEL);
 	if (!ha->msix_entries) {
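The hunk above also retires the driver's hand-rolled retry: pci_enable_msix() could fail with a positive "this many vectors are available" count, which forced the second call guarded by the msix_failed label. pci_enable_msix_range() folds that loop into the PCI core and returns either the number of vectors actually allocated (at least minvec) or a negative errno. A minimal sketch of the calling convention, with illustrative names:

#include <linux/pci.h>

#define MY_MIN_VECS 2	/* assumption: at least two vectors are needed */

static int my_setup_msix(struct pci_dev *pdev, struct msix_entry *entries,
			 int want)
{
	int got;

	/* entries[i].entry must already be filled in by the caller */
	got = pci_enable_msix_range(pdev, entries, MY_MIN_VECS, want);
	if (got < 0)
		return got;	/* not even MY_MIN_VECS could be allocated */

	/* got is in [MY_MIN_VECS, want]: size the queues to what we have */
	return got;
}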
@@ -3103,10 +3143,11 @@ skip_msi:
 	}
 
 clear_risc_ints:
+	if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
+		goto fail;
 
 	spin_lock_irq(&ha->hardware_lock);
-	if (!IS_FWI2_CAPABLE(ha))
-		WRT_REG_WORD(&reg->isp.semaphore, 0);
+	WRT_REG_WORD(&reg->isp.semaphore, 0);
 	spin_unlock_irq(&ha->hardware_lock);
 
 fail:
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index d9aafc003be2..72971daa2552 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -117,7 +117,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
 	command = mcp->mb[0];
 	mboxes = mcp->out_mb;
 
-	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111,
+	ql_dbg(ql_dbg_mbx, vha, 0x1111,
 	    "Mailbox registers (OUT):\n");
 	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
 		if (IS_QLA2200(ha) && cnt == 8)
@@ -373,7 +373,7 @@ premature_exit:
 
 mbx_done:
 	if (rval) {
-		ql_log(ql_log_warn, base_vha, 0x1020,
+		ql_dbg(ql_dbg_disc, base_vha, 0x1020,
 		    "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
 		    mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
 	} else {
@@ -1085,6 +1085,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
 	mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
 	if (IS_CNA_CAPABLE(vha->hw))
 		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
+	if (IS_FWI2_CAPABLE(vha->hw))
+		mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(vha, mcp);
@@ -1118,6 +1120,22 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
 			vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
 			vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
 		}
+		/* If FA-WWN supported */
+		if (mcp->mb[7] & BIT_14) {
+			vha->port_name[0] = MSB(mcp->mb[16]);
+			vha->port_name[1] = LSB(mcp->mb[16]);
+			vha->port_name[2] = MSB(mcp->mb[17]);
+			vha->port_name[3] = LSB(mcp->mb[17]);
+			vha->port_name[4] = MSB(mcp->mb[18]);
+			vha->port_name[5] = LSB(mcp->mb[18]);
+			vha->port_name[6] = MSB(mcp->mb[19]);
+			vha->port_name[7] = LSB(mcp->mb[19]);
+			fc_host_port_name(vha->host) =
+			    wwn_to_u64(vha->port_name);
+			ql_dbg(ql_dbg_mbx, vha, 0x10ca,
+			    "FA-WWN acquired %016llx\n",
+			    wwn_to_u64(vha->port_name));
+		}
 	}
 
 	return rval;
@@ -1546,7 +1564,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
 	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
 	mcp->out_mb = MBX_0;
 	if (IS_FWI2_CAPABLE(vha->hw))
-		mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+		mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
 	else
 		mcp->in_mb = MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
@@ -1560,6 +1578,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
 		states[2] = mcp->mb[3];
 		states[3] = mcp->mb[4];
 		states[4] = mcp->mb[5];
+		states[5] = mcp->mb[6];	/* DPORT status */
 	}
 
 	if (rval != QLA_SUCCESS) {
@@ -3328,8 +3347,24 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 	    rptid_entry->port_id[2], rptid_entry->port_id[1],
 	    rptid_entry->port_id[0]);
 
+	/* FA-WWN is only for physical port */
+	if (!vp_idx) {
+		void *wwpn = ha->init_cb->port_name;
+
+		if (!MSB(stat)) {
+			if (rptid_entry->vp_idx_map[1] & BIT_6)
+				wwpn = rptid_entry->reserved_4 + 8;
+		}
+		memcpy(vha->port_name, wwpn, WWN_SIZE);
+		fc_host_port_name(vha->host) =
+		    wwn_to_u64(vha->port_name);
+		ql_dbg(ql_dbg_mbx, vha, 0x1018,
+		    "FA-WWN portname %016llx (%x)\n",
+		    fc_host_port_name(vha->host), MSB(stat));
+	}
+
 	vp = vha;
-	if (vp_idx == 0 && (MSB(stat) != 1))
+	if (vp_idx == 0)
 		goto reg_needed;
 
 	if (MSB(stat) != 0 && MSB(stat) != 2) {
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 89998244f48d..5c2e0317f1c0 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -702,6 +702,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
 	req->req_q_in = &reg->isp25mq.req_q_in;
 	req->req_q_out = &reg->isp25mq.req_q_out;
 	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
+	req->out_ptr = (void *)(req->ring + req->length);
 	mutex_unlock(&ha->vport_lock);
 	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
 	    "ring_ptr=%p ring_index=%d, "
@@ -811,6 +812,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
 	reg = ISP_QUE_REG(ha, que_id);
 	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
 	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
+	rsp->in_ptr = (void *)(rsp->ring + rsp->length);
 	mutex_unlock(&ha->vport_lock);
 	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
 	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 4775baa8b6a0..80867599527d 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -695,11 +695,11 @@ qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
 }
 
 char *
-qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str)
+qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
 {
 	struct qla_hw_data *ha = vha->hw;
 
-	sprintf(str, "%s", ha->mr.fw_version);
+	snprintf(str, size, "%s", ha->mr.fw_version);
 	return str;
 }
 
@@ -1551,7 +1551,10 @@ qlafx00_timer_routine(scsi_qla_host_t *vha)
 			ha->mr.fw_reset_timer_tick =
 			    QLAFX00_MAX_RESET_INTERVAL;
 		}
-		ha->mr.old_aenmbx0_state = aenmbx0;
+		if (ha->mr.old_aenmbx0_state != aenmbx0) {
+			ha->mr.old_aenmbx0_state = aenmbx0;
+			ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
+		}
 		ha->mr.fw_reset_timer_tick--;
 	}
 	if (test_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags)) {
@@ -1675,17 +1678,16 @@ qlafx00_get_fcport(struct scsi_qla_host *vha, int tgt_id)
 	fc_port_t *fcport;
 
 	/* Check for matching device in remote port list. */
-	fcport = NULL;
 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
 		if (fcport->tgt_id == tgt_id) {
 			ql_dbg(ql_dbg_async, vha, 0x5072,
 			    "Matching fcport(%p) found with TGT-ID: 0x%x "
 			    "and Remote TGT_ID: 0x%x\n",
 			    fcport, fcport->tgt_id, tgt_id);
-			break;
+			return fcport;
 		}
 	}
-	return fcport;
+	return NULL;
 }
 
 static void
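The qlafx00_get_fcport() rewrite fixes a common list_for_each_entry() misuse: when the loop runs to completion without a break, the cursor is not NULL but a bogus container_of() of the list head itself, so "return the cursor after the loop" is only safe when a match is guaranteed. Returning from inside the loop and NULL afterwards is the robust idiom; a sketch with an illustrative item type:

#include <linux/list.h>

struct demo_port {
	struct list_head list;
	int tgt_id;
};

static struct demo_port *demo_find_port(struct list_head *head, int tgt_id)
{
	struct demo_port *p;

	list_for_each_entry(p, head, list) {
		if (p->tgt_id == tgt_id)
			return p;	/* 'p' is only valid inside the loop */
	}
	/* here 'p' is neither NULL nor a real entry; never return it */
	return NULL;
}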
@@ -2924,7 +2926,7 @@ qlafx00_intr_handler(int irq, void *dev_id)
 	vha = pci_get_drvdata(ha->pdev);
 	for (iter = 50; iter--; clr_intr = 0) {
 		stat = QLAFX00_RD_INTR_REG(ha);
-		if (qla2x00_check_reg_for_disconnect(vha, stat))
+		if (qla2x00_check_reg32_for_disconnect(vha, stat))
 			break;
 		intr_stat = stat & QLAFX00_HST_INT_STS_BITS;
 		if (!intr_stat)
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 58f3c912d96e..54cb2ac9339b 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -857,7 +857,7 @@ qla82xx_rom_lock(struct qla_hw_data *ha)
 			break;
 		if (timeout >= qla82xx_rom_lock_timeout) {
 			lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
-			ql_log(ql_log_warn, vha, 0xb157,
+			ql_dbg(ql_dbg_p3p, vha, 0xb157,
 			    "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d",
 			    __func__, ha->portnum, lock_owner);
 			return -1;
@@ -2123,7 +2123,7 @@ qla82xx_msix_default(int irq, void *dev_id)
 	vha = pci_get_drvdata(ha->pdev);
 	do {
 		host_int = RD_REG_DWORD(&reg->host_int);
-		if (qla2x00_check_reg_for_disconnect(vha, host_int))
+		if (qla2x00_check_reg32_for_disconnect(vha, host_int))
 			break;
 		if (host_int) {
 			stat = RD_REG_DWORD(&reg->host_status);
@@ -2184,7 +2184,7 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	vha = pci_get_drvdata(ha->pdev);
 	host_int = RD_REG_DWORD(&reg->host_int);
-	if (qla2x00_check_reg_for_disconnect(vha, host_int))
+	if (qla2x00_check_reg32_for_disconnect(vha, host_int))
 		goto out;
 	qla24xx_process_response_queue(vha, rsp);
 	WRT_REG_DWORD(&reg->host_int, 0);
@@ -2219,7 +2219,7 @@ qla82xx_poll(int irq, void *dev_id)
 	vha = pci_get_drvdata(ha->pdev);
 
 	host_int = RD_REG_DWORD(&reg->host_int);
-	if (qla2x00_check_reg_for_disconnect(vha, host_int))
+	if (qla2x00_check_reg32_for_disconnect(vha, host_int))
 		goto out;
 	if (host_int) {
 		stat = RD_REG_DWORD(&reg->host_status);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index be9698d920c2..dabd25429c58 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -105,7 +105,7 @@ MODULE_PARM_DESC(ql2xshiftctondsd,
105 "based on total number of SG elements."); 105 "based on total number of SG elements.");
106 106
107int ql2xfdmienable=1; 107int ql2xfdmienable=1;
108module_param(ql2xfdmienable, int, S_IRUGO); 108module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
109MODULE_PARM_DESC(ql2xfdmienable, 109MODULE_PARM_DESC(ql2xfdmienable,
110 "Enables FDMI registrations. " 110 "Enables FDMI registrations. "
111 "0 - no FDMI. Default is 1 - perform FDMI."); 111 "0 - no FDMI. Default is 1 - perform FDMI.");
@@ -238,7 +238,9 @@ static int qla2xxx_eh_host_reset(struct scsi_cmnd *);
238 238
239static int qla2x00_change_queue_depth(struct scsi_device *, int, int); 239static int qla2x00_change_queue_depth(struct scsi_device *, int, int);
240static int qla2x00_change_queue_type(struct scsi_device *, int); 240static int qla2x00_change_queue_type(struct scsi_device *, int);
241static void qla2x00_clear_drv_active(struct qla_hw_data *);
241static void qla2x00_free_device(scsi_qla_host_t *); 242static void qla2x00_free_device(scsi_qla_host_t *);
243static void qla83xx_disable_laser(scsi_qla_host_t *vha);
242 244
243struct scsi_host_template qla2xxx_driver_template = { 245struct scsi_host_template qla2xxx_driver_template = {
244 .module = THIS_MODULE, 246 .module = THIS_MODULE,
@@ -547,14 +549,13 @@ qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
547} 549}
548 550
549static char * 551static char *
550qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str) 552qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
551{ 553{
552 char un_str[10]; 554 char un_str[10];
553 struct qla_hw_data *ha = vha->hw; 555 struct qla_hw_data *ha = vha->hw;
554 556
555 sprintf(str, "%d.%02d.%02d ", ha->fw_major_version, 557 snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version,
556 ha->fw_minor_version, 558 ha->fw_minor_version, ha->fw_subminor_version);
557 ha->fw_subminor_version);
558 559
559 if (ha->fw_attributes & BIT_9) { 560 if (ha->fw_attributes & BIT_9) {
560 strcat(str, "FLX"); 561 strcat(str, "FLX");
@@ -586,11 +587,11 @@ qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str)
586} 587}
587 588
588static char * 589static char *
589qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str) 590qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
590{ 591{
591 struct qla_hw_data *ha = vha->hw; 592 struct qla_hw_data *ha = vha->hw;
592 593
593 sprintf(str, "%d.%02d.%02d (%x)", ha->fw_major_version, 594 snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version,
594 ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes); 595 ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
595 return str; 596 return str;
596} 597}
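All three fw_version_str() implementations (here and in qla_mr.c above) grow a size parameter for the same reason: they are reached through the ->fw_version_str() isp_ops pointer, and a plain sprintf() into the caller's buffer has no way of knowing how large that buffer is. A sketch of the hardened shape, with illustrative names:

#include <stdio.h>

static char *fw_version_str(char *str, size_t size,
			    int major, int minor, int sub)
{
	/* snprintf() writes at most size bytes, including the NUL */
	snprintf(str, size, "%d.%02d.%02d", major, minor, sub);
	return str;
}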
@@ -730,6 +731,15 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 		goto qc24_target_busy;
 	}
 
+	/*
+	 * Return target busy if we've received a non-zero retry_delay_timer
+	 * in a FCP_RSP.
+	 */
+	if (time_after(jiffies, fcport->retry_delay_timestamp))
+		fcport->retry_delay_timestamp = 0;
+	else
+		goto qc24_target_busy;
+
 	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
 	if (!sp)
 		goto qc24_host_busy;
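This gate is the consumer half of the retry_delay plumbing added in qla_isr.c above: the status handler stores a deadline on the fcport when a target reports BUSY or TASK SET FULL, and queuecommand then bounces new I/O with "target busy" until that deadline expires. A sketch of the jiffies arithmetic, assuming the field holds an absolute deadline with 0 meaning "no delay in effect":

#include <linux/jiffies.h>
#include <linux/types.h>

static bool port_may_send(unsigned long *retry_delay_timestamp)
{
	if (time_after(jiffies, *retry_delay_timestamp)) {
		*retry_delay_timestamp = 0;	/* deadline passed; clear it */
		return true;
	}
	return false;	/* still inside the delay window: target busy */
}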
@@ -860,8 +870,10 @@ qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
 
-	while ((!(vha->flags.online) || ha->dpc_active ||
-	    ha->flags.mbox_busy))
+	while (((qla2x00_reset_active(vha)) || ha->dpc_active ||
+	    ha->flags.mbox_busy) ||
+	    test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
+	    test_bit(FX00_TARGET_SCAN, &vha->dpc_flags))
 		msleep(1000);
 }
 
@@ -1351,6 +1363,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req;
 
+	qlt_host_reset_handler(ha);
+
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	for (que = 0; que < ha->max_req_queues; que++) {
 		req = ha->req_q_map[que];
@@ -2384,6 +2398,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	    "Memory allocated for ha=%p.\n", ha);
 	ha->pdev = pdev;
 	ha->tgt.enable_class_2 = ql2xenableclass2;
+	INIT_LIST_HEAD(&ha->tgt.q_full_list);
+	spin_lock_init(&ha->tgt.q_full_lock);
 
 	/* Clear our data area */
 	ha->bars = bars;
@@ -2527,7 +2543,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		ha->portnum = PCI_FUNC(ha->pdev->devfn);
 		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
-		req_length = REQUEST_ENTRY_CNT_24XX;
+		req_length = REQUEST_ENTRY_CNT_83XX;
 		rsp_length = RESPONSE_ENTRY_CNT_2300;
 		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
 		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
@@ -2631,6 +2647,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	}
 
 	pci_set_drvdata(pdev, base_vha);
+	set_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);
 
 	host = base_vha->host;
 	base_vha->req = req;
@@ -2923,10 +2940,11 @@ skip_dpc:
 	    pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info),
 	    pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
 	    base_vha->host_no,
-	    ha->isp_ops->fw_version_str(base_vha, fw_str));
+	    ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str)));
 
 	qlt_add_target(ha, base_vha);
 
+	clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);
 	return 0;
 
 probe_init_failed:
@@ -2954,16 +2972,8 @@ probe_failed:
 	scsi_host_put(base_vha->host);
 
 probe_hw_failed:
-	if (IS_QLA82XX(ha)) {
-		qla82xx_idc_lock(ha);
-		qla82xx_clear_drv_active(ha);
-		qla82xx_idc_unlock(ha);
-	}
-	if (IS_QLA8044(ha)) {
-		qla8044_idc_lock(ha);
-		qla8044_clear_drv_active(ha);
-		qla8044_idc_unlock(ha);
-	}
+	qla2x00_clear_drv_active(ha);
+
 iospace_config_failed:
 	if (IS_P3P_TYPE(ha)) {
 		if (!ha->nx_pcibase)
@@ -3026,6 +3036,9 @@ qla2x00_shutdown(struct pci_dev *pdev)
 	qla2x00_free_irqs(vha);
 
 	qla2x00_free_fw_dump(ha);
+
+	pci_disable_pcie_error_reporting(pdev);
+	pci_disable_device(pdev);
 }
 
 /* Deletes all the virtual ports for a given ha */
@@ -3119,10 +3132,8 @@ qla2x00_unmap_iobases(struct qla_hw_data *ha)
 }
 
 static void
-qla2x00_clear_drv_active(scsi_qla_host_t *vha)
+qla2x00_clear_drv_active(struct qla_hw_data *ha)
 {
-	struct qla_hw_data *ha = vha->hw;
-
 	if (IS_QLA8044(ha)) {
 		qla8044_idc_lock(ha);
 		qla8044_clear_drv_active(ha);
@@ -3140,15 +3151,25 @@ qla2x00_remove_one(struct pci_dev *pdev)
 	scsi_qla_host_t *base_vha;
 	struct qla_hw_data *ha;
 
+	base_vha = pci_get_drvdata(pdev);
+	ha = base_vha->hw;
+
+	/* Indicate device removal to prevent future board_disable and wait
+	 * until any pending board_disable has completed. */
+	set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags);
+	cancel_work_sync(&ha->board_disable);
+
 	/*
-	 * If the PCI device is disabled that means that probe failed and any
-	 * resources should be have cleaned up on probe exit.
+	 * If the PCI device is disabled then there was a PCI-disconnect and
+	 * qla2x00_disable_board_on_pci_error has taken care of most of the
+	 * resources.
 	 */
-	if (!atomic_read(&pdev->enable_cnt))
+	if (!atomic_read(&pdev->enable_cnt)) {
+		scsi_host_put(base_vha->host);
+		kfree(ha);
+		pci_set_drvdata(pdev, NULL);
 		return;
-
-	base_vha = pci_get_drvdata(pdev);
-	ha = base_vha->hw;
+	}
 
 	qla2x00_wait_for_hba_ready(base_vha);
 
@@ -3173,6 +3194,10 @@ qla2x00_remove_one(struct pci_dev *pdev)
 
 	qla84xx_put_chip(base_vha);
 
+	/* Laser should be disabled only for ISP2031 */
+	if (IS_QLA2031(ha))
+		qla83xx_disable_laser(base_vha);
+
 	/* Disable timer */
 	if (base_vha->timer_active)
 		qla2x00_stop_timer(base_vha);
@@ -3191,9 +3216,9 @@ qla2x00_remove_one(struct pci_dev *pdev)
 
 	qla2x00_free_device(base_vha);
 
-	scsi_host_put(base_vha->host);
+	qla2x00_clear_drv_active(ha);
 
-	qla2x00_clear_drv_active(base_vha);
+	scsi_host_put(base_vha->host);
 
 	qla2x00_unmap_iobases(ha);
 
@@ -4808,18 +4833,15 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
 	qla82xx_md_free(base_vha);
 	qla2x00_free_queues(ha);
 
-	scsi_host_put(base_vha->host);
-
 	qla2x00_unmap_iobases(ha);
 
 	pci_release_selected_regions(ha->pdev, ha->bars);
-	kfree(ha);
-	ha = NULL;
-
 	pci_disable_pcie_error_reporting(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 
+	/*
+	 * Let qla2x00_remove_one cleanup qla_hw_data on device removal.
+	 */
 }
 
 /**************************************************************************
@@ -5192,13 +5214,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
 	 */
 	if (!pci_channel_offline(ha->pdev)) {
 		pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
-		if (w == 0xffff)
-			/*
-			 * Schedule this on the default system workqueue so that
-			 * all the adapter workqueues and the DPC thread can be
-			 * shutdown cleanly.
-			 */
-			schedule_work(&ha->board_disable);
+		qla2x00_check_reg16_for_disconnect(vha, w);
 	}
 
 	/* Make sure qla82xx_watchdog is run only for physical port */
@@ -5706,6 +5722,32 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
 	ha->flags.eeh_busy = 0;
 }
 
+static void
+qla83xx_disable_laser(scsi_qla_host_t *vha)
+{
+	uint32_t reg, data, fn;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *isp_reg = &ha->iobase->isp24;
+
+	/* pci func #/port # */
+	ql_dbg(ql_dbg_init, vha, 0x004b,
+	    "Disabling Laser for hba: %p\n", vha);
+
+	fn = (RD_REG_DWORD(&isp_reg->ctrl_status) &
+	    (BIT_15|BIT_14|BIT_13|BIT_12));
+
+	fn = (fn >> 12);
+
+	if (fn & 1)
+		reg = PORT_1_2031;
+	else
+		reg = PORT_0_2031;
+
+	data = LASER_OFF_2031;
+
+	qla83xx_wr_reg(vha, reg, data);
+}
+
 static const struct pci_error_handlers qla2xxx_err_handler = {
 	.error_detected = qla2xxx_pci_error_detected,
 	.mmio_enabled = qla2xxx_pci_mmio_enabled,
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index bca173e56f16..b656a05613e8 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2580,7 +2580,8 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
 	uint32_t faddr, left, burst;
 	struct qla_hw_data *ha = vha->hw;
 
-	if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA27XX(ha))
+	if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+	    IS_QLA27XX(ha))
 		goto try_fast;
 	if (offset & 0xfff)
 		goto slow_read;
@@ -3091,7 +3092,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
 		ha->fw_revision[2] = dcode[2];
 		ha->fw_revision[3] = dcode[3];
 		ql_dbg(ql_dbg_init, vha, 0x0060,
-		    "Firmware revision %d.%d.%d.%d.\n",
+		    "Firmware revision %d.%d.%d (%x).\n",
 		    ha->fw_revision[0], ha->fw_revision[1],
 		    ha->fw_revision[2], ha->fw_revision[3]);
 	}
@@ -3162,7 +3163,7 @@ qla2xxx_get_vpd_field(scsi_qla_host_t *vha, char *key, char *str, size_t size)
 	}
 
 	if (pos < end - len && *pos != 0x78)
-		return snprintf(str, size, "%.*s", len, pos + 3);
+		return scnprintf(str, size, "%.*s", len, pos + 3);
 
 	return 0;
 }
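The snprintf()-to-scnprintf() switch matters because the two differ only in their return value: in the kernel, snprintf() returns the length the result would have taken even when it was truncated, while scnprintf() returns the bytes actually stored. qla2xxx_get_vpd_field() hands its return value back as a "bytes produced" count, so the would-be length could over-report on truncation. A hedged illustration:

#include <linux/kernel.h>

static int vpd_field_demo(char *out, size_t size)
{
	int n;

	n = snprintf(out, size, "%s", "a-very-long-vpd-model-string");
	/* on truncation, n here can be >= size, i.e. past the end of 'out' */

	n = scnprintf(out, size, "%s", "a-very-long-vpd-model-string");
	/* n is at most size - 1, safe to return as a length */
	return n;
}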
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index e632e14180cf..829752cfd73f 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -42,6 +42,11 @@
 #include "qla_def.h"
 #include "qla_target.h"
 
+static int ql2xtgt_tape_enable;
+module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xtgt_tape_enable,
+		"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");
+
 static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
 module_param(qlini_mode, charp, S_IRUGO);
 MODULE_PARM_DESC(qlini_mode,
@@ -54,6 +59,8 @@ MODULE_PARM_DESC(qlini_mode,
 
 int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
 
+static int temp_sam_status = SAM_STAT_BUSY;
+
 /*
  * From scsi/fc/fc_fcp.h
  */
@@ -101,6 +108,10 @@ static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
 	*cmd, struct atio_from_isp *atio, int ha_locked);
 static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
 	struct qla_tgt_srr_imm *imm, int ha_lock);
+static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
+	struct qla_tgt_cmd *cmd);
+static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
+	struct atio_from_isp *atio, uint16_t status, int qfull);
 /*
  * Global Variables
  */
@@ -178,6 +189,27 @@ struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
 	return NULL;
 }
 
+static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
+
+	vha->hw->tgt.num_pend_cmds++;
+	if (vha->hw->tgt.num_pend_cmds > vha->hw->qla_stats.stat_max_pend_cmds)
+		vha->hw->qla_stats.stat_max_pend_cmds =
+			vha->hw->tgt.num_pend_cmds;
+	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
+}
+static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
+	vha->hw->tgt.num_pend_cmds--;
+	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
+}
+
 void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
 	struct atio_from_isp *atio)
 {
@@ -1008,6 +1040,8 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
 	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
 	    vha->vp_idx, nack->u.isp24.status);
 
+	/* Memory Barrier */
+	wmb();
 	qla2x00_start_iocbs(vha, vha->req);
 }
 
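This is the first of several hunks in this file that slip a wmb() between building an IOCB in the host-resident request ring and qla2x00_start_iocbs(), which publishes the new ring index to the chip. The barrier keeps the payload stores from being reordered past the doorbell store, so the HBA can never fetch a half-written entry. A minimal producer-side sketch with made-up types (an illustration of the ordering rule, not the driver's actual ring code):

#include <linux/io.h>
#include <linux/types.h>

struct demo_iocb { u32 w[16]; };

struct demo_ring {
	struct demo_iocb *ring;		/* coherent DMA memory */
	u16 index;
	void __iomem *doorbell;
};

static void demo_post(struct demo_ring *r, const struct demo_iocb *iocb)
{
	r->ring[r->index++] = *iocb;	/* 1: fill the ring entry */
	wmb();				/* 2: payload before doorbell */
	writew(r->index, r->doorbell);	/* 3: let the device see it */
}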
@@ -1031,7 +1065,7 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
 	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
 		return;
 
-	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
 	if (!resp) {
 		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
 		    "qla_target(%d): %s failed: unable to allocate "
@@ -1085,6 +1119,8 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
 
 	vha->vha_tgt.qla_tgt->abts_resp_expected++;
 
+	/* Memory Barrier */
+	wmb();
 	qla2x00_start_iocbs(vha, vha->req);
 }
@@ -1102,7 +1138,7 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
 	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
 		return;
 
-	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
 	if (ctio == NULL) {
 		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
 		    "qla_target(%d): %s failed: unable to allocate "
@@ -1130,6 +1166,8 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
 	    CTIO7_FLAGS_TERMINATE);
 	ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
 
+	/* Memory Barrier */
+	wmb();
 	qla2x00_start_iocbs(vha, vha->req);
 
 	qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
@@ -1178,6 +1216,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
 
 	mcmd->sess = sess;
 	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
+	mcmd->reset_count = vha->hw->chip_reset;
 
 	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
 	    abts->exchange_addr_to_abort);
@@ -1300,6 +1339,8 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
 	ctio->u.status1.response_len = __constant_cpu_to_le16(8);
 	ctio->u.status1.sense_data[0] = resp_code;
 
+	/* Memory Barrier */
+	wmb();
 	qla2x00_start_iocbs(ha, ha->req);
 }
 
@@ -1321,6 +1362,21 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
 	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	if (qla2x00_reset_active(vha) || mcmd->reset_count != ha->chip_reset) {
+		/*
+		 * Either a chip reset is active or this request was from
+		 * previous life, just abort the processing.
+		 */
+		ql_dbg(ql_dbg_async, vha, 0xe100,
+		    "RESET-TMR active/old-count/new-count = %d/%d/%d.\n",
+		    qla2x00_reset_active(vha), mcmd->reset_count,
+		    ha->chip_reset);
+		ha->tgt.tgt_ops->free_mcmd(mcmd);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		return;
+	}
+
 	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
 		qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
 		    0, 0, 0, 0, 0, 0);
@@ -1397,8 +1453,6 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
 		}
 	}
 
-	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
-	    prm->seg_cnt, prm->req_cnt);
 	return 0;
 
 out_err:
@@ -1431,17 +1485,12 @@ static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
 static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
 	uint32_t req_cnt)
 {
-	struct qla_hw_data *ha = vha->hw;
-	device_reg_t __iomem *reg = ha->iobase;
-	uint32_t cnt;
+	uint32_t cnt, cnt_in;
 
 	if (vha->req->cnt < (req_cnt + 2)) {
-		cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);
+		cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
+		cnt_in = (uint16_t)RD_REG_DWORD(vha->req->req_q_in);
 
-		ql_dbg(ql_dbg_tgt, vha, 0xe00a,
-		    "Request ring circled: cnt=%d, vha->->ring_index=%d, "
-		    "vha->req->cnt=%d, req_cnt=%d\n", cnt,
-		    vha->req->ring_index, vha->req->cnt, req_cnt);
 		if (vha->req->ring_index < cnt)
 			vha->req->cnt = cnt - vha->req->ring_index;
 		else
@@ -1450,11 +1499,10 @@ static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
 	}
 
 	if (unlikely(vha->req->cnt < (req_cnt + 2))) {
-		ql_dbg(ql_dbg_tgt, vha, 0xe00b,
-		    "qla_target(%d): There is no room in the "
-		    "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
-		    "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
-		    vha->req->cnt, req_cnt);
+		ql_dbg(ql_dbg_io, vha, 0x305a,
+		    "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
+		    vha->vp_idx, vha->req->ring_index,
+		    vha->req->cnt, req_cnt, cnt, cnt_in, vha->req->length);
 		return -EAGAIN;
 	}
 	vha->req->cnt -= req_cnt;
@@ -1491,7 +1539,7 @@ static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
 		if (h > DEFAULT_OUTSTANDING_COMMANDS)
 			h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
 		if (h == ha->tgt.current_handle) {
-			ql_dbg(ql_dbg_tgt, vha, 0xe04e,
+			ql_dbg(ql_dbg_io, vha, 0x305b,
 			    "qla_target(%d): Ran out of "
 			    "empty cmd slots in ha %p\n", vha->vp_idx, ha);
 			h = QLA_TGT_NULL_HANDLE;
@@ -1548,9 +1596,6 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
 	pkt->u.status0.ox_id = cpu_to_le16(temp);
 	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
 
-	ql_dbg(ql_dbg_tgt, vha, 0xe00c,
-	    "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
-	    vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT, temp);
 	return 0;
 }
 
@@ -1608,14 +1653,6 @@ static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
 		}
 		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
 
-		ql_dbg(ql_dbg_tgt, vha, 0xe00d,
-		    "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
-		    (long long unsigned int)
-		    pci_dma_hi32(sg_dma_address(prm->sg)),
-		    (long long unsigned int)
-		    pci_dma_lo32(sg_dma_address(prm->sg)),
-		    (int)sg_dma_len(prm->sg));
-
 		prm->sg = sg_next(prm->sg);
 	}
 }
@@ -1633,11 +1670,6 @@ static void qlt_load_data_segments(struct qla_tgt_prm *prm,
 	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
 	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
 
-	ql_dbg(ql_dbg_tgt, vha, 0xe00e,
-	    "iocb->scsi_status=%x, iocb->flags=%x\n",
-	    le16_to_cpu(pkt24->u.status0.scsi_status),
-	    le16_to_cpu(pkt24->u.status0.flags));
-
 	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
 
 	/* Setup packet address segment pointer */
@@ -1655,7 +1687,6 @@ static void qlt_load_data_segments(struct qla_tgt_prm *prm,
 	}
 
 	/* If scatter gather */
-	ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");
 
 	/* Load command entry data segments */
 	for (cnt = 0;
@@ -1670,14 +1701,6 @@ static void qlt_load_data_segments(struct qla_tgt_prm *prm,
 		}
 		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
 
-		ql_dbg(ql_dbg_tgt, vha, 0xe010,
-		    "S/G Segment phys_addr=%llx:%llx, len=%d\n",
-		    (long long unsigned int)pci_dma_hi32(sg_dma_address(
-		    prm->sg)),
-		    (long long unsigned int)pci_dma_lo32(sg_dma_address(
-		    prm->sg)),
-		    (int)sg_dma_len(prm->sg));
-
 		prm->sg = sg_next(prm->sg);
 	}
 
@@ -1708,6 +1731,7 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
 		    se_cmd, cmd->tag);
 
 		cmd->state = QLA_TGT_STATE_ABORTED;
+		cmd->cmd_flags |= BIT_6;
 
 		qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
 
@@ -1715,10 +1739,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
 		return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
 	}
 
-	ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u ox_id %04x\n",
-	    vha->vp_idx, cmd->tag,
-	    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
-
 	prm->cmd = cmd;
 	prm->tgt = tgt;
 	prm->rq_result = scsi_status;
@@ -1729,15 +1749,10 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
 	prm->req_cnt = 1;
 	prm->add_status_pkt = 0;
 
-	ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
-	    prm->rq_result, xmit_type);
-
 	/* Send marker if required */
 	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
 		return -EFAULT;
 
-	ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);
-
 	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
 		if (qlt_pci_map_calc_cnt(prm) != 0)
 			return -EAGAIN;
@@ -1747,7 +1762,7 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
 
 	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
 		prm->residual = se_cmd->residual_count;
-		ql_dbg(ql_dbg_tgt, vha, 0xe014,
+		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c,
 		    "Residual underflow: %d (tag %d, "
 		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
 		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
@@ -1755,7 +1770,7 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
 		prm->rq_result |= SS_RESIDUAL_UNDER;
 	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
 		prm->residual = se_cmd->residual_count;
-		ql_dbg(ql_dbg_tgt, vha, 0xe015,
+		ql_dbg(ql_dbg_io, vha, 0x305d,
 		    "Residual overflow: %d (tag %d, "
 		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
 		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
@@ -1778,10 +1793,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
 		}
 	}
 
-	ql_dbg(ql_dbg_tgt, vha, 0xe016,
-	    "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
-	    prm->req_cnt, *full_req_cnt, prm->add_status_pkt);
-
 	return 0;
 }
 
@@ -2310,6 +2321,21 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
+	if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
+		/*
+		 * Either a chip reset is active or this request was from
+		 * previous life, just abort the processing.
+		 */
+		cmd->state = QLA_TGT_STATE_PROCESSED;
+		qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+		ql_dbg(ql_dbg_async, vha, 0xe101,
+		    "RESET-RSP active/old-count/new-count = %d/%d/%d.\n",
+		    qla2x00_reset_active(vha), cmd->reset_count,
+		    ha->chip_reset);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		return 0;
+	}
+
 	/* Does F/W have an IOCBs for this request */
 	res = qlt_check_reserve_free_req(vha, full_req_cnt);
 	if (unlikely(res))
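The reset_count checks added here, in qlt_rdy_to_xfer() below, and in qlt_xmit_tm_rsp() above all follow one pattern: a command is stamped with the adapter's chip_reset generation when it is created, and any response path that later sees a different generation (or a reset in flight) abandons the work rather than writing into a request ring that has been re-initialized underneath it. The skeleton, with illustrative names:

#include <stdbool.h>

struct demo_hw  { unsigned int chip_reset; };
struct demo_cmd { unsigned int reset_count; };

/* at command creation time */
static void demo_cmd_init(struct demo_cmd *cmd, const struct demo_hw *hw)
{
	cmd->reset_count = hw->chip_reset;
}

/* at completion time, under the hardware lock */
static bool demo_cmd_is_stale(const struct demo_cmd *cmd,
			      const struct demo_hw *hw, bool reset_active)
{
	return reset_active || cmd->reset_count != hw->chip_reset;
}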
@@ -2358,8 +2384,9 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2358 struct ctio7_to_24xx *ctio = 2384 struct ctio7_to_24xx *ctio =
2359 (struct ctio7_to_24xx *)qlt_get_req_pkt(vha); 2385 (struct ctio7_to_24xx *)qlt_get_req_pkt(vha);
2360 2386
2361 ql_dbg(ql_dbg_tgt, vha, 0xe019, 2387 ql_dbg(ql_dbg_io, vha, 0x305e,
2362 "Building additional status packet\n"); 2388 "Building additional status packet 0x%p.\n",
2389 ctio);
2363 2390
2364 /* 2391 /*
2365 * T10Dif: ctio_crc2_to_fw overlay ontop of 2392 * T10Dif: ctio_crc2_to_fw overlay ontop of
@@ -2391,11 +2418,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2391 2418
2392 2419
2393 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */ 2420 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
2421 cmd->cmd_sent_to_fw = 1;
2394 2422
2395 ql_dbg(ql_dbg_tgt, vha, 0xe01a, 2423 /* Memory Barrier */
2396 "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n", 2424 wmb();
2397 pkt, scsi_status);
2398
2399 qla2x00_start_iocbs(vha, vha->req); 2425 qla2x00_start_iocbs(vha, vha->req);
2400 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2426 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2401 2427
@@ -2430,17 +2456,27 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2430 if (qlt_issue_marker(vha, 0) != QLA_SUCCESS) 2456 if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
2431 return -EIO; 2457 return -EIO;
2432 2458
2433 ql_dbg(ql_dbg_tgt, vha, 0xe01b,
2434 "%s: CTIO_start: vha(%d) se_cmd %p ox_id %04x\n",
2435 __func__, (int)vha->vp_idx, &cmd->se_cmd,
2436 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
2437
2438 /* Calculate number of entries and segments required */ 2459 /* Calculate number of entries and segments required */
2439 if (qlt_pci_map_calc_cnt(&prm) != 0) 2460 if (qlt_pci_map_calc_cnt(&prm) != 0)
2440 return -EAGAIN; 2461 return -EAGAIN;
2441 2462
2442 spin_lock_irqsave(&ha->hardware_lock, flags); 2463 spin_lock_irqsave(&ha->hardware_lock, flags);
2443 2464
2465 if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
2466 /*
2467 * Either a chip reset is active or this request was from
2468 * a previous life; just abort the processing. 2468 * a previous life; just abort the processing.
2469 */
2470 cmd->state = QLA_TGT_STATE_NEED_DATA;
2471 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2472 ql_dbg(ql_dbg_async, vha, 0xe102,
2473 "RESET-XFR active/old-count/new-count = %d/%d/%d.\n",
2474 qla2x00_reset_active(vha), cmd->reset_count,
2475 ha->chip_reset);
2476 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2477 return 0;
2478 }
2479
2444 /* Does F/W have IOCBs for this request? */ 2480 /* Does F/W have IOCBs for this request? */
2445 res = qlt_check_reserve_free_req(vha, prm.req_cnt); 2481 res = qlt_check_reserve_free_req(vha, prm.req_cnt);
2446 if (res != 0) 2482 if (res != 0)
@@ -2460,7 +2496,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2460 qlt_load_data_segments(&prm, vha); 2496 qlt_load_data_segments(&prm, vha);
2461 2497
2462 cmd->state = QLA_TGT_STATE_NEED_DATA; 2498 cmd->state = QLA_TGT_STATE_NEED_DATA;
2499 cmd->cmd_sent_to_fw = 1;
2463 2500
2501 /* Memory Barrier */
2502 wmb();
2464 qla2x00_start_iocbs(vha, vha->req); 2503 qla2x00_start_iocbs(vha, vha->req);
2465 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2504 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2466 2505
@@ -2503,7 +2542,7 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
2503 "iocb(s) %p Returned STATUS.\n", sts); 2542 "iocb(s) %p Returned STATUS.\n", sts);
2504 2543
2505 ql_dbg(ql_dbg_tgt, vha, 0xf075, 2544 ql_dbg(ql_dbg_tgt, vha, 0xf075,
2506 "dif check TGT cdb 0x%x lba 0x%llu: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n", 2545 "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
2507 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, 2546 cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
2508 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard); 2547 a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
2509 2548
@@ -2626,7 +2665,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
2626 2665
2627 ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha); 2666 ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
2628 2667
2629 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); 2668 pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
2630 if (pkt == NULL) { 2669 if (pkt == NULL) {
2631 ql_dbg(ql_dbg_tgt, vha, 0xe050, 2670 ql_dbg(ql_dbg_tgt, vha, 0xe050,
2632 "qla_target(%d): %s failed: unable to allocate " 2671 "qla_target(%d): %s failed: unable to allocate "
@@ -2669,6 +2708,8 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
2669 if (ctio24->u.status1.residual != 0) 2708 if (ctio24->u.status1.residual != 0)
2670 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER; 2709 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
2671 2710
2711 /* Memory Barrier */
2712 wmb();
2672 qla2x00_start_iocbs(vha, vha->req); 2713 qla2x00_start_iocbs(vha, vha->req);
2673 return ret; 2714 return ret;
2674} 2715}
@@ -2684,24 +2725,19 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
2684 2725
2685 if (ha_locked) { 2726 if (ha_locked) {
2686 rc = __qlt_send_term_exchange(vha, cmd, atio); 2727 rc = __qlt_send_term_exchange(vha, cmd, atio);
2728 if (rc == -ENOMEM)
2729 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
2687 goto done; 2730 goto done;
2688 } 2731 }
2689 spin_lock_irqsave(&vha->hw->hardware_lock, flags); 2732 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
2690 rc = __qlt_send_term_exchange(vha, cmd, atio); 2733 rc = __qlt_send_term_exchange(vha, cmd, atio);
2734 if (rc == -ENOMEM)
2735 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
2691 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); 2736 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
2737
2692done: 2738done:
2693 /* 2739 if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
2694 * Terminate exchange will tell fw to release any active CTIO 2740 !cmd->cmd_sent_to_fw)) {
2695 * that's in FW posession and cleanup the exchange.
2696 *
2697 * "cmd->state == QLA_TGT_STATE_ABORTED" means CTIO is still
2698 * down at FW. Free the cmd later when CTIO comes back later
2699 * w/aborted(0x2) status.
2700 *
2701 * "cmd->state != QLA_TGT_STATE_ABORTED" means CTIO is already
2702 * back w/some err. Free the cmd now.
2703 */
2704 if ((rc == 1) && (cmd->state != QLA_TGT_STATE_ABORTED)) {
2705 if (!ha_locked && !in_interrupt()) 2741 if (!ha_locked && !in_interrupt())
2706 msleep(250); /* just in case */ 2742 msleep(250); /* just in case */
2707 2743
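
With qla2x00_alloc_iocbs_ready() able to fail, __qlt_send_term_exchange() can now return -ENOMEM, and both call sites park the exchange via qlt_alloc_qfull_cmd() for a later retry instead of silently dropping it. A sketch of that send-or-park shape under assumed names (ring_space, parked, and the helpers are stand-ins, not driver API):

#include <errno.h>
#include <stdio.h>

static int ring_space;          /* free IOCB slots */
static int parked;              /* requests waiting for a slot */

/* Try to send a TERM EXCHANGE now; fail with -ENOMEM when the
 * request ring has no free IOCBs. */
static int send_term_exchange(void)
{
        if (ring_space == 0)
                return -ENOMEM;
        ring_space--;
        return 0;
}

/* On -ENOMEM, park the request so it can be replayed later, the way
 * the driver does with qlt_alloc_qfull_cmd()/qlt_free_qfull_cmds(). */
static void send_or_park(void)
{
        if (send_term_exchange() == -ENOMEM)
                parked++;
}

int main(void)
{
        ring_space = 1;
        send_or_park();         /* goes out immediately */
        send_or_park();         /* ring full: parked for later */
        printf("parked=%d\n", parked);
        return 0;
}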
@@ -2712,6 +2748,53 @@ done:
2712 return; 2748 return;
2713} 2749}
2714 2750
2751static void qlt_init_term_exchange(struct scsi_qla_host *vha)
2752{
2753 struct list_head free_list;
2754 struct qla_tgt_cmd *cmd, *tcmd;
2755
2756 vha->hw->tgt.leak_exchg_thresh_hold =
2757 (vha->hw->fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
2758
2759 cmd = tcmd = NULL;
2760 if (!list_empty(&vha->hw->tgt.q_full_list)) {
2761 INIT_LIST_HEAD(&free_list);
2762 list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
2763
2764 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
2765 list_del(&cmd->cmd_list);
2766 /* This cmd was never sent to TCM. There is no need
2767 * to schedule free or call free_cmd
2768 */
2769 qlt_free_cmd(cmd);
2770 vha->hw->tgt.num_qfull_cmds_alloc--;
2771 }
2772 }
2773 vha->hw->tgt.num_qfull_cmds_dropped = 0;
2774}
2775
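qlt_init_term_exchange() drains q_full_list with the classic splice-and-drain idiom: one O(1) list_splice_init() detaches everything while the list is shared, then the private copy is walked and freed at leisure. A userspace model with a plain singly linked list standing in for list_head (no locking shown; the driver relies on hardware_lock for the detach):

#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; };

static struct node *q_full_list;

/* Detach the whole public list in one move and hand back a private
 * copy, leaving the public head empty -- the list_splice_init() shape. */
static struct node *splice_init(struct node **head)
{
        struct node *all = *head;
        *head = NULL;
        return all;
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));
                n->next = q_full_list;
                q_full_list = n;
        }

        struct node *free_list = splice_init(&q_full_list);
        int freed = 0;
        while (free_list) {
                struct node *next = free_list->next;
                free(free_list);        /* never reached TCM: plain free */
                free_list = next;
                freed++;
        }
        printf("freed=%d list_empty=%d\n", freed, q_full_list == NULL);
        return 0;
}
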
2776static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
2777{
2778 uint32_t total_leaked;
2779
2780 total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
2781
2782 if (vha->hw->tgt.leak_exchg_thresh_hold &&
2783 (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
2784
2785 ql_dbg(ql_dbg_tgt, vha, 0xe079,
2786 "Chip reset due to exchange starvation: %d/%d.\n",
2787 total_leaked, vha->hw->fw_xcb_count);
2788
2789 if (IS_P3P_TYPE(vha->hw))
2790 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2791 else
2792 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2793 qla2xxx_wake_dpc(vha);
2794 }
2795
2796}
2797
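The leak threshold is integer math: one percent of the firmware exchange pool, truncated, times the policy percentage, so small fw_xcb_count values round down aggressively. A quick check of the arithmetic; 75 is only an assumed value here, the real LEAK_EXCHG_THRESH_HOLD_PERCENT lives in the driver headers:

#include <stdio.h>

#define LEAK_THRESH_PERCENT 75  /* assumed example value */

int main(void)
{
        unsigned int fw_xcb_count = 2048;
        /* (2048 / 100) * 75 = 20 * 75 = 1500 leaked exchanges
         * before a chip reset is requested. */
        unsigned int thresh = (fw_xcb_count / 100) * LEAK_THRESH_PERCENT;
        unsigned int leaked = 1600;

        printf("thresh=%u reset_needed=%d\n", thresh, leaked > thresh);
        return 0;
}
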
2715void qlt_free_cmd(struct qla_tgt_cmd *cmd) 2798void qlt_free_cmd(struct qla_tgt_cmd *cmd)
2716{ 2799{
2717 struct qla_tgt_sess *sess = cmd->sess; 2800 struct qla_tgt_sess *sess = cmd->sess;
@@ -2721,7 +2804,13 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
2721 __func__, &cmd->se_cmd, 2804 __func__, &cmd->se_cmd,
2722 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); 2805 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
2723 2806
2807 BUG_ON(cmd->cmd_in_wq);
2808
2809 if (!cmd->q_full)
2810 qlt_decr_num_pend_cmds(cmd->vha);
2811
2724 BUG_ON(cmd->sg_mapped); 2812 BUG_ON(cmd->sg_mapped);
2813 cmd->jiffies_at_free = get_jiffies_64();
2725 if (unlikely(cmd->free_sg)) 2814 if (unlikely(cmd->free_sg))
2726 kfree(cmd->sg); 2815 kfree(cmd->sg);
2727 2816
@@ -2729,6 +2818,7 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
2729 WARN_ON(1); 2818 WARN_ON(1);
2730 return; 2819 return;
2731 } 2820 }
2821 cmd->jiffies_at_free = get_jiffies_64();
2732 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); 2822 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
2733} 2823}
2734EXPORT_SYMBOL(qlt_free_cmd); 2824EXPORT_SYMBOL(qlt_free_cmd);
@@ -2742,6 +2832,7 @@ static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
2742 struct qla_tgt_srr_imm *imm; 2832 struct qla_tgt_srr_imm *imm;
2743 2833
2744 tgt->ctio_srr_id++; 2834 tgt->ctio_srr_id++;
2835 cmd->cmd_flags |= BIT_15;
2745 2836
2746 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019, 2837 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
2747 "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx); 2838 "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);
@@ -2863,11 +2954,9 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
2863 CTIO_INTERMEDIATE_HANDLE_MARK); 2954 CTIO_INTERMEDIATE_HANDLE_MARK);
2864 2955
2865 if (handle != QLA_TGT_NULL_HANDLE) { 2956 if (handle != QLA_TGT_NULL_HANDLE) {
2866 if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) { 2957 if (unlikely(handle == QLA_TGT_SKIP_HANDLE))
2867 ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s",
2868 "SKIP_HANDLE CTIO\n");
2869 return NULL; 2958 return NULL;
2870 } 2959
2871 /* handle-1 is actually used */ 2960 /* handle-1 is actually used */
2872 if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) { 2961 if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
2873 ql_dbg(ql_dbg_tgt, vha, 0xe052, 2962 ql_dbg(ql_dbg_tgt, vha, 0xe052,
@@ -2894,6 +2983,81 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
2894 return cmd; 2983 return cmd;
2895} 2984}
2896 2985
2986/* hardware_lock should be held by caller. */
2987static void
2988qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
2989{
2990 struct qla_hw_data *ha = vha->hw;
2991 uint32_t handle;
2992
2993 if (cmd->sg_mapped)
2994 qlt_unmap_sg(vha, cmd);
2995
2996 handle = qlt_make_handle(vha);
2997
2998 /* TODO: fix debug message type and ids. */
2999 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
3000 ql_dbg(ql_dbg_io, vha, 0xff00,
3001 "HOST-ABORT: handle=%d, state=PROCESSED.\n", handle);
3002 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3003 cmd->write_data_transferred = 0;
3004 cmd->state = QLA_TGT_STATE_DATA_IN;
3005
3006 ql_dbg(ql_dbg_io, vha, 0xff01,
3007 "HOST-ABORT: handle=%d, state=DATA_IN.\n", handle);
3008
3009 ha->tgt.tgt_ops->handle_data(cmd);
3010 return;
3011 } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
3012 ql_dbg(ql_dbg_io, vha, 0xff02,
3013 "HOST-ABORT: handle=%d, state=ABORTED.\n", handle);
3014 } else {
3015 ql_dbg(ql_dbg_io, vha, 0xff03,
3016 "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle,
3017 cmd->state);
3018 dump_stack();
3019 }
3020
3021 cmd->cmd_flags |= BIT_12;
3022 ha->tgt.tgt_ops->free_cmd(cmd);
3023}
3024
3025void
3026qlt_host_reset_handler(struct qla_hw_data *ha)
3027{
3028 struct qla_tgt_cmd *cmd;
3029 unsigned long flags;
3030 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
3031 scsi_qla_host_t *vha = NULL;
3032 struct qla_tgt *tgt = base_vha->vha_tgt.qla_tgt;
3033 uint32_t i;
3034
3035 if (!base_vha->hw->tgt.tgt_ops)
3036 return;
3037
3038 if (!tgt || qla_ini_mode_enabled(base_vha)) {
3039 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
3040 "Target mode disabled\n");
3041 return;
3042 }
3043
3044 ql_dbg(ql_dbg_tgt_mgt, vha, 0xff10,
3045 "HOST-ABORT-HNDLR: base_vha->dpc_flags=%lx.\n",
3046 base_vha->dpc_flags);
3047
3048 spin_lock_irqsave(&ha->hardware_lock, flags);
3049 for (i = 1; i < DEFAULT_OUTSTANDING_COMMANDS + 1; i++) {
3050 cmd = qlt_get_cmd(base_vha, i);
3051 if (!cmd)
3052 continue;
3053 /* ha->tgt.cmds entry is cleared by qlt_get_cmd. */
3054 vha = cmd->vha;
3055 qlt_abort_cmd_on_host_reset(vha, cmd);
3056 }
3057 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3058}
3059
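qlt_host_reset_handler() sweeps the whole outstanding-command table, handle by handle, and funnels every survivor through qlt_abort_cmd_on_host_reset(). The sketch below models just the sweep; the table, abort_cmd(), and the slot clearing are stand-ins (in the driver, qlt_get_cmd() clears the ha->tgt.cmds entry as a side effect):

#include <stdio.h>

#define MAX_OUTSTANDING 8

static void *cmds[MAX_OUTSTANDING + 1];

static void abort_cmd(void *cmd) { (void)cmd; }

/* Handle 0 is skipped because the driver reserves it
 * ("handle-1 is actually used"). */
static int sweep_outstanding(void)
{
        int aborted = 0;

        for (int i = 1; i < MAX_OUTSTANDING + 1; i++) {
                if (!cmds[i])
                        continue;
                abort_cmd(cmds[i]);
                cmds[i] = NULL;         /* entry cleared on lookup */
                aborted++;
        }
        return aborted;
}

int main(void)
{
        int dummy;

        cmds[3] = &dummy;
        cmds[7] = &dummy;
        printf("aborted=%d\n", sweep_outstanding());
        return 0;
}
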
3060
2897/* 3061/*
2898 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire. 3062 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
2899 */ 3063 */
@@ -2905,10 +3069,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
2905 struct target_core_fabric_ops *tfo; 3069 struct target_core_fabric_ops *tfo;
2906 struct qla_tgt_cmd *cmd; 3070 struct qla_tgt_cmd *cmd;
2907 3071
2908 ql_dbg(ql_dbg_tgt, vha, 0xe01e,
2909 "qla_target(%d): handle(ctio %p status %#x) <- %08x\n",
2910 vha->vp_idx, ctio, status, handle);
2911
2912 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) { 3072 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
2913 /* That could happen only in case of an error/reset/abort */ 3073 /* That could happen only in case of an error/reset/abort */
2914 if (status != CTIO_SUCCESS) { 3074 if (status != CTIO_SUCCESS) {
@@ -2925,6 +3085,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
2925 3085
2926 se_cmd = &cmd->se_cmd; 3086 se_cmd = &cmd->se_cmd;
2927 tfo = se_cmd->se_tfo; 3087 tfo = se_cmd->se_tfo;
3088 cmd->cmd_sent_to_fw = 0;
2928 3089
2929 if (cmd->sg_mapped) 3090 if (cmd->sg_mapped)
2930 qlt_unmap_sg(vha, cmd); 3091 qlt_unmap_sg(vha, cmd);
@@ -3011,7 +3172,8 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
3011 * level. 3172 * level.
3012 */ 3173 */
3013 if ((cmd->state != QLA_TGT_STATE_NEED_DATA) && 3174 if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
3014 (cmd->state != QLA_TGT_STATE_ABORTED)) { 3175 (cmd->state != QLA_TGT_STATE_ABORTED)) {
3176 cmd->cmd_flags |= BIT_13;
3015 if (qlt_term_ctio_exchange(vha, ctio, cmd, status)) 3177 if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
3016 return; 3178 return;
3017 } 3179 }
@@ -3019,7 +3181,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
3019skip_term: 3181skip_term:
3020 3182
3021 if (cmd->state == QLA_TGT_STATE_PROCESSED) { 3183 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
3022 ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd); 3184 ;
3023 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 3185 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3024 int rx_status = 0; 3186 int rx_status = 0;
3025 3187
@@ -3030,10 +3192,6 @@ skip_term:
3030 else 3192 else
3031 cmd->write_data_transferred = 1; 3193 cmd->write_data_transferred = 1;
3032 3194
3033 ql_dbg(ql_dbg_tgt, vha, 0xe020,
3034 "Data received, context %x, rx_status %d\n",
3035 0x0, rx_status);
3036
3037 ha->tgt.tgt_ops->handle_data(cmd); 3195 ha->tgt.tgt_ops->handle_data(cmd);
3038 return; 3196 return;
3039 } else if (cmd->state == QLA_TGT_STATE_ABORTED) { 3197 } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
@@ -3051,6 +3209,7 @@ skip_term:
3051 dump_stack(); 3209 dump_stack();
3052 } 3210 }
3053 3211
3212
3054 ha->tgt.tgt_ops->free_cmd(cmd); 3213 ha->tgt.tgt_ops->free_cmd(cmd);
3055} 3214}
3056 3215
@@ -3103,6 +3262,8 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
3103 uint32_t data_length; 3262 uint32_t data_length;
3104 int ret, fcp_task_attr, data_dir, bidi = 0; 3263 int ret, fcp_task_attr, data_dir, bidi = 0;
3105 3264
3265 cmd->cmd_in_wq = 0;
3266 cmd->cmd_flags |= BIT_1;
3106 if (tgt->tgt_stop) 3267 if (tgt->tgt_stop)
3107 goto out_term; 3268 goto out_term;
3108 3269
@@ -3128,11 +3289,6 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
3128 &atio->u.isp24.fcp_cmnd.add_cdb[ 3289 &atio->u.isp24.fcp_cmnd.add_cdb[
3129 atio->u.isp24.fcp_cmnd.add_cdb_len])); 3290 atio->u.isp24.fcp_cmnd.add_cdb_len]));
3130 3291
3131 ql_dbg(ql_dbg_tgt, vha, 0xe022,
3132 "qla_target: START qla cmd: %p se_cmd %p lun: 0x%04x (tag %d) len(%d) ox_id %x\n",
3133 cmd, &cmd->se_cmd, cmd->unpacked_lun, cmd->tag, data_length,
3134 cmd->atio.u.isp24.fcp_hdr.ox_id);
3135
3136 ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length, 3292 ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
3137 fcp_task_attr, data_dir, bidi); 3293 fcp_task_attr, data_dir, bidi);
3138 if (ret != 0) 3294 if (ret != 0)
@@ -3146,13 +3302,16 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
3146 return; 3302 return;
3147 3303
3148out_term: 3304out_term:
3149 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd); 3305 ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
3150 /* 3306 /*
3151 * cmd has not been sent to the target yet, so pass NULL as the second 3307 * cmd has not been sent to the target yet, so pass NULL as the second
3152 * argument to qlt_send_term_exchange() and free the memory here. 3308 * argument to qlt_send_term_exchange() and free the memory here.
3153 */ 3309 */
3310 cmd->cmd_flags |= BIT_2;
3154 spin_lock_irqsave(&ha->hardware_lock, flags); 3311 spin_lock_irqsave(&ha->hardware_lock, flags);
3155 qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); 3312 qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
3313
3314 qlt_decr_num_pend_cmds(vha);
3156 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); 3315 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
3157 ha->tgt.tgt_ops->put_sess(sess); 3316 ha->tgt.tgt_ops->put_sess(sess);
3158 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3317 spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3183,6 +3342,7 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
3183 memcpy(&cmd->atio, atio, sizeof(*atio)); 3342 memcpy(&cmd->atio, atio, sizeof(*atio));
3184 cmd->state = QLA_TGT_STATE_NEW; 3343 cmd->state = QLA_TGT_STATE_NEW;
3185 cmd->tgt = vha->vha_tgt.qla_tgt; 3344 cmd->tgt = vha->vha_tgt.qla_tgt;
3345 qlt_incr_num_pend_cmds(vha);
3186 cmd->vha = vha; 3346 cmd->vha = vha;
3187 cmd->se_cmd.map_tag = tag; 3347 cmd->se_cmd.map_tag = tag;
3188 cmd->sess = sess; 3348 cmd->sess = sess;
@@ -3264,7 +3424,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3264 struct qla_tgt_cmd *cmd; 3424 struct qla_tgt_cmd *cmd;
3265 3425
3266 if (unlikely(tgt->tgt_stop)) { 3426 if (unlikely(tgt->tgt_stop)) {
3267 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021, 3427 ql_dbg(ql_dbg_io, vha, 0x3061,
3268 "New command while device %p is shutting down\n", tgt); 3428 "New command while device %p is shutting down\n", tgt);
3269 return -EFAULT; 3429 return -EFAULT;
3270 } 3430 }
@@ -3277,6 +3437,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3277 return -ENOMEM; 3437 return -ENOMEM;
3278 3438
3279 memcpy(&op->atio, atio, sizeof(*atio)); 3439 memcpy(&op->atio, atio, sizeof(*atio));
3440 op->vha = vha;
3280 INIT_WORK(&op->work, qlt_create_sess_from_atio); 3441 INIT_WORK(&op->work, qlt_create_sess_from_atio);
3281 queue_work(qla_tgt_wq, &op->work); 3442 queue_work(qla_tgt_wq, &op->work);
3282 return 0; 3443 return 0;
@@ -3288,12 +3449,19 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3288 3449
3289 cmd = qlt_get_tag(vha, sess, atio); 3450 cmd = qlt_get_tag(vha, sess, atio);
3290 if (!cmd) { 3451 if (!cmd) {
3291 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e, 3452 ql_dbg(ql_dbg_io, vha, 0x3062,
3292 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx); 3453 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
3293 ha->tgt.tgt_ops->put_sess(sess); 3454 ha->tgt.tgt_ops->put_sess(sess);
3294 return -ENOMEM; 3455 return -ENOMEM;
3295 } 3456 }
3296 3457
3458 cmd->cmd_flags = 0;
3459 cmd->jiffies_at_alloc = get_jiffies_64();
3460
3461 cmd->reset_count = vha->hw->chip_reset;
3462
3463 cmd->cmd_in_wq = 1;
3464 cmd->cmd_flags |= BIT_0;
3297 INIT_WORK(&cmd->work, qlt_do_work); 3465 INIT_WORK(&cmd->work, qlt_do_work);
3298 queue_work(qla_tgt_wq, &cmd->work); 3466 queue_work(qla_tgt_wq, &cmd->work);
3299 return 0; 3467 return 0;
@@ -3327,6 +3495,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
3327 } 3495 }
3328 mcmd->tmr_func = fn; 3496 mcmd->tmr_func = fn;
3329 mcmd->flags = flags; 3497 mcmd->flags = flags;
3498 mcmd->reset_count = vha->hw->chip_reset;
3330 3499
3331 switch (fn) { 3500 switch (fn) {
3332 case QLA_TGT_CLEAR_ACA: 3501 case QLA_TGT_CLEAR_ACA:
@@ -3462,6 +3631,7 @@ static int __qlt_abort_task(struct scsi_qla_host *vha,
3462 3631
3463 lun = a->u.isp24.fcp_cmnd.lun; 3632 lun = a->u.isp24.fcp_cmnd.lun;
3464 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); 3633 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
3634 mcmd->reset_count = vha->hw->chip_reset;
3465 3635
3466 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK, 3636 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
3467 le16_to_cpu(iocb->u.isp2x.seq_id)); 3637 le16_to_cpu(iocb->u.isp2x.seq_id));
@@ -3753,8 +3923,10 @@ static void qlt_handle_srr(struct scsi_qla_host *vha,
3753 qlt_send_notify_ack(vha, ntfy, 3923 qlt_send_notify_ack(vha, ntfy,
3754 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0); 3924 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
3755 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3925 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3756 if (xmit_type & QLA_TGT_XMIT_DATA) 3926 if (xmit_type & QLA_TGT_XMIT_DATA) {
3927 cmd->cmd_flags |= BIT_8;
3757 qlt_rdy_to_xfer(cmd); 3928 qlt_rdy_to_xfer(cmd);
3929 }
3758 } else { 3930 } else {
3759 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066, 3931 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
3760 "qla_target(%d): SRR for out data for cmd " 3932 "qla_target(%d): SRR for out data for cmd "
@@ -3772,8 +3944,10 @@ static void qlt_handle_srr(struct scsi_qla_host *vha,
3772 } 3944 }
3773 3945
3774 /* Transmit response in case of status and data-in cases */ 3946 /* Transmit response in case of status and data-in cases */
3775 if (resp) 3947 if (resp) {
3948 cmd->cmd_flags |= BIT_7;
3776 qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status); 3949 qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
3950 }
3777 3951
3778 return; 3952 return;
3779 3953
@@ -3786,8 +3960,10 @@ out_reject:
3786 if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 3960 if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3787 cmd->state = QLA_TGT_STATE_DATA_IN; 3961 cmd->state = QLA_TGT_STATE_DATA_IN;
3788 dump_stack(); 3962 dump_stack();
3789 } else 3963 } else {
3964 cmd->cmd_flags |= BIT_9;
3790 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); 3965 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
3966 }
3791 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3967 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3792} 3968}
3793 3969
@@ -3901,7 +4077,7 @@ static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
3901 4077
3902 tgt->imm_srr_id++; 4078 tgt->imm_srr_id++;
3903 4079
3904 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n", 4080 ql_log(ql_log_warn, vha, 0xf02d, "qla_target(%d): SRR received\n",
3905 vha->vp_idx); 4081 vha->vp_idx);
3906 4082
3907 imm = kzalloc(sizeof(*imm), GFP_ATOMIC); 4083 imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
@@ -4121,7 +4297,7 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
4121 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire. 4297 * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
4122 * This function sends busy to ISP 2xxx or 24xx. 4298 * This function sends busy to ISP 2xxx or 24xx.
4123 */ 4299 */
4124static void qlt_send_busy(struct scsi_qla_host *vha, 4300static int __qlt_send_busy(struct scsi_qla_host *vha,
4125 struct atio_from_isp *atio, uint16_t status) 4301 struct atio_from_isp *atio, uint16_t status)
4126{ 4302{
4127 struct ctio7_to_24xx *ctio24; 4303 struct ctio7_to_24xx *ctio24;
@@ -4133,16 +4309,16 @@ static void qlt_send_busy(struct scsi_qla_host *vha,
4133 atio->u.isp24.fcp_hdr.s_id); 4309 atio->u.isp24.fcp_hdr.s_id);
4134 if (!sess) { 4310 if (!sess) {
4135 qlt_send_term_exchange(vha, NULL, atio, 1); 4311 qlt_send_term_exchange(vha, NULL, atio, 1);
4136 return; 4312 return 0;
4137 } 4313 }
4138 /* Sending marker isn't necessary, since we called from ISR */ 4314 /* Sending marker isn't necessary, since we called from ISR */
4139 4315
4140 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); 4316 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
4141 if (!pkt) { 4317 if (!pkt) {
4142 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e, 4318 ql_dbg(ql_dbg_io, vha, 0x3063,
4143 "qla_target(%d): %s failed: unable to allocate " 4319 "qla_target(%d): %s failed: unable to allocate "
4144 "request packet", vha->vp_idx, __func__); 4320 "request packet", vha->vp_idx, __func__);
4145 return; 4321 return -ENOMEM;
4146 } 4322 }
4147 4323
4148 pkt->entry_count = 1; 4324 pkt->entry_count = 1;
@@ -4167,13 +4343,192 @@ static void qlt_send_busy(struct scsi_qla_host *vha,
4167 */ 4343 */
4168 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); 4344 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
4169 ctio24->u.status1.scsi_status = cpu_to_le16(status); 4345 ctio24->u.status1.scsi_status = cpu_to_le16(status);
4170 ctio24->u.status1.residual = get_unaligned((uint32_t *) 4346 /* Memory Barrier */
4171 &atio->u.isp24.fcp_cmnd.add_cdb[ 4347 wmb();
4172 atio->u.isp24.fcp_cmnd.add_cdb_len]);
4173 if (ctio24->u.status1.residual != 0)
4174 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
4175
4176 qla2x00_start_iocbs(vha, vha->req); 4348 qla2x00_start_iocbs(vha, vha->req);
4349 return 0;
4350}
4351
4352/*
4353 * This routine is used to allocate a command for either a QFull condition
4354 * (i.e., reply SAM_STAT_BUSY) or to terminate an exchange that did not go
4355 * out previously.
4356 */
4357static void
4358qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
4359 struct atio_from_isp *atio, uint16_t status, int qfull)
4360{
4361 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4362 struct qla_hw_data *ha = vha->hw;
4363 struct qla_tgt_sess *sess;
4364 struct se_session *se_sess;
4365 struct qla_tgt_cmd *cmd;
4366 int tag;
4367
4368 if (unlikely(tgt->tgt_stop)) {
4369 ql_dbg(ql_dbg_io, vha, 0x300a,
4370 "New command while device %p is shutting down\n", tgt);
4371 return;
4372 }
4373
4374 if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
4375 vha->hw->tgt.num_qfull_cmds_dropped++;
4376 if (vha->hw->tgt.num_qfull_cmds_dropped >
4377 vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
4378 vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
4379 vha->hw->tgt.num_qfull_cmds_dropped;
4380
4381 ql_dbg(ql_dbg_io, vha, 0x3068,
4382 "qla_target(%d): %s: QFull CMD dropped[%d]\n",
4383 vha->vp_idx, __func__,
4384 vha->hw->tgt.num_qfull_cmds_dropped);
4385
4386 qlt_chk_exch_leak_thresh_hold(vha);
4387 return;
4388 }
4389
4390 sess = ha->tgt.tgt_ops->find_sess_by_s_id
4391 (vha, atio->u.isp24.fcp_hdr.s_id);
4392 if (!sess)
4393 return;
4394
4395 se_sess = sess->se_sess;
4396
4397 tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
4398 if (tag < 0)
4399 return;
4400
4401 cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
4402 if (!cmd) {
4403 ql_dbg(ql_dbg_io, vha, 0x3009,
4404 "qla_target(%d): %s: Allocation of cmd failed\n",
4405 vha->vp_idx, __func__);
4406
4407 vha->hw->tgt.num_qfull_cmds_dropped++;
4408 if (vha->hw->tgt.num_qfull_cmds_dropped >
4409 vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
4410 vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
4411 vha->hw->tgt.num_qfull_cmds_dropped;
4412
4413 qlt_chk_exch_leak_thresh_hold(vha);
4414 return;
4415 }
4416
4417 memset(cmd, 0, sizeof(struct qla_tgt_cmd));
4418
4419 qlt_incr_num_pend_cmds(vha);
4420 INIT_LIST_HEAD(&cmd->cmd_list);
4421 memcpy(&cmd->atio, atio, sizeof(*atio));
4422
4423 cmd->tgt = vha->vha_tgt.qla_tgt;
4424 cmd->vha = vha;
4425 cmd->reset_count = vha->hw->chip_reset;
4426 cmd->q_full = 1;
4427
4428 if (qfull) {
4429 cmd->q_full = 1;
4430 /* NOTE: borrowing the state field to carry the status */
4431 cmd->state = status;
4432 } else
4433 cmd->term_exchg = 1;
4434
4435 list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
4436
4437 vha->hw->tgt.num_qfull_cmds_alloc++;
4438 if (vha->hw->tgt.num_qfull_cmds_alloc >
4439 vha->hw->qla_stats.stat_max_qfull_cmds_alloc)
4440 vha->hw->qla_stats.stat_max_qfull_cmds_alloc =
4441 vha->hw->tgt.num_qfull_cmds_alloc;
4442}
4443
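qlt_alloc_qfull_cmd() maintains both live counters (num_qfull_cmds_alloc, num_qfull_cmds_dropped) and sticky high-water marks (the stat_max_* fields) with a repeated compare-and-copy. A tiny helper capturing that watermark pattern, with illustrative names:

#include <stdio.h>

/* Increment the live counter and pull the sticky maximum up with it. */
static void bump_with_watermark(unsigned int *live, unsigned int *max)
{
        (*live)++;
        if (*live > *max)
                *max = *live;
}

int main(void)
{
        unsigned int alloc = 0, max_alloc = 0;

        bump_with_watermark(&alloc, &max_alloc);
        bump_with_watermark(&alloc, &max_alloc);
        alloc--;                        /* one qfull cmd replayed/freed */
        bump_with_watermark(&alloc, &max_alloc);

        printf("live=%u max=%u\n", alloc, max_alloc);   /* live=2 max=2 */
        return 0;
}
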
4444int
4445qlt_free_qfull_cmds(struct scsi_qla_host *vha)
4446{
4447 struct qla_hw_data *ha = vha->hw;
4448 unsigned long flags;
4449 struct qla_tgt_cmd *cmd, *tcmd;
4450 struct list_head free_list;
4451 int rc = 0;
4452
4453 if (list_empty(&ha->tgt.q_full_list))
4454 return 0;
4455
4456 INIT_LIST_HEAD(&free_list);
4457
4458 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
4459
4460 if (list_empty(&ha->tgt.q_full_list)) {
4461 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
4462 return 0;
4463 }
4464
4465 list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) {
4466 if (cmd->q_full)
4467 /* cmd->state is a borrowed field to hold status */
4468 rc = __qlt_send_busy(vha, &cmd->atio, cmd->state);
4469 else if (cmd->term_exchg)
4470 rc = __qlt_send_term_exchange(vha, NULL, &cmd->atio);
4471
4472 if (rc == -ENOMEM)
4473 break;
4474
4475 if (cmd->q_full)
4476 ql_dbg(ql_dbg_io, vha, 0x3006,
4477 "%s: busy sent for ox_id[%04x]\n", __func__,
4478 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
4479 else if (cmd->term_exchg)
4480 ql_dbg(ql_dbg_io, vha, 0x3007,
4481 "%s: Term exchg sent for ox_id[%04x]\n", __func__,
4482 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
4483 else
4484 ql_dbg(ql_dbg_io, vha, 0x3008,
4485 "%s: Unexpected cmd in QFull list %p\n", __func__,
4486 cmd);
4487
4488 list_del(&cmd->cmd_list);
4489 list_add_tail(&cmd->cmd_list, &free_list);
4490
4491 /* piggy-back on the hardware_lock for protection */
4492 vha->hw->tgt.num_qfull_cmds_alloc--;
4493 }
4494 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
4495
4496 cmd = NULL;
4497
4498 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
4499 list_del(&cmd->cmd_list);
4500 /* This cmd was never sent to TCM. There is no need
4501 * to schedule free or call free_cmd
4502 */
4503 qlt_free_cmd(cmd);
4504 }
4505 return rc;
4506}
4507
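qlt_free_qfull_cmds() is a two-phase drain: under hardware_lock it replays each parked request and moves the completed ones onto a private free_list, then it drops the lock before calling qlt_free_cmd() on them. A simplified model with a mutex standing in for hardware_lock and the -ENOMEM early exit omitted:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *q_full_list;

static void drain(void)
{
        struct node *free_list = NULL;

        pthread_mutex_lock(&lock);
        while (q_full_list) {                   /* phase 1: unlink */
                struct node *n = q_full_list;
                q_full_list = n->next;
                n->next = free_list;
                free_list = n;
        }
        pthread_mutex_unlock(&lock);

        while (free_list) {                     /* phase 2: free unlocked */
                struct node *next = free_list->next;
                free(free_list);
                free_list = next;
        }
}

int main(void)
{
        for (int i = 0; i < 4; i++) {
                struct node *n = malloc(sizeof(*n));
                n->next = q_full_list;
                q_full_list = n;
        }
        drain();
        printf("list_empty=%d\n", q_full_list == NULL);
        return 0;
}
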
4508static void
4509qlt_send_busy(struct scsi_qla_host *vha,
4510 struct atio_from_isp *atio, uint16_t status)
4511{
4512 int rc = 0;
4513
4514 rc = __qlt_send_busy(vha, atio, status);
4515 if (rc == -ENOMEM)
4516 qlt_alloc_qfull_cmd(vha, atio, status, 1);
4517}
4518
4519static int
4520qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
4521 struct atio_from_isp *atio)
4522{
4523 struct qla_hw_data *ha = vha->hw;
4524 uint16_t status;
4525
4526 if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
4527 return 0;
4528
4529 status = temp_sam_status;
4530 qlt_send_busy(vha, atio, status);
4531 return 1;
4177} 4532}
4178 4533
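
qlt_chk_qfull_thresh_hold() is plain admission control: once pending commands reach a threshold derived from the firmware exchange count, the ATIO is answered with a busy status instead of being queued. Sketch below; the fixed threshold and the standard SCSI BUSY status (0x08) stand in for Q_FULL_THRESH_HOLD() and temp_sam_status:

#include <stdio.h>

#define SAM_STAT_BUSY 0x08

static unsigned int num_pend_cmds;
static unsigned int q_full_thresh = 4;  /* stand-in threshold */

/* Returns 0 to admit the command, 1 when busy was sent instead. */
static int chk_qfull_thresh_hold(void)
{
        if (num_pend_cmds < q_full_thresh)
                return 0;
        printf("sending busy (0x%02x)\n", SAM_STAT_BUSY);
        return 1;
}

int main(void)
{
        for (num_pend_cmds = 0; num_pend_cmds <= q_full_thresh;
            num_pend_cmds++)
                printf("pend=%u throttled=%d\n", num_pend_cmds,
                    chk_qfull_thresh_hold());
        return 0;
}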
4179/* ha->hardware_lock is supposed to be held on entry */ 4534/* ha->hardware_lock is supposed to be held on entry */
@@ -4186,14 +4541,10 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
4186 int rc; 4541 int rc;
4187 4542
4188 if (unlikely(tgt == NULL)) { 4543 if (unlikely(tgt == NULL)) {
4189 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039, 4544 ql_dbg(ql_dbg_io, vha, 0x3064,
4190 "ATIO pkt, but no tgt (ha %p)", ha); 4545 "ATIO pkt, but no tgt (ha %p)", ha);
4191 return; 4546 return;
4192 } 4547 }
4193 ql_dbg(ql_dbg_tgt, vha, 0xe02c,
4194 "qla_target(%d): ATIO pkt %p: type %02x count %02x",
4195 vha->vp_idx, atio, atio->u.raw.entry_type,
4196 atio->u.raw.entry_count);
4197 /* 4548 /*
4198 * In tgt_stop mode we should also allow all requests to pass. 4549 * In tgt_stop mode we should also allow all requests to pass.
4199 * Otherwise, some commands can get stuck. 4550 * Otherwise, some commands can get stuck.
@@ -4203,33 +4554,28 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
4203 4554
4204 switch (atio->u.raw.entry_type) { 4555 switch (atio->u.raw.entry_type) {
4205 case ATIO_TYPE7: 4556 case ATIO_TYPE7:
4206 ql_dbg(ql_dbg_tgt, vha, 0xe02d,
4207 "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, cdb %x, add_cdb_len %x, data_length %04x, s_id %02x%02x%02x\n",
4208 vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
4209 atio->u.isp24.fcp_cmnd.rddata,
4210 atio->u.isp24.fcp_cmnd.wrdata,
4211 atio->u.isp24.fcp_cmnd.cdb[0],
4212 atio->u.isp24.fcp_cmnd.add_cdb_len,
4213 be32_to_cpu(get_unaligned((uint32_t *)
4214 &atio->u.isp24.fcp_cmnd.add_cdb[
4215 atio->u.isp24.fcp_cmnd.add_cdb_len])),
4216 atio->u.isp24.fcp_hdr.s_id[0],
4217 atio->u.isp24.fcp_hdr.s_id[1],
4218 atio->u.isp24.fcp_hdr.s_id[2]);
4219
4220 if (unlikely(atio->u.isp24.exchange_addr == 4557 if (unlikely(atio->u.isp24.exchange_addr ==
4221 ATIO_EXCHANGE_ADDRESS_UNKNOWN)) { 4558 ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
4222 ql_dbg(ql_dbg_tgt, vha, 0xe058, 4559 ql_dbg(ql_dbg_io, vha, 0x3065,
4223 "qla_target(%d): ATIO_TYPE7 " 4560 "qla_target(%d): ATIO_TYPE7 "
4224 "received with UNKNOWN exchange address, " 4561 "received with UNKNOWN exchange address, "
4225 "sending QUEUE_FULL\n", vha->vp_idx); 4562 "sending QUEUE_FULL\n", vha->vp_idx);
4226 qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL); 4563 qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
4227 break; 4564 break;
4228 } 4565 }
4229 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) 4566
4567
4568
4569 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
4570 rc = qlt_chk_qfull_thresh_hold(vha, atio);
4571 if (rc != 0) {
4572 tgt->irq_cmd_count--;
4573 return;
4574 }
4230 rc = qlt_handle_cmd_for_atio(vha, atio); 4575 rc = qlt_handle_cmd_for_atio(vha, atio);
4231 else 4576 } else {
4232 rc = qlt_handle_task_mgmt(vha, atio); 4577 rc = qlt_handle_task_mgmt(vha, atio);
4578 }
4233 if (unlikely(rc != 0)) { 4579 if (unlikely(rc != 0)) {
4234 if (rc == -ESRCH) { 4580 if (rc == -ESRCH) {
4235#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ 4581#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
@@ -4293,11 +4639,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
4293 return; 4639 return;
4294 } 4640 }
4295 4641
4296 ql_dbg(ql_dbg_tgt, vha, 0xe02f,
4297 "qla_target(%d): response pkt %p: T %02x C %02x S %02x "
4298 "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type,
4299 pkt->entry_count, pkt->entry_status, pkt->handle);
4300
4301 /* 4642 /*
4302 * In tgt_stop mode we should also allow all requests to pass. 4643 * In tgt_stop mode we should also allow all requests to pass.
4303 * Otherwise, some commands can get stuck. 4644 * Otherwise, some commands can get stuck.
@@ -4310,9 +4651,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
4310 case CTIO_TYPE7: 4651 case CTIO_TYPE7:
4311 { 4652 {
4312 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; 4653 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
4313 ql_dbg(ql_dbg_tgt, vha, 0xe030,
4314 "CTIO[0x%x] 12/CTIO7 7A/CRC2: instance %d\n",
4315 entry->entry_type, vha->vp_idx);
4316 qlt_do_ctio_completion(vha, entry->handle, 4654 qlt_do_ctio_completion(vha, entry->handle,
4317 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 4655 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
4318 entry); 4656 entry);
@@ -4323,15 +4661,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
4323 { 4661 {
4324 struct atio_from_isp *atio = (struct atio_from_isp *)pkt; 4662 struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
4325 int rc; 4663 int rc;
4326 ql_dbg(ql_dbg_tgt, vha, 0xe031,
4327 "ACCEPT_TGT_IO instance %d status %04x "
4328 "lun %04x read/write %d data_length %04x "
4329 "target_id %02x rx_id %04x\n ", vha->vp_idx,
4330 le16_to_cpu(atio->u.isp2x.status),
4331 le16_to_cpu(atio->u.isp2x.lun),
4332 atio->u.isp2x.execution_codes,
4333 le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha,
4334 atio), atio->u.isp2x.rx_id);
4335 if (atio->u.isp2x.status != 4664 if (atio->u.isp2x.status !=
4336 __constant_cpu_to_le16(ATIO_CDB_VALID)) { 4665 __constant_cpu_to_le16(ATIO_CDB_VALID)) {
4337 ql_dbg(ql_dbg_tgt, vha, 0xe05e, 4666 ql_dbg(ql_dbg_tgt, vha, 0xe05e,
@@ -4340,10 +4669,12 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
4340 le16_to_cpu(atio->u.isp2x.status)); 4669 le16_to_cpu(atio->u.isp2x.status));
4341 break; 4670 break;
4342 } 4671 }
4343 ql_dbg(ql_dbg_tgt, vha, 0xe032, 4672
4344 "FCP CDB: 0x%02x, sizeof(cdb): %lu", 4673 rc = qlt_chk_qfull_thresh_hold(vha, atio);
4345 atio->u.isp2x.cdb[0], (unsigned long 4674 if (rc != 0) {
4346 int)sizeof(atio->u.isp2x.cdb)); 4675 tgt->irq_cmd_count--;
4676 return;
4677 }
4347 4678
4348 rc = qlt_handle_cmd_for_atio(vha, atio); 4679 rc = qlt_handle_cmd_for_atio(vha, atio);
4349 if (unlikely(rc != 0)) { 4680 if (unlikely(rc != 0)) {
@@ -4376,8 +4707,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
4376 case CONTINUE_TGT_IO_TYPE: 4707 case CONTINUE_TGT_IO_TYPE:
4377 { 4708 {
4378 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 4709 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
4379 ql_dbg(ql_dbg_tgt, vha, 0xe033,
4380 "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx);
4381 qlt_do_ctio_completion(vha, entry->handle, 4710 qlt_do_ctio_completion(vha, entry->handle,
4382 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 4711 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
4383 entry); 4712 entry);
@@ -4387,8 +4716,6 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
4387 case CTIO_A64_TYPE: 4716 case CTIO_A64_TYPE:
4388 { 4717 {
4389 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt; 4718 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
4390 ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n",
4391 vha->vp_idx);
4392 qlt_do_ctio_completion(vha, entry->handle, 4719 qlt_do_ctio_completion(vha, entry->handle,
4393 le16_to_cpu(entry->status)|(pkt->entry_status << 16), 4720 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
4394 entry); 4721 entry);
@@ -4492,11 +4819,6 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
4492 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 4819 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4493 int login_code; 4820 int login_code;
4494 4821
4495 ql_dbg(ql_dbg_tgt, vha, 0xe039,
4496 "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
4497 vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
4498 ha->operating_mode, ha->current_topology);
4499
4500 if (!ha->tgt.tgt_ops) 4822 if (!ha->tgt.tgt_ops)
4501 return; 4823 return;
4502 4824
@@ -4573,11 +4895,6 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
4573 break; 4895 break;
4574 4896
4575 default: 4897 default:
4576 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
4577 "qla_target(%d): Async event %#x occurred: "
4578 "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
4579 code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
4580 le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
4581 break; 4898 break;
4582 } 4899 }
4583 4900
@@ -4598,8 +4915,6 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
4598 return NULL; 4915 return NULL;
4599 } 4916 }
4600 4917
4601 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id);
4602
4603 fcport->loop_id = loop_id; 4918 fcport->loop_id = loop_id;
4604 4919
4605 rc = qla2x00_get_port_database(vha, fcport, 0); 4920 rc = qla2x00_get_port_database(vha, fcport, 0);
@@ -4898,6 +5213,10 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
4898 qlt_release(vha->vha_tgt.qla_tgt); 5213 qlt_release(vha->vha_tgt.qla_tgt);
4899 return 0; 5214 return 0;
4900 } 5215 }
5216
5217 /* free left over qfull cmds */
5218 qlt_init_term_exchange(vha);
5219
4901 mutex_lock(&qla_tgt_mutex); 5220 mutex_lock(&qla_tgt_mutex);
4902 list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry); 5221 list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
4903 mutex_unlock(&qla_tgt_mutex); 5222 mutex_unlock(&qla_tgt_mutex);
@@ -5295,8 +5614,13 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
5295 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13); 5614 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
5296 /* Enable initial LIP */ 5615 /* Enable initial LIP */
5297 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9); 5616 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
5298 /* Enable FC tapes support */ 5617 if (ql2xtgt_tape_enable)
5299 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12); 5618 /* Enable FC Tape support */
5619 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
5620 else
5621 /* Disable FC Tape support */
5622 nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
5623
5300 /* Disable Full Login after LIP */ 5624 /* Disable Full Login after LIP */
5301 nv->host_p &= __constant_cpu_to_le32(~BIT_10); 5625 nv->host_p &= __constant_cpu_to_le32(~BIT_10);
5302 /* Enable target PRLI control */ 5626 /* Enable target PRLI control */
@@ -5378,8 +5702,13 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
5378 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13); 5702 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
5379 /* Enable initial LIP */ 5703 /* Enable initial LIP */
5380 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9); 5704 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
5381 /* Enable FC tapes support */ 5705 if (ql2xtgt_tape_enable)
5382 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12); 5706 /* Enable FC tape support */
5707 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
5708 else
5709 /* Disable FC tape support */
5710 nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
5711
5383 /* Disable Full Login after LIP */ 5712 /* Disable Full Login after LIP */
5384 nv->host_p &= __constant_cpu_to_le32(~BIT_10); 5713 nv->host_p &= __constant_cpu_to_le32(~BIT_10);
5385 /* Enable target PRLI control */ 5714 /* Enable target PRLI control */
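
Both NVRAM stage-1 hooks now gate the FC tape bit on the ql2xtgt_tape_enable module parameter rather than forcing it on. Since firmware_options_2 is a little-endian word, the bit is manipulated through byte-order helpers; a userspace approximation follows, with a trivial cpu_to_le32 that is the identity only because a little-endian host is assumed:

#include <stdint.h>
#include <stdio.h>

#define BIT_12 (1u << 12)

static uint32_t cpu_to_le32(uint32_t v) { return v; }  /* LE host assumed */

static uint32_t set_fc_tape(uint32_t opts2, int enable)
{
        if (enable)
                opts2 |= cpu_to_le32(BIT_12);    /* enable FC tape */
        else
                opts2 &= cpu_to_le32(~BIT_12);   /* disable FC tape */
        return opts2;
}

int main(void)
{
        uint32_t opts2 = 0;

        opts2 = set_fc_tape(opts2, 1);
        printf("enabled:  %#x\n", (unsigned)opts2);
        opts2 = set_fc_tape(opts2, 0);
        printf("disabled: %#x\n", (unsigned)opts2);
        return 0;
}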
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index d1d24fb0160a..8ff330f7d6f5 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -915,6 +915,10 @@ struct qla_tgt_cmd {
915 unsigned int aborted:1; /* Needed in case of SRR */ 915 unsigned int aborted:1; /* Needed in case of SRR */
916 unsigned int write_data_transferred:1; 916 unsigned int write_data_transferred:1;
917 unsigned int ctx_dsd_alloced:1; 917 unsigned int ctx_dsd_alloced:1;
918 unsigned int q_full:1;
919 unsigned int term_exchg:1;
920 unsigned int cmd_sent_to_fw:1;
921 unsigned int cmd_in_wq:1;
918 922
919 struct scatterlist *sg; /* cmd data buffer SG vector */ 923 struct scatterlist *sg; /* cmd data buffer SG vector */
920 int sg_cnt; /* SG segments count */ 924 int sg_cnt; /* SG segments count */
@@ -923,10 +927,12 @@ struct qla_tgt_cmd {
923 uint32_t tag; 927 uint32_t tag;
924 uint32_t unpacked_lun; 928 uint32_t unpacked_lun;
925 enum dma_data_direction dma_data_direction; 929 enum dma_data_direction dma_data_direction;
930 uint32_t reset_count;
926 931
927 uint16_t loop_id; /* to save extra sess dereferences */ 932 uint16_t loop_id; /* to save extra sess dereferences */
928 struct qla_tgt *tgt; /* to save extra sess dereferences */ 933 struct qla_tgt *tgt; /* to save extra sess dereferences */
929 struct scsi_qla_host *vha; 934 struct scsi_qla_host *vha;
935 struct list_head cmd_list;
930 936
931 struct atio_from_isp atio; 937 struct atio_from_isp atio;
932 /* t10dif */ 938 /* t10dif */
@@ -935,6 +941,29 @@ struct qla_tgt_cmd {
935 uint32_t blk_sz; 941 uint32_t blk_sz;
936 struct crc_context *ctx; 942 struct crc_context *ctx;
937 943
944 uint64_t jiffies_at_alloc;
945 uint64_t jiffies_at_free;
946 /* BIT_0 - ATIO arrival / scheduled to work
947 * BIT_1 - qlt_do_work
948 * BIT_2 - qlt_do_work failed
949 * BIT_3 - xfer rdy / tcm_qla2xxx_write_pending
950 * BIT_4 - read response / tcm_qla2xxx_queue_data_in
951 * BIT_5 - status response / tcm_qla2xxx_queue_status
952 * BIT_6 - tcm request to abort / term exchange:
953 * pre_xmit_response->qlt_send_term_exchange
954 * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response)
955 * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer)
956 * BIT_9 - SRR received (qlt_handle_srr->qlt_send_term_exchange)
957 * BIT_10 - data in - handle_data->tcm_qla2xxx_handle_data
958 * BIT_11 - data actually going to TCM: tcm_qla2xxx_handle_data_work
959 * BIT_12 - good completion - qlt_ctio_do_completion --> free_cmd
960 * BIT_13 - bad completion -
961 * qlt_ctio_do_completion --> qlt_term_ctio_exchange
962 * BIT_14 - back-end data received/sent
963 * BIT_15 - SRR prepare ctio
964 * BIT_16 - complete free
965 */
966 uint32_t cmd_flags;
938}; 967};
939 968
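The point of cmd_flags is a one-word breadcrumb trail: each stage of a command's life sets a bit, cheap enough to keep per command and readable from a crash dump, and the tcm_qla2xxx changes below use BIT_16 to catch a double free. A compact model of that tracing, where the bit meanings mirror the comment above and assert() plays the role of WARN_ON:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

struct cmd { uint32_t cmd_flags; };

static void trace(struct cmd *c, int bit) { c->cmd_flags |= BIT(bit); }

/* Matches the BIT_16 test added in tcm_qla2xxx_complete_free(). */
static void complete_free(struct cmd *c)
{
        assert(!(c->cmd_flags & BIT(16)));      /* double free? */
        trace(c, 16);
}

int main(void)
{
        struct cmd c = { 0 };

        trace(&c, 0);    /* ATIO arrival */
        trace(&c, 1);    /* qlt_do_work */
        trace(&c, 12);   /* good completion */
        complete_free(&c);
        printf("life: %#x\n", (unsigned)c.cmd_flags);   /* 0x11003 */
        return 0;
}
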
940struct qla_tgt_sess_work_param { 969struct qla_tgt_sess_work_param {
@@ -958,6 +987,7 @@ struct qla_tgt_mgmt_cmd {
958 struct se_cmd se_cmd; 987 struct se_cmd se_cmd;
959 struct work_struct free_work; 988 struct work_struct free_work;
960 unsigned int flags; 989 unsigned int flags;
990 uint32_t reset_count;
961#define QLA24XX_MGMT_SEND_NACK 1 991#define QLA24XX_MGMT_SEND_NACK 1
962 union { 992 union {
963 struct atio_from_isp atio; 993 struct atio_from_isp atio;
@@ -1089,5 +1119,6 @@ extern int qlt_stop_phase1(struct qla_tgt *);
1089extern void qlt_stop_phase2(struct qla_tgt *); 1119extern void qlt_stop_phase2(struct qla_tgt *);
1090extern irqreturn_t qla83xx_msix_atio_q(int, void *); 1120extern irqreturn_t qla83xx_msix_atio_q(int, void *);
1091extern void qlt_83xx_iospace_config(struct qla_hw_data *); 1121extern void qlt_83xx_iospace_config(struct qla_hw_data *);
1122extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
1092 1123
1093#endif /* __QLA_TARGET_H */ 1124#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index cb9a0c4bc419..a8c0c7362e48 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -128,18 +128,10 @@ qla27xx_insert32(uint32_t value, void *buf, ulong *len)
128static inline void 128static inline void
129qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len) 129qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
130{ 130{
131 ulong cnt = size;
132 131
133 if (buf && mem) { 132 if (buf && mem && size) {
134 buf += *len; 133 buf += *len;
135 while (cnt >= sizeof(uint32_t)) { 134 memcpy(buf, mem, size);
136 *(__le32 *)buf = cpu_to_le32p(mem);
137 buf += sizeof(uint32_t);
138 mem += sizeof(uint32_t);
139 cnt -= sizeof(uint32_t);
140 }
141 if (cnt)
142 memcpy(buf, mem, cnt);
143 } 135 }
144 *len += size; 136 *len += size;
145} 137}
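
The qla27xx_insertbuf() rewrite folds the cpu_to_le32p() word loop plus byte tail into a single memcpy(), which is byte-for-byte identical on the little-endian hosts this dump code targets; note that *len still advances when buf is NULL, which is what makes the template walker's sizing pass work. A standalone rendition showing both passes:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void insertbuf(const void *mem, size_t size, void *buf, size_t *len)
{
        if (buf && mem && size)
                memcpy((char *)buf + *len, mem, size);
        *len += size;           /* length advances even with a NULL buf */
}

int main(void)
{
        uint32_t regs[3] = { 0x11223344, 0x55667788, 0x99aabbcc };
        uint8_t dump[64];
        size_t len = 0;

        insertbuf(NULL, sizeof(regs), NULL, &len);  /* sizing pass */
        printf("sizing pass len=%zu\n", len);

        len = 0;
        insertbuf(regs, sizeof(regs), dump, &len);  /* capture pass */
        printf("capture pass len=%zu first_byte=%#x\n", len, dump[0]);
        return 0;
}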
@@ -151,8 +143,6 @@ qla27xx_read8(void *window, void *buf, ulong *len)
151 143
152 if (buf) { 144 if (buf) {
153 value = RD_REG_BYTE((__iomem void *)window); 145 value = RD_REG_BYTE((__iomem void *)window);
154 ql_dbg(ql_dbg_misc, NULL, 0xd011,
155 "%s: -> %x\n", __func__, value);
156 } 146 }
157 qla27xx_insert32(value, buf, len); 147 qla27xx_insert32(value, buf, len);
158} 148}
@@ -164,8 +154,6 @@ qla27xx_read16(void *window, void *buf, ulong *len)
164 154
165 if (buf) { 155 if (buf) {
166 value = RD_REG_WORD((__iomem void *)window); 156 value = RD_REG_WORD((__iomem void *)window);
167 ql_dbg(ql_dbg_misc, NULL, 0xd012,
168 "%s: -> %x\n", __func__, value);
169 } 157 }
170 qla27xx_insert32(value, buf, len); 158 qla27xx_insert32(value, buf, len);
171} 159}
@@ -177,8 +165,6 @@ qla27xx_read32(void *window, void *buf, ulong *len)
177 165
178 if (buf) { 166 if (buf) {
179 value = RD_REG_DWORD((__iomem void *)window); 167 value = RD_REG_DWORD((__iomem void *)window);
180 ql_dbg(ql_dbg_misc, NULL, 0xd013,
181 "%s: -> %x\n", __func__, value);
182 } 168 }
183 qla27xx_insert32(value, buf, len); 169 qla27xx_insert32(value, buf, len);
184} 170}
@@ -197,10 +183,6 @@ qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
197{ 183{
198 void *window = (void *)reg + offset; 184 void *window = (void *)reg + offset;
199 185
200 if (buf) {
201 ql_dbg(ql_dbg_misc, NULL, 0xd014,
202 "%s: @%x\n", __func__, offset);
203 }
204 qla27xx_read32(window, buf, len); 186 qla27xx_read32(window, buf, len);
205} 187}
206 188
@@ -211,8 +193,6 @@ qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
211 __iomem void *window = reg + offset; 193 __iomem void *window = reg + offset;
212 194
213 if (buf) { 195 if (buf) {
214 ql_dbg(ql_dbg_misc, NULL, 0xd015,
215 "%s: @%x <- %x\n", __func__, offset, data);
216 WRT_REG_DWORD(window, data); 196 WRT_REG_DWORD(window, data);
217 } 197 }
218} 198}
@@ -225,11 +205,6 @@ qla27xx_read_window(__iomem struct device_reg_24xx *reg,
225 void *window = (void *)reg + offset; 205 void *window = (void *)reg + offset;
226 void (*readn)(void *, void *, ulong *) = qla27xx_read_vector(width); 206 void (*readn)(void *, void *, ulong *) = qla27xx_read_vector(width);
227 207
228 if (buf) {
229 ql_dbg(ql_dbg_misc, NULL, 0xd016,
230 "%s: base=%x offset=%x count=%x width=%x\n",
231 __func__, addr, offset, count, width);
232 }
233 qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf); 208 qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf);
234 while (count--) { 209 while (count--) {
235 qla27xx_insert32(addr, buf, len); 210 qla27xx_insert32(addr, buf, len);
@@ -380,14 +355,9 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
380 ent->t262.start_addr = start; 355 ent->t262.start_addr = start;
381 ent->t262.end_addr = end; 356 ent->t262.end_addr = end;
382 } 357 }
383 } else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) {
384 ql_dbg(ql_dbg_misc, vha, 0xd021,
385 "%s: unsupported ddr ram\n", __func__);
386 qla27xx_skip_entry(ent, buf);
387 goto done;
388 } else { 358 } else {
389 ql_dbg(ql_dbg_misc, vha, 0xd022, 359 ql_dbg(ql_dbg_misc, vha, 0xd022,
390 "%s: unknown area %u\n", __func__, ent->t262.ram_area); 360 "%s: unknown area %x\n", __func__, ent->t262.ram_area);
391 qla27xx_skip_entry(ent, buf); 361 qla27xx_skip_entry(ent, buf);
392 goto done; 362 goto done;
393 } 363 }
@@ -402,8 +372,6 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
402 372
403 dwords = end - start + 1; 373 dwords = end - start + 1;
404 if (buf) { 374 if (buf) {
405 ql_dbg(ql_dbg_misc, vha, 0xd024,
406 "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
407 buf += *len; 375 buf += *len;
408 qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf); 376 qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
409 } 377 }
@@ -448,13 +416,9 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
448 count++; 416 count++;
449 } 417 }
450 } 418 }
451 } else if (ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
452 ql_dbg(ql_dbg_misc, vha, 0xd025,
453 "%s: unsupported atio queue\n", __func__);
454 qla27xx_skip_entry(ent, buf);
455 } else { 419 } else {
456 ql_dbg(ql_dbg_misc, vha, 0xd026, 420 ql_dbg(ql_dbg_misc, vha, 0xd026,
457 "%s: unknown queue %u\n", __func__, ent->t263.queue_type); 421 "%s: unknown queue %x\n", __func__, ent->t263.queue_type);
458 qla27xx_skip_entry(ent, buf); 422 qla27xx_skip_entry(ent, buf);
459 } 423 }
460 424
@@ -549,17 +513,9 @@ qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
549 "%s: missing eft\n", __func__); 513 "%s: missing eft\n", __func__);
550 qla27xx_skip_entry(ent, buf); 514 qla27xx_skip_entry(ent, buf);
551 } 515 }
552 } else if (ent->t268.buf_type == T268_BUF_TYPE_EXCH_BUFOFF) {
553 ql_dbg(ql_dbg_misc, vha, 0xd029,
554 "%s: unsupported exchange offload buffer\n", __func__);
555 qla27xx_skip_entry(ent, buf);
556 } else if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_LOGIN) {
557 ql_dbg(ql_dbg_misc, vha, 0xd02a,
558 "%s: unsupported extended login buffer\n", __func__);
559 qla27xx_skip_entry(ent, buf);
560 } else { 516 } else {
561 ql_dbg(ql_dbg_misc, vha, 0xd02b, 517 ql_dbg(ql_dbg_misc, vha, 0xd02b,
562 "%s: unknown buf %x\n", __func__, ent->t268.buf_type); 518 "%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
563 qla27xx_skip_entry(ent, buf); 519 qla27xx_skip_entry(ent, buf);
564 } 520 }
565 521
@@ -695,13 +651,9 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
695 count++; 651 count++;
696 } 652 }
697 } 653 }
698 } else if (ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
699 ql_dbg(ql_dbg_misc, vha, 0xd02e,
700 "%s: unsupported atio queue\n", __func__);
701 qla27xx_skip_entry(ent, buf);
702 } else { 654 } else {
703 ql_dbg(ql_dbg_misc, vha, 0xd02f, 655 ql_dbg(ql_dbg_misc, vha, 0xd02f,
704 "%s: unknown queue %u\n", __func__, ent->t274.queue_type); 656 "%s: unknown queue %x\n", __func__, ent->t274.queue_type);
705 qla27xx_skip_entry(ent, buf); 657 qla27xx_skip_entry(ent, buf);
706 } 658 }
707 659
@@ -715,6 +667,32 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
715} 667}
716 668
717static int 669static int
670qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
671 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
672{
673 ulong offset = offsetof(typeof(*ent), t275.buffer);
674
675 ql_dbg(ql_dbg_misc, vha, 0xd213,
676 "%s: buffer(%x) [%lx]\n", __func__, ent->t275.length, *len);
677 if (!ent->t275.length) {
678 ql_dbg(ql_dbg_misc, vha, 0xd020,
679 "%s: buffer zero length\n", __func__);
680 qla27xx_skip_entry(ent, buf);
681 goto done;
682 }
683 if (offset + ent->t275.length > ent->hdr.entry_size) {
684 ql_dbg(ql_dbg_misc, vha, 0xd030,
685 "%s: buffer overflow\n", __func__);
686 qla27xx_skip_entry(ent, buf);
687 goto done;
688 }
689
690 qla27xx_insertbuf(ent->t275.buffer, ent->t275.length, buf, len);
691done:
692 return false;
693}
694
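The new t275 handler validates a self-describing record: a declared payload length plus a flexible-array buffer that must fit inside the enclosing entry, checked via offsetof before anything is copied. The same shape in isolation, where the struct layout is a simplified stand-in for qla27xx_fwdt_entry.t275:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct t275 {
        uint32_t entry_size;    /* size of the whole entry */
        uint32_t length;        /* claimed payload length */
        uint8_t buffer[];       /* payload */
};

static int t275_valid(const struct t275 *e)
{
        if (!e->length)
                return 0;       /* zero-length buffer: skip */
        if (offsetof(struct t275, buffer) + e->length > e->entry_size)
                return 0;       /* claimed payload overflows the entry */
        return 1;
}

int main(void)
{
        struct t275 ok  = { .entry_size = 32, .length = 16 };
        struct t275 bad = { .entry_size = 16, .length = 64 };

        printf("ok=%d bad=%d\n", t275_valid(&ok), t275_valid(&bad));
        return 0;
}
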
695static int
718qla27xx_fwdt_entry_other(struct scsi_qla_host *vha, 696qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
719 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) 697 struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
720{ 698{
@@ -726,7 +704,7 @@ qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
726} 704}
727 705
728struct qla27xx_fwdt_entry_call { 706struct qla27xx_fwdt_entry_call {
729 int type; 707 uint type;
730 int (*call)( 708 int (*call)(
731 struct scsi_qla_host *, 709 struct scsi_qla_host *,
732 struct qla27xx_fwdt_entry *, 710 struct qla27xx_fwdt_entry *,
@@ -756,18 +734,21 @@ static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = {
756 { ENTRY_TYPE_RDREMRAM , qla27xx_fwdt_entry_t272 } , 734 { ENTRY_TYPE_RDREMRAM , qla27xx_fwdt_entry_t272 } ,
757 { ENTRY_TYPE_PCICFG , qla27xx_fwdt_entry_t273 } , 735 { ENTRY_TYPE_PCICFG , qla27xx_fwdt_entry_t273 } ,
758 { ENTRY_TYPE_GET_SHADOW , qla27xx_fwdt_entry_t274 } , 736 { ENTRY_TYPE_GET_SHADOW , qla27xx_fwdt_entry_t274 } ,
737 { ENTRY_TYPE_WRITE_BUF , qla27xx_fwdt_entry_t275 } ,
759 { -1 , qla27xx_fwdt_entry_other } 738 { -1 , qla27xx_fwdt_entry_other }
760}; 739};
761 740
762static inline int (*qla27xx_find_entry(int type)) 741static inline int (*qla27xx_find_entry(uint type))
763 (struct scsi_qla_host *, struct qla27xx_fwdt_entry *, void *, ulong *) 742 (struct scsi_qla_host *, struct qla27xx_fwdt_entry *, void *, ulong *)
764{ 743{
765 struct qla27xx_fwdt_entry_call *list = ql27xx_fwdt_entry_call_list; 744 struct qla27xx_fwdt_entry_call *list = ql27xx_fwdt_entry_call_list;
766 745
767 while (list->type != -1 && list->type != type) 746 while (list->type < type)
768 list++; 747 list++;
769 748
770 return list->call; 749 if (list->type == type)
750 return list->call;
751 return qla27xx_fwdt_entry_other;
771} 752}
772 753
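The reworked qla27xx_find_entry() walks the call list with a bare `while (list->type < type)`, which is safe only because the table is sorted by ascending type and terminated by a (uint)-1 sentinel that compares greater than any real type; misses fall through to the default handler. A minimal model of that lookup:

#include <stdio.h>

typedef unsigned int uint;

struct entry_call {
        uint type;
        const char *handler;
};

/* Sorted ascending; the (uint)-1 sentinel guarantees termination. */
static struct entry_call call_list[] = {
        { 255, "t255" },
        { 262, "t262" },
        { 275, "t275" },
        { (uint)-1, "other" },
};

static const char *find_entry(uint type)
{
        struct entry_call *list = call_list;

        while (list->type < type)
                list++;
        return list->type == type ? list->handler : "other";
}

int main(void)
{
        printf("%s %s %s\n", find_entry(262), find_entry(300),
            find_entry(270));           /* t262 other other */
        return 0;
}
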
773static inline void * 754static inline void *
@@ -792,6 +773,15 @@ qla27xx_walk_template(struct scsi_qla_host *vha,
792 break; 773 break;
793 ent = qla27xx_next_entry(ent); 774 ent = qla27xx_next_entry(ent);
794 } 775 }
776
777 if (count)
778 ql_dbg(ql_dbg_misc, vha, 0xd018,
779 "%s: residual count (%lx)\n", __func__, count);
780
781 if (ent->hdr.entry_type != ENTRY_TYPE_TMP_END)
782 ql_dbg(ql_dbg_misc, vha, 0xd019,
783 "%s: missing end (%lx)\n", __func__, count);
784
795 ql_dbg(ql_dbg_misc, vha, 0xd01b, 785 ql_dbg(ql_dbg_misc, vha, 0xd01b,
796 "%s: len=%lx\n", __func__, *len); 786 "%s: len=%lx\n", __func__, *len);
797} 787}
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
index 1967424c8e64..141c1c5e73f4 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.h
+++ b/drivers/scsi/qla2xxx/qla_tmpl.h
@@ -53,6 +53,7 @@ struct __packed qla27xx_fwdt_template {
53#define ENTRY_TYPE_RDREMRAM 272 53#define ENTRY_TYPE_RDREMRAM 272
54#define ENTRY_TYPE_PCICFG 273 54#define ENTRY_TYPE_PCICFG 273
55#define ENTRY_TYPE_GET_SHADOW 274 55#define ENTRY_TYPE_GET_SHADOW 274
56#define ENTRY_TYPE_WRITE_BUF 275
56 57
57#define CAPTURE_FLAG_PHYS_ONLY BIT_0 58#define CAPTURE_FLAG_PHYS_ONLY BIT_0
58#define CAPTURE_FLAG_PHYS_VIRT BIT_1 59#define CAPTURE_FLAG_PHYS_VIRT BIT_1
@@ -193,6 +194,11 @@ struct __packed qla27xx_fwdt_entry {
193 uint8_t queue_type; 194 uint8_t queue_type;
194 uint8_t reserved[3]; 195 uint8_t reserved[3];
195 } t274; 196 } t274;
197
198 struct __packed {
199 uint32_t length;
200 uint8_t buffer[];
201 } t275;
196 }; 202 };
197}; 203};
198 204
@@ -208,6 +214,8 @@ struct __packed qla27xx_fwdt_entry {
208#define T268_BUF_TYPE_EXTD_TRACE 1 214#define T268_BUF_TYPE_EXTD_TRACE 1
209#define T268_BUF_TYPE_EXCH_BUFOFF 2 215#define T268_BUF_TYPE_EXCH_BUFOFF 2
210#define T268_BUF_TYPE_EXTD_LOGIN 3 216#define T268_BUF_TYPE_EXTD_LOGIN 3
217#define T268_BUF_TYPE_REQ_MIRROR 4
218#define T268_BUF_TYPE_RSP_MIRROR 5
211 219
212#define T274_QUEUE_TYPE_REQ_SHAD 1 220#define T274_QUEUE_TYPE_REQ_SHAD 1
213#define T274_QUEUE_TYPE_RSP_SHAD 2 221#define T274_QUEUE_TYPE_RSP_SHAD 2
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 4d2c98cbec4f..d88b86214ec5 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
7/* 7/*
8 * Driver version 8 * Driver version
9 */ 9 */
10#define QLA2XXX_VERSION "8.07.00.08-k" 10#define QLA2XXX_VERSION "8.07.00.16-k"
11 11
12#define QLA_DRIVER_MAJOR_VER 8 12#define QLA_DRIVER_MAJOR_VER 8
13#define QLA_DRIVER_MINOR_VER 7 13#define QLA_DRIVER_MINOR_VER 7
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index e2beab962096..031b2961c6b7 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -50,8 +50,12 @@
50#include "qla_target.h" 50#include "qla_target.h"
51#include "tcm_qla2xxx.h" 51#include "tcm_qla2xxx.h"
52 52
53struct workqueue_struct *tcm_qla2xxx_free_wq; 53static struct workqueue_struct *tcm_qla2xxx_free_wq;
54struct workqueue_struct *tcm_qla2xxx_cmd_wq; 54static struct workqueue_struct *tcm_qla2xxx_cmd_wq;
55
56/* Local pointer to allocated TCM configfs fabric module */
57static struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
58static struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
55 59
56/* 60/*
57 * Parse WWN. 61 * Parse WWN.
@@ -386,6 +390,11 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work)
386{ 390{
387 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 391 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
388 392
393 cmd->cmd_in_wq = 0;
394
395 WARN_ON(cmd->cmd_flags & BIT_16);
396
397 cmd->cmd_flags |= BIT_16;
389 transport_generic_free_cmd(&cmd->se_cmd, 0); 398 transport_generic_free_cmd(&cmd->se_cmd, 0);
390} 399}
391 400
@@ -396,6 +405,7 @@ static void tcm_qla2xxx_complete_free(struct work_struct *work)
396 */ 405 */
397static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd) 406static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
398{ 407{
408 cmd->cmd_in_wq = 1;
399 INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free); 409 INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
400 queue_work(tcm_qla2xxx_free_wq, &cmd->work); 410 queue_work(tcm_qla2xxx_free_wq, &cmd->work);
401} 411}
@@ -405,6 +415,13 @@ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
405 */ 415 */
406static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd) 416static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
407{ 417{
418 struct qla_tgt_cmd *cmd;
419
420 if ((se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) == 0) {
421 cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
422 cmd->cmd_flags |= BIT_14;
423 }
424
408 return target_put_sess_cmd(se_cmd->se_sess, se_cmd); 425 return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
409} 426}
410 427
@@ -511,8 +528,13 @@ static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
511 528
512static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd) 529static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd)
513{ 530{
514 struct qla_tgt_cmd *cmd = container_of(se_cmd, 531 struct qla_tgt_cmd *cmd;
515 struct qla_tgt_cmd, se_cmd); 532
533 /* check for task mgmt cmd */
534 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
535 return 0xffffffff;
536
537 cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
516 538
517 return cmd->tag; 539 return cmd->tag;
518} 540}
@@ -562,6 +584,8 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
562 * Ensure that the complete FCP WRITE payload has been received. 584 * Ensure that the complete FCP WRITE payload has been received.
563 * Otherwise return an exception via CHECK_CONDITION status. 585 * Otherwise return an exception via CHECK_CONDITION status.
564 */ 586 */
587 cmd->cmd_in_wq = 0;
588 cmd->cmd_flags |= BIT_11;
565 if (!cmd->write_data_transferred) { 589 if (!cmd->write_data_transferred) {
566 /* 590 /*
567 * Check if se_cmd has already been aborted via LUN_RESET, and 591 * Check if se_cmd has already been aborted via LUN_RESET, and
@@ -590,6 +614,8 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
590 */ 614 */
591static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) 615static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
592{ 616{
617 cmd->cmd_flags |= BIT_10;
618 cmd->cmd_in_wq = 1;
593 INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work); 619 INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
594 queue_work(tcm_qla2xxx_free_wq, &cmd->work); 620 queue_work(tcm_qla2xxx_free_wq, &cmd->work);
595} 621}
@@ -633,6 +659,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
633 struct qla_tgt_cmd *cmd = container_of(se_cmd, 659 struct qla_tgt_cmd *cmd = container_of(se_cmd,
634 struct qla_tgt_cmd, se_cmd); 660 struct qla_tgt_cmd, se_cmd);
635 661
662 cmd->cmd_flags |= BIT_4;
636 cmd->bufflen = se_cmd->data_length; 663 cmd->bufflen = se_cmd->data_length;
637 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 664 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
638 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED); 665 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
@@ -640,6 +667,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
640 cmd->sg_cnt = se_cmd->t_data_nents; 667 cmd->sg_cnt = se_cmd->t_data_nents;
641 cmd->sg = se_cmd->t_data_sg; 668 cmd->sg = se_cmd->t_data_sg;
642 cmd->offset = 0; 669 cmd->offset = 0;
670 cmd->cmd_flags |= BIT_3;
643 671
644 cmd->prot_sg_cnt = se_cmd->t_prot_nents; 672 cmd->prot_sg_cnt = se_cmd->t_prot_nents;
645 cmd->prot_sg = se_cmd->t_prot_sg; 673 cmd->prot_sg = se_cmd->t_prot_sg;
@@ -665,6 +693,11 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
665 cmd->offset = 0; 693 cmd->offset = 0;
666 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 694 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
667 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED); 695 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
696 if (cmd->cmd_flags & BIT_5) {
697 pr_crit("Bit_5 already set for cmd = %p.\n", cmd);
698 dump_stack();
699 }
700 cmd->cmd_flags |= BIT_5;
668 701
669 if (se_cmd->data_direction == DMA_FROM_DEVICE) { 702 if (se_cmd->data_direction == DMA_FROM_DEVICE) {
670 /* 703 /*
@@ -734,10 +767,6 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
734 cmd->sg_mapped = 0; 767 cmd->sg_mapped = 0;
735} 768}
736 769
737/* Local pointer to allocated TCM configfs fabric module */
738struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
739struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
740
741static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, 770static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
742 struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *); 771 struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
743/* 772/*
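
The tcm_qla2xxx.c hunks above instrument the command lifecycle: each stage sets a distinct bit in cmd_flags, cmd_in_wq records whether the command is sitting on a workqueue, and a double free trips WARN_ON(cmd->cmd_flags & BIT_16). A small userspace model of that flag-per-stage debugging pattern is sketched below; the flag names and bit positions are illustrative, not the driver's, and assert() stands in for WARN_ON:

#include <assert.h>
#include <stdio.h>

/* One bit per lifecycle stage, mirroring the BIT_n markers above. */
enum {
	CMD_QUEUED_DATA = 1u << 10,
	CMD_DATA_WORK   = 1u << 11,
	CMD_STATUS_SENT = 1u << 5,
	CMD_FREED       = 1u << 16,
};

struct cmd {
	unsigned int flags;
	int in_wq;		/* currently sitting on a workqueue? */
};

static void cmd_free(struct cmd *cmd)
{
	cmd->in_wq = 0;
	/* A second free of the same command is a lifecycle bug; trap it
	 * loudly, like the WARN_ON(cmd->cmd_flags & BIT_16) above. */
	assert(!(cmd->flags & CMD_FREED));
	cmd->flags |= CMD_FREED;
	printf("cmd freed, flags=%#x\n", cmd->flags);
}

int main(void)
{
	struct cmd c = { 0, 0 };

	c.flags |= CMD_QUEUED_DATA;	/* handle_data queues the work */
	c.in_wq = 1;
	c.flags |= CMD_DATA_WORK;	/* worker picked it up */
	c.in_wq = 0;
	c.flags |= CMD_STATUS_SENT;	/* status queued to the initiator */
	cmd_free(&c);			/* first free: fine */
	/* cmd_free(&c);		   a second free would assert */
	return 0;
}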
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index d81f3cc43ff1..79c77b485a67 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -670,14 +670,10 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
670 return SCSI_MLQUEUE_DEVICE_BUSY; 670 return SCSI_MLQUEUE_DEVICE_BUSY;
671 } 671 }
672 672
673 /* 673 /* Store the LUN value in cmnd, if needed. */
674 * If SCSI-2 or lower, store the LUN value in cmnd. 674 if (cmd->device->lun_in_cdb)
675 */
676 if (cmd->device->scsi_level <= SCSI_2 &&
677 cmd->device->scsi_level != SCSI_UNKNOWN) {
678 cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) | 675 cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
679 (cmd->device->lun << 5 & 0xe0); 676 (cmd->device->lun << 5 & 0xe0);
680 }
681 677
682 scsi_log_send(cmd); 678 scsi_log_send(cmd);
683 679
@@ -1371,7 +1367,11 @@ MODULE_LICENSE("GPL");
1371module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR); 1367module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
1372MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels"); 1368MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
1373 1369
1370#ifdef CONFIG_SCSI_MQ_DEFAULT
1371bool scsi_use_blk_mq = true;
1372#else
1374bool scsi_use_blk_mq = false; 1373bool scsi_use_blk_mq = false;
1374#endif
1375module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO); 1375module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
1376 1376
1377static int __init init_scsi(void) 1377static int __init init_scsi(void)
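
With lun_in_cdb now precomputed at scan time, scsi_dispatch_cmd() above only performs the SCSI-2 style byte-1 masking when the flag is set: the low three bits of the LUN occupy bits 7:5 of CDB byte 1, and the opcode-specific bits stay in 4:0. A standalone sketch of that exact masking; the READ(6) CDB and LUN value are made up for the demo:

#include <stdint.h>
#include <stdio.h>

static void set_lun_in_cdb(uint8_t *cdb, unsigned int lun)
{
	/* Same expression as in scsi_dispatch_cmd() above. */
	cdb[1] = (cdb[1] & 0x1f) | ((lun << 5) & 0xe0);
}

int main(void)
{
	uint8_t cdb[6] = { 0x08, 0x1f, 0, 0, 0, 0 };	/* READ(6) */

	set_lun_in_cdb(cdb, 3);
	printf("cdb[1] = 0x%02x\n", cdb[1]);	/* 0x7f: LUN 3 in bits 7:5 */
	return 0;
}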
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index d19c0e3c7f48..2b6d447ad6d6 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -123,7 +123,7 @@ static const char *scsi_debug_version_date = "20140706";
123#define DEF_PHYSBLK_EXP 0 123#define DEF_PHYSBLK_EXP 0
124#define DEF_PTYPE 0 124#define DEF_PTYPE 0
125#define DEF_REMOVABLE false 125#define DEF_REMOVABLE false
126#define DEF_SCSI_LEVEL 5 /* INQUIRY, byte2 [5->SPC-3] */ 126#define DEF_SCSI_LEVEL 6 /* INQUIRY, byte2 [6->SPC-4] */
127#define DEF_SECTOR_SIZE 512 127#define DEF_SECTOR_SIZE 512
128#define DEF_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */ 128#define DEF_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
129#define DEF_UNMAP_ALIGNMENT 0 129#define DEF_UNMAP_ALIGNMENT 0
@@ -929,7 +929,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, int target,
929{ 929{
930 unsigned char pq_pdt; 930 unsigned char pq_pdt;
931 unsigned char * arr; 931 unsigned char * arr;
932 unsigned char *cmd = (unsigned char *)scp->cmnd; 932 unsigned char *cmd = scp->cmnd;
933 int alloc_len, n, ret; 933 int alloc_len, n, ret;
934 934
935 alloc_len = (cmd[3] << 8) + cmd[4]; 935 alloc_len = (cmd[3] << 8) + cmd[4];
@@ -1056,15 +1056,15 @@ static int resp_inquiry(struct scsi_cmnd *scp, int target,
1056 memcpy(&arr[16], inq_product_id, 16); 1056 memcpy(&arr[16], inq_product_id, 16);
1057 memcpy(&arr[32], inq_product_rev, 4); 1057 memcpy(&arr[32], inq_product_rev, 4);
1058 /* version descriptors (2 bytes each) follow */ 1058 /* version descriptors (2 bytes each) follow */
1059 arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */ 1059 arr[58] = 0x0; arr[59] = 0xa2; /* SAM-5 rev 4 */
1060 arr[60] = 0x3; arr[61] = 0x14; /* SPC-3 ANSI */ 1060 arr[60] = 0x4; arr[61] = 0x68; /* SPC-4 rev 37 */
1061 n = 62; 1061 n = 62;
1062 if (scsi_debug_ptype == 0) { 1062 if (scsi_debug_ptype == 0) {
1063 arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */ 1063 arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */
1064 } else if (scsi_debug_ptype == 1) { 1064 } else if (scsi_debug_ptype == 1) {
1065 arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */ 1065 arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */
1066 } 1066 }
1067 arr[n++] = 0xc; arr[n++] = 0xf; /* SAS-1.1 rev 10 */ 1067 arr[n++] = 0x20; arr[n++] = 0xe6; /* SPL-3 rev 7 */
1068 ret = fill_from_dev_buffer(scp, arr, 1068 ret = fill_from_dev_buffer(scp, arr,
1069 min(alloc_len, SDEBUG_LONG_INQ_SZ)); 1069 min(alloc_len, SDEBUG_LONG_INQ_SZ));
1070 kfree(arr); 1070 kfree(arr);
@@ -1075,7 +1075,7 @@ static int resp_requests(struct scsi_cmnd * scp,
1075 struct sdebug_dev_info * devip) 1075 struct sdebug_dev_info * devip)
1076{ 1076{
1077 unsigned char * sbuff; 1077 unsigned char * sbuff;
1078 unsigned char *cmd = (unsigned char *)scp->cmnd; 1078 unsigned char *cmd = scp->cmnd;
1079 unsigned char arr[SCSI_SENSE_BUFFERSIZE]; 1079 unsigned char arr[SCSI_SENSE_BUFFERSIZE];
1080 int want_dsense; 1080 int want_dsense;
1081 int len = 18; 1081 int len = 18;
@@ -1115,7 +1115,7 @@ static int resp_requests(struct scsi_cmnd * scp,
1115static int resp_start_stop(struct scsi_cmnd * scp, 1115static int resp_start_stop(struct scsi_cmnd * scp,
1116 struct sdebug_dev_info * devip) 1116 struct sdebug_dev_info * devip)
1117{ 1117{
1118 unsigned char *cmd = (unsigned char *)scp->cmnd; 1118 unsigned char *cmd = scp->cmnd;
1119 int power_cond, errsts, start; 1119 int power_cond, errsts, start;
1120 1120
1121 errsts = check_readiness(scp, UAS_ONLY, devip); 1121 errsts = check_readiness(scp, UAS_ONLY, devip);
@@ -1177,7 +1177,7 @@ static int resp_readcap(struct scsi_cmnd * scp,
1177static int resp_readcap16(struct scsi_cmnd * scp, 1177static int resp_readcap16(struct scsi_cmnd * scp,
1178 struct sdebug_dev_info * devip) 1178 struct sdebug_dev_info * devip)
1179{ 1179{
1180 unsigned char *cmd = (unsigned char *)scp->cmnd; 1180 unsigned char *cmd = scp->cmnd;
1181 unsigned char arr[SDEBUG_READCAP16_ARR_SZ]; 1181 unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
1182 unsigned long long capac; 1182 unsigned long long capac;
1183 int errsts, k, alloc_len; 1183 int errsts, k, alloc_len;
@@ -1222,7 +1222,7 @@ static int resp_readcap16(struct scsi_cmnd * scp,
1222static int resp_report_tgtpgs(struct scsi_cmnd * scp, 1222static int resp_report_tgtpgs(struct scsi_cmnd * scp,
1223 struct sdebug_dev_info * devip) 1223 struct sdebug_dev_info * devip)
1224{ 1224{
1225 unsigned char *cmd = (unsigned char *)scp->cmnd; 1225 unsigned char *cmd = scp->cmnd;
1226 unsigned char * arr; 1226 unsigned char * arr;
1227 int host_no = devip->sdbg_host->shost->host_no; 1227 int host_no = devip->sdbg_host->shost->host_no;
1228 int n, ret, alen, rlen; 1228 int n, ret, alen, rlen;
@@ -1468,7 +1468,7 @@ static int resp_mode_sense(struct scsi_cmnd * scp, int target,
1468 int k, alloc_len, msense_6, offset, len, errsts, target_dev_id; 1468 int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
1469 unsigned char * ap; 1469 unsigned char * ap;
1470 unsigned char arr[SDEBUG_MAX_MSENSE_SZ]; 1470 unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
1471 unsigned char *cmd = (unsigned char *)scp->cmnd; 1471 unsigned char *cmd = scp->cmnd;
1472 1472
1473 errsts = check_readiness(scp, UAS_ONLY, devip); 1473 errsts = check_readiness(scp, UAS_ONLY, devip);
1474 if (errsts) 1474 if (errsts)
@@ -1630,7 +1630,7 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
1630 int pf, sp, ps, md_len, bd_len, off, spf, pg_len; 1630 int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
1631 int param_len, res, errsts, mpage; 1631 int param_len, res, errsts, mpage;
1632 unsigned char arr[SDEBUG_MAX_MSELECT_SZ]; 1632 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
1633 unsigned char *cmd = (unsigned char *)scp->cmnd; 1633 unsigned char *cmd = scp->cmnd;
1634 1634
1635 errsts = check_readiness(scp, UAS_ONLY, devip); 1635 errsts = check_readiness(scp, UAS_ONLY, devip);
1636 if (errsts) 1636 if (errsts)
@@ -1739,7 +1739,7 @@ static int resp_log_sense(struct scsi_cmnd * scp,
1739{ 1739{
1740 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n; 1740 int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
1741 unsigned char arr[SDEBUG_MAX_LSENSE_SZ]; 1741 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
1742 unsigned char *cmd = (unsigned char *)scp->cmnd; 1742 unsigned char *cmd = scp->cmnd;
1743 1743
1744 errsts = check_readiness(scp, UAS_ONLY, devip); 1744 errsts = check_readiness(scp, UAS_ONLY, devip);
1745 if (errsts) 1745 if (errsts)
@@ -2414,7 +2414,7 @@ static int resp_report_luns(struct scsi_cmnd * scp,
2414 unsigned int alloc_len; 2414 unsigned int alloc_len;
2415 int lun_cnt, i, upper, num, n; 2415 int lun_cnt, i, upper, num, n;
2416 u64 wlun, lun; 2416 u64 wlun, lun;
2417 unsigned char *cmd = (unsigned char *)scp->cmnd; 2417 unsigned char *cmd = scp->cmnd;
2418 int select_report = (int)cmd[2]; 2418 int select_report = (int)cmd[2];
2419 struct scsi_lun *one_lun; 2419 struct scsi_lun *one_lun;
2420 unsigned char arr[SDEBUG_RLUN_ARR_SZ]; 2420 unsigned char arr[SDEBUG_RLUN_ARR_SZ];
@@ -2743,6 +2743,13 @@ static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2743 if (test_bit(k, queued_in_use_bm)) { 2743 if (test_bit(k, queued_in_use_bm)) {
2744 sqcp = &queued_arr[k]; 2744 sqcp = &queued_arr[k];
2745 if (cmnd == sqcp->a_cmnd) { 2745 if (cmnd == sqcp->a_cmnd) {
2746 devip = (struct sdebug_dev_info *)
2747 cmnd->device->hostdata;
2748 if (devip)
2749 atomic_dec(&devip->num_in_q);
2750 sqcp->a_cmnd = NULL;
2751 spin_unlock_irqrestore(&queued_arr_lock,
2752 iflags);
2746 if (scsi_debug_ndelay > 0) { 2753 if (scsi_debug_ndelay > 0) {
2747 if (sqcp->sd_hrtp) 2754 if (sqcp->sd_hrtp)
2748 hrtimer_cancel( 2755 hrtimer_cancel(
@@ -2755,18 +2762,13 @@ static int stop_queued_cmnd(struct scsi_cmnd *cmnd)
2755 if (sqcp->tletp) 2762 if (sqcp->tletp)
2756 tasklet_kill(sqcp->tletp); 2763 tasklet_kill(sqcp->tletp);
2757 } 2764 }
2758 __clear_bit(k, queued_in_use_bm); 2765 clear_bit(k, queued_in_use_bm);
2759 devip = (struct sdebug_dev_info *) 2766 return 1;
2760 cmnd->device->hostdata;
2761 if (devip)
2762 atomic_dec(&devip->num_in_q);
2763 sqcp->a_cmnd = NULL;
2764 break;
2765 } 2767 }
2766 } 2768 }
2767 } 2769 }
2768 spin_unlock_irqrestore(&queued_arr_lock, iflags); 2770 spin_unlock_irqrestore(&queued_arr_lock, iflags);
2769 return (k < qmax) ? 1 : 0; 2771 return 0;
2770} 2772}
2771 2773
2772/* Deletes (stops) timers or tasklets of all queued commands */ 2774/* Deletes (stops) timers or tasklets of all queued commands */
@@ -2782,6 +2784,13 @@ static void stop_all_queued(void)
2782 if (test_bit(k, queued_in_use_bm)) { 2784 if (test_bit(k, queued_in_use_bm)) {
2783 sqcp = &queued_arr[k]; 2785 sqcp = &queued_arr[k];
2784 if (sqcp->a_cmnd) { 2786 if (sqcp->a_cmnd) {
2787 devip = (struct sdebug_dev_info *)
2788 sqcp->a_cmnd->device->hostdata;
2789 if (devip)
2790 atomic_dec(&devip->num_in_q);
2791 sqcp->a_cmnd = NULL;
2792 spin_unlock_irqrestore(&queued_arr_lock,
2793 iflags);
2785 if (scsi_debug_ndelay > 0) { 2794 if (scsi_debug_ndelay > 0) {
2786 if (sqcp->sd_hrtp) 2795 if (sqcp->sd_hrtp)
2787 hrtimer_cancel( 2796 hrtimer_cancel(
@@ -2794,12 +2803,8 @@ static void stop_all_queued(void)
2794 if (sqcp->tletp) 2803 if (sqcp->tletp)
2795 tasklet_kill(sqcp->tletp); 2804 tasklet_kill(sqcp->tletp);
2796 } 2805 }
2797 __clear_bit(k, queued_in_use_bm); 2806 clear_bit(k, queued_in_use_bm);
2798 devip = (struct sdebug_dev_info *) 2807 spin_lock_irqsave(&queued_arr_lock, iflags);
2799 sqcp->a_cmnd->device->hostdata;
2800 if (devip)
2801 atomic_dec(&devip->num_in_q);
2802 sqcp->a_cmnd = NULL;
2803 } 2808 }
2804 } 2809 }
2805 } 2810 }
@@ -3006,7 +3011,7 @@ schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3006 int scsi_result, int delta_jiff) 3011 int scsi_result, int delta_jiff)
3007{ 3012{
3008 unsigned long iflags; 3013 unsigned long iflags;
3009 int k, num_in_q, tsf, qdepth, inject; 3014 int k, num_in_q, qdepth, inject;
3010 struct sdebug_queued_cmd *sqcp = NULL; 3015 struct sdebug_queued_cmd *sqcp = NULL;
3011 struct scsi_device *sdp = cmnd->device; 3016 struct scsi_device *sdp = cmnd->device;
3012 3017
@@ -3019,55 +3024,48 @@ schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3019 if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)) 3024 if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
3020 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n", 3025 sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
3021 __func__, scsi_result); 3026 __func__, scsi_result);
3022 if (delta_jiff == 0) { 3027 if (delta_jiff == 0)
3023 /* using same thread to call back mid-layer */ 3028 goto respond_in_thread;
3024 cmnd->result = scsi_result;
3025 cmnd->scsi_done(cmnd);
3026 return 0;
3027 }
3028 3029
3029 /* deferred response cases */ 3030 /* schedule the response at a later time if resources permit */
3030 spin_lock_irqsave(&queued_arr_lock, iflags); 3031 spin_lock_irqsave(&queued_arr_lock, iflags);
3031 num_in_q = atomic_read(&devip->num_in_q); 3032 num_in_q = atomic_read(&devip->num_in_q);
3032 qdepth = cmnd->device->queue_depth; 3033 qdepth = cmnd->device->queue_depth;
3033 k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue);
3034 tsf = 0;
3035 inject = 0; 3034 inject = 0;
3036 if ((qdepth > 0) && (num_in_q >= qdepth)) 3035 if ((qdepth > 0) && (num_in_q >= qdepth)) {
3037 tsf = 1; 3036 if (scsi_result) {
3038 else if ((scsi_debug_every_nth != 0) && 3037 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3039 (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts)) { 3038 goto respond_in_thread;
3039 } else
3040 scsi_result = device_qfull_result;
3041 } else if ((scsi_debug_every_nth != 0) &&
3042 (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) &&
3043 (scsi_result == 0)) {
3040 if ((num_in_q == (qdepth - 1)) && 3044 if ((num_in_q == (qdepth - 1)) &&
3041 (atomic_inc_return(&sdebug_a_tsf) >= 3045 (atomic_inc_return(&sdebug_a_tsf) >=
3042 abs(scsi_debug_every_nth))) { 3046 abs(scsi_debug_every_nth))) {
3043 atomic_set(&sdebug_a_tsf, 0); 3047 atomic_set(&sdebug_a_tsf, 0);
3044 inject = 1; 3048 inject = 1;
3045 tsf = 1; 3049 scsi_result = device_qfull_result;
3046 } 3050 }
3047 } 3051 }
3048 3052
3049 /* if (tsf) simulate device reporting SCSI status of TASK SET FULL. 3053 k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue);
3050 * Might override existing CHECK CONDITION. */
3051 if (tsf)
3052 scsi_result = device_qfull_result;
3053 if (k >= scsi_debug_max_queue) { 3054 if (k >= scsi_debug_max_queue) {
3054 if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts)
3055 tsf = 1;
3056 spin_unlock_irqrestore(&queued_arr_lock, iflags); 3055 spin_unlock_irqrestore(&queued_arr_lock, iflags);
3056 if (scsi_result)
3057 goto respond_in_thread;
3058 else if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts)
3059 scsi_result = device_qfull_result;
3057 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) 3060 if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts)
3058 sdev_printk(KERN_INFO, sdp, 3061 sdev_printk(KERN_INFO, sdp,
3059 "%s: num_in_q=%d, bypass q, %s%s\n", 3062 "%s: max_queue=%d exceeded, %s\n",
3060 __func__, num_in_q, 3063 __func__, scsi_debug_max_queue,
3061 (inject ? "<inject> " : ""), 3064 (scsi_result ? "status: TASK SET FULL" :
3062 (tsf ? "status: TASK SET FULL" : 3065 "report: host busy"));
3063 "report: host busy")); 3066 if (scsi_result)
3064 if (tsf) { 3067 goto respond_in_thread;
3065 /* queued_arr full so respond in same thread */ 3068 else
3066 cmnd->result = scsi_result;
3067 cmnd->scsi_done(cmnd);
3068 /* As scsi_done() is called "inline" must return 0 */
3069 return 0;
3070 } else
3071 return SCSI_MLQUEUE_HOST_BUSY; 3069 return SCSI_MLQUEUE_HOST_BUSY;
3072 } 3070 }
3073 __set_bit(k, queued_in_use_bm); 3071 __set_bit(k, queued_in_use_bm);
@@ -3117,12 +3115,18 @@ schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
3117 else 3115 else
3118 tasklet_schedule(sqcp->tletp); 3116 tasklet_schedule(sqcp->tletp);
3119 } 3117 }
3120 if (tsf && (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts)) 3118 if ((SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) &&
3119 (scsi_result == device_qfull_result))
3121 sdev_printk(KERN_INFO, sdp, 3120 sdev_printk(KERN_INFO, sdp,
3122 "%s: num_in_q=%d +1, %s%s\n", __func__, 3121 "%s: num_in_q=%d +1, %s%s\n", __func__,
3123 num_in_q, (inject ? "<inject> " : ""), 3122 num_in_q, (inject ? "<inject> " : ""),
3124 "status: TASK SET FULL"); 3123 "status: TASK SET FULL");
3125 return 0; 3124 return 0;
3125
3126respond_in_thread: /* call back to mid-layer using invocation thread */
3127 cmnd->result = scsi_result;
3128 cmnd->scsi_done(cmnd);
3129 return 0;
3126} 3130}
3127 3131
3128/* Note: The following macros create attribute files in the 3132/* Note: The following macros create attribute files in the
@@ -3206,7 +3210,7 @@ MODULE_PARM_DESC(opts, "1->noise, 2->medium_err, 4->timeout, 8->recovered_err...
3206MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)"); 3210MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
3207MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])"); 3211MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
3208MODULE_PARM_DESC(removable, "claim to have removable media (def=0)"); 3212MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
3209MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])"); 3213MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])");
3210MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)"); 3214MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
3211MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)"); 3215MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
3212MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)"); 3216MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
@@ -4085,7 +4089,7 @@ static void sdebug_remove_adapter(void)
4085static int 4089static int
4086scsi_debug_queuecommand(struct scsi_cmnd *SCpnt) 4090scsi_debug_queuecommand(struct scsi_cmnd *SCpnt)
4087{ 4091{
4088 unsigned char *cmd = (unsigned char *) SCpnt->cmnd; 4092 unsigned char *cmd = SCpnt->cmnd;
4089 int len, k; 4093 int len, k;
4090 unsigned int num; 4094 unsigned int num;
4091 unsigned long long lba; 4095 unsigned long long lba;
@@ -4103,7 +4107,7 @@ scsi_debug_queuecommand(struct scsi_cmnd *SCpnt)
4103 4107
4104 scsi_set_resid(SCpnt, 0); 4108 scsi_set_resid(SCpnt, 0);
4105 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) && 4109 if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) &&
4106 !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts) && cmd) { 4110 !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) {
4107 char b[120]; 4111 char b[120];
4108 int n; 4112 int n;
4109 4113
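
The stop_queued_cmnd()/stop_all_queued() rework above is a locking fix: the queue entry is detached while queued_arr_lock is held, the lock is dropped, and only then are the hrtimer and tasklet cancelled, since cancellation can wait for a running handler that may itself take the lock. The bitmap clear also becomes an atomic clear_bit() because it now runs outside the lock. A userspace model of that unlock-before-cancel ordering follows, a pthread mutex standing in for the spinlock and all names hypothetical:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

struct queued_cmd {
	int in_use;
	void *cmnd;		/* owner; NULL once detached */
	int timer_armed;	/* models the pending hrtimer/tasklet */
};

static void timer_cancel_sync(struct queued_cmd *q)
{
	/* A real hrtimer_cancel() can spin waiting for the handler, and
	 * the handler may take queue_lock, so this must never be called
	 * with queue_lock held; that is the deadlock the reordering in
	 * stop_queued_cmnd()/stop_all_queued() avoids. */
	q->timer_armed = 0;
}

static int stop_cmd(struct queued_cmd *q, void *cmnd)
{
	pthread_mutex_lock(&queue_lock);
	if (!q->in_use || q->cmnd != cmnd) {
		pthread_mutex_unlock(&queue_lock);
		return 0;
	}
	q->cmnd = NULL;			/* detach while still locked */
	pthread_mutex_unlock(&queue_lock);
	timer_cancel_sync(q);		/* cancel outside the lock */
	q->in_use = 0;			/* atomic clear_bit in the driver */
	return 1;
}

int main(void)
{
	struct queued_cmd q = { 1, (void *)0x1, 1 };

	printf("stopped: %d\n", stop_cmd(&q, (void *)0x1));
	return 0;
}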
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 5db8454474ee..6b20ef3fee54 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1238,9 +1238,9 @@ retry_tur:
1238/** 1238/**
1239 * scsi_eh_test_devices - check if devices are responding from error recovery. 1239 * scsi_eh_test_devices - check if devices are responding from error recovery.
1240 * @cmd_list: scsi commands in error recovery. 1240 * @cmd_list: scsi commands in error recovery.
1241 * @work_q: queue for commands which still need more error recovery 1241 * @work_q: queue for commands which still need more error recovery
1242 * @done_q: queue for commands which are finished 1242 * @done_q: queue for commands which are finished
1243 * @try_stu: boolean on if a STU command should be tried in addition to TUR. 1243 * @try_stu: boolean on if a STU command should be tried in addition to TUR.
1244 * 1244 *
1245 * Description: 1245 * Description:
1246 * Tests if devices are in a working state. Commands to devices now in 1246 * Tests if devices are in a working state. Commands to devices now in
@@ -1373,7 +1373,7 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
1373 /** 1373 /**
1374 * scsi_eh_stu - send START_UNIT if needed 1374 * scsi_eh_stu - send START_UNIT if needed
1375 * @shost: &scsi host being recovered. 1375 * @shost: &scsi host being recovered.
1376 * @work_q: &list_head for pending commands. 1376 * @work_q: &list_head for pending commands.
1377 * @done_q: &list_head for processed commands. 1377 * @done_q: &list_head for processed commands.
1378 * 1378 *
1379 * Notes: 1379 * Notes:
@@ -1436,7 +1436,7 @@ static int scsi_eh_stu(struct Scsi_Host *shost,
1436/** 1436/**
1437 * scsi_eh_bus_device_reset - send bdr if needed 1437 * scsi_eh_bus_device_reset - send bdr if needed
1438 * @shost: scsi host being recovered. 1438 * @shost: scsi host being recovered.
1439 * @work_q: &list_head for pending commands. 1439 * @work_q: &list_head for pending commands.
1440 * @done_q: &list_head for processed commands. 1440 * @done_q: &list_head for processed commands.
1441 * 1441 *
1442 * Notes: 1442 * Notes:
@@ -1502,7 +1502,7 @@ static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
1502/** 1502/**
1503 * scsi_eh_target_reset - send target reset if needed 1503 * scsi_eh_target_reset - send target reset if needed
1504 * @shost: scsi host being recovered. 1504 * @shost: scsi host being recovered.
1505 * @work_q: &list_head for pending commands. 1505 * @work_q: &list_head for pending commands.
1506 * @done_q: &list_head for processed commands. 1506 * @done_q: &list_head for processed commands.
1507 * 1507 *
1508 * Notes: 1508 * Notes:
@@ -1567,7 +1567,7 @@ static int scsi_eh_target_reset(struct Scsi_Host *shost,
1567/** 1567/**
1568 * scsi_eh_bus_reset - send a bus reset 1568 * scsi_eh_bus_reset - send a bus reset
1569 * @shost: &scsi host being recovered. 1569 * @shost: &scsi host being recovered.
1570 * @work_q: &list_head for pending commands. 1570 * @work_q: &list_head for pending commands.
1571 * @done_q: &list_head for processed commands. 1571 * @done_q: &list_head for processed commands.
1572 */ 1572 */
1573static int scsi_eh_bus_reset(struct Scsi_Host *shost, 1573static int scsi_eh_bus_reset(struct Scsi_Host *shost,
@@ -1638,8 +1638,9 @@ static int scsi_eh_bus_reset(struct Scsi_Host *shost,
1638 1638
1639/** 1639/**
1640 * scsi_eh_host_reset - send a host reset 1640 * scsi_eh_host_reset - send a host reset
1641 * @work_q: list_head for processed commands. 1641 * @shost: host to be reset.
1642 * @done_q: list_head for processed commands. 1642 * @work_q: &list_head for pending commands.
1643 * @done_q: &list_head for processed commands.
1643 */ 1644 */
1644static int scsi_eh_host_reset(struct Scsi_Host *shost, 1645static int scsi_eh_host_reset(struct Scsi_Host *shost,
1645 struct list_head *work_q, 1646 struct list_head *work_q,
@@ -1677,8 +1678,8 @@ static int scsi_eh_host_reset(struct Scsi_Host *shost,
1677 1678
1678/** 1679/**
1679 * scsi_eh_offline_sdevs - offline scsi devices that fail to recover 1680 * scsi_eh_offline_sdevs - offline scsi devices that fail to recover
1680 * @work_q: list_head for processed commands. 1681 * @work_q: &list_head for pending commands.
1681 * @done_q: list_head for processed commands. 1682 * @done_q: &list_head for processed commands.
1682 */ 1683 */
1683static void scsi_eh_offline_sdevs(struct list_head *work_q, 1684static void scsi_eh_offline_sdevs(struct list_head *work_q,
1684 struct list_head *done_q) 1685 struct list_head *done_q)
@@ -2043,8 +2044,8 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
2043 2044
2044/** 2045/**
2045 * scsi_eh_ready_devs - check device ready state and recover if not. 2046 * scsi_eh_ready_devs - check device ready state and recover if not.
2046 * @shost: host to be recovered. 2047 * @shost: host to be recovered.
2047 * @work_q: &list_head for pending commands. 2048 * @work_q: &list_head for pending commands.
2048 * @done_q: &list_head for processed commands. 2049 * @done_q: &list_head for processed commands.
2049 */ 2050 */
2050void scsi_eh_ready_devs(struct Scsi_Host *shost, 2051void scsi_eh_ready_devs(struct Scsi_Host *shost,
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index aaea4b98af16..db8c449282f9 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -645,16 +645,18 @@ static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
645static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) 645static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
646{ 646{
647 struct scsi_device *sdev = cmd->device; 647 struct scsi_device *sdev = cmd->device;
648 struct Scsi_Host *shost = sdev->host;
648 unsigned long flags; 649 unsigned long flags;
649 650
650 BUG_ON(list_empty(&cmd->list));
651
652 scsi_mq_free_sgtables(cmd); 651 scsi_mq_free_sgtables(cmd);
653 scsi_uninit_cmd(cmd); 652 scsi_uninit_cmd(cmd);
654 653
655 spin_lock_irqsave(&sdev->list_lock, flags); 654 if (shost->use_cmd_list) {
656 list_del_init(&cmd->list); 655 BUG_ON(list_empty(&cmd->list));
657 spin_unlock_irqrestore(&sdev->list_lock, flags); 656 spin_lock_irqsave(&sdev->list_lock, flags);
657 list_del_init(&cmd->list);
658 spin_unlock_irqrestore(&sdev->list_lock, flags);
659 }
658} 660}
659 661
660/* 662/*
@@ -1816,13 +1818,11 @@ static int scsi_mq_prep_fn(struct request *req)
1816 INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler); 1818 INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
1817 cmd->jiffies_at_alloc = jiffies; 1819 cmd->jiffies_at_alloc = jiffies;
1818 1820
1819 /* 1821 if (shost->use_cmd_list) {
1820 * XXX: cmd_list lookups are only used by two drivers, try to get 1822 spin_lock_irq(&sdev->list_lock);
1821 * rid of this list in common code. 1823 list_add_tail(&cmd->list, &sdev->cmd_list);
1822 */ 1824 spin_unlock_irq(&sdev->list_lock);
1823 spin_lock_irq(&sdev->list_lock); 1825 }
1824 list_add_tail(&cmd->list, &sdev->cmd_list);
1825 spin_unlock_irq(&sdev->list_lock);
1826 1826
1827 sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size; 1827 sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
1828 cmd->sdb.table.sgl = sg; 1828 cmd->sdb.table.sgl = sg;
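
The scsi_lib.c hunks above make per-device command-list tracking opt-in via a new use_cmd_list host flag, so hosts that never walk sdev->cmd_list no longer pay for the list manipulation and its lock on every command. A minimal sketch of that conditional tracking, with a hand-rolled list_head standing in for the kernel's and locking elided (all names illustrative):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
	list_init(n);
}

struct host { int use_cmd_list; };	/* opt-in flag, as in the hunk */
struct sdev { struct host *host; struct list_head cmd_list; };
struct scmd { struct sdev *device; struct list_head list; };

/* Only hosts that actually walk per-device command lists do the work. */
static void cmd_track(struct scmd *cmd)
{
	if (cmd->device->host->use_cmd_list)
		list_add_tail(&cmd->list, &cmd->device->cmd_list);
}

static void cmd_untrack(struct scmd *cmd)
{
	if (cmd->device->host->use_cmd_list)
		list_del_init(&cmd->list);
}

int main(void)
{
	struct host h = { 1 };
	struct sdev d = { &h, { 0, 0 } };
	struct scmd c = { &d, { 0, 0 } };

	list_init(&d.cmd_list);
	list_init(&c.list);
	cmd_track(&c);
	printf("tracked: %d\n", d.cmd_list.next == &c.list);
	cmd_untrack(&c);
	printf("empty:   %d\n", d.cmd_list.next == &d.cmd_list);
	return 0;
}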
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 56675dbbf681..ba3f1e8d0d57 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -736,6 +736,16 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
736 sdev->scsi_level++; 736 sdev->scsi_level++;
737 sdev->sdev_target->scsi_level = sdev->scsi_level; 737 sdev->sdev_target->scsi_level = sdev->scsi_level;
738 738
739 /*
740 * If SCSI-2 or lower, and if the transport requires it,
741 * store the LUN value in CDB[1].
742 */
743 sdev->lun_in_cdb = 0;
744 if (sdev->scsi_level <= SCSI_2 &&
745 sdev->scsi_level != SCSI_UNKNOWN &&
746 !sdev->host->no_scsi2_lun_in_cdb)
747 sdev->lun_in_cdb = 1;
748
739 return 0; 749 return 0;
740} 750}
741 751
@@ -805,6 +815,19 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
805 } else { 815 } else {
806 sdev->type = (inq_result[0] & 0x1f); 816 sdev->type = (inq_result[0] & 0x1f);
807 sdev->removable = (inq_result[1] & 0x80) >> 7; 817 sdev->removable = (inq_result[1] & 0x80) >> 7;
818
819 /*
820 * Some devices may respond with a wrong type for
821 * well-known logical units. Force the well-known type
822 * to enumerate them correctly.
823 */
824 if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) {
825 sdev_printk(KERN_WARNING, sdev,
826 "%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n",
827 __func__, sdev->type, (unsigned int)sdev->lun);
828 sdev->type = TYPE_WLUN;
829 }
830
808 } 831 }
809 832
810 if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) { 833 if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
@@ -1733,6 +1756,9 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
1733 /* target removed before the device could be added */ 1756 /* target removed before the device could be added */
1734 if (sdev->sdev_state == SDEV_DEL) 1757 if (sdev->sdev_state == SDEV_DEL)
1735 continue; 1758 continue;
1759 /* If device is already visible, skip adding it to sysfs */
1760 if (sdev->is_visible)
1761 continue;
1736 if (!scsi_host_scan_allowed(shost) || 1762 if (!scsi_host_scan_allowed(shost) ||
1737 scsi_sysfs_add_sdev(sdev) != 0) 1763 scsi_sysfs_add_sdev(sdev) != 0)
1738 __scsi_remove_device(sdev); 1764 __scsi_remove_device(sdev);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 8b4105a22ac2..f4cb7b3e9e23 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1044,10 +1044,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
1044 pm_runtime_enable(&sdev->sdev_gendev); 1044 pm_runtime_enable(&sdev->sdev_gendev);
1045 scsi_autopm_put_target(starget); 1045 scsi_autopm_put_target(starget);
1046 1046
1047 /* The following call will keep sdev active indefinitely, until
1048 * its driver does a corresponding scsi_autopm_pm_device(). Only
1049 * drivers supporting autosuspend will do this.
1050 */
1051 scsi_autopm_get_device(sdev); 1047 scsi_autopm_get_device(sdev);
1052 1048
1053 error = device_add(&sdev->sdev_gendev); 1049 error = device_add(&sdev->sdev_gendev);
@@ -1085,6 +1081,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
1085 } 1081 }
1086 } 1082 }
1087 1083
1084 scsi_autopm_put_device(sdev);
1088 return error; 1085 return error;
1089} 1086}
1090 1087
@@ -1263,7 +1260,19 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
1263 sdev->sdev_dev.class = &sdev_class; 1260 sdev->sdev_dev.class = &sdev_class;
1264 dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%llu", 1261 dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%llu",
1265 sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); 1262 sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
1263 /*
1264 * Get a default scsi_level from the target (derived from sibling
1265 * devices). This is the best we can do for guessing how to set
1266 * sdev->lun_in_cdb for the initial INQUIRY command. For LUN 0 the
1267 * setting doesn't matter, because all the bits are zero anyway.
1268 * But it does matter for higher LUNs.
1269 */
1266 sdev->scsi_level = starget->scsi_level; 1270 sdev->scsi_level = starget->scsi_level;
1271 if (sdev->scsi_level <= SCSI_2 &&
1272 sdev->scsi_level != SCSI_UNKNOWN &&
1273 !shost->no_scsi2_lun_in_cdb)
1274 sdev->lun_in_cdb = 1;
1275
1267 transport_setup_device(&sdev->sdev_gendev); 1276 transport_setup_device(&sdev->sdev_gendev);
1268 spin_lock_irqsave(shost->host_lock, flags); 1277 spin_lock_irqsave(shost->host_lock, flags);
1269 list_add_tail(&sdev->same_target_siblings, &starget->devices); 1278 list_add_tail(&sdev->same_target_siblings, &starget->devices);
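
The scsi_sysfs.c hunks above rebalance the runtime-PM usage counter: instead of a scsi_autopm_get_device() that keeps the device active indefinitely until some driver drops it, the reference is now held only across registration and released at the end of scsi_sysfs_add_sdev(). A toy model of that balanced get/put window follows; the counter and stubs are illustrative, and error-path handling is elided:

#include <stdio.h>

static int pm_usage;	/* models the runtime-PM usage counter */

static void autopm_get(void) { pm_usage++; }
static void autopm_put(void) { pm_usage--; }

static int device_add_stub(void) { return 0; /* pretend success */ }

/* Hold a PM reference only for the duration of registration, as in the
 * reworked scsi_sysfs_add_sdev(), rather than leaving the device active
 * until a driver eventually drops the reference. */
static int add_sdev(void)
{
	int err;

	autopm_get();		/* keep the device awake while registering */
	err = device_add_stub();
	/* ... attribute and block-device setup would happen here ... */
	autopm_put();		/* balanced: device may now autosuspend */
	return err;
}

int main(void)
{
	add_sdev();
	printf("usage counter after add: %d (balanced)\n", pm_usage);
	return 0;
}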
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 2c2041ca4b70..0cb5c9f0c743 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -185,7 +185,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
185 if (ct < 0) 185 if (ct < 0)
186 return -EINVAL; 186 return -EINVAL;
187 rcd = ct & 0x01 ? 1 : 0; 187 rcd = ct & 0x01 ? 1 : 0;
188 wce = ct & 0x02 ? 1 : 0; 188 wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0;
189 189
190 if (sdkp->cache_override) { 190 if (sdkp->cache_override) {
191 sdkp->WCE = wce; 191 sdkp->WCE = wce;
@@ -2490,6 +2490,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
2490 sdkp->DPOFUA = 0; 2490 sdkp->DPOFUA = 0;
2491 } 2491 }
2492 2492
2493 /* No cache flush allowed for write protected devices */
2494 if (sdkp->WCE && sdkp->write_prot)
2495 sdkp->WCE = 0;
2496
2493 if (sdkp->first_scan || old_wce != sdkp->WCE || 2497 if (sdkp->first_scan || old_wce != sdkp->WCE ||
2494 old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA) 2498 old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA)
2495 sd_printk(KERN_NOTICE, sdkp, 2499 sd_printk(KERN_NOTICE, sdkp,
@@ -2961,6 +2965,7 @@ static int sd_probe(struct device *dev)
2961 int index; 2965 int index;
2962 int error; 2966 int error;
2963 2967
2968 scsi_autopm_get_device(sdp);
2964 error = -ENODEV; 2969 error = -ENODEV;
2965 if (sdp->type != TYPE_DISK && sdp->type != TYPE_MOD && sdp->type != TYPE_RBC) 2970 if (sdp->type != TYPE_DISK && sdp->type != TYPE_MOD && sdp->type != TYPE_RBC)
2966 goto out; 2971 goto out;
@@ -3037,6 +3042,7 @@ static int sd_probe(struct device *dev)
3037 out_free: 3042 out_free:
3038 kfree(sdkp); 3043 kfree(sdkp);
3039 out: 3044 out:
3045 scsi_autopm_put_device(sdp);
3040 return error; 3046 return error;
3041} 3047}
3042 3048
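
The sd.c hunks above close a small consistency hole: a write-protected disk can never hold dirty cached writes, so the write-cache-enable bit is forced off both when the user writes cache_type and when the mode page is read. The effective setting is just a predicate, sketched here with a hypothetical function name:

#include <stdbool.h>
#include <stdio.h>

/* As in sd_read_cache_type()/cache_type_store() above: WCE is honoured
 * only on a writable device; write protection overrides it. */
static bool effective_wce(bool wce_requested, bool write_prot)
{
	return wce_requested && !write_prot;
}

int main(void)
{
	printf("rw disk, WCE on: %d\n", effective_wce(true, false));
	printf("ro disk, WCE on: %d\n", effective_wce(true, true));
	return 0;
}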
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 7eeb93627beb..2de44cc58b1a 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -657,6 +657,7 @@ static int sr_probe(struct device *dev)
657 struct scsi_cd *cd; 657 struct scsi_cd *cd;
658 int minor, error; 658 int minor, error;
659 659
660 scsi_autopm_get_device(sdev);
660 error = -ENODEV; 661 error = -ENODEV;
661 if (sdev->type != TYPE_ROM && sdev->type != TYPE_WORM) 662 if (sdev->type != TYPE_ROM && sdev->type != TYPE_WORM)
662 goto fail; 663 goto fail;
@@ -744,6 +745,7 @@ fail_put:
744fail_free: 745fail_free:
745 kfree(cd); 746 kfree(cd);
746fail: 747fail:
748 scsi_autopm_put_device(sdev);
747 return error; 749 return error;
748} 750}
749 751
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index aff9689de0f7..d3fd6e8fb378 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4105,6 +4105,7 @@ static int st_probe(struct device *dev)
4105 return -ENODEV; 4105 return -ENODEV;
4106 } 4106 }
4107 4107
4108 scsi_autopm_get_device(SDp);
4108 i = queue_max_segments(SDp->request_queue); 4109 i = queue_max_segments(SDp->request_queue);
4109 if (st_max_sg_segs < i) 4110 if (st_max_sg_segs < i)
4110 i = st_max_sg_segs; 4111 i = st_max_sg_segs;
@@ -4244,6 +4245,7 @@ out_put_disk:
4244out_buffer_free: 4245out_buffer_free:
4245 kfree(buffer); 4246 kfree(buffer);
4246out: 4247out:
4248 scsi_autopm_put_device(SDp);
4247 return -ENODEV; 4249 return -ENODEV;
4248}; 4250};
4249 4251
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index fecac5d03fdd..733e5f759518 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1152,24 +1152,12 @@ static void storvsc_on_io_completion(struct hv_device *device,
1152 stor_pkt->vm_srb.sense_info_length = 1152 stor_pkt->vm_srb.sense_info_length =
1153 vstor_packet->vm_srb.sense_info_length; 1153 vstor_packet->vm_srb.sense_info_length;
1154 1154
1155 if (vstor_packet->vm_srb.scsi_status != 0 ||
1156 vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS){
1157 dev_warn(&device->device,
1158 "cmd 0x%x scsi status 0x%x srb status 0x%x\n",
1159 stor_pkt->vm_srb.cdb[0],
1160 vstor_packet->vm_srb.scsi_status,
1161 vstor_packet->vm_srb.srb_status);
1162 }
1163 1155
1164 if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) { 1156 if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
1165 /* CHECK_CONDITION */ 1157 /* CHECK_CONDITION */
1166 if (vstor_packet->vm_srb.srb_status & 1158 if (vstor_packet->vm_srb.srb_status &
1167 SRB_STATUS_AUTOSENSE_VALID) { 1159 SRB_STATUS_AUTOSENSE_VALID) {
1168 /* autosense data available */ 1160 /* autosense data available */
1169 dev_warn(&device->device,
1170 "stor pkt %p autosense data valid - len %d\n",
1171 request,
1172 vstor_packet->vm_srb.sense_info_length);
1173 1161
1174 memcpy(request->sense_buffer, 1162 memcpy(request->sense_buffer,
1175 vstor_packet->vm_srb.sense_data, 1163 vstor_packet->vm_srb.sense_data,
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index f07f90179bbc..6e07b2afddeb 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -35,6 +35,8 @@
35config SCSI_UFSHCD 35config SCSI_UFSHCD
36 tristate "Universal Flash Storage Controller Driver Core" 36 tristate "Universal Flash Storage Controller Driver Core"
37 depends on SCSI && SCSI_DMA 37 depends on SCSI && SCSI_DMA
38 select PM_DEVFREQ
39 select DEVFREQ_GOV_SIMPLE_ONDEMAND
38 ---help--- 40 ---help---
39 This selects the support for UFS devices in Linux, say Y and make 41 This selects the support for UFS devices in Linux, say Y and make
40 sure that you know the name of your UFS host adapter (the card 42 sure that you know the name of your UFS host adapter (the card
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index fafcf5e354c6..42c459a9d3fe 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -49,6 +49,27 @@
49#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\ 49#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
50 cpu_to_be32((byte3 << 24) | (byte2 << 16) |\ 50 cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
51 (byte1 << 8) | (byte0)) 51 (byte1 << 8) | (byte0))
52/*
53 * UFS device may have standard LUs and LUN id could be from 0x00 to
54 * 0x7F. Standard LUs use "Peripheral Device Addressing Format".
55 * UFS device may also have the Well Known LUs (also referred as W-LU)
56 * which again could be from 0x00 to 0x7F. For W-LUs, device only use
57 * the "Extended Addressing Format" which means the W-LUNs would be
58 * from 0xc100 (SCSI_W_LUN_BASE) onwards.
59 * This means max. LUN number reported from UFS device could be 0xC17F.
60 */
61#define UFS_UPIU_MAX_UNIT_NUM_ID 0x7F
62#define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID)
63#define UFS_UPIU_WLUN_ID (1 << 7)
64#define UFS_UPIU_MAX_GENERAL_LUN 8
65
66/* Well known logical unit id in LUN field of UPIU */
67enum {
68 UFS_UPIU_REPORT_LUNS_WLUN = 0x81,
69 UFS_UPIU_UFS_DEVICE_WLUN = 0xD0,
70 UFS_UPIU_BOOT_WLUN = 0xB0,
71 UFS_UPIU_RPMB_WLUN = 0xC4,
72};
52 73
53/* 74/*
54 * UFS Protocol Information Unit related definitions 75 * UFS Protocol Information Unit related definitions
@@ -108,11 +129,13 @@ enum {
108/* Flag idn for Query Requests*/ 129/* Flag idn for Query Requests*/
109enum flag_idn { 130enum flag_idn {
110 QUERY_FLAG_IDN_FDEVICEINIT = 0x01, 131 QUERY_FLAG_IDN_FDEVICEINIT = 0x01,
132 QUERY_FLAG_IDN_PWR_ON_WPE = 0x03,
111 QUERY_FLAG_IDN_BKOPS_EN = 0x04, 133 QUERY_FLAG_IDN_BKOPS_EN = 0x04,
112}; 134};
113 135
114/* Attribute idn for Query requests */ 136/* Attribute idn for Query requests */
115enum attr_idn { 137enum attr_idn {
138 QUERY_ATTR_IDN_ACTIVE_ICC_LVL = 0x03,
116 QUERY_ATTR_IDN_BKOPS_STATUS = 0x05, 139 QUERY_ATTR_IDN_BKOPS_STATUS = 0x05,
117 QUERY_ATTR_IDN_EE_CONTROL = 0x0D, 140 QUERY_ATTR_IDN_EE_CONTROL = 0x0D,
118 QUERY_ATTR_IDN_EE_STATUS = 0x0E, 141 QUERY_ATTR_IDN_EE_STATUS = 0x0E,
@@ -129,10 +152,29 @@ enum desc_idn {
129 QUERY_DESC_IDN_RFU_1 = 0x6, 152 QUERY_DESC_IDN_RFU_1 = 0x6,
130 QUERY_DESC_IDN_GEOMETRY = 0x7, 153 QUERY_DESC_IDN_GEOMETRY = 0x7,
131 QUERY_DESC_IDN_POWER = 0x8, 154 QUERY_DESC_IDN_POWER = 0x8,
132 QUERY_DESC_IDN_RFU_2 = 0x9, 155 QUERY_DESC_IDN_MAX,
156};
157
158enum desc_header_offset {
159 QUERY_DESC_LENGTH_OFFSET = 0x00,
160 QUERY_DESC_DESC_TYPE_OFFSET = 0x01,
161};
162
163enum ufs_desc_max_size {
164 QUERY_DESC_DEVICE_MAX_SIZE = 0x1F,
165 QUERY_DESC_CONFIGURAION_MAX_SIZE = 0x90,
166 QUERY_DESC_UNIT_MAX_SIZE = 0x23,
167 QUERY_DESC_INTERCONNECT_MAX_SIZE = 0x06,
168 /*
169 * Max. 126 UNICODE characters (2 bytes per character) plus 2 bytes
170 * of descriptor header.
171 */
172 QUERY_DESC_STRING_MAX_SIZE = 0xFE,
173 QUERY_DESC_GEOMETRY_MAZ_SIZE = 0x44,
174 QUERY_DESC_POWER_MAX_SIZE = 0x62,
175 QUERY_DESC_RFU_MAX_SIZE = 0x00,
133}; 176};
134 177
135#define UNIT_DESC_MAX_SIZE 0x22
136/* Unit descriptor parameters offsets in bytes*/ 178/* Unit descriptor parameters offsets in bytes*/
137enum unit_desc_param { 179enum unit_desc_param {
138 UNIT_DESC_PARAM_LEN = 0x0, 180 UNIT_DESC_PARAM_LEN = 0x0,
@@ -153,6 +195,43 @@ enum unit_desc_param {
153 UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22, 195 UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22,
154}; 196};
155 197
198/*
199 * Logical Unit Write Protect
200 * 00h: LU not write protected
201 * 01h: LU write protected when fPowerOnWPEn =1
202 * 02h: LU permanently write protected when fPermanentWPEn =1
203 */
204enum ufs_lu_wp_type {
205 UFS_LU_NO_WP = 0x00,
206 UFS_LU_POWER_ON_WP = 0x01,
207 UFS_LU_PERM_WP = 0x02,
208};
209
210/* bActiveICCLevel parameter current units */
211enum {
212 UFSHCD_NANO_AMP = 0,
213 UFSHCD_MICRO_AMP = 1,
214 UFSHCD_MILI_AMP = 2,
215 UFSHCD_AMP = 3,
216};
217
218#define POWER_DESC_MAX_SIZE 0x62
219#define POWER_DESC_MAX_ACTV_ICC_LVLS 16
220
221/* Attribute bActiveICCLevel parameter bit masks definitions */
222#define ATTR_ICC_LVL_UNIT_OFFSET 14
223#define ATTR_ICC_LVL_UNIT_MASK (0x3 << ATTR_ICC_LVL_UNIT_OFFSET)
224#define ATTR_ICC_LVL_VALUE_MASK 0x3FF
225
226/* Power descriptor parameters offsets in bytes */
227enum power_desc_param_offset {
228 PWR_DESC_LEN = 0x0,
229 PWR_DESC_TYPE = 0x1,
230 PWR_DESC_ACTIVE_LVLS_VCC_0 = 0x2,
231 PWR_DESC_ACTIVE_LVLS_VCCQ_0 = 0x22,
232 PWR_DESC_ACTIVE_LVLS_VCCQ2_0 = 0x42,
233};
234
156/* Exception event mask values */ 235/* Exception event mask values */
157enum { 236enum {
158 MASK_EE_STATUS = 0xFFFF, 237 MASK_EE_STATUS = 0xFFFF,
@@ -160,11 +239,12 @@ enum {
160}; 239};
161 240
162/* Background operation status */ 241/* Background operation status */
163enum { 242enum bkops_status {
164 BKOPS_STATUS_NO_OP = 0x0, 243 BKOPS_STATUS_NO_OP = 0x0,
165 BKOPS_STATUS_NON_CRITICAL = 0x1, 244 BKOPS_STATUS_NON_CRITICAL = 0x1,
166 BKOPS_STATUS_PERF_IMPACT = 0x2, 245 BKOPS_STATUS_PERF_IMPACT = 0x2,
167 BKOPS_STATUS_CRITICAL = 0x3, 246 BKOPS_STATUS_CRITICAL = 0x3,
247 BKOPS_STATUS_MAX = BKOPS_STATUS_CRITICAL,
168}; 248};
169 249
170/* UTP QUERY Transaction Specific Fields OpCode */ 250/* UTP QUERY Transaction Specific Fields OpCode */
@@ -225,6 +305,14 @@ enum {
225 UPIU_TASK_MANAGEMENT_FUNC_FAILED = 0x05, 305 UPIU_TASK_MANAGEMENT_FUNC_FAILED = 0x05,
226 UPIU_INCORRECT_LOGICAL_UNIT_NO = 0x09, 306 UPIU_INCORRECT_LOGICAL_UNIT_NO = 0x09,
227}; 307};
308
309/* UFS device power modes */
310enum ufs_dev_pwr_mode {
311 UFS_ACTIVE_PWR_MODE = 1,
312 UFS_SLEEP_PWR_MODE = 2,
313 UFS_POWERDOWN_PWR_MODE = 3,
314};
315
228/** 316/**
229 * struct utp_upiu_header - UPIU header structure 317 * struct utp_upiu_header - UPIU header structure
230 * @dword_0: UPIU header DW-0 318 * @dword_0: UPIU header DW-0
@@ -362,4 +450,42 @@ struct ufs_query_res {
362 struct utp_upiu_query upiu_res; 450 struct utp_upiu_query upiu_res;
363}; 451};
364 452
453#define UFS_VREG_VCC_MIN_UV 2700000 /* uV */
454#define UFS_VREG_VCC_MAX_UV 3600000 /* uV */
455#define UFS_VREG_VCC_1P8_MIN_UV 1700000 /* uV */
456#define UFS_VREG_VCC_1P8_MAX_UV 1950000 /* uV */
457#define UFS_VREG_VCCQ_MIN_UV 1100000 /* uV */
458#define UFS_VREG_VCCQ_MAX_UV 1300000 /* uV */
459#define UFS_VREG_VCCQ2_MIN_UV 1650000 /* uV */
460#define UFS_VREG_VCCQ2_MAX_UV 1950000 /* uV */
461
462/*
463 * VCCQ & VCCQ2 current requirement when UFS device is in sleep state
464 * and link is in Hibern8 state.
465 */
466#define UFS_VREG_LPM_LOAD_UA 1000 /* uA */
467
468struct ufs_vreg {
469 struct regulator *reg;
470 const char *name;
471 bool enabled;
472 int min_uV;
473 int max_uV;
474 int min_uA;
475 int max_uA;
476};
477
478struct ufs_vreg_info {
479 struct ufs_vreg *vcc;
480 struct ufs_vreg *vccq;
481 struct ufs_vreg *vccq2;
482 struct ufs_vreg *vdd_hba;
483};
484
485struct ufs_dev_info {
486 bool f_power_on_wp_en;
487 /* Keeps information if any of the LU is power on write protected */
488 bool is_lu_power_on_wp;
489};
490
365#endif /* End of Header */ 491#endif /* End of Header */
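
Among the ufs.h additions above are the bActiveICCLevel bit masks: bits 15:14 select the current unit (the UFSHCD_NANO_AMP..UFSHCD_AMP enum) and bits 9:0 carry the value. A small decoder using exactly those masks is sketched below; the raw attribute value in main() is made up for the demo:

#include <stdint.h>
#include <stdio.h>

#define ATTR_ICC_LVL_UNIT_OFFSET 14
#define ATTR_ICC_LVL_UNIT_MASK   (0x3 << ATTR_ICC_LVL_UNIT_OFFSET)
#define ATTR_ICC_LVL_VALUE_MASK  0x3FF

/* Order matches UFSHCD_NANO_AMP=0 .. UFSHCD_AMP=3 in the header above. */
static const char *const icc_units[] = { "nA", "uA", "mA", "A" };

static void decode_icc_level(uint16_t raw)
{
	unsigned int unit  = (raw & ATTR_ICC_LVL_UNIT_MASK)
				>> ATTR_ICC_LVL_UNIT_OFFSET;
	unsigned int value = raw & ATTR_ICC_LVL_VALUE_MASK;

	printf("active ICC level: %u %s\n", value, icc_units[unit]);
}

int main(void)
{
	decode_icc_level((1 << ATTR_ICC_LVL_UNIT_OFFSET) | 150); /* 150 uA */
	return 0;
}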
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index afaabe2aeac8..955ed5587011 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -43,34 +43,24 @@
43 * @pdev: pointer to PCI device handle 43 * @pdev: pointer to PCI device handle
44 * @state: power state 44 * @state: power state
45 * 45 *
46 * Returns -ENOSYS 46 * Returns 0 if successful
47 * Returns non-zero otherwise
47 */ 48 */
48static int ufshcd_pci_suspend(struct device *dev) 49static int ufshcd_pci_suspend(struct device *dev)
49{ 50{
50 /* 51 return ufshcd_system_suspend(dev_get_drvdata(dev));
51 * TODO:
52 * 1. Call ufshcd_suspend
53 * 2. Do bus specific power management
54 */
55
56 return -ENOSYS;
57} 52}
58 53
59/** 54/**
60 * ufshcd_pci_resume - resume power management function 55 * ufshcd_pci_resume - resume power management function
61 * @pdev: pointer to PCI device handle 56 * @pdev: pointer to PCI device handle
62 * 57 *
63 * Returns -ENOSYS 58 * Returns 0 if successful
59 * Returns non-zero otherwise
64 */ 60 */
65static int ufshcd_pci_resume(struct device *dev) 61static int ufshcd_pci_resume(struct device *dev)
66{ 62{
67 /* 63 return ufshcd_system_resume(dev_get_drvdata(dev));
68 * TODO:
69 * 1. Call ufshcd_resume.
70 * 2. Do bus specific wake up
71 */
72
73 return -ENOSYS;
74} 64}
75#else 65#else
76#define ufshcd_pci_suspend NULL 66#define ufshcd_pci_suspend NULL
@@ -80,30 +70,15 @@ static int ufshcd_pci_resume(struct device *dev)
80#ifdef CONFIG_PM_RUNTIME 70#ifdef CONFIG_PM_RUNTIME
81static int ufshcd_pci_runtime_suspend(struct device *dev) 71static int ufshcd_pci_runtime_suspend(struct device *dev)
82{ 72{
83 struct ufs_hba *hba = dev_get_drvdata(dev); 73 return ufshcd_runtime_suspend(dev_get_drvdata(dev));
84
85 if (!hba)
86 return 0;
87
88 return ufshcd_runtime_suspend(hba);
89} 74}
90static int ufshcd_pci_runtime_resume(struct device *dev) 75static int ufshcd_pci_runtime_resume(struct device *dev)
91{ 76{
92 struct ufs_hba *hba = dev_get_drvdata(dev); 77 return ufshcd_runtime_resume(dev_get_drvdata(dev));
93
94 if (!hba)
95 return 0;
96
97 return ufshcd_runtime_resume(hba);
98} 78}
99static int ufshcd_pci_runtime_idle(struct device *dev) 79static int ufshcd_pci_runtime_idle(struct device *dev)
100{ 80{
101 struct ufs_hba *hba = dev_get_drvdata(dev); 81 return ufshcd_runtime_idle(dev_get_drvdata(dev));
102
103 if (!hba)
104 return 0;
105
106 return ufshcd_runtime_idle(hba);
107} 82}
108#else /* !CONFIG_PM_RUNTIME */ 83#else /* !CONFIG_PM_RUNTIME */
109#define ufshcd_pci_runtime_suspend NULL 84#define ufshcd_pci_runtime_suspend NULL
@@ -117,7 +92,7 @@ static int ufshcd_pci_runtime_idle(struct device *dev)
117 */ 92 */
118static void ufshcd_pci_shutdown(struct pci_dev *pdev) 93static void ufshcd_pci_shutdown(struct pci_dev *pdev)
119{ 94{
120 ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev)); 95 ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev));
121} 96}
122 97
123/** 98/**
@@ -164,7 +139,15 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
164 139
165 mmio_base = pcim_iomap_table(pdev)[0]; 140 mmio_base = pcim_iomap_table(pdev)[0];
166 141
167 err = ufshcd_init(&pdev->dev, &hba, mmio_base, pdev->irq); 142 err = ufshcd_alloc_host(&pdev->dev, &hba);
143 if (err) {
144 dev_err(&pdev->dev, "Allocation failed\n");
145 return err;
146 }
147
148 INIT_LIST_HEAD(&hba->clk_list_head);
149
150 err = ufshcd_init(hba, mmio_base, pdev->irq);
168 if (err) { 151 if (err) {
169 dev_err(&pdev->dev, "Initialization failed\n"); 152 dev_err(&pdev->dev, "Initialization failed\n");
170 return err; 153 return err;
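
The probe hunk above splits initialization into two phases: ufshcd_alloc_host() creates the hba object so the bus glue can seed fields (here, the clock list head) before the common ufshcd_init() runs against the hardware. A hedged userspace sketch of that two-phase pattern follows; the struct fields and function names are illustrative, not the driver's API:

#include <stdio.h>
#include <stdlib.h>

struct hba {
	void *mmio_base;
	int irq;
	int clk_list_initialized;	/* seeded by bus glue between phases */
};

/* Phase 1: allocate the object, no hardware touched yet. */
static int hba_alloc(struct hba **out)
{
	*out = calloc(1, sizeof(**out));
	return *out ? 0 : -1;
}

/* Phase 2: common init, relying on glue-specific setup done in between. */
static int hba_init(struct hba *hba, void *mmio, int irq)
{
	if (!hba->clk_list_initialized)
		return -1;
	hba->mmio_base = mmio;
	hba->irq = irq;
	return 0;
}

int main(void)
{
	struct hba *hba;

	if (hba_alloc(&hba))
		return 1;
	hba->clk_list_initialized = 1;	/* bus-specific setup between phases */
	printf("init: %d\n", hba_init(hba, (void *)0x1000, 42));
	free(hba);
	return 0;
}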
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 5e4623225422..8adf067ff019 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -35,53 +35,236 @@
35 35
36#include <linux/platform_device.h> 36#include <linux/platform_device.h>
37#include <linux/pm_runtime.h> 37#include <linux/pm_runtime.h>
38#include <linux/of.h>
38 39
39#include "ufshcd.h" 40#include "ufshcd.h"
40 41
42static const struct of_device_id ufs_of_match[];
43static struct ufs_hba_variant_ops *get_variant_ops(struct device *dev)
44{
45 if (dev->of_node) {
46 const struct of_device_id *match;
47
48 match = of_match_node(ufs_of_match, dev->of_node);
49 if (match)
50 return (struct ufs_hba_variant_ops *)match->data;
51 }
52
53 return NULL;
54}
55
56static int ufshcd_parse_clock_info(struct ufs_hba *hba)
57{
58 int ret = 0;
59 int cnt;
60 int i;
61 struct device *dev = hba->dev;
62 struct device_node *np = dev->of_node;
63 char *name;
64 u32 *clkfreq = NULL;
65 struct ufs_clk_info *clki;
66 int len = 0;
67 size_t sz = 0;
68
69 if (!np)
70 goto out;
71
72 INIT_LIST_HEAD(&hba->clk_list_head);
73
74 cnt = of_property_count_strings(np, "clock-names");
75 if (!cnt || (cnt == -EINVAL)) {
76 dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
77 __func__);
78 } else if (cnt < 0) {
79 dev_err(dev, "%s: count clock strings failed, err %d\n",
80 __func__, cnt);
81 ret = cnt;
82 }
83
84 if (cnt <= 0)
85 goto out;
86
87 if (!of_get_property(np, "freq-table-hz", &len)) {
88 dev_info(dev, "freq-table-hz property not specified\n");
89 goto out;
90 }
91
92 if (len <= 0)
93 goto out;
94
95 sz = len / sizeof(*clkfreq);
96 if (sz != 2 * cnt) {
97 dev_err(dev, "%s len mismatch\n", "freq-table-hz");
98 ret = -EINVAL;
99 goto out;
100 }
101
102 clkfreq = devm_kzalloc(dev, sz * sizeof(*clkfreq),
103 GFP_KERNEL);
104 if (!clkfreq) {
105 dev_err(dev, "%s: no memory\n", "freq-table-hz");
106 ret = -ENOMEM;
107 goto out;
108 }
109
110 ret = of_property_read_u32_array(np, "freq-table-hz",
111 clkfreq, sz);
112 if (ret && (ret != -EINVAL)) {
113 dev_err(dev, "%s: error reading array %d\n",
114 "freq-table-hz", ret);
115 goto free_clkfreq;
116 }
117
118 for (i = 0; i < sz; i += 2) {
119 ret = of_property_read_string_index(np,
120 "clock-names", i/2, (const char **)&name);
121 if (ret)
122 goto free_clkfreq;
123
124 clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
125 if (!clki) {
126 ret = -ENOMEM;
127 goto free_clkfreq;
128 }
129
130 clki->min_freq = clkfreq[i];
131 clki->max_freq = clkfreq[i+1];
132 clki->name = kstrdup(name, GFP_KERNEL);
133 dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz",
134 clki->min_freq, clki->max_freq, clki->name);
135 list_add_tail(&clki->list, &hba->clk_list_head);
136 }
137free_clkfreq:
138 kfree(clkfreq);
139out:
140 return ret;
141}
142
143#define MAX_PROP_SIZE 32
144static int ufshcd_populate_vreg(struct device *dev, const char *name,
145 struct ufs_vreg **out_vreg)
146{
147 int ret = 0;
148 char prop_name[MAX_PROP_SIZE];
149 struct ufs_vreg *vreg = NULL;
150 struct device_node *np = dev->of_node;
151
152 if (!np) {
153 dev_err(dev, "%s: non DT initialization\n", __func__);
154 goto out;
155 }
156
157 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
158 if (!of_parse_phandle(np, prop_name, 0)) {
159 dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
160 __func__, prop_name);
161 goto out;
162 }
163
164 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
165 if (!vreg) {
166 dev_err(dev, "No memory for %s regulator\n", name);
167 goto out;
168 }
169
170 vreg->name = kstrdup(name, GFP_KERNEL);
171
172 /* if it is a fixed regulator, no further initialization is needed */
173 snprintf(prop_name, MAX_PROP_SIZE, "%s-fixed-regulator", name);
174 if (of_property_read_bool(np, prop_name))
175 goto out;
176
177 snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
178 ret = of_property_read_u32(np, prop_name, &vreg->max_uA);
179 if (ret) {
180 dev_err(dev, "%s: unable to find %s err %d\n",
181 __func__, prop_name, ret);
182 goto out_free;
183 }
184
185 vreg->min_uA = 0;
186 if (!strcmp(name, "vcc")) {
187 if (of_property_read_bool(np, "vcc-supply-1p8")) {
188 vreg->min_uV = UFS_VREG_VCC_1P8_MIN_UV;
189 vreg->max_uV = UFS_VREG_VCC_1P8_MAX_UV;
190 } else {
191 vreg->min_uV = UFS_VREG_VCC_MIN_UV;
192 vreg->max_uV = UFS_VREG_VCC_MAX_UV;
193 }
194 } else if (!strcmp(name, "vccq")) {
195 vreg->min_uV = UFS_VREG_VCCQ_MIN_UV;
196 vreg->max_uV = UFS_VREG_VCCQ_MAX_UV;
197 } else if (!strcmp(name, "vccq2")) {
198 vreg->min_uV = UFS_VREG_VCCQ2_MIN_UV;
199 vreg->max_uV = UFS_VREG_VCCQ2_MAX_UV;
200 }
201
202 goto out;
203
204out_free:
205 devm_kfree(dev, vreg);
206 vreg = NULL;
207out:
208 if (!ret)
209 *out_vreg = vreg;
210 return ret;
211}
212
213/**
214 * ufshcd_parse_regulator_info - get regulator info from device tree
215 * @hba: per adapter instance
216 *
217 * Get regulator info from device tree for vcc, vccq, vccq2 power supplies.
218 * If any of the supplies is not defined, it is assumed to be always-on,
219 * and zero is returned. If a property is defined but fails to parse,
220 * the corresponding error is returned.
221 */
222static int ufshcd_parse_regulator_info(struct ufs_hba *hba)
223{
224 int err;
225 struct device *dev = hba->dev;
226 struct ufs_vreg_info *info = &hba->vreg_info;
227
228 err = ufshcd_populate_vreg(dev, "vdd-hba", &info->vdd_hba);
229 if (err)
230 goto out;
231
232 err = ufshcd_populate_vreg(dev, "vcc", &info->vcc);
233 if (err)
234 goto out;
235
236 err = ufshcd_populate_vreg(dev, "vccq", &info->vccq);
237 if (err)
238 goto out;
239
240 err = ufshcd_populate_vreg(dev, "vccq2", &info->vccq2);
241out:
242 return err;
243}
244
 #ifdef CONFIG_PM
 /**
  * ufshcd_pltfrm_suspend - suspend power management function
  * @dev: pointer to device handle
  *
- *
- * Returns 0
+ * Returns 0 if successful
+ * Returns non-zero otherwise
  */
 static int ufshcd_pltfrm_suspend(struct device *dev)
 {
-	struct platform_device *pdev = to_platform_device(dev);
-	struct ufs_hba *hba = platform_get_drvdata(pdev);
-
-	/*
-	 * TODO:
-	 * 1. Call ufshcd_suspend
-	 * 2. Do bus specific power management
-	 */
-
-	disable_irq(hba->irq);
-
-	return 0;
+	return ufshcd_system_suspend(dev_get_drvdata(dev));
 }
 
 /**
  * ufshcd_pltfrm_resume - resume power management function
  * @dev: pointer to device handle
  *
- * Returns 0
+ * Returns 0 if successful
+ * Returns non-zero otherwise
  */
 static int ufshcd_pltfrm_resume(struct device *dev)
 {
-	struct platform_device *pdev = to_platform_device(dev);
-	struct ufs_hba *hba = platform_get_drvdata(pdev);
-
-	/*
-	 * TODO:
-	 * 1. Call ufshcd_resume.
-	 * 2. Do bus specific wake up
-	 */
-
-	enable_irq(hba->irq);
-
-	return 0;
+	return ufshcd_system_resume(dev_get_drvdata(dev));
 }
 #else
 #define ufshcd_pltfrm_suspend	NULL
@@ -91,30 +274,15 @@ static int ufshcd_pltfrm_resume(struct device *dev)
 #ifdef CONFIG_PM_RUNTIME
 static int ufshcd_pltfrm_runtime_suspend(struct device *dev)
 {
-	struct ufs_hba *hba = dev_get_drvdata(dev);
-
-	if (!hba)
-		return 0;
-
-	return ufshcd_runtime_suspend(hba);
+	return ufshcd_runtime_suspend(dev_get_drvdata(dev));
 }
 static int ufshcd_pltfrm_runtime_resume(struct device *dev)
 {
-	struct ufs_hba *hba = dev_get_drvdata(dev);
-
-	if (!hba)
-		return 0;
-
-	return ufshcd_runtime_resume(hba);
+	return ufshcd_runtime_resume(dev_get_drvdata(dev));
 }
 static int ufshcd_pltfrm_runtime_idle(struct device *dev)
 {
-	struct ufs_hba *hba = dev_get_drvdata(dev);
-
-	if (!hba)
-		return 0;
-
-	return ufshcd_runtime_idle(hba);
+	return ufshcd_runtime_idle(dev_get_drvdata(dev));
 }
 #else /* !CONFIG_PM_RUNTIME */
 #define ufshcd_pltfrm_runtime_suspend	NULL
@@ -122,6 +290,11 @@ static int ufshcd_pltfrm_runtime_idle(struct device *dev)
 #define ufshcd_pltfrm_runtime_idle	NULL
 #endif /* CONFIG_PM_RUNTIME */
 
+static void ufshcd_pltfrm_shutdown(struct platform_device *pdev)
+{
+	ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev));
+}
+
 /**
  * ufshcd_pltfrm_probe - probe routine of the driver
  * @pdev: pointer to Platform device handle
@@ -138,8 +311,8 @@ static int ufshcd_pltfrm_probe(struct platform_device *pdev)
 
 	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	mmio_base = devm_ioremap_resource(dev, mem_res);
-	if (IS_ERR(mmio_base)) {
-		err = PTR_ERR(mmio_base);
+	if (IS_ERR(*(void **)&mmio_base)) {
+		err = PTR_ERR(*(void **)&mmio_base);
 		goto out;
 	}
 
@@ -150,10 +323,31 @@ static int ufshcd_pltfrm_probe(struct platform_device *pdev)
 		goto out;
 	}
 
+	err = ufshcd_alloc_host(dev, &hba);
+	if (err) {
+		dev_err(&pdev->dev, "Allocation failed\n");
+		goto out;
+	}
+
+	hba->vops = get_variant_ops(&pdev->dev);
+
+	err = ufshcd_parse_clock_info(hba);
+	if (err) {
+		dev_err(&pdev->dev, "%s: clock parse failed %d\n",
+				__func__, err);
+		goto out;
+	}
+	err = ufshcd_parse_regulator_info(hba);
+	if (err) {
+		dev_err(&pdev->dev, "%s: regulator init failed %d\n",
+				__func__, err);
+		goto out;
+	}
+
 	pm_runtime_set_active(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);
 
-	err = ufshcd_init(dev, &hba, mmio_base, irq);
+	err = ufshcd_init(hba, mmio_base, irq);
 	if (err) {
 		dev_err(dev, "Initialization failed\n");
 		goto out_disable_rpm;
@@ -201,6 +395,7 @@ static const struct dev_pm_ops ufshcd_dev_pm_ops = {
 static struct platform_driver ufshcd_pltfrm_driver = {
 	.probe	= ufshcd_pltfrm_probe,
 	.remove	= ufshcd_pltfrm_remove,
+	.shutdown = ufshcd_pltfrm_shutdown,
 	.driver	= {
 		.name	= "ufshcd",
 		.owner	= THIS_MODULE,
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index ba27215b8034..497c38a4a866 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -3,6 +3,7 @@
  *
  * This code is based on drivers/scsi/ufs/ufshcd.c
  * Copyright (C) 2011-2013 Samsung India Software Operations
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  *
  * Authors:
  *	Santosh Yaraganavi <santosh.sy@samsung.com>
@@ -31,16 +32,19 @@
  * circumstances will the contributor of this Program be liable for
  * any damages of any kind arising from your use or distribution of
  * this program.
+ *
+ * The Linux Foundation chooses to take subject only to the GPLv2
+ * license terms, and distributes only under these terms.
  */
 
 #include <linux/async.h>
+#include <linux/devfreq.h>
 
 #include "ufshcd.h"
 #include "unipro.h"
 
 #define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
 				 UTP_TASK_REQ_COMPL |\
-				 UIC_POWER_MODE |\
 				 UFSHCD_ERROR_MASK)
 /* UIC command timeout, unit: ms */
 #define UIC_CMD_TIMEOUT	500
@@ -58,16 +62,44 @@
 /* Task management command timeout */
 #define TM_CMD_TIMEOUT	100 /* msecs */
 
+/* maximum number of link-startup retries */
+#define DME_LINKSTARTUP_RETRIES 3
+
+/* maximum number of reset retries before giving up */
+#define MAX_HOST_RESET_RETRIES 5
+
 /* Expose the flag value from utp_upiu_query.value */
 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
 
 /* Interrupt aggregation default timeout, unit: 40us */
 #define INT_AGGR_DEF_TO	0x02
 
77#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
78 ({ \
79 int _ret; \
80 if (_on) \
81 _ret = ufshcd_enable_vreg(_dev, _vreg); \
82 else \
83 _ret = ufshcd_disable_vreg(_dev, _vreg); \
84 _ret; \
85 })
86
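The statement-expression macro evaluates to the enable/disable helper's return code, so a caller can write, for example (a sketch; error handling elided):

	ret = ufshcd_toggle_vreg(hba->dev, info->vcc, on);
	if (ret)
		goto out;
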
87static u32 ufs_query_desc_max_size[] = {
88 QUERY_DESC_DEVICE_MAX_SIZE,
89 QUERY_DESC_CONFIGURAION_MAX_SIZE,
90 QUERY_DESC_UNIT_MAX_SIZE,
91 QUERY_DESC_RFU_MAX_SIZE,
92 QUERY_DESC_INTERCONNECT_MAX_SIZE,
93 QUERY_DESC_STRING_MAX_SIZE,
94 QUERY_DESC_RFU_MAX_SIZE,
95 QUERY_DESC_GEOMETRY_MAZ_SIZE,
96 QUERY_DESC_POWER_MAX_SIZE,
97 QUERY_DESC_RFU_MAX_SIZE,
98};
99
 enum {
 	UFSHCD_MAX_CHANNEL	= 0,
 	UFSHCD_MAX_ID		= 1,
-	UFSHCD_MAX_LUNS		= 8,
 	UFSHCD_CMD_PER_LUN	= 32,
 	UFSHCD_CAN_QUEUE	= 32,
 };
@@ -106,12 +138,79 @@ enum {
 #define ufshcd_clear_eh_in_progress(h) \
 	(h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
 
141#define ufshcd_set_ufs_dev_active(h) \
142 ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
143#define ufshcd_set_ufs_dev_sleep(h) \
144 ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
145#define ufshcd_set_ufs_dev_poweroff(h) \
146 ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
147#define ufshcd_is_ufs_dev_active(h) \
148 ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
149#define ufshcd_is_ufs_dev_sleep(h) \
150 ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
151#define ufshcd_is_ufs_dev_poweroff(h) \
152 ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
153
154static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
155 {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
156 {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
157 {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
158 {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
159 {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
160 {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
161};
162
163static inline enum ufs_dev_pwr_mode
164ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
165{
166 return ufs_pm_lvl_states[lvl].dev_state;
167}
168
169static inline enum uic_link_state
170ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
171{
172 return ufs_pm_lvl_states[lvl].link_state;
173}
174
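A hedged usage sketch of the lookup helpers above: a power management level indexes one row of ufs_pm_lvl_states[], and the two accessors split that row into its device and link targets.

	/* e.g. level 3 maps to (UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE) */
	enum ufs_dev_pwr_mode dev_state = ufs_get_pm_lvl_to_dev_pwr_mode(lvl);
	enum uic_link_state link_state = ufs_get_pm_lvl_to_link_pwr_state(lvl);
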
 static void ufshcd_tmc_handler(struct ufs_hba *hba);
 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
-static int ufshcd_read_sdev_qdepth(struct ufs_hba *hba,
-				struct scsi_device *sdev);
+static void ufshcd_hba_exit(struct ufs_hba *hba);
+static int ufshcd_probe_hba(struct ufs_hba *hba);
181static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
182 bool skip_ref_clk);
183static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
184static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
185static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
186static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
187static irqreturn_t ufshcd_intr(int irq, void *__hba);
188static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
189 struct ufs_pa_layer_attr *desired_pwr_mode);
190
191static inline int ufshcd_enable_irq(struct ufs_hba *hba)
192{
193 int ret = 0;
194
195 if (!hba->is_irq_enabled) {
196 ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
197 hba);
198 if (ret)
199 dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
200 __func__, ret);
201 hba->is_irq_enabled = true;
202 }
203
204 return ret;
205}
206
207static inline void ufshcd_disable_irq(struct ufs_hba *hba)
208{
209 if (hba->is_irq_enabled) {
210 free_irq(hba->irq, hba);
211 hba->is_irq_enabled = false;
212 }
213}
 
 /*
  * ufshcd_wait_for_register - wait for register value to change
@@ -175,13 +274,14 @@ static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
 /**
  * ufshcd_is_device_present - Check if any device connected to
  * the host controller
- * @reg_hcs - host controller status register value
+ * @hba: pointer to adapter instance
  *
  * Returns 1 if device present, 0 if no device detected
  */
-static inline int ufshcd_is_device_present(u32 reg_hcs)
+static inline int ufshcd_is_device_present(struct ufs_hba *hba)
 {
-	return (DEVICE_PRESENT & reg_hcs) ? 1 : 0;
+	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
+						DEVICE_PRESENT) ? 1 : 0;
 }
 
 /**
@@ -413,6 +513,265 @@ static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
 	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
 }
 
516static void ufshcd_ungate_work(struct work_struct *work)
517{
518 int ret;
519 unsigned long flags;
520 struct ufs_hba *hba = container_of(work, struct ufs_hba,
521 clk_gating.ungate_work);
522
523 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
524
525 spin_lock_irqsave(hba->host->host_lock, flags);
526 if (hba->clk_gating.state == CLKS_ON) {
527 spin_unlock_irqrestore(hba->host->host_lock, flags);
528 goto unblock_reqs;
529 }
530
531 spin_unlock_irqrestore(hba->host->host_lock, flags);
532 ufshcd_setup_clocks(hba, true);
533
534 /* Exit from hibern8 */
535 if (ufshcd_can_hibern8_during_gating(hba)) {
536 /* Prevent gating in this path */
537 hba->clk_gating.is_suspended = true;
538 if (ufshcd_is_link_hibern8(hba)) {
539 ret = ufshcd_uic_hibern8_exit(hba);
540 if (ret)
541 dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
542 __func__, ret);
543 else
544 ufshcd_set_link_active(hba);
545 }
546 hba->clk_gating.is_suspended = false;
547 }
548unblock_reqs:
549 if (ufshcd_is_clkscaling_enabled(hba))
550 devfreq_resume_device(hba->devfreq);
551 scsi_unblock_requests(hba->host);
552}
553
554/**
555 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
556 * Also, exit from hibern8 mode and set the link as active.
557 * @hba: per adapter instance
558 * @async: This indicates whether caller should ungate clocks asynchronously.
559 */
560int ufshcd_hold(struct ufs_hba *hba, bool async)
561{
562 int rc = 0;
563 unsigned long flags;
564
565 if (!ufshcd_is_clkgating_allowed(hba))
566 goto out;
567 spin_lock_irqsave(hba->host->host_lock, flags);
568 hba->clk_gating.active_reqs++;
569
570start:
571 switch (hba->clk_gating.state) {
572 case CLKS_ON:
573 break;
574 case REQ_CLKS_OFF:
575 if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
576 hba->clk_gating.state = CLKS_ON;
577 break;
578 }
579 /*
580		 * If we are here, it means gating work is either done or
581 * currently running. Hence, fall through to cancel gating
582 * work and to enable clocks.
583 */
584 case CLKS_OFF:
585 scsi_block_requests(hba->host);
586 hba->clk_gating.state = REQ_CLKS_ON;
587 schedule_work(&hba->clk_gating.ungate_work);
588 /*
589 * fall through to check if we should wait for this
590 * work to be done or not.
591 */
592 case REQ_CLKS_ON:
593 if (async) {
594 rc = -EAGAIN;
595 hba->clk_gating.active_reqs--;
596 break;
597 }
598
599 spin_unlock_irqrestore(hba->host->host_lock, flags);
600 flush_work(&hba->clk_gating.ungate_work);
601 /* Make sure state is CLKS_ON before returning */
602 spin_lock_irqsave(hba->host->host_lock, flags);
603 goto start;
604 default:
605 dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
606 __func__, hba->clk_gating.state);
607 break;
608 }
609 spin_unlock_irqrestore(hba->host->host_lock, flags);
610out:
611 return rc;
612}
613
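A minimal sketch of the intended calling pattern (the wrapper function below is hypothetical): every path that touches controller registers brackets the access with ufshcd_hold()/ufshcd_release(), so the gate work can only turn clocks off once active_reqs drops back to zero.

	static int example_access(struct ufs_hba *hba)	/* hypothetical caller */
	{
		int err;

		err = ufshcd_hold(hba, false);	/* sleeps until CLKS_ON unless async */
		if (err)
			return err;
		/* ... safe to access host registers here ... */
		ufshcd_release(hba);		/* may arm the delayed gate_work */
		return 0;
	}
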
614static void ufshcd_gate_work(struct work_struct *work)
615{
616 struct ufs_hba *hba = container_of(work, struct ufs_hba,
617 clk_gating.gate_work.work);
618 unsigned long flags;
619
620 spin_lock_irqsave(hba->host->host_lock, flags);
621 if (hba->clk_gating.is_suspended) {
622 hba->clk_gating.state = CLKS_ON;
623 goto rel_lock;
624 }
625
626 if (hba->clk_gating.active_reqs
627 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
628 || hba->lrb_in_use || hba->outstanding_tasks
629 || hba->active_uic_cmd || hba->uic_async_done)
630 goto rel_lock;
631
632 spin_unlock_irqrestore(hba->host->host_lock, flags);
633
634 /* put the link into hibern8 mode before turning off clocks */
635 if (ufshcd_can_hibern8_during_gating(hba)) {
636 if (ufshcd_uic_hibern8_enter(hba)) {
637 hba->clk_gating.state = CLKS_ON;
638 goto out;
639 }
640 ufshcd_set_link_hibern8(hba);
641 }
642
643 if (ufshcd_is_clkscaling_enabled(hba)) {
644 devfreq_suspend_device(hba->devfreq);
645 hba->clk_scaling.window_start_t = 0;
646 }
647
648 if (!ufshcd_is_link_active(hba))
649 ufshcd_setup_clocks(hba, false);
650 else
651 /* If link is active, device ref_clk can't be switched off */
652 __ufshcd_setup_clocks(hba, false, true);
653
654 /*
655	 * In case you are here to cancel this work, the gating state
656	 * would be marked as REQ_CLKS_ON. In this case, keep the state
657	 * as REQ_CLKS_ON, which would anyway imply that clocks are off
658	 * and a request to turn them on is pending. By doing it this way,
659	 * we keep the state machine intact and ultimately prevent the
660	 * cancel work from running multiple times when there are
661	 * new requests arriving before the current cancel work is done.
662 */
663 spin_lock_irqsave(hba->host->host_lock, flags);
664 if (hba->clk_gating.state == REQ_CLKS_OFF)
665 hba->clk_gating.state = CLKS_OFF;
666
667rel_lock:
668 spin_unlock_irqrestore(hba->host->host_lock, flags);
669out:
670 return;
671}
672
673/* host lock must be held before calling this variant */
674static void __ufshcd_release(struct ufs_hba *hba)
675{
676 if (!ufshcd_is_clkgating_allowed(hba))
677 return;
678
679 hba->clk_gating.active_reqs--;
680
681 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
682 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
683 || hba->lrb_in_use || hba->outstanding_tasks
684 || hba->active_uic_cmd || hba->uic_async_done)
685 return;
686
687 hba->clk_gating.state = REQ_CLKS_OFF;
688 schedule_delayed_work(&hba->clk_gating.gate_work,
689 msecs_to_jiffies(hba->clk_gating.delay_ms));
690}
691
692void ufshcd_release(struct ufs_hba *hba)
693{
694 unsigned long flags;
695
696 spin_lock_irqsave(hba->host->host_lock, flags);
697 __ufshcd_release(hba);
698 spin_unlock_irqrestore(hba->host->host_lock, flags);
699}
700
701static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
702 struct device_attribute *attr, char *buf)
703{
704 struct ufs_hba *hba = dev_get_drvdata(dev);
705
706 return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
707}
708
709static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
710 struct device_attribute *attr, const char *buf, size_t count)
711{
712 struct ufs_hba *hba = dev_get_drvdata(dev);
713 unsigned long flags, value;
714
715 if (kstrtoul(buf, 0, &value))
716 return -EINVAL;
717
718 spin_lock_irqsave(hba->host->host_lock, flags);
719 hba->clk_gating.delay_ms = value;
720 spin_unlock_irqrestore(hba->host->host_lock, flags);
721 return count;
722}
723
724static void ufshcd_init_clk_gating(struct ufs_hba *hba)
725{
726 if (!ufshcd_is_clkgating_allowed(hba))
727 return;
728
729 hba->clk_gating.delay_ms = 150;
730 INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
731 INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
732
733 hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
734 hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
735 sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
736 hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
737 hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
738 if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
739 dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
740}
741
742static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
743{
744 if (!ufshcd_is_clkgating_allowed(hba))
745 return;
746 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
747}
748
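The attribute registered above surfaces the gating hysteresis in sysfs, so the 150 ms default can be tuned at runtime with something like (the device path is hypothetical):

	echo 200 > /sys/devices/.../clkgate_delay_ms
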
749/* Must be called with host lock acquired */
750static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
751{
752 if (!ufshcd_is_clkscaling_enabled(hba))
753 return;
754
755 if (!hba->clk_scaling.is_busy_started) {
756 hba->clk_scaling.busy_start_t = ktime_get();
757 hba->clk_scaling.is_busy_started = true;
758 }
759}
760
761static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
762{
763 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
764
765 if (!ufshcd_is_clkscaling_enabled(hba))
766 return;
767
768 if (!hba->outstanding_reqs && scaling->is_busy_started) {
769 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
770 scaling->busy_start_t));
771 scaling->busy_start_t = ktime_set(0, 0);
772 scaling->is_busy_started = false;
773 }
774}
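Between these two helpers, tot_busy_t accumulates only the time during which at least one request was outstanding; a devfreq governor can then estimate utilization for a polling window as, roughly (a sketch of the arithmetic, not code from this patch):

	busy_pct = scaling->tot_busy_t * 100 /
		   ktime_to_us(ktime_sub(ktime_get(), scaling->window_start_t));
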
 /**
  * ufshcd_send_command - Send SCSI or device management commands
  * @hba: per adapter instance
@@ -421,6 +780,7 @@ static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
 static inline
 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
 {
+	ufshcd_clk_scaling_start_busy(hba);
 	__set_bit(task_tag, &hba->outstanding_reqs);
 	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 }
@@ -576,15 +936,12 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
  * @uic_cmd: UIC command
  *
  * Identical to ufshcd_send_uic_cmd() except mutex. Must be called
- * with mutex held.
+ * with mutex held and host_lock locked.
  * Returns 0 only if success.
  */
 static int
 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 {
-	int ret;
-	unsigned long flags;
-
 	if (!ufshcd_ready_for_uic_cmd(hba)) {
 		dev_err(hba->dev,
 			"Controller not ready to accept UIC commands\n");
@@ -593,13 +950,9 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 
 	init_completion(&uic_cmd->done);
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
 	ufshcd_dispatch_uic_cmd(hba, uic_cmd);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-	ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
 
-	return ret;
+	return 0;
 }
 
 /**
@@ -613,11 +966,19 @@ static int
 ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 {
 	int ret;
+	unsigned long flags;
 
+	ufshcd_hold(hba, false);
 	mutex_lock(&hba->uic_cmd_mutex);
+	spin_lock_irqsave(hba->host->host_lock, flags);
 	ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	if (!ret)
+		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
+
 	mutex_unlock(&hba->uic_cmd_mutex);
 
+	ufshcd_release(hba);
 	return ret;
 }
623 984
@@ -867,6 +1228,32 @@ static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 	return ret;
 }
 
1231/*
1232 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
1233 * @scsi_lun: scsi LUN id
1234 *
1235 * Returns UPIU LUN id
1236 */
1237static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
1238{
1239 if (scsi_is_wlun(scsi_lun))
1240 return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
1241 | UFS_UPIU_WLUN_ID;
1242 else
1243 return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
1244}
1245
1246/**
1247 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
1248 * @upiu_wlun_id: UPIU W-LUN id
1249 *
1250 * Returns SCSI W-LUN id
1251 */
1252static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
1253{
1254 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
1255}
1256
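A worked example of the two mappings, assuming the conventional definitions (UFS_UPIU_WLUN_ID as the UPIU well-known-LUN flag bit 0x80, SCSI_W_LUN_BASE as 0xc100 per the SAM W-LUN convention): the SCSI REPORT LUNS W-LUN 0xc101 masks down to unit number 0x01 and gains the UPIU W-LUN bit, giving UPIU LUN 0x81; ufshcd_upiu_wlun_to_scsi_wlun() inverts that, 0x81 -> 0xc101.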
 /**
  * ufshcd_queuecommand - main entry point for SCSI requests
  * @cmd: command from SCSI Midlayer
@@ -918,6 +1305,14 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 		goto out;
 	}
 
1308 err = ufshcd_hold(hba, true);
1309 if (err) {
1310 err = SCSI_MLQUEUE_HOST_BUSY;
1311 clear_bit_unlock(tag, &hba->lrb_in_use);
1312 goto out;
1313 }
1314 WARN_ON(hba->clk_gating.state != CLKS_ON);
1315
 	lrbp = &hba->lrb[tag];
 
 	WARN_ON(lrbp->cmd);
@@ -925,7 +1320,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 	lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
 	lrbp->sense_buffer = cmd->sense_buffer;
 	lrbp->task_tag = tag;
-	lrbp->lun = cmd->device->lun;
+	lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
 	lrbp->intr_cmd = false;
 	lrbp->command_type = UTP_CMD_TYPE_SCSI;
 
@@ -1193,6 +1588,7 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 
 	BUG_ON(!hba);
 
+	ufshcd_hold(hba, false);
 	mutex_lock(&hba->dev_cmd.lock);
 	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
 			selector);
@@ -1236,6 +1632,7 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 
 out_unlock:
 	mutex_unlock(&hba->dev_cmd.lock);
+	ufshcd_release(hba);
 	return err;
 }
 
@@ -1259,6 +1656,7 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 
 	BUG_ON(!hba);
 
+	ufshcd_hold(hba, false);
 	if (!attr_val) {
 		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
 				__func__, opcode);
@@ -1298,6 +1696,7 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 out_unlock:
 	mutex_unlock(&hba->dev_cmd.lock);
 out:
+	ufshcd_release(hba);
 	return err;
 }
 
@@ -1325,6 +1724,7 @@ static int ufshcd_query_descriptor(struct ufs_hba *hba,
 
 	BUG_ON(!hba);
 
+	ufshcd_hold(hba, false);
 	if (!desc_buf) {
 		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
 				__func__, opcode);
@@ -1374,10 +1774,120 @@ static int ufshcd_query_descriptor(struct ufs_hba *hba,
 out_unlock:
 	mutex_unlock(&hba->dev_cmd.lock);
 out:
+	ufshcd_release(hba);
 	return err;
 }
 
 /**
1782 * ufshcd_read_desc_param - read the specified descriptor parameter
1783 * @hba: Pointer to adapter instance
1784 * @desc_id: descriptor idn value
1785 * @desc_index: descriptor index
1786 * @param_offset: offset of the parameter to read
1787 * @param_read_buf: pointer to buffer where parameter would be read
1788 * @param_size: sizeof(param_read_buf)
1789 *
1790 * Return 0 in case of success, non-zero otherwise
1791 */
1792static int ufshcd_read_desc_param(struct ufs_hba *hba,
1793 enum desc_idn desc_id,
1794 int desc_index,
1795 u32 param_offset,
1796 u8 *param_read_buf,
1797 u32 param_size)
1798{
1799 int ret;
1800 u8 *desc_buf;
1801 u32 buff_len;
1802 bool is_kmalloc = true;
1803
1804 /* safety checks */
1805 if (desc_id >= QUERY_DESC_IDN_MAX)
1806 return -EINVAL;
1807
1808 buff_len = ufs_query_desc_max_size[desc_id];
1809 if ((param_offset + param_size) > buff_len)
1810 return -EINVAL;
1811
1812 if (!param_offset && (param_size == buff_len)) {
1813 /* memory space already available to hold full descriptor */
1814 desc_buf = param_read_buf;
1815 is_kmalloc = false;
1816 } else {
1817 /* allocate memory to hold full descriptor */
1818 desc_buf = kmalloc(buff_len, GFP_KERNEL);
1819 if (!desc_buf)
1820 return -ENOMEM;
1821 }
1822
1823 ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
1824 desc_id, desc_index, 0, desc_buf,
1825 &buff_len);
1826
1827 if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
1828 (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
1829 ufs_query_desc_max_size[desc_id])
1830 || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
1831 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
1832 __func__, desc_id, param_offset, buff_len, ret);
1833 if (!ret)
1834 ret = -EINVAL;
1835
1836 goto out;
1837 }
1838
1839 if (is_kmalloc)
1840 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
1841out:
1842 if (is_kmalloc)
1843 kfree(desc_buf);
1844 return ret;
1845}
1846
1847static inline int ufshcd_read_desc(struct ufs_hba *hba,
1848 enum desc_idn desc_id,
1849 int desc_index,
1850 u8 *buf,
1851 u32 size)
1852{
1853 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
1854}
1855
1856static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
1857 u8 *buf,
1858 u32 size)
1859{
1860 return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
1861}
1862
1863/**
1864 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
1865 * @hba: Pointer to adapter instance
1866 * @lun: lun id
1867 * @param_offset: offset of the parameter to read
1868 * @param_read_buf: pointer to buffer where parameter would be read
1869 * @param_size: sizeof(param_read_buf)
1870 *
1871 * Return 0 in case of success, non-zero otherwise
1872 */
1873static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
1874 int lun,
1875 enum unit_desc_param param_offset,
1876 u8 *param_read_buf,
1877 u32 param_size)
1878{
1879 /*
1880 * Unit descriptors are only available for general purpose LUs (LUN id
1881 * from 0 to 7) and RPMB Well known LU.
1882 */
1883 if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
1884 return -EOPNOTSUPP;
1885
1886 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
1887 param_offset, param_read_buf, param_size);
1888}
1889
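As a usage sketch (hedged; it mirrors what ufshcd_set_queue_depth() later in this patch does), reading a single byte such as the LU queue depth reduces to:

	u8 lun_qdepth;

	ret = ufshcd_read_unit_desc_param(hba, lun, UNIT_DESC_PARAM_LU_Q_DEPTH,
					  &lun_qdepth, sizeof(lun_qdepth));
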
 /**
  * ufshcd_memory_alloc - allocate memory for host memory space data structures
  * @hba: per adapter instance
  *
@@ -1621,44 +2131,54 @@ out:
 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
 
 /**
- * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
- * using DME_SET primitives.
+ * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
+ * state) and waits for it to take effect.
+ *
  * @hba: per adapter instance
- * @mode: power mode value
+ * @cmd: UIC command to execute
+ *
+ * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
+ * DME_HIBERNATE_EXIT commands take some time to take effect on both host
+ * and device UniPro link, and hence their final completion would be indicated
+ * by dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS)
+ * in addition to the normal UIC command completion Status (UCCS). This function
+ * only returns after the relevant status bits indicate the completion.
  *
  * Returns 0 on success, non-zero value on failure
  */
-static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
+static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
 {
-	struct uic_command uic_cmd = {0};
-	struct completion pwr_done;
+	struct completion uic_async_done;
 	unsigned long flags;
 	u8 status;
 	int ret;
 
-	uic_cmd.command = UIC_CMD_DME_SET;
-	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
-	uic_cmd.argument3 = mode;
-	init_completion(&pwr_done);
-
 	mutex_lock(&hba->uic_cmd_mutex);
+	init_completion(&uic_async_done);
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	hba->pwr_done = &pwr_done;
+	hba->uic_async_done = &uic_async_done;
+	ret = __ufshcd_send_uic_cmd(hba, cmd);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
-	ret = __ufshcd_send_uic_cmd(hba, &uic_cmd);
 	if (ret) {
 		dev_err(hba->dev,
-			"pwr mode change with mode 0x%x uic error %d\n",
-			mode, ret);
+			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
+			cmd->command, cmd->argument3, ret);
+		goto out;
+	}
+	ret = ufshcd_wait_for_uic_cmd(hba, cmd);
+	if (ret) {
+		dev_err(hba->dev,
+			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
+			cmd->command, cmd->argument3, ret);
 		goto out;
 	}
 
-	if (!wait_for_completion_timeout(hba->pwr_done,
+	if (!wait_for_completion_timeout(hba->uic_async_done,
 					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
 		dev_err(hba->dev,
-			"pwr mode change with mode 0x%x completion timeout\n",
-			mode);
+			"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
+			cmd->command, cmd->argument3);
 		ret = -ETIMEDOUT;
 		goto out;
 	}
@@ -1666,53 +2186,144 @@ static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
 	status = ufshcd_get_upmcrs(hba);
 	if (status != PWR_LOCAL) {
 		dev_err(hba->dev,
-			"pwr mode change failed, host upmcrs:0x%x\n",
-			status);
+			"pwr ctrl cmd 0x%0x failed, host upmcrs:0x%x\n",
+			cmd->command, status);
 		ret = (status != PWR_OK) ? status : -1;
 	}
 out:
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	hba->pwr_done = NULL;
+	hba->uic_async_done = NULL;
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	mutex_unlock(&hba->uic_cmd_mutex);
+
 	return ret;
 }
 
 /**
- * ufshcd_config_max_pwr_mode - Set & Change power mode with
- * maximum capability attribute information.
+ * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
+ * using DME_SET primitives.
  * @hba: per adapter instance
+ * @mode: power mode value
  *
  * Returns 0 on success, non-zero value on failure
  */
-static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba)
+static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
 {
-	enum {RX = 0, TX = 1};
-	u32 lanes[] = {1, 1};
-	u32 gear[] = {1, 1};
-	u8 pwr[] = {FASTAUTO_MODE, FASTAUTO_MODE};
+	struct uic_command uic_cmd = {0};
 	int ret;
 
2215 uic_cmd.command = UIC_CMD_DME_SET;
2216 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
2217 uic_cmd.argument3 = mode;
2218 ufshcd_hold(hba, false);
2219 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2220 ufshcd_release(hba);
2221
2222 return ret;
2223}
2224
2225static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
2226{
2227 struct uic_command uic_cmd = {0};
2228
2229 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
2230
2231 return ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2232}
2233
2234static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
2235{
2236 struct uic_command uic_cmd = {0};
2237 int ret;
2238
2239 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
2240 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
2241 if (ret) {
2242 ufshcd_set_link_off(hba);
2243 ret = ufshcd_host_reset_and_restore(hba);
2244 }
2245
2246 return ret;
2247}
2248
2249/**
2250 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
2251 * @hba: per-adapter instance
2252 */
2253static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
2254{
2255 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
2256
2257 if (hba->max_pwr_info.is_valid)
2258 return 0;
2259
2260 pwr_info->pwr_tx = FASTAUTO_MODE;
2261 pwr_info->pwr_rx = FASTAUTO_MODE;
2262 pwr_info->hs_rate = PA_HS_MODE_B;
2263
 	/* Get the connected lane count */
-	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes[RX]);
-	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &lanes[TX]);
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
+			&pwr_info->lane_rx);
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
+			&pwr_info->lane_tx);
2269
2270 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
2271 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
2272 __func__,
2273 pwr_info->lane_rx,
2274 pwr_info->lane_tx);
2275 return -EINVAL;
2276 }
 
 	/*
 	 * First, get the maximum gears of HS speed.
 	 * If a zero value, it means there is no HSGEAR capability.
 	 * Then, get the maximum gears of PWM speed.
 	 */
-	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[RX]);
-	if (!gear[RX]) {
-		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), &gear[RX]);
-		pwr[RX] = SLOWAUTO_MODE;
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
+	if (!pwr_info->gear_rx) {
+		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+				&pwr_info->gear_rx);
+		if (!pwr_info->gear_rx) {
+			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
+				__func__, pwr_info->gear_rx);
+			return -EINVAL;
+		}
+		pwr_info->pwr_rx = SLOWAUTO_MODE;
 	}
 
-	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[TX]);
-	if (!gear[TX]) {
+	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
+			&pwr_info->gear_tx);
+	if (!pwr_info->gear_tx) {
 		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
-				    &gear[TX]);
-		pwr[TX] = SLOWAUTO_MODE;
+				&pwr_info->gear_tx);
+		if (!pwr_info->gear_tx) {
+			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
+				__func__, pwr_info->gear_tx);
+			return -EINVAL;
+		}
+		pwr_info->pwr_tx = SLOWAUTO_MODE;
+	}
2307
2308 hba->max_pwr_info.is_valid = true;
2309 return 0;
2310}
2311
2312static int ufshcd_change_power_mode(struct ufs_hba *hba,
2313 struct ufs_pa_layer_attr *pwr_mode)
2314{
2315 int ret;
2316
2317 /* if already configured to the requested pwr_mode */
2318 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
2319 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
2320 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
2321 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
2322 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
2323 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
2324 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
2325 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
2326 return 0;
 	}
 
 	/*
@@ -1721,23 +2332,67 @@ static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba)
 	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
 	 * - PA_HSSERIES
 	 */
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), gear[RX]);
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), lanes[RX]);
-	if (pwr[RX] == FASTAUTO_MODE)
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
+			pwr_mode->lane_rx);
+	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
+			pwr_mode->pwr_rx == FAST_MODE)
 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
+	else
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
 
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), gear[TX]);
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), lanes[TX]);
-	if (pwr[TX] == FASTAUTO_MODE)
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
+			pwr_mode->lane_tx);
+	if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
+			pwr_mode->pwr_tx == FAST_MODE)
 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
+	else
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
 
-	if (pwr[RX] == FASTAUTO_MODE || pwr[TX] == FASTAUTO_MODE)
-		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), PA_HS_MODE_B);
+	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
+			pwr_mode->pwr_tx == FASTAUTO_MODE ||
+			pwr_mode->pwr_rx == FAST_MODE ||
+			pwr_mode->pwr_tx == FAST_MODE)
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
+				pwr_mode->hs_rate);
 
-	ret = ufshcd_uic_change_pwr_mode(hba, pwr[RX] << 4 | pwr[TX]);
-	if (ret)
+	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
+			| pwr_mode->pwr_tx);
+
+	if (ret) {
 		dev_err(hba->dev,
-			"pwr_mode: power mode change failed %d\n", ret);
+			"%s: power mode change failed %d\n", __func__, ret);
2366 } else {
2367 if (hba->vops && hba->vops->pwr_change_notify)
2368 hba->vops->pwr_change_notify(hba,
2369 POST_CHANGE, NULL, pwr_mode);
2370
2371 memcpy(&hba->pwr_info, pwr_mode,
2372 sizeof(struct ufs_pa_layer_attr));
2373 }
2374
2375 return ret;
2376}
2377
2378/**
2379 * ufshcd_config_pwr_mode - configure a new power mode
2380 * @hba: per-adapter instance
2381 * @desired_pwr_mode: desired power configuration
2382 */
2383static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
2384 struct ufs_pa_layer_attr *desired_pwr_mode)
2385{
2386 struct ufs_pa_layer_attr final_params = { 0 };
2387 int ret;
2388
2389 if (hba->vops && hba->vops->pwr_change_notify)
2390 hba->vops->pwr_change_notify(hba,
2391 PRE_CHANGE, desired_pwr_mode, &final_params);
2392 else
2393 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
2394
2395 ret = ufshcd_change_power_mode(hba, &final_params);
 
 	return ret;
 }
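A hedged sketch of a caller: build the desired ufs_pa_layer_attr (the field values here are purely illustrative) and let ufshcd_config_pwr_mode() run it past the variant hooks before it is programmed.

	struct ufs_pa_layer_attr new_pwr = {
		.gear_rx = 1, .gear_tx = 1,	/* illustrative gear */
		.lane_rx = 1, .lane_tx = 1,
		.pwr_rx = FASTAUTO_MODE, .pwr_tx = FASTAUTO_MODE,
		.hs_rate = PA_HS_MODE_B,
	};

	ret = ufshcd_config_pwr_mode(hba, &new_pwr);
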
@@ -1798,11 +2453,10 @@ out:
  * @hba: per adapter instance
  *
  * To bring UFS host controller to operational state,
- * 1. Check if device is present
- * 2. Enable required interrupts
- * 3. Configure interrupt aggregation
- * 4. Program UTRL and UTMRL base address
- * 5. Configure run-stop-registers
+ * 1. Enable required interrupts
+ * 2. Configure interrupt aggregation
+ * 3. Program UTRL and UTMRL base address
+ * 4. Configure run-stop-registers
  *
  * Returns 0 on success, non-zero value on failure
  */
@@ -1811,14 +2465,6 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
 	int err = 0;
 	u32 reg;
 
-	/* check if device present */
-	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
-	if (!ufshcd_is_device_present(reg)) {
-		dev_err(hba->dev, "cc: Device not present\n");
-		err = -ENXIO;
-		goto out;
-	}
-
 	/* Enable required interrupts */
 	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
 
@@ -1839,6 +2485,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
 	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
 	 * DEI, HEI bits must be 0
 	 */
+	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
 	if (!(ufshcd_get_lists_status(reg))) {
 		ufshcd_enable_run_stop_reg(hba);
 	} else {
@@ -1885,6 +2532,12 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
 		msleep(5);
 	}
 
+	/* UniPro link is disabled at this point */
+	ufshcd_set_link_off(hba);
+
+	if (hba->vops && hba->vops->hce_enable_notify)
+		hba->vops->hce_enable_notify(hba, PRE_CHANGE);
+
 	/* start controller initialization sequence */
 	ufshcd_hba_start(hba);
 
@@ -1912,6 +2565,13 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
 		}
 		msleep(5);
 	}
+
+	/* enable UIC related interrupts */
+	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
+
+	if (hba->vops && hba->vops->hce_enable_notify)
+		hba->vops->hce_enable_notify(hba, POST_CHANGE);
+
 	return 0;
 }
 
@@ -1924,16 +2584,42 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
 static int ufshcd_link_startup(struct ufs_hba *hba)
 {
 	int ret;
+	int retries = DME_LINKSTARTUP_RETRIES;
 
-	/* enable UIC related interrupts */
-	ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
+	do {
+		if (hba->vops && hba->vops->link_startup_notify)
+			hba->vops->link_startup_notify(hba, PRE_CHANGE);
+
+		ret = ufshcd_dme_link_startup(hba);
+
+		/* check if device is detected by inter-connect layer */
+		if (!ret && !ufshcd_is_device_present(hba)) {
+			dev_err(hba->dev, "%s: Device not present\n", __func__);
+			ret = -ENXIO;
+			goto out;
+		}
+
+		/*
+		 * DME link lost indication is only received when link is up,
+		 * but we can't be sure if the link is up until link startup
+		 * succeeds. So reset the local Uni-Pro and try again.
+		 */
+		if (ret && ufshcd_hba_enable(hba))
+			goto out;
+	} while (ret && retries--);
 
-	ret = ufshcd_dme_link_startup(hba);
 	if (ret)
+		/* failed to get the link up... retire */
 		goto out;
 
-	ret = ufshcd_make_hba_operational(hba);
+	/* Include any host controller configuration via UIC commands */
+	if (hba->vops && hba->vops->link_startup_notify) {
+		ret = hba->vops->link_startup_notify(hba, POST_CHANGE);
+		if (ret)
+			goto out;
+	}
 
+	ret = ufshcd_make_hba_operational(hba);
 out:
 	if (ret)
 		dev_err(hba->dev, "link startup failed %d\n", ret);
@@ -1955,6 +2641,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
 	int err = 0;
 	int retries;
 
+	ufshcd_hold(hba, false);
 	mutex_lock(&hba->dev_cmd.lock);
 	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
 		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
@@ -1966,6 +2653,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
 		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
 	}
 	mutex_unlock(&hba->dev_cmd.lock);
+	ufshcd_release(hba);
 
 	if (err)
 		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
@@ -1973,6 +2661,100 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
 }
 
 /**
2664 * ufshcd_set_queue_depth - set lun queue depth
2665 * @sdev: pointer to SCSI device
2666 *
2667 * Read bLUQueueDepth value and activate scsi tagged command
2668 * queueing. For WLUN, queue depth is set to 1. For best-effort
2669 * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
2670 * value that host can queue.
2671 */
2672static void ufshcd_set_queue_depth(struct scsi_device *sdev)
2673{
2674 int ret = 0;
2675 u8 lun_qdepth;
2676 struct ufs_hba *hba;
2677
2678 hba = shost_priv(sdev->host);
2679
2680 lun_qdepth = hba->nutrs;
2681 ret = ufshcd_read_unit_desc_param(hba,
2682 ufshcd_scsi_to_upiu_lun(sdev->lun),
2683 UNIT_DESC_PARAM_LU_Q_DEPTH,
2684 &lun_qdepth,
2685 sizeof(lun_qdepth));
2686
2687	/* Some WLUNs don't support unit descriptors */
2688 if (ret == -EOPNOTSUPP)
2689 lun_qdepth = 1;
2690 else if (!lun_qdepth)
2691 /* eventually, we can figure out the real queue depth */
2692 lun_qdepth = hba->nutrs;
2693 else
2694 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
2695
2696 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
2697 __func__, lun_qdepth);
2698 scsi_activate_tcq(sdev, lun_qdepth);
2699}
2700
2701/*
2702 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
2703 * @hba: per-adapter instance
2704 * @lun: UFS device lun id
2705 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
2706 *
2707 * Returns 0 in case of success and b_lu_write_protect status would be returned
2708 * in the @b_lu_write_protect parameter.
2709 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
2710 * Returns -EINVAL in case of invalid parameters passed to this function.
2711 */
2712static int ufshcd_get_lu_wp(struct ufs_hba *hba,
2713 u8 lun,
2714 u8 *b_lu_write_protect)
2715{
2716 int ret;
2717
2718 if (!b_lu_write_protect)
2719 ret = -EINVAL;
2720 /*
2721 * According to UFS device spec, RPMB LU can't be write
2722 * protected so skip reading bLUWriteProtect parameter for
2723 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
2724 */
2725 else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
2726 ret = -ENOTSUPP;
2727 else
2728 ret = ufshcd_read_unit_desc_param(hba,
2729 lun,
2730 UNIT_DESC_PARAM_LU_WR_PROTECT,
2731 b_lu_write_protect,
2732 sizeof(*b_lu_write_protect));
2733 return ret;
2734}
2735
2736/**
2737 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
2738 * status
2739 * @hba: per-adapter instance
2740 * @sdev: pointer to SCSI device
2741 *
2742 */
2743static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
2744 struct scsi_device *sdev)
2745{
2746 if (hba->dev_info.f_power_on_wp_en &&
2747 !hba->dev_info.is_lu_power_on_wp) {
2748 u8 b_lu_write_protect;
2749
2750 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
2751 &b_lu_write_protect) &&
2752 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
2753 hba->dev_info.is_lu_power_on_wp = true;
2754 }
2755}
2756
 /**
  * ufshcd_slave_alloc - handle initial SCSI device configurations
  * @sdev: pointer to SCSI device
  *
@@ -1981,7 +2763,6 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
 static int ufshcd_slave_alloc(struct scsi_device *sdev)
 {
 	struct ufs_hba *hba;
-	int lun_qdepth;
 
 	hba = shost_priv(sdev->host);
 	sdev->tagged_supported = 1;
@@ -1996,16 +2777,10 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
 	/* REPORT SUPPORTED OPERATION CODES is not supported */
 	sdev->no_report_opcodes = 1;
 
-	lun_qdepth = ufshcd_read_sdev_qdepth(hba, sdev);
-	if (lun_qdepth <= 0)
-		/* eventually, we can figure out the real queue depth */
-		lun_qdepth = hba->nutrs;
-	else
-		lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
 
-	dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
-		__func__, lun_qdepth);
-	scsi_activate_tcq(sdev, lun_qdepth);
+	ufshcd_set_queue_depth(sdev);
+
+	ufshcd_get_lu_power_on_wp_status(hba, sdev);
 
 	return 0;
 }
@@ -2068,6 +2843,9 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)
 
 	hba = shost_priv(sdev->host);
 	scsi_deactivate_tcq(sdev, hba->nutrs);
+	/* Drop the reference as it won't be needed anymore */
+	if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN)
+		hba->sdev_ufs_device = NULL;
 }
 
 /**
@@ -2234,8 +3012,8 @@ static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
 		complete(&hba->active_uic_cmd->done);
 	}
 
-	if ((intr_status & UIC_POWER_MODE) && hba->pwr_done)
-		complete(hba->pwr_done);
+	if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
+		complete(hba->uic_async_done);
 }
 
 /**
@@ -2275,6 +3053,7 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
 		clear_bit_unlock(index, &hba->lrb_in_use);
 		/* Do not touch lrbp after scsi done */
 		cmd->scsi_done(cmd);
+		__ufshcd_release(hba);
 	} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
 		if (hba->dev_cmd.complete)
 			complete(hba->dev_cmd.complete);
@@ -2284,6 +3063,8 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
 	/* clear corresponding bits of completed commands */
 	hba->outstanding_reqs ^= completed_reqs;
 
+	ufshcd_clk_scaling_update_busy(hba);
+
 	/* we might have free'd some tags above */
 	wake_up(&hba->dev_cmd.tag_wq);
 }
@@ -2447,33 +3228,62 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
2447} 3228}
2448 3229
2449/** 3230/**
2450 * ufshcd_urgent_bkops - handle urgent bkops exception event 3231 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
2451 * @hba: per-adapter instance 3232 * @hba: per-adapter instance
3233 * @status: bkops_status value
2452 * 3234 *
2453 * Enable fBackgroundOpsEn flag in the device to permit background 3235 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
2454 * operations. 3236 * flag in the device to permit background operations if the device
 3237 * bkops_status is greater than or equal to the "status" argument passed to
 3238 * this function, disable otherwise.
3239 *
3240 * Returns 0 for success, non-zero in case of failure.
3241 *
3242 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
3243 * to know whether auto bkops is enabled or disabled after this function
3244 * returns control to it.
2455 */ 3245 */
2456static int ufshcd_urgent_bkops(struct ufs_hba *hba) 3246static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
3247 enum bkops_status status)
2457{ 3248{
2458 int err; 3249 int err;
2459 u32 status = 0; 3250 u32 curr_status = 0;
2460 3251
2461 err = ufshcd_get_bkops_status(hba, &status); 3252 err = ufshcd_get_bkops_status(hba, &curr_status);
2462 if (err) { 3253 if (err) {
2463 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n", 3254 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
2464 __func__, err); 3255 __func__, err);
2465 goto out; 3256 goto out;
3257 } else if (curr_status > BKOPS_STATUS_MAX) {
3258 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
3259 __func__, curr_status);
3260 err = -EINVAL;
3261 goto out;
2466 } 3262 }
2467 3263
2468 status = status & 0xF; 3264 if (curr_status >= status)
2469
2470 /* handle only if status indicates performance impact or critical */
2471 if (status >= BKOPS_STATUS_PERF_IMPACT)
2472 err = ufshcd_enable_auto_bkops(hba); 3265 err = ufshcd_enable_auto_bkops(hba);
3266 else
3267 err = ufshcd_disable_auto_bkops(hba);
2473out: 3268out:
2474 return err; 3269 return err;
2475} 3270}
2476 3271
3272/**
3273 * ufshcd_urgent_bkops - handle urgent bkops exception event
3274 * @hba: per-adapter instance
3275 *
3276 * Enable fBackgroundOpsEn flag in the device to permit background
3277 * operations.
3278 *
 3279 * Returns 0 if bkops is enabled, 1 if bkops is not enabled, and a negative
 3280 * error value for any other failure.
3281 */
3282static int ufshcd_urgent_bkops(struct ufs_hba *hba)
3283{
3284 return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
3285}
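
ufshcd_bkops_ctrl() turns the old one-shot check into a reusable threshold
knob. For instance, a caller that should tolerate background operations only
once the device reports them as critical could do the following
(BKOPS_STATUS_CRITICAL is assumed to be another member of the same
bkops_status enum as BKOPS_STATUS_PERF_IMPACT):

	/* hypothetical caller with a stricter threshold */
	err = ufshcd_bkops_ctrl(hba, BKOPS_STATUS_CRITICAL);
	if (err < 0)
		dev_err(hba->dev, "%s: bkops ctrl failed %d\n", __func__, err);
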
3286
2477static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status) 3287static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
2478{ 3288{
2479 return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, 3289 return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
@@ -2505,7 +3315,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
2505 status &= hba->ee_ctrl_mask; 3315 status &= hba->ee_ctrl_mask;
2506 if (status & MASK_EE_URGENT_BKOPS) { 3316 if (status & MASK_EE_URGENT_BKOPS) {
2507 err = ufshcd_urgent_bkops(hba); 3317 err = ufshcd_urgent_bkops(hba);
2508 if (err) 3318 if (err < 0)
2509 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n", 3319 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
2510 __func__, err); 3320 __func__, err);
2511 } 3321 }
@@ -2530,6 +3340,7 @@ static void ufshcd_err_handler(struct work_struct *work)
2530 hba = container_of(work, struct ufs_hba, eh_work); 3340 hba = container_of(work, struct ufs_hba, eh_work);
2531 3341
2532 pm_runtime_get_sync(hba->dev); 3342 pm_runtime_get_sync(hba->dev);
3343 ufshcd_hold(hba, false);
2533 3344
2534 spin_lock_irqsave(hba->host->host_lock, flags); 3345 spin_lock_irqsave(hba->host->host_lock, flags);
2535 if (hba->ufshcd_state == UFSHCD_STATE_RESET) { 3346 if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
@@ -2583,6 +3394,7 @@ static void ufshcd_err_handler(struct work_struct *work)
2583 3394
2584out: 3395out:
2585 scsi_unblock_requests(hba->host); 3396 scsi_unblock_requests(hba->host);
3397 ufshcd_release(hba);
2586 pm_runtime_put_sync(hba->dev); 3398 pm_runtime_put_sync(hba->dev);
2587} 3399}
2588 3400
@@ -2766,6 +3578,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
2766 * the maximum wait time is bounded by %TM_CMD_TIMEOUT. 3578 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
2767 */ 3579 */
2768 wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot)); 3580 wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
3581 ufshcd_hold(hba, false);
2769 3582
2770 spin_lock_irqsave(host->host_lock, flags); 3583 spin_lock_irqsave(host->host_lock, flags);
2771 task_req_descp = hba->utmrdl_base_addr; 3584 task_req_descp = hba->utmrdl_base_addr;
@@ -2785,7 +3598,10 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
2785 lun_id, task_tag); 3598 lun_id, task_tag);
2786 task_req_upiup->header.dword_1 = 3599 task_req_upiup->header.dword_1 =
2787 UPIU_HEADER_DWORD(0, tm_function, 0, 0); 3600 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
2788 3601 /*
 3602 * The host shall provide the same value for the LUN field in the basic
 3603 * header and for Input Parameter 1.
3604 */
2789 task_req_upiup->input_param1 = cpu_to_be32(lun_id); 3605 task_req_upiup->input_param1 = cpu_to_be32(lun_id);
2790 task_req_upiup->input_param2 = cpu_to_be32(task_id); 3606 task_req_upiup->input_param2 = cpu_to_be32(task_id);
2791 3607
@@ -2814,6 +3630,7 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
2814 ufshcd_put_tm_slot(hba, free_slot); 3630 ufshcd_put_tm_slot(hba, free_slot);
2815 wake_up(&hba->tm_tag_wq); 3631 wake_up(&hba->tm_tag_wq);
2816 3632
3633 ufshcd_release(hba);
2817 return err; 3634 return err;
2818} 3635}
2819 3636
@@ -2896,6 +3713,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
2896 hba = shost_priv(host); 3713 hba = shost_priv(host);
2897 tag = cmd->request->tag; 3714 tag = cmd->request->tag;
2898 3715
3716 ufshcd_hold(hba, false);
2899 /* If command is already aborted/completed, return SUCCESS */ 3717 /* If command is already aborted/completed, return SUCCESS */
2900 if (!(test_bit(tag, &hba->outstanding_reqs))) 3718 if (!(test_bit(tag, &hba->outstanding_reqs)))
2901 goto out; 3719 goto out;
@@ -2960,6 +3778,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
2960 3778
2961 clear_bit_unlock(tag, &hba->lrb_in_use); 3779 clear_bit_unlock(tag, &hba->lrb_in_use);
2962 wake_up(&hba->dev_cmd.tag_wq); 3780 wake_up(&hba->dev_cmd.tag_wq);
3781
2963out: 3782out:
2964 if (!err) { 3783 if (!err) {
2965 err = SUCCESS; 3784 err = SUCCESS;
@@ -2968,6 +3787,11 @@ out:
2968 err = FAILED; 3787 err = FAILED;
2969 } 3788 }
2970 3789
3790 /*
3791 * This ufshcd_release() corresponds to the original scsi cmd that got
3792 * aborted here (as we won't get any IRQ for it).
3793 */
3794 ufshcd_release(hba);
2971 return err; 3795 return err;
2972} 3796}
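
The ufshcd_hold()/ufshcd_release() calls threaded through this patch implement
reference-counted clock gating; the helper bodies live in an earlier hunk. The
pairing contract they assume, as a sketch:

	/*
	 * Every path that touches the controller brackets its work: hold()
	 * bumps a refcount and, if the clocks are gated, blocks until they
	 * are back on; the matching release() drops the count and lets a
	 * delayed gating work turn the clocks off once the host is idle.
	 */
	ufshcd_hold(hba, false);	/* false: OK to sleep for ungating */
	/* ... issue the request / access host registers ... */
	ufshcd_release(hba);
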
2973 3797
@@ -2984,7 +3808,6 @@ out:
2984static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) 3808static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
2985{ 3809{
2986 int err; 3810 int err;
2987 async_cookie_t cookie;
2988 unsigned long flags; 3811 unsigned long flags;
2989 3812
2990 /* Reset the host controller */ 3813 /* Reset the host controller */
@@ -2997,10 +3820,9 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
2997 goto out; 3820 goto out;
2998 3821
2999 /* Establish the link again and restore the device */ 3822 /* Establish the link again and restore the device */
3000 cookie = async_schedule(ufshcd_async_scan, hba); 3823 err = ufshcd_probe_hba(hba);
3001 /* wait for async scan to be completed */ 3824
3002 async_synchronize_cookie(++cookie); 3825 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
3003 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
3004 err = -EIO; 3826 err = -EIO;
3005out: 3827out:
3006 if (err) 3828 if (err)
@@ -3022,8 +3844,11 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
3022{ 3844{
3023 int err = 0; 3845 int err = 0;
3024 unsigned long flags; 3846 unsigned long flags;
3847 int retries = MAX_HOST_RESET_RETRIES;
3025 3848
3026 err = ufshcd_host_reset_and_restore(hba); 3849 do {
3850 err = ufshcd_host_reset_and_restore(hba);
3851 } while (err && --retries);
3027 3852
3028 /* 3853 /*
3029 * After reset the door-bell might be cleared, complete 3854 * After reset the door-bell might be cleared, complete
@@ -3051,6 +3876,7 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
3051 3876
3052 hba = shost_priv(cmd->device->host); 3877 hba = shost_priv(cmd->device->host);
3053 3878
3879 ufshcd_hold(hba, false);
3054 /* 3880 /*
3055 * Check if there is any race with fatal error handling. 3881 * Check if there is any race with fatal error handling.
3056 * If so, wait for it to complete. Even though fatal error 3882 * If so, wait for it to complete. Even though fatal error
@@ -3084,56 +3910,232 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
3084 ufshcd_clear_eh_in_progress(hba); 3910 ufshcd_clear_eh_in_progress(hba);
3085 spin_unlock_irqrestore(hba->host->host_lock, flags); 3911 spin_unlock_irqrestore(hba->host->host_lock, flags);
3086 3912
3913 ufshcd_release(hba);
3087 return err; 3914 return err;
3088} 3915}
3089 3916
3090/** 3917/**
3091 * ufshcd_read_sdev_qdepth - read the lun command queue depth 3918 * ufshcd_get_max_icc_level - calculate the ICC level
3092 * @hba: Pointer to adapter instance 3919 * @sup_curr_uA: max. current supported by the regulator
3093 * @sdev: pointer to SCSI device 3920 * @start_scan: row at the desc table to start scan from
3921 * @buff: power descriptor buffer
3094 * 3922 *
3095 * Return in case of success the lun's queue depth else error. 3923 * Returns calculated max ICC level for specific regulator
3096 */ 3924 */
3097static int ufshcd_read_sdev_qdepth(struct ufs_hba *hba, 3925static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
3098 struct scsi_device *sdev) 3926{
3927 int i;
3928 int curr_uA;
3929 u16 data;
3930 u16 unit;
3931
3932 for (i = start_scan; i >= 0; i--) {
3933 data = be16_to_cpu(*((u16 *)(buff + 2*i)));
3934 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
3935 ATTR_ICC_LVL_UNIT_OFFSET;
3936 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
3937 switch (unit) {
3938 case UFSHCD_NANO_AMP:
3939 curr_uA = curr_uA / 1000;
3940 break;
3941 case UFSHCD_MILI_AMP:
3942 curr_uA = curr_uA * 1000;
3943 break;
3944 case UFSHCD_AMP:
3945 curr_uA = curr_uA * 1000 * 1000;
3946 break;
3947 case UFSHCD_MICRO_AMP:
3948 default:
3949 break;
3950 }
3951 if (sup_curr_uA >= curr_uA)
3952 break;
3953 }
3954 if (i < 0) {
3955 i = 0;
 3956 pr_err("%s: Couldn't find valid icc_level, using %d", __func__, i);
3957 }
3958
3959 return (u32)i;
3960}
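
Each 16-bit power descriptor entry packs a unit code in its high bits and a
magnitude in its low bits; the loop normalizes everything to microamps and
walks down from the highest ICC level until the regulator budget covers the
entry. A worked example (the exact field layout comes from the masks in the
ufs headers):

	/*
	 * Say entry i decodes to unit == UFSHCD_MILI_AMP and value == 100:
	 * curr_uA = 100 * 1000 = 100000 uA. A regulator with
	 * max_uA >= 100000 supports ICC level i and the scan stops there;
	 * otherwise the loop keeps stepping down to less demanding levels.
	 */
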
3961
3962/**
 3963 * ufshcd_find_max_sup_active_icc_level - calculate the max supported active
 3964 * ICC level
 3965 * @hba: per-adapter instance
 3966 * @desc_buf: power descriptor buffer to extract ICC levels from
 3967 * @len: length of desc_buf
 3968 *
 3969 * Returns the calculated ICC level, or 0 if the regulators are not initialized.
3970 */
3971static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
3972 u8 *desc_buf, int len)
3973{
3974 u32 icc_level = 0;
3975
3976 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
3977 !hba->vreg_info.vccq2) {
3978 dev_err(hba->dev,
3979 "%s: Regulator capability was not set, actvIccLevel=%d",
3980 __func__, icc_level);
3981 goto out;
3982 }
3983
3984 if (hba->vreg_info.vcc)
3985 icc_level = ufshcd_get_max_icc_level(
3986 hba->vreg_info.vcc->max_uA,
3987 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
3988 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
3989
3990 if (hba->vreg_info.vccq)
3991 icc_level = ufshcd_get_max_icc_level(
3992 hba->vreg_info.vccq->max_uA,
3993 icc_level,
3994 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
3995
3996 if (hba->vreg_info.vccq2)
3997 icc_level = ufshcd_get_max_icc_level(
3998 hba->vreg_info.vccq2->max_uA,
3999 icc_level,
4000 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
4001out:
4002 return icc_level;
4003}
4004
4005static void ufshcd_init_icc_levels(struct ufs_hba *hba)
3099{ 4006{
3100 int ret; 4007 int ret;
3101 int buff_len = UNIT_DESC_MAX_SIZE; 4008 int buff_len = QUERY_DESC_POWER_MAX_SIZE;
3102 u8 desc_buf[UNIT_DESC_MAX_SIZE]; 4009 u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
3103 4010
3104 ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC, 4011 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
3105 QUERY_DESC_IDN_UNIT, sdev->lun, 0, desc_buf, &buff_len); 4012 if (ret) {
4013 dev_err(hba->dev,
4014 "%s: Failed reading power descriptor.len = %d ret = %d",
4015 __func__, buff_len, ret);
4016 return;
4017 }
4018
4019 hba->init_prefetch_data.icc_level =
4020 ufshcd_find_max_sup_active_icc_level(hba,
4021 desc_buf, buff_len);
4022 dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
4023 __func__, hba->init_prefetch_data.icc_level);
3106 4024
3107 if (ret || (buff_len < UNIT_DESC_PARAM_LU_Q_DEPTH)) { 4025 ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4026 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
4027 &hba->init_prefetch_data.icc_level);
4028
4029 if (ret)
3108 dev_err(hba->dev, 4030 dev_err(hba->dev,
3109 "%s:Failed reading unit descriptor. len = %d ret = %d" 4031 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
3110 , __func__, buff_len, ret); 4032 __func__, hba->init_prefetch_data.icc_level , ret);
3111 if (!ret)
3112 ret = -EINVAL;
3113 4033
4034}
4035
4036/**
4037 * ufshcd_scsi_add_wlus - Adds required W-LUs
4038 * @hba: per-adapter instance
4039 *
4040 * UFS device specification requires the UFS devices to support 4 well known
4041 * logical units:
4042 * "REPORT_LUNS" (address: 01h)
4043 * "UFS Device" (address: 50h)
4044 * "RPMB" (address: 44h)
4045 * "BOOT" (address: 30h)
 4046 * The UFS device's power management is controlled through the "POWER CONDITION"
 4047 * field of the SSU (START STOP UNIT) command, but this field takes effect only
 4048 * when the command is sent to the "UFS Device" well known logical unit. Hence
 4049 * we require a scsi_device instance representing this logical unit so that the
 4050 * UFS host driver can send the SSU command for power management.
 4051 *
 4052 * We also require a scsi_device instance for the "RPMB" (Replay Protected
 4053 * Memory Block) LU so that a user space process can control this LU. User
 4054 * space may also want access to the BOOT LU.
 4055 *
 4056 * This function adds scsi_device instances for all of the well known LUs above
 4057 * (except the "REPORT LUNS" LU).
4058 *
4059 * Returns zero on success (all required W-LUs are added successfully),
4060 * non-zero error value on failure (if failed to add any of the required W-LU).
4061 */
4062static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
4063{
4064 int ret = 0;
4065
4066 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
4067 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
4068 if (IS_ERR(hba->sdev_ufs_device)) {
4069 ret = PTR_ERR(hba->sdev_ufs_device);
4070 hba->sdev_ufs_device = NULL;
3114 goto out; 4071 goto out;
3115 } 4072 }
3116 4073
3117 ret = desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH] & 0xFF; 4074 hba->sdev_boot = __scsi_add_device(hba->host, 0, 0,
4075 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
4076 if (IS_ERR(hba->sdev_boot)) {
4077 ret = PTR_ERR(hba->sdev_boot);
4078 hba->sdev_boot = NULL;
4079 goto remove_sdev_ufs_device;
4080 }
4081
4082 hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
4083 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
4084 if (IS_ERR(hba->sdev_rpmb)) {
4085 ret = PTR_ERR(hba->sdev_rpmb);
4086 hba->sdev_rpmb = NULL;
4087 goto remove_sdev_boot;
4088 }
4089 goto out;
4090
4091remove_sdev_boot:
4092 scsi_remove_device(hba->sdev_boot);
4093remove_sdev_ufs_device:
4094 scsi_remove_device(hba->sdev_ufs_device);
3118out: 4095out:
3119 return ret; 4096 return ret;
3120} 4097}
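
ufshcd_upiu_wlun_to_scsi_wlun() is used above but not shown in this hunk; it
maps a UPIU well known LUN into the SAM well known LUN range so the SCSI
midlayer addresses it correctly. A plausible sketch, assuming the
SCSI_W_LUN_BASE constant from scsi/scsi.h and a UFS_UPIU_WLUN_ID flag bit in
the ufs headers:

	static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
	{
		/* drop the UPIU W-LUN flag, rebase into SAM's W-LUN space */
		return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
	}
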
3121 4098
3122/** 4099/**
3123 * ufshcd_async_scan - asynchronous execution for link startup 4100 * ufshcd_scsi_remove_wlus - Removes the W-LUs which were added by
3124 * @data: data pointer to pass to this function 4101 * ufshcd_scsi_add_wlus()
3125 * @cookie: cookie data 4102 * @hba: per-adapter instance
4103 *
3126 */ 4104 */
3127static void ufshcd_async_scan(void *data, async_cookie_t cookie) 4105static void ufshcd_scsi_remove_wlus(struct ufs_hba *hba)
4106{
4107 if (hba->sdev_ufs_device) {
4108 scsi_remove_device(hba->sdev_ufs_device);
4109 hba->sdev_ufs_device = NULL;
4110 }
4111
4112 if (hba->sdev_boot) {
4113 scsi_remove_device(hba->sdev_boot);
4114 hba->sdev_boot = NULL;
4115 }
4116
4117 if (hba->sdev_rpmb) {
4118 scsi_remove_device(hba->sdev_rpmb);
4119 hba->sdev_rpmb = NULL;
4120 }
4121}
4122
4123/**
4124 * ufshcd_probe_hba - probe hba to detect device and initialize
4125 * @hba: per-adapter instance
4126 *
4127 * Execute link-startup and verify device initialization
4128 */
4129static int ufshcd_probe_hba(struct ufs_hba *hba)
3128{ 4130{
3129 struct ufs_hba *hba = (struct ufs_hba *)data;
3130 int ret; 4131 int ret;
3131 4132
3132 ret = ufshcd_link_startup(hba); 4133 ret = ufshcd_link_startup(hba);
3133 if (ret) 4134 if (ret)
3134 goto out; 4135 goto out;
3135 4136
3136 ufshcd_config_max_pwr_mode(hba); 4137 /* UniPro link is active now */
4138 ufshcd_set_link_active(hba);
3137 4139
3138 ret = ufshcd_verify_dev_init(hba); 4140 ret = ufshcd_verify_dev_init(hba);
3139 if (ret) 4141 if (ret)
@@ -3143,16 +4145,77 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
3143 if (ret) 4145 if (ret)
3144 goto out; 4146 goto out;
3145 4147
4148 /* UFS device is also active now */
4149 ufshcd_set_ufs_dev_active(hba);
3146 ufshcd_force_reset_auto_bkops(hba); 4150 ufshcd_force_reset_auto_bkops(hba);
3147 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; 4151 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
4152 hba->wlun_dev_clr_ua = true;
4153
4154 if (ufshcd_get_max_pwr_mode(hba)) {
4155 dev_err(hba->dev,
4156 "%s: Failed getting max supported power mode\n",
4157 __func__);
4158 } else {
4159 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
4160 if (ret)
4161 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
4162 __func__, ret);
4163 }
4164
4165 /*
4166 * If we are in error handling context or in power management callbacks
4167 * context, no need to scan the host
4168 */
4169 if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
4170 bool flag;
4171
4172 /* clear any previous UFS device information */
4173 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
4174 if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
4175 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
4176 hba->dev_info.f_power_on_wp_en = flag;
4177
4178 if (!hba->is_init_prefetch)
4179 ufshcd_init_icc_levels(hba);
4180
4181 /* Add required well known logical units to scsi mid layer */
4182 if (ufshcd_scsi_add_wlus(hba))
4183 goto out;
3148 4184
3149 /* If we are in error handling context no need to scan the host */
3150 if (!ufshcd_eh_in_progress(hba)) {
3151 scsi_scan_host(hba->host); 4185 scsi_scan_host(hba->host);
3152 pm_runtime_put_sync(hba->dev); 4186 pm_runtime_put_sync(hba->dev);
3153 } 4187 }
4188
4189 if (!hba->is_init_prefetch)
4190 hba->is_init_prefetch = true;
4191
4192 /* Resume devfreq after UFS device is detected */
4193 if (ufshcd_is_clkscaling_enabled(hba))
4194 devfreq_resume_device(hba->devfreq);
4195
3154out: 4196out:
3155 return; 4197 /*
4198 * If we failed to initialize the device or the device is not
4199 * present, turn off the power/clocks etc.
4200 */
4201 if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
4202 pm_runtime_put_sync(hba->dev);
4203 ufshcd_hba_exit(hba);
4204 }
4205
4206 return ret;
4207}
4208
4209/**
4210 * ufshcd_async_scan - asynchronous execution for probing hba
4211 * @data: data pointer to pass to this function
4212 * @cookie: cookie data
4213 */
4214static void ufshcd_async_scan(void *data, async_cookie_t cookie)
4215{
4216 struct ufs_hba *hba = (struct ufs_hba *)data;
4217
4218 ufshcd_probe_hba(hba);
3156} 4219}
3157 4220
3158static struct scsi_host_template ufshcd_driver_template = { 4221static struct scsi_host_template ufshcd_driver_template = {
@@ -3171,70 +4234,956 @@ static struct scsi_host_template ufshcd_driver_template = {
3171 .sg_tablesize = SG_ALL, 4234 .sg_tablesize = SG_ALL,
3172 .cmd_per_lun = UFSHCD_CMD_PER_LUN, 4235 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
3173 .can_queue = UFSHCD_CAN_QUEUE, 4236 .can_queue = UFSHCD_CAN_QUEUE,
4237 .max_host_blocked = 1,
3174}; 4238};
3175 4239
4240static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
4241 int ua)
4242{
4243 int ret = 0;
4244 struct regulator *reg = vreg->reg;
4245 const char *name = vreg->name;
4246
4247 BUG_ON(!vreg);
4248
4249 ret = regulator_set_optimum_mode(reg, ua);
4250 if (ret >= 0) {
4251 /*
4252 * regulator_set_optimum_mode() returns new regulator
4253 * mode upon success.
4254 */
4255 ret = 0;
4256 } else {
4257 dev_err(dev, "%s: %s set optimum mode(ua=%d) failed, err=%d\n",
4258 __func__, name, ua, ret);
4259 }
4260
4261 return ret;
4262}
4263
4264static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
4265 struct ufs_vreg *vreg)
4266{
4267 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
4268}
4269
4270static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
4271 struct ufs_vreg *vreg)
4272{
4273 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
4274}
4275
4276static int ufshcd_config_vreg(struct device *dev,
4277 struct ufs_vreg *vreg, bool on)
4278{
4279 int ret = 0;
4280 struct regulator *reg = vreg->reg;
4281 const char *name = vreg->name;
4282 int min_uV, uA_load;
4283
4284 BUG_ON(!vreg);
4285
4286 if (regulator_count_voltages(reg) > 0) {
4287 min_uV = on ? vreg->min_uV : 0;
4288 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
4289 if (ret) {
4290 dev_err(dev, "%s: %s set voltage failed, err=%d\n",
4291 __func__, name, ret);
4292 goto out;
4293 }
4294
4295 uA_load = on ? vreg->max_uA : 0;
4296 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
4297 if (ret)
4298 goto out;
4299 }
4300out:
4301 return ret;
4302}
4303
4304static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
4305{
4306 int ret = 0;
4307
4308 if (!vreg || vreg->enabled)
4309 goto out;
4310
4311 ret = ufshcd_config_vreg(dev, vreg, true);
4312 if (!ret)
4313 ret = regulator_enable(vreg->reg);
4314
4315 if (!ret)
4316 vreg->enabled = true;
4317 else
4318 dev_err(dev, "%s: %s enable failed, err=%d\n",
4319 __func__, vreg->name, ret);
4320out:
4321 return ret;
4322}
4323
4324static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
4325{
4326 int ret = 0;
4327
4328 if (!vreg || !vreg->enabled)
4329 goto out;
4330
4331 ret = regulator_disable(vreg->reg);
4332
4333 if (!ret) {
4334 /* ignore errors on applying disable config */
4335 ufshcd_config_vreg(dev, vreg, false);
4336 vreg->enabled = false;
4337 } else {
4338 dev_err(dev, "%s: %s disable failed, err=%d\n",
4339 __func__, vreg->name, ret);
4340 }
4341out:
4342 return ret;
4343}
4344
4345static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
4346{
4347 int ret = 0;
4348 struct device *dev = hba->dev;
4349 struct ufs_vreg_info *info = &hba->vreg_info;
4350
4351 if (!info)
4352 goto out;
4353
4354 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
4355 if (ret)
4356 goto out;
4357
4358 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
4359 if (ret)
4360 goto out;
4361
4362 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
4363 if (ret)
4364 goto out;
4365
4366out:
4367 if (ret) {
4368 ufshcd_toggle_vreg(dev, info->vccq2, false);
4369 ufshcd_toggle_vreg(dev, info->vccq, false);
4370 ufshcd_toggle_vreg(dev, info->vcc, false);
4371 }
4372 return ret;
4373}
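
ufshcd_toggle_vreg() is called above but defined outside this hunk; presumably
it is a thin dispatcher over the enable/disable helpers, roughly:

	static int ufshcd_toggle_vreg(struct device *dev, struct ufs_vreg *vreg,
				      bool on)
	{
		return on ? ufshcd_enable_vreg(dev, vreg) :
			    ufshcd_disable_vreg(dev, vreg);
	}

Note that the error path above toggles all three rails off unconditionally;
this is safe because ufshcd_disable_vreg() ignores rails that were never
enabled (the vreg->enabled check) and both helpers tolerate a NULL vreg.
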
4374
4375static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
4376{
4377 struct ufs_vreg_info *info = &hba->vreg_info;
4378
4379 if (info)
4380 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
4381
4382 return 0;
4383}
4384
4385static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
4386{
4387 int ret = 0;
4388
4389 if (!vreg)
4390 goto out;
4391
4392 vreg->reg = devm_regulator_get(dev, vreg->name);
4393 if (IS_ERR(vreg->reg)) {
4394 ret = PTR_ERR(vreg->reg);
4395 dev_err(dev, "%s: %s get failed, err=%d\n",
4396 __func__, vreg->name, ret);
4397 }
4398out:
4399 return ret;
4400}
4401
4402static int ufshcd_init_vreg(struct ufs_hba *hba)
4403{
4404 int ret = 0;
4405 struct device *dev = hba->dev;
4406 struct ufs_vreg_info *info = &hba->vreg_info;
4407
4408 if (!info)
4409 goto out;
4410
4411 ret = ufshcd_get_vreg(dev, info->vcc);
4412 if (ret)
4413 goto out;
4414
4415 ret = ufshcd_get_vreg(dev, info->vccq);
4416 if (ret)
4417 goto out;
4418
4419 ret = ufshcd_get_vreg(dev, info->vccq2);
4420out:
4421 return ret;
4422}
4423
4424static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
4425{
4426 struct ufs_vreg_info *info = &hba->vreg_info;
4427
4428 if (info)
4429 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
4430
4431 return 0;
4432}
4433
4434static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
4435 bool skip_ref_clk)
4436{
4437 int ret = 0;
4438 struct ufs_clk_info *clki;
4439 struct list_head *head = &hba->clk_list_head;
4440 unsigned long flags;
4441
4442 if (!head || list_empty(head))
4443 goto out;
4444
4445 list_for_each_entry(clki, head, list) {
4446 if (!IS_ERR_OR_NULL(clki->clk)) {
4447 if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
4448 continue;
4449
4450 if (on && !clki->enabled) {
4451 ret = clk_prepare_enable(clki->clk);
4452 if (ret) {
4453 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
4454 __func__, clki->name, ret);
4455 goto out;
4456 }
4457 } else if (!on && clki->enabled) {
4458 clk_disable_unprepare(clki->clk);
4459 }
4460 clki->enabled = on;
4461 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
4462 clki->name, on ? "en" : "dis");
4463 }
4464 }
4465
4466 if (hba->vops && hba->vops->setup_clocks)
4467 ret = hba->vops->setup_clocks(hba, on);
4468out:
4469 if (ret) {
4470 list_for_each_entry(clki, head, list) {
4471 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
4472 clk_disable_unprepare(clki->clk);
4473 }
4474 } else if (!ret && on) {
4475 spin_lock_irqsave(hba->host->host_lock, flags);
4476 hba->clk_gating.state = CLKS_ON;
4477 spin_unlock_irqrestore(hba->host->host_lock, flags);
4478 }
4479 return ret;
4480}
4481
4482static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
4483{
4484 return __ufshcd_setup_clocks(hba, on, false);
4485}
4486
4487static int ufshcd_init_clocks(struct ufs_hba *hba)
4488{
4489 int ret = 0;
4490 struct ufs_clk_info *clki;
4491 struct device *dev = hba->dev;
4492 struct list_head *head = &hba->clk_list_head;
4493
4494 if (!head || list_empty(head))
4495 goto out;
4496
4497 list_for_each_entry(clki, head, list) {
4498 if (!clki->name)
4499 continue;
4500
4501 clki->clk = devm_clk_get(dev, clki->name);
4502 if (IS_ERR(clki->clk)) {
4503 ret = PTR_ERR(clki->clk);
4504 dev_err(dev, "%s: %s clk get failed, %d\n",
4505 __func__, clki->name, ret);
4506 goto out;
4507 }
4508
4509 if (clki->max_freq) {
4510 ret = clk_set_rate(clki->clk, clki->max_freq);
4511 if (ret) {
4512 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
4513 __func__, clki->name,
4514 clki->max_freq, ret);
4515 goto out;
4516 }
4517 clki->curr_freq = clki->max_freq;
4518 }
4519 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
4520 clki->name, clk_get_rate(clki->clk));
4521 }
4522out:
4523 return ret;
4524}
4525
4526static int ufshcd_variant_hba_init(struct ufs_hba *hba)
4527{
4528 int err = 0;
4529
4530 if (!hba->vops)
4531 goto out;
4532
4533 if (hba->vops->init) {
4534 err = hba->vops->init(hba);
4535 if (err)
4536 goto out;
4537 }
4538
4539 if (hba->vops->setup_regulators) {
4540 err = hba->vops->setup_regulators(hba, true);
4541 if (err)
4542 goto out_exit;
4543 }
4544
4545 goto out;
4546
4547out_exit:
4548 if (hba->vops->exit)
4549 hba->vops->exit(hba);
4550out:
4551 if (err)
4552 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
4553 __func__, hba->vops ? hba->vops->name : "", err);
4554 return err;
4555}
4556
4557static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
4558{
4559 if (!hba->vops)
4560 return;
4561
4562 if (hba->vops->setup_clocks)
4563 hba->vops->setup_clocks(hba, false);
4564
4565 if (hba->vops->setup_regulators)
4566 hba->vops->setup_regulators(hba, false);
4567
4568 if (hba->vops->exit)
4569 hba->vops->exit(hba);
4570}
4571
4572static int ufshcd_hba_init(struct ufs_hba *hba)
4573{
4574 int err;
4575
4576 /*
 4577 * Handle host controller power separately from the UFS device power
 4578 * rails, as this makes it easier to control host controller power
 4579 * collapse, which is different from UFS device power collapse.
 4580 * Also, enable the host controller power before we go ahead with the
 4581 * rest of the initialization here.
4582 */
4583 err = ufshcd_init_hba_vreg(hba);
4584 if (err)
4585 goto out;
4586
4587 err = ufshcd_setup_hba_vreg(hba, true);
4588 if (err)
4589 goto out;
4590
4591 err = ufshcd_init_clocks(hba);
4592 if (err)
4593 goto out_disable_hba_vreg;
4594
4595 err = ufshcd_setup_clocks(hba, true);
4596 if (err)
4597 goto out_disable_hba_vreg;
4598
4599 err = ufshcd_init_vreg(hba);
4600 if (err)
4601 goto out_disable_clks;
4602
4603 err = ufshcd_setup_vreg(hba, true);
4604 if (err)
4605 goto out_disable_clks;
4606
4607 err = ufshcd_variant_hba_init(hba);
4608 if (err)
4609 goto out_disable_vreg;
4610
4611 hba->is_powered = true;
4612 goto out;
4613
4614out_disable_vreg:
4615 ufshcd_setup_vreg(hba, false);
4616out_disable_clks:
4617 ufshcd_setup_clocks(hba, false);
4618out_disable_hba_vreg:
4619 ufshcd_setup_hba_vreg(hba, false);
4620out:
4621 return err;
4622}
4623
4624static void ufshcd_hba_exit(struct ufs_hba *hba)
4625{
4626 if (hba->is_powered) {
4627 ufshcd_variant_hba_exit(hba);
4628 ufshcd_setup_vreg(hba, false);
4629 ufshcd_setup_clocks(hba, false);
4630 ufshcd_setup_hba_vreg(hba, false);
4631 hba->is_powered = false;
4632 }
4633}
4634
4635static int
4636ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
4637{
4638 unsigned char cmd[6] = {REQUEST_SENSE,
4639 0,
4640 0,
4641 0,
4642 SCSI_SENSE_BUFFERSIZE,
4643 0};
4644 char *buffer;
4645 int ret;
4646
4647 buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
4648 if (!buffer) {
4649 ret = -ENOMEM;
4650 goto out;
4651 }
4652
4653 ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
4654 SCSI_SENSE_BUFFERSIZE, NULL,
4655 msecs_to_jiffies(1000), 3, NULL, REQ_PM);
4656 if (ret)
4657 pr_err("%s: failed with err %d\n", __func__, ret);
4658
4659 kfree(buffer);
4660out:
4661 return ret;
4662}
4663
4664/**
4665 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
4666 * power mode
4667 * @hba: per adapter instance
4668 * @pwr_mode: device power mode to set
4669 *
4670 * Returns 0 if requested power mode is set successfully
4671 * Returns non-zero if failed to set the requested power mode
4672 */
4673static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
4674 enum ufs_dev_pwr_mode pwr_mode)
4675{
4676 unsigned char cmd[6] = { START_STOP };
4677 struct scsi_sense_hdr sshdr;
4678 struct scsi_device *sdp = hba->sdev_ufs_device;
4679 int ret;
4680
4681 if (!sdp || !scsi_device_online(sdp))
4682 return -ENODEV;
4683
4684 /*
4685 * If scsi commands fail, the scsi mid-layer schedules scsi error-
4686 * handling, which would wait for host to be resumed. Since we know
4687 * we are functional while we are here, skip host resume in error
4688 * handling context.
4689 */
4690 hba->host->eh_noresume = 1;
4691 if (hba->wlun_dev_clr_ua) {
4692 ret = ufshcd_send_request_sense(hba, sdp);
4693 if (ret)
4694 goto out;
4695 /* Unit attention condition is cleared now */
4696 hba->wlun_dev_clr_ua = false;
4697 }
4698
4699 cmd[4] = pwr_mode << 4;
4700
4701 /*
4702 * Current function would be generally called from the power management
4703 * callbacks hence set the REQ_PM flag so that it doesn't resume the
4704 * already suspended childs.
4705 */
4706 ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
4707 START_STOP_TIMEOUT, 0, NULL, REQ_PM);
4708 if (ret) {
4709 sdev_printk(KERN_WARNING, sdp,
4710 "START_STOP failed for power mode: %d\n", pwr_mode);
4711 scsi_show_result(ret);
4712 if (driver_byte(ret) & DRIVER_SENSE) {
4713 scsi_show_sense_hdr(&sshdr);
4714 scsi_show_extd_sense(sshdr.asc, sshdr.ascq);
4715 }
4716 }
4717
4718 if (!ret)
4719 hba->curr_dev_pwr_mode = pwr_mode;
4720out:
4721 hba->host->eh_noresume = 0;
4722 return ret;
4723}
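
The CDB built above relies on byte 4 of START STOP UNIT carrying the POWER
CONDITION code in bits 7:4. For example, assuming UFS_SLEEP_PWR_MODE == 2 as
in the ufs headers:

	cmd[4] = UFS_SLEEP_PWR_MODE << 4;	/* 2 << 4 == 0x20 */
	/* resulting CDB: { START_STOP, 0, 0, 0, 0x20, 0 } */
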
4724
4725static int ufshcd_link_state_transition(struct ufs_hba *hba,
4726 enum uic_link_state req_link_state,
4727 int check_for_bkops)
4728{
4729 int ret = 0;
4730
4731 if (req_link_state == hba->uic_link_state)
4732 return 0;
4733
4734 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
4735 ret = ufshcd_uic_hibern8_enter(hba);
4736 if (!ret)
4737 ufshcd_set_link_hibern8(hba);
4738 else
4739 goto out;
4740 }
4741 /*
4742 * If autobkops is enabled, link can't be turned off because
4743 * turning off the link would also turn off the device.
4744 */
4745 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
4746 (!check_for_bkops || (check_for_bkops &&
4747 !hba->auto_bkops_enabled))) {
4748 /*
4749 * Change controller state to "reset state" which
4750 * should also put the link in off/reset state
4751 */
4752 ufshcd_hba_stop(hba);
4753 /*
4754 * TODO: Check if we need any delay to make sure that
4755 * controller is reset
4756 */
4757 ufshcd_set_link_off(hba);
4758 }
4759
4760out:
4761 return ret;
4762}
4763
4764static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
4765{
4766 /*
 4767 * If the UFS device is in UFS_Sleep, turn off the VCC rail to save
 4768 * some power.
4769 *
4770 * If UFS device and link is in OFF state, all power supplies (VCC,
4771 * VCCQ, VCCQ2) can be turned off if power on write protect is not
4772 * required. If UFS link is inactive (Hibern8 or OFF state) and device
4773 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
4774 *
4775 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
4776 * in low power state which would save some power.
4777 */
4778 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
4779 !hba->dev_info.is_lu_power_on_wp) {
4780 ufshcd_setup_vreg(hba, false);
4781 } else if (!ufshcd_is_ufs_dev_active(hba)) {
4782 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
4783 if (!ufshcd_is_link_active(hba)) {
4784 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
4785 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
4786 }
4787 }
4788}
4789
4790static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
4791{
4792 int ret = 0;
4793
4794 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
4795 !hba->dev_info.is_lu_power_on_wp) {
4796 ret = ufshcd_setup_vreg(hba, true);
4797 } else if (!ufshcd_is_ufs_dev_active(hba)) {
4798 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
4799 if (!ret && !ufshcd_is_link_active(hba)) {
4800 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
4801 if (ret)
4802 goto vcc_disable;
4803 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
4804 if (ret)
4805 goto vccq_lpm;
4806 }
4807 }
4808 goto out;
4809
4810vccq_lpm:
4811 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
4812vcc_disable:
4813 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
4814out:
4815 return ret;
4816}
4817
4818static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
4819{
4820 if (ufshcd_is_link_off(hba))
4821 ufshcd_setup_hba_vreg(hba, false);
4822}
4823
4824static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
4825{
4826 if (ufshcd_is_link_off(hba))
4827 ufshcd_setup_hba_vreg(hba, true);
4828}
4829
3176/** 4830/**
3177 * ufshcd_suspend - suspend power management function 4831 * ufshcd_suspend - helper function for suspend operations
3178 * @hba: per adapter instance 4832 * @hba: per adapter instance
3179 * @state: power state 4833 * @pm_op: desired low power operation type
4834 *
4835 * This function will try to put the UFS device and link into low power
4836 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
4837 * (System PM level).
4838 *
4839 * If this function is called during shutdown, it will make sure that
4840 * both UFS device and UFS link is powered off.
3180 * 4841 *
3181 * Returns -ENOSYS 4842 * NOTE: UFS device & link must be active before we enter this function.
4843 *
4844 * Returns 0 for success and non-zero for failure
3182 */ 4845 */
3183int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state) 4846static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
3184{ 4847{
4848 int ret = 0;
4849 enum ufs_pm_level pm_lvl;
4850 enum ufs_dev_pwr_mode req_dev_pwr_mode;
4851 enum uic_link_state req_link_state;
4852
4853 hba->pm_op_in_progress = 1;
4854 if (!ufshcd_is_shutdown_pm(pm_op)) {
4855 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
4856 hba->rpm_lvl : hba->spm_lvl;
4857 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
4858 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
4859 } else {
4860 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
4861 req_link_state = UIC_LINK_OFF_STATE;
4862 }
4863
3185 /* 4864 /*
3186 * TODO: 4865 * If we can't transition into any of the low power modes
3187 * 1. Block SCSI requests from SCSI midlayer 4866 * just gate the clocks.
3188 * 2. Change the internal driver state to non operational
3189 * 3. Set UTRLRSR and UTMRLRSR bits to zero
3190 * 4. Wait until outstanding commands are completed
3191 * 5. Set HCE to zero to send the UFS host controller to reset state
3192 */ 4867 */
4868 ufshcd_hold(hba, false);
4869 hba->clk_gating.is_suspended = true;
4870
4871 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
4872 req_link_state == UIC_LINK_ACTIVE_STATE) {
4873 goto disable_clks;
4874 }
4875
4876 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
4877 (req_link_state == hba->uic_link_state))
4878 goto out;
4879
 4880 /* UFS device & link must be active before we enter this function */
4881 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
4882 ret = -EINVAL;
4883 goto out;
4884 }
3193 4885
3194 return -ENOSYS; 4886 if (ufshcd_is_runtime_pm(pm_op)) {
4887 if (ufshcd_can_autobkops_during_suspend(hba)) {
4888 /*
4889 * The device is idle with no requests in the queue,
4890 * allow background operations if bkops status shows
4891 * that performance might be impacted.
4892 */
4893 ret = ufshcd_urgent_bkops(hba);
4894 if (ret)
4895 goto enable_gating;
4896 } else {
4897 /* make sure that auto bkops is disabled */
4898 ufshcd_disable_auto_bkops(hba);
4899 }
4900 }
4901
4902 if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
4903 ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
4904 !ufshcd_is_runtime_pm(pm_op))) {
4905 /* ensure that bkops is disabled */
4906 ufshcd_disable_auto_bkops(hba);
4907 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
4908 if (ret)
4909 goto enable_gating;
4910 }
4911
4912 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
4913 if (ret)
4914 goto set_dev_active;
4915
4916 ufshcd_vreg_set_lpm(hba);
4917
4918disable_clks:
4919 /*
 4920 * Clock scaling needs access to controller registers. Hence, wait
4921 * for pending clock scaling work to be done before clocks are
4922 * turned off.
4923 */
4924 if (ufshcd_is_clkscaling_enabled(hba)) {
4925 devfreq_suspend_device(hba->devfreq);
4926 hba->clk_scaling.window_start_t = 0;
4927 }
4928 /*
4929 * Call vendor specific suspend callback. As these callbacks may access
4930 * vendor specific host controller register space call them before the
4931 * host clocks are ON.
4932 */
4933 if (hba->vops && hba->vops->suspend) {
4934 ret = hba->vops->suspend(hba, pm_op);
4935 if (ret)
4936 goto set_link_active;
4937 }
4938
4939 if (hba->vops && hba->vops->setup_clocks) {
4940 ret = hba->vops->setup_clocks(hba, false);
4941 if (ret)
4942 goto vops_resume;
4943 }
4944
4945 if (!ufshcd_is_link_active(hba))
4946 ufshcd_setup_clocks(hba, false);
4947 else
4948 /* If link is active, device ref_clk can't be switched off */
4949 __ufshcd_setup_clocks(hba, false, true);
4950
4951 hba->clk_gating.state = CLKS_OFF;
4952 /*
 4953 * Disable the host irq as there won't be any host controller
 4954 * transaction expected till resume.
4955 */
4956 ufshcd_disable_irq(hba);
4957 /* Put the host controller in low power mode if possible */
4958 ufshcd_hba_vreg_set_lpm(hba);
4959 goto out;
4960
4961vops_resume:
4962 if (hba->vops && hba->vops->resume)
4963 hba->vops->resume(hba, pm_op);
4964set_link_active:
4965 ufshcd_vreg_set_hpm(hba);
4966 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
4967 ufshcd_set_link_active(hba);
4968 else if (ufshcd_is_link_off(hba))
4969 ufshcd_host_reset_and_restore(hba);
4970set_dev_active:
4971 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
4972 ufshcd_disable_auto_bkops(hba);
4973enable_gating:
4974 hba->clk_gating.is_suspended = false;
4975 ufshcd_release(hba);
4976out:
4977 hba->pm_op_in_progress = 0;
4978 return ret;
3195} 4979}
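
The ufs_get_pm_lvl_to_dev_pwr_mode()/ufs_get_pm_lvl_to_link_pwr_state()
helpers used at the top of ufshcd_suspend() are defined in an earlier hunk of
this patch; their assumed shape is a small table pairing a device power mode
with a link state for each rpm_lvl/spm_lvl value, roughly:

	struct ufs_pm_lvl_states {
		enum ufs_dev_pwr_mode dev_state;
		enum uic_link_state link_state;
	};

	/* index == UFS PM level; higher levels save more power */
	static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
		{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
		{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
		{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
		{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
		{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
		{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
	};
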
3196EXPORT_SYMBOL_GPL(ufshcd_suspend);
3197 4980
3198/** 4981/**
3199 * ufshcd_resume - resume power management function 4982 * ufshcd_resume - helper function for resume operations
3200 * @hba: per adapter instance 4983 * @hba: per adapter instance
4984 * @pm_op: runtime PM or system PM
4985 *
4986 * This function basically brings the UFS device, UniPro link and controller
4987 * to active state.
3201 * 4988 *
3202 * Returns -ENOSYS 4989 * Returns 0 for success and non-zero for failure
3203 */ 4990 */
3204int ufshcd_resume(struct ufs_hba *hba) 4991static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
3205{ 4992{
4993 int ret;
4994 enum uic_link_state old_link_state;
4995
4996 hba->pm_op_in_progress = 1;
4997 old_link_state = hba->uic_link_state;
4998
4999 ufshcd_hba_vreg_set_hpm(hba);
5000 /* Make sure clocks are enabled before accessing controller */
5001 ret = ufshcd_setup_clocks(hba, true);
5002 if (ret)
5003 goto out;
5004
5005 /* enable the host irq as host controller would be active soon */
5006 ret = ufshcd_enable_irq(hba);
5007 if (ret)
5008 goto disable_irq_and_vops_clks;
5009
5010 ret = ufshcd_vreg_set_hpm(hba);
5011 if (ret)
5012 goto disable_irq_and_vops_clks;
5013
5014 /*
5015 * Call vendor specific resume callback. As these callbacks may access
5016 * vendor specific host controller register space call them when the
5017 * host clocks are ON.
5018 */
5019 if (hba->vops && hba->vops->resume) {
5020 ret = hba->vops->resume(hba, pm_op);
5021 if (ret)
5022 goto disable_vreg;
5023 }
5024
5025 if (ufshcd_is_link_hibern8(hba)) {
5026 ret = ufshcd_uic_hibern8_exit(hba);
5027 if (!ret)
5028 ufshcd_set_link_active(hba);
5029 else
5030 goto vendor_suspend;
5031 } else if (ufshcd_is_link_off(hba)) {
5032 ret = ufshcd_host_reset_and_restore(hba);
5033 /*
5034 * ufshcd_host_reset_and_restore() should have already
5035 * set the link state as active
5036 */
5037 if (ret || !ufshcd_is_link_active(hba))
5038 goto vendor_suspend;
5039 }
5040
5041 if (!ufshcd_is_ufs_dev_active(hba)) {
5042 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
5043 if (ret)
5044 goto set_old_link_state;
5045 }
5046
3206 /* 5047 /*
3207 * TODO: 5048 * If BKOPs operations are urgently needed at this moment then
3208 * 1. Set HCE to 1, to start the UFS host controller 5049 * keep auto-bkops enabled or else disable it.
3209 * initialization process
3210 * 2. Set UTRLRSR and UTMRLRSR bits to 1
3211 * 3. Change the internal driver state to operational
3212 * 4. Unblock SCSI requests from SCSI midlayer
3213 */ 5050 */
5051 ufshcd_urgent_bkops(hba);
5052 hba->clk_gating.is_suspended = false;
5053
5054 if (ufshcd_is_clkscaling_enabled(hba))
5055 devfreq_resume_device(hba->devfreq);
5056
5057 /* Schedule clock gating in case of no access to UFS device yet */
5058 ufshcd_release(hba);
5059 goto out;
5060
5061set_old_link_state:
5062 ufshcd_link_state_transition(hba, old_link_state, 0);
5063vendor_suspend:
5064 if (hba->vops && hba->vops->suspend)
5065 hba->vops->suspend(hba, pm_op);
5066disable_vreg:
5067 ufshcd_vreg_set_lpm(hba);
5068disable_irq_and_vops_clks:
5069 ufshcd_disable_irq(hba);
5070 ufshcd_setup_clocks(hba, false);
5071out:
5072 hba->pm_op_in_progress = 0;
5073 return ret;
5074}
5075
5076/**
5077 * ufshcd_system_suspend - system suspend routine
5078 * @hba: per adapter instance
5079 * @pm_op: runtime PM or system PM
5080 *
5081 * Check the description of ufshcd_suspend() function for more details.
5082 *
5083 * Returns 0 for success and non-zero for failure
5084 */
5085int ufshcd_system_suspend(struct ufs_hba *hba)
5086{
5087 int ret = 0;
5088
5089 if (!hba || !hba->is_powered)
5090 goto out;
5091
5092 if (pm_runtime_suspended(hba->dev)) {
5093 if (hba->rpm_lvl == hba->spm_lvl)
5094 /*
 5095 * There is a possibility that the device may still be in
5096 * active state during the runtime suspend.
5097 */
5098 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
5099 hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
5100 goto out;
5101
5102 /*
 5103 * The UFS device and/or UFS link low power states during runtime
 5104 * suspend may differ from what is expected during system
 5105 * suspend. Hence runtime resume the device & link and let the
 5106 * system suspend low power states take effect.
 5107 * TODO: If resume takes a long time, we might optimize it in the
 5108 * future by not resuming everything if possible.
5109 */
5110 ret = ufshcd_runtime_resume(hba);
5111 if (ret)
5112 goto out;
5113 }
5114
5115 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
5116out:
5117 if (!ret)
5118 hba->is_sys_suspended = true;
5119 return ret;
5120}
5121EXPORT_SYMBOL(ufshcd_system_suspend);
5122
5123/**
5124 * ufshcd_system_resume - system resume routine
5125 * @hba: per adapter instance
5126 *
5127 * Returns 0 for success and non-zero for failure
5128 */
5129
5130int ufshcd_system_resume(struct ufs_hba *hba)
5131{
5132 if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
5133 /*
5134 * Let the runtime resume take care of resuming
5135 * if runtime suspended.
5136 */
5137 return 0;
3214 5138
3215 return -ENOSYS; 5139 return ufshcd_resume(hba, UFS_SYSTEM_PM);
3216} 5140}
3217EXPORT_SYMBOL_GPL(ufshcd_resume); 5141EXPORT_SYMBOL(ufshcd_system_resume);
3218 5142
5143/**
5144 * ufshcd_runtime_suspend - runtime suspend routine
5145 * @hba: per adapter instance
5146 *
5147 * Check the description of ufshcd_suspend() function for more details.
5148 *
5149 * Returns 0 for success and non-zero for failure
5150 */
3219int ufshcd_runtime_suspend(struct ufs_hba *hba) 5151int ufshcd_runtime_suspend(struct ufs_hba *hba)
3220{ 5152{
3221 if (!hba) 5153 if (!hba || !hba->is_powered)
3222 return 0; 5154 return 0;
3223 5155
3224 /* 5156 return ufshcd_suspend(hba, UFS_RUNTIME_PM);
3225 * The device is idle with no requests in the queue,
3226 * allow background operations.
3227 */
3228 return ufshcd_enable_auto_bkops(hba);
3229} 5157}
3230EXPORT_SYMBOL(ufshcd_runtime_suspend); 5158EXPORT_SYMBOL(ufshcd_runtime_suspend);
3231 5159
5160/**
5161 * ufshcd_runtime_resume - runtime resume routine
5162 * @hba: per adapter instance
5163 *
5164 * This function basically brings the UFS device, UniPro link and controller
5165 * to active state. Following operations are done in this function:
5166 *
5167 * 1. Turn on all the controller related clocks
5168 * 2. Bring the UniPro link out of Hibernate state
5169 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
5170 * to active state.
5171 * 4. If auto-bkops is enabled on the device, disable it.
5172 *
 5173 * The following would be the possible power state after this function
 5174 * returns successfully:
5175 * S1: UFS device in Active state with VCC rail ON
5176 * UniPro link in Active state
5177 * All the UFS/UniPro controller clocks are ON
5178 *
5179 * Returns 0 for success and non-zero for failure
5180 */
3232int ufshcd_runtime_resume(struct ufs_hba *hba) 5181int ufshcd_runtime_resume(struct ufs_hba *hba)
3233{ 5182{
3234 if (!hba) 5183 if (!hba || !hba->is_powered)
3235 return 0; 5184 return 0;
3236 5185 else
3237 return ufshcd_disable_auto_bkops(hba); 5186 return ufshcd_resume(hba, UFS_RUNTIME_PM);
3238} 5187}
3239EXPORT_SYMBOL(ufshcd_runtime_resume); 5188EXPORT_SYMBOL(ufshcd_runtime_resume);
3240 5189
@@ -3245,6 +5194,36 @@ int ufshcd_runtime_idle(struct ufs_hba *hba)
3245EXPORT_SYMBOL(ufshcd_runtime_idle); 5194EXPORT_SYMBOL(ufshcd_runtime_idle);
3246 5195
3247/** 5196/**
5197 * ufshcd_shutdown - shutdown routine
5198 * @hba: per adapter instance
5199 *
5200 * This function would power off both UFS device and UFS link.
5201 *
5202 * Returns 0 always to allow force shutdown even in case of errors.
5203 */
5204int ufshcd_shutdown(struct ufs_hba *hba)
5205{
5206 int ret = 0;
5207
5208 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
5209 goto out;
5210
5211 if (pm_runtime_suspended(hba->dev)) {
5212 ret = ufshcd_runtime_resume(hba);
5213 if (ret)
5214 goto out;
5215 }
5216
5217 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
5218out:
5219 if (ret)
5220 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
5221 /* allow force shutdown even in case of errors */
5222 return 0;
5223}
5224EXPORT_SYMBOL(ufshcd_shutdown);
5225
5226/**
3248 * ufshcd_remove - de-allocate SCSI host and host memory space 5227 * ufshcd_remove - de-allocate SCSI host and host memory space
3249 * data structure memory 5228 * data structure memory
3250 * @hba - per adapter instance 5229 * @hba - per adapter instance
@@ -3252,11 +5231,17 @@ EXPORT_SYMBOL(ufshcd_runtime_idle);
3252void ufshcd_remove(struct ufs_hba *hba) 5231void ufshcd_remove(struct ufs_hba *hba)
3253{ 5232{
3254 scsi_remove_host(hba->host); 5233 scsi_remove_host(hba->host);
5234 ufshcd_scsi_remove_wlus(hba);
3255 /* disable interrupts */ 5235 /* disable interrupts */
3256 ufshcd_disable_intr(hba, hba->intr_mask); 5236 ufshcd_disable_intr(hba, hba->intr_mask);
3257 ufshcd_hba_stop(hba); 5237 ufshcd_hba_stop(hba);
3258 5238
3259 scsi_host_put(hba->host); 5239 scsi_host_put(hba->host);
5240
5241 ufshcd_exit_clk_gating(hba);
5242 if (ufshcd_is_clkscaling_enabled(hba))
5243 devfreq_remove_device(hba->devfreq);
5244 ufshcd_hba_exit(hba);
3260} 5245}
3261EXPORT_SYMBOL_GPL(ufshcd_remove); 5246EXPORT_SYMBOL_GPL(ufshcd_remove);
3262 5247
@@ -3277,19 +5262,16 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
3277} 5262}
3278 5263
3279/** 5264/**
3280 * ufshcd_init - Driver initialization routine 5265 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
3281 * @dev: pointer to device handle 5266 * @dev: pointer to device handle
3282 * @hba_handle: driver private handle 5267 * @hba_handle: driver private handle
3283 * @mmio_base: base register address
3284 * @irq: Interrupt line of device
3285 * Returns 0 on success, non-zero value on failure 5268 * Returns 0 on success, non-zero value on failure
3286 */ 5269 */
3287int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle, 5270int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
3288 void __iomem *mmio_base, unsigned int irq)
3289{ 5271{
3290 struct Scsi_Host *host; 5272 struct Scsi_Host *host;
3291 struct ufs_hba *hba; 5273 struct ufs_hba *hba;
3292 int err; 5274 int err = 0;
3293 5275
3294 if (!dev) { 5276 if (!dev) {
3295 dev_err(dev, 5277 dev_err(dev,
@@ -3298,13 +5280,6 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
3298 goto out_error; 5280 goto out_error;
3299 } 5281 }
3300 5282
3301 if (!mmio_base) {
3302 dev_err(dev,
3303 "Invalid memory reference for mmio_base is NULL\n");
3304 err = -ENODEV;
3305 goto out_error;
3306 }
3307
3308 host = scsi_host_alloc(&ufshcd_driver_template, 5283 host = scsi_host_alloc(&ufshcd_driver_template,
3309 sizeof(struct ufs_hba)); 5284 sizeof(struct ufs_hba));
3310 if (!host) { 5285 if (!host) {
@@ -3315,9 +5290,146 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
3315 hba = shost_priv(host); 5290 hba = shost_priv(host);
3316 hba->host = host; 5291 hba->host = host;
3317 hba->dev = dev; 5292 hba->dev = dev;
5293 *hba_handle = hba;
5294
5295out_error:
5296 return err;
5297}
5298EXPORT_SYMBOL(ufshcd_alloc_host);
5299
5300static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
5301{
5302 int ret = 0;
5303 struct ufs_clk_info *clki;
5304 struct list_head *head = &hba->clk_list_head;
5305
5306 if (!head || list_empty(head))
5307 goto out;
5308
5309 list_for_each_entry(clki, head, list) {
5310 if (!IS_ERR_OR_NULL(clki->clk)) {
5311 if (scale_up && clki->max_freq) {
5312 if (clki->curr_freq == clki->max_freq)
5313 continue;
5314 ret = clk_set_rate(clki->clk, clki->max_freq);
5315 if (ret) {
5316 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
5317 __func__, clki->name,
5318 clki->max_freq, ret);
5319 break;
5320 }
5321 clki->curr_freq = clki->max_freq;
5322
5323 } else if (!scale_up && clki->min_freq) {
5324 if (clki->curr_freq == clki->min_freq)
5325 continue;
5326 ret = clk_set_rate(clki->clk, clki->min_freq);
5327 if (ret) {
5328 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
5329 __func__, clki->name,
5330 clki->min_freq, ret);
5331 break;
5332 }
5333 clki->curr_freq = clki->min_freq;
5334 }
5335 }
5336 dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
5337 clki->name, clk_get_rate(clki->clk));
5338 }
 5339 if (hba->vops && hba->vops->clk_scale_notify)
5340 hba->vops->clk_scale_notify(hba);
5341out:
5342 return ret;
5343}
5344
5345static int ufshcd_devfreq_target(struct device *dev,
5346 unsigned long *freq, u32 flags)
5347{
5348 int err = 0;
5349 struct ufs_hba *hba = dev_get_drvdata(dev);
5350
5351 if (!ufshcd_is_clkscaling_enabled(hba))
5352 return -EINVAL;
5353
5354 if (*freq == UINT_MAX)
5355 err = ufshcd_scale_clks(hba, true);
5356 else if (*freq == 0)
5357 err = ufshcd_scale_clks(hba, false);
5358
5359 return err;
5360}
5361
5362static int ufshcd_devfreq_get_dev_status(struct device *dev,
5363 struct devfreq_dev_status *stat)
5364{
5365 struct ufs_hba *hba = dev_get_drvdata(dev);
5366 struct ufs_clk_scaling *scaling = &hba->clk_scaling;
5367 unsigned long flags;
5368
5369 if (!ufshcd_is_clkscaling_enabled(hba))
5370 return -EINVAL;
5371
5372 memset(stat, 0, sizeof(*stat));
5373
5374 spin_lock_irqsave(hba->host->host_lock, flags);
5375 if (!scaling->window_start_t)
5376 goto start_window;
5377
5378 if (scaling->is_busy_started)
5379 scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
5380 scaling->busy_start_t));
5381
5382 stat->total_time = jiffies_to_usecs((long)jiffies -
5383 (long)scaling->window_start_t);
5384 stat->busy_time = scaling->tot_busy_t;
5385start_window:
5386 scaling->window_start_t = jiffies;
5387 scaling->tot_busy_t = 0;
5388
5389 if (hba->outstanding_reqs) {
5390 scaling->busy_start_t = ktime_get();
5391 scaling->is_busy_started = true;
5392 } else {
5393 scaling->busy_start_t = ktime_set(0, 0);
5394 scaling->is_busy_started = false;
5395 }
5396 spin_unlock_irqrestore(hba->host->host_lock, flags);
5397 return 0;
5398}
5399
5400static struct devfreq_dev_profile ufs_devfreq_profile = {
5401 .polling_ms = 100,
5402 .target = ufshcd_devfreq_target,
5403 .get_dev_status = ufshcd_devfreq_get_dev_status,
5404};
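
With this profile the devfreq governor polls every 100 ms, derives a load from
the busy_time/total_time pair reported by ufshcd_devfreq_get_dev_status(), and
calls .target with a requested frequency. Since no frequency table is exposed,
ufshcd_devfreq_target() only acts on the two extremes. A rough sketch of the
interaction (the governor side is paraphrased, not part of this patch):

	unsigned long freq = UINT_MAX;	/* governor verdict: run at full speed */

	ufshcd_devfreq_target(hba->dev, &freq, 0);
	/* -> ufshcd_scale_clks(hba, true): raise each clk to its max_freq */
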
5405
5406/**
5407 * ufshcd_init - Driver initialization routine
5408 * @hba: per-adapter instance
5409 * @mmio_base: base register address
5410 * @irq: Interrupt line of device
5411 * Returns 0 on success, non-zero value on failure
5412 */
5413int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
5414{
5415 int err;
5416 struct Scsi_Host *host = hba->host;
5417 struct device *dev = hba->dev;
5418
5419 if (!mmio_base) {
5420 dev_err(hba->dev,
 5421 "Invalid memory reference: mmio_base is NULL\n");
5422 err = -ENODEV;
5423 goto out_error;
5424 }
5425
3318 hba->mmio_base = mmio_base; 5426 hba->mmio_base = mmio_base;
3319 hba->irq = irq; 5427 hba->irq = irq;
3320 5428
5429 err = ufshcd_hba_init(hba);
5430 if (err)
5431 goto out_error;
5432
3321 /* Read capabilities registers */ 5433 /* Read capabilities registers */
3322 ufshcd_hba_capabilities(hba); 5434 ufshcd_hba_capabilities(hba);
3323 5435
@@ -3346,11 +5458,13 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
 	host->can_queue = hba->nutrs;
 	host->cmd_per_lun = hba->nutrs;
 	host->max_id = UFSHCD_MAX_ID;
-	host->max_lun = UFSHCD_MAX_LUNS;
+	host->max_lun = UFS_MAX_LUNS;
 	host->max_channel = UFSHCD_MAX_CHANNEL;
 	host->unique_id = host->host_no;
 	host->max_cmd_len = MAX_CDB_SIZE;
 
+	hba->max_pwr_info.is_valid = false;
+
 	/* Initialize wait queue for task management */
 	init_waitqueue_head(&hba->tm_wq);
 	init_waitqueue_head(&hba->tm_tag_wq);
@@ -3368,24 +5482,27 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
 	/* Initialize device management tag acquire wait queue */
 	init_waitqueue_head(&hba->dev_cmd.tag_wq);
 
+	ufshcd_init_clk_gating(hba);
 	/* IRQ registration */
 	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
 	if (err) {
 		dev_err(hba->dev, "request irq failed\n");
-		goto out_disable;
+		goto exit_gating;
+	} else {
+		hba->is_irq_enabled = true;
 	}
 
 	/* Enable SCSI tag mapping */
 	err = scsi_init_shared_tag_map(host, host->can_queue);
 	if (err) {
 		dev_err(hba->dev, "init shared queue failed\n");
-		goto out_disable;
+		goto exit_gating;
 	}
 
 	err = scsi_add_host(host, hba->dev);
 	if (err) {
 		dev_err(hba->dev, "scsi_add_host failed\n");
-		goto out_disable;
+		goto exit_gating;
 	}
 
 	/* Host controller enable */
@@ -3395,19 +5512,40 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
 		goto out_remove_scsi_host;
 	}
 
-	*hba_handle = hba;
+	if (ufshcd_is_clkscaling_enabled(hba)) {
+		hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
+						   "simple_ondemand", NULL);
+		if (IS_ERR(hba->devfreq)) {
+			dev_err(hba->dev, "Unable to register with devfreq %ld\n",
+				PTR_ERR(hba->devfreq));
+			goto out_remove_scsi_host;
+		}
+		/* Suspend devfreq until the UFS device is detected */
+		devfreq_suspend_device(hba->devfreq);
+		hba->clk_scaling.window_start_t = 0;
+	}
 
 	/* Hold auto suspend until async scan completes */
 	pm_runtime_get_sync(dev);
 
+	/*
+	 * The device-initialize-sequence hasn't been invoked yet.
+	 * Set the device to power-off state
+	 */
+	ufshcd_set_ufs_dev_poweroff(hba);
+
 	async_schedule(ufshcd_async_scan, hba);
 
 	return 0;
 
 out_remove_scsi_host:
 	scsi_remove_host(hba->host);
+exit_gating:
+	ufshcd_exit_clk_gating(hba);
 out_disable:
+	hba->is_irq_enabled = false;
 	scsi_host_put(host);
+	ufshcd_hba_exit(hba);
 out_error:
 	return err;
 }
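
For readers unfamiliar with devfreq: the "simple_ondemand" governor drives the two callbacks registered above. Every polling_ms it calls ->get_dev_status() to learn the busy/total ratio, then calls ->target() with its chosen frequency; since ufshcd publishes no real frequency table, the target callback only distinguishes the two extremes (*freq == UINT_MAX scales up, *freq == 0 scales down). A minimal sketch of the registration pattern follows; it is illustrative only, and demo_register_devfreq() is a name invented here, not part of this patch.

#include <linux/devfreq.h>

/*
 * Sketch: mirrors how ufshcd_init() above wires devfreq. The governor
 * name string selects simple_ondemand; the profile supplies the
 * target/get_dev_status callbacks shown in the hunk.
 */
static int demo_register_devfreq(struct device *dev,
				 struct devfreq_dev_profile *profile)
{
	struct devfreq *df;

	df = devfreq_add_device(dev, profile, "simple_ondemand", NULL);
	if (IS_ERR(df))
		return PTR_ERR(df);

	/* As in the patch: keep scaling idle until the device is found */
	devfreq_suspend_device(df);
	return 0;
}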
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index acf318e338ed..58ecdff5065c 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -52,6 +52,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/clk.h>
 #include <linux/completion.h>
+#include <linux/regulator/consumer.h>
 
 #include <asm/irq.h>
 #include <asm/byteorder.h>
@@ -68,6 +69,8 @@
 #define UFSHCD "ufshcd"
 #define UFSHCD_DRIVER_VERSION "0.2"
 
+struct ufs_hba;
+
 enum dev_cmd_type {
 	DEV_CMD_TYPE_NOP		= 0x0,
 	DEV_CMD_TYPE_QUERY		= 0x1,
@@ -93,6 +96,54 @@ struct uic_command {
 	struct completion done;
 };
 
+/* Used to differentiate the power management options */
+enum ufs_pm_op {
+	UFS_RUNTIME_PM,
+	UFS_SYSTEM_PM,
+	UFS_SHUTDOWN_PM,
+};
+
+#define ufshcd_is_runtime_pm(op) ((op) == UFS_RUNTIME_PM)
+#define ufshcd_is_system_pm(op) ((op) == UFS_SYSTEM_PM)
+#define ufshcd_is_shutdown_pm(op) ((op) == UFS_SHUTDOWN_PM)
+
+/* Host <-> Device UniPro Link state */
+enum uic_link_state {
+	UIC_LINK_OFF_STATE	= 0, /* Link powered down or disabled */
+	UIC_LINK_ACTIVE_STATE	= 1, /* Link is in Fast/Slow/Sleep state */
+	UIC_LINK_HIBERN8_STATE	= 2, /* Link is in Hibernate state */
+};
+
+#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
+#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
+				    UIC_LINK_ACTIVE_STATE)
+#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
+				    UIC_LINK_HIBERN8_STATE)
+#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
+#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
+				    UIC_LINK_ACTIVE_STATE)
+#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
+				    UIC_LINK_HIBERN8_STATE)
+
+/*
+ * UFS Power management levels.
+ * Each level is in increasing order of power savings.
+ */
+enum ufs_pm_level {
+	UFS_PM_LVL_0, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE */
+	UFS_PM_LVL_1, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE */
+	UFS_PM_LVL_2, /* UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE */
+	UFS_PM_LVL_3, /* UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE */
+	UFS_PM_LVL_4, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE */
+	UFS_PM_LVL_5, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE */
+	UFS_PM_LVL_MAX
+};
+
+struct ufs_pm_lvl_states {
+	enum ufs_dev_pwr_mode dev_state;
+	enum uic_link_state link_state;
+};
+
 /**
  * struct ufshcd_lrb - local reference block
  * @utr_descriptor_ptr: UTRD address of the command
@@ -121,7 +172,7 @@ struct ufshcd_lrb {
 
 	int command_type;
 	int task_tag;
-	unsigned int lun;
+	u8 lun; /* UPIU LUN id field is only 8-bit wide */
 	bool intr_cmd;
 };
 
@@ -153,6 +204,126 @@ struct ufs_dev_cmd {
 };
 
 /**
+ * struct ufs_clk_info - UFS clock related info
+ * @list: list headed by hba->clk_list_head
+ * @clk: clock node
+ * @name: clock name
+ * @max_freq: maximum frequency supported by the clock
+ * @min_freq: min frequency that can be used for clock scaling
+ * @curr_freq: indicates the current frequency that it is set to
+ * @enabled: variable to check against multiple enable/disable
+ */
+struct ufs_clk_info {
+	struct list_head list;
+	struct clk *clk;
+	const char *name;
+	u32 max_freq;
+	u32 min_freq;
+	u32 curr_freq;
+	bool enabled;
+};
+
+#define PRE_CHANGE	0
+#define POST_CHANGE	1
+
+struct ufs_pa_layer_attr {
+	u32 gear_rx;
+	u32 gear_tx;
+	u32 lane_rx;
+	u32 lane_tx;
+	u32 pwr_rx;
+	u32 pwr_tx;
+	u32 hs_rate;
+};
+
+struct ufs_pwr_mode_info {
+	bool is_valid;
+	struct ufs_pa_layer_attr info;
+};
+
+/**
+ * struct ufs_hba_variant_ops - variant specific callbacks
+ * @name: variant name
+ * @init: called when the driver is initialized
+ * @exit: called to cleanup everything done in init
+ * @clk_scale_notify: notifies that clks are scaled up/down
+ * @setup_clocks: called before touching any of the controller registers
+ * @setup_regulators: called before accessing the host controller
+ * @hce_enable_notify: called before and after HCE enable bit is set to allow
+ *                     variant specific Uni-Pro initialization.
+ * @link_startup_notify: called before and after Link startup is carried out
+ *                       to allow variant specific Uni-Pro initialization.
+ * @pwr_change_notify: called before and after a power mode change
+ *			is carried out to allow vendor specific capabilities
+ *			to be set.
+ * @suspend: called during host controller PM callback
+ * @resume: called during host controller PM callback
+ */
+struct ufs_hba_variant_ops {
+	const char *name;
+	int	(*init)(struct ufs_hba *);
+	void	(*exit)(struct ufs_hba *);
+	void	(*clk_scale_notify)(struct ufs_hba *);
+	int	(*setup_clocks)(struct ufs_hba *, bool);
+	int	(*setup_regulators)(struct ufs_hba *, bool);
+	int	(*hce_enable_notify)(struct ufs_hba *, bool);
+	int	(*link_startup_notify)(struct ufs_hba *, bool);
+	int	(*pwr_change_notify)(struct ufs_hba *,
+				bool, struct ufs_pa_layer_attr *,
+				struct ufs_pa_layer_attr *);
+	int	(*suspend)(struct ufs_hba *, enum ufs_pm_op);
+	int	(*resume)(struct ufs_hba *, enum ufs_pm_op);
+};
+
+/* clock gating state */
+enum clk_gating_state {
+	CLKS_OFF,
+	CLKS_ON,
+	REQ_CLKS_OFF,
+	REQ_CLKS_ON,
+};
+
+/**
+ * struct ufs_clk_gating - UFS clock gating related info
+ * @gate_work: worker to turn off clocks after some delay as specified in
+ * delay_ms
+ * @ungate_work: worker to turn on clocks that will be used in case of
+ * interrupt context
+ * @state: the current clocks state
+ * @delay_ms: gating delay in ms
+ * @is_suspended: clk gating is suspended when set to 1 which can be used
+ * during suspend/resume
+ * @delay_attr: sysfs attribute to control delay_ms
+ * @active_reqs: number of requests that are pending and should be waited for
+ * completion before gating clocks.
+ */
+struct ufs_clk_gating {
+	struct delayed_work gate_work;
+	struct work_struct ungate_work;
+	enum clk_gating_state state;
+	unsigned long delay_ms;
+	bool is_suspended;
+	struct device_attribute delay_attr;
+	int active_reqs;
+};
+
+struct ufs_clk_scaling {
+	ktime_t busy_start_t;
+	bool is_busy_started;
+	unsigned long tot_busy_t;
+	unsigned long window_start_t;
+};
+
+/**
+ * struct ufs_init_prefetch - contains data that is pre-fetched once during
+ * initialization
+ * @icc_level: icc level which was read during initialization
+ */
+struct ufs_init_prefetch {
+	u32 icc_level;
+};
+
+/**
  * struct ufs_hba - per adapter private structure
  * @mmio_base: UFSHCI base register address
  * @ucdl_base_addr: UFS Command Descriptor base address
@@ -171,6 +342,8 @@ struct ufs_dev_cmd {
 * @nutrs: Transfer Request Queue depth supported by controller
 * @nutmrs: Task Management Queue depth supported by controller
 * @ufs_version: UFS Version to which controller complies
+ * @vops: pointer to variant specific operations
+ * @priv: pointer to variant specific private data
 * @irq: Irq number of the controller
 * @active_uic_cmd: handle of active UIC command
 * @uic_cmd_mutex: mutex for uic command
@@ -183,6 +356,9 @@ struct ufs_dev_cmd {
 * @eh_flags: Error handling flags
 * @intr_mask: Interrupt Mask Bits
 * @ee_ctrl_mask: Exception event control mask
+ * @is_powered: flag to check if HBA is powered
+ * @is_init_prefetch: flag to check if data was pre-fetched in initialization
+ * @init_prefetch_data: data pre-fetched during initialization
 * @eh_work: Worker to handle UFS errors that require s/w attention
 * @eeh_work: Worker to handle exception events
 * @errors: HBA errors
@@ -191,6 +367,10 @@ struct ufs_dev_cmd {
 * @saved_uic_err: sticky UIC error mask
 * @dev_cmd: ufs device management command information
 * @auto_bkops_enabled: to track whether bkops is enabled in device
+ * @vreg_info: UFS device voltage regulator information
+ * @clk_list_head: UFS host controller clocks list node head
+ * @pwr_info: holds current power mode
+ * @max_pwr_info: keeps the device max valid pwr mode
 */
 struct ufs_hba {
 	void __iomem *mmio_base;
@@ -207,6 +387,21 @@ struct ufs_hba {
 
 	struct Scsi_Host *host;
 	struct device *dev;
+	/*
+	 * This field is to keep a reference to "scsi_device" corresponding to
+	 * "UFS device" W-LU.
+	 */
+	struct scsi_device *sdev_ufs_device;
+	struct scsi_device *sdev_rpmb;
+	struct scsi_device *sdev_boot;
+
+	enum ufs_dev_pwr_mode curr_dev_pwr_mode;
+	enum uic_link_state uic_link_state;
+	/* Desired UFS power management level during runtime PM */
+	enum ufs_pm_level rpm_lvl;
+	/* Desired UFS power management level during system PM */
+	enum ufs_pm_level spm_lvl;
+	int pm_op_in_progress;
 
 	struct ufshcd_lrb *lrb;
 	unsigned long lrb_in_use;
@@ -218,22 +413,28 @@ struct ufs_hba {
 	int nutrs;
 	int nutmrs;
 	u32 ufs_version;
+	struct ufs_hba_variant_ops *vops;
+	void *priv;
 	unsigned int irq;
+	bool is_irq_enabled;
 
-	struct uic_command *active_uic_cmd;
-	struct mutex uic_cmd_mutex;
 
 	wait_queue_head_t tm_wq;
 	wait_queue_head_t tm_tag_wq;
 	unsigned long tm_condition;
 	unsigned long tm_slots_in_use;
 
-	struct completion *pwr_done;
+	struct uic_command *active_uic_cmd;
+	struct mutex uic_cmd_mutex;
+	struct completion *uic_async_done;
 
 	u32 ufshcd_state;
 	u32 eh_flags;
 	u32 intr_mask;
 	u16 ee_ctrl_mask;
+	bool is_powered;
+	bool is_init_prefetch;
+	struct ufs_init_prefetch init_prefetch_data;
 
 	/* Work Queues */
 	struct work_struct eh_work;
@@ -248,16 +449,76 @@ struct ufs_hba {
 	/* Device management request data */
 	struct ufs_dev_cmd dev_cmd;
 
+	/* Keeps information of the UFS device connected to this host */
+	struct ufs_dev_info dev_info;
 	bool auto_bkops_enabled;
+	struct ufs_vreg_info vreg_info;
+	struct list_head clk_list_head;
+
+	bool wlun_dev_clr_ua;
+
+	struct ufs_pa_layer_attr pwr_info;
+	struct ufs_pwr_mode_info max_pwr_info;
+
+	struct ufs_clk_gating clk_gating;
+	/* Control to enable/disable host capabilities */
+	u32 caps;
+	/* Allow dynamic clk gating */
+#define UFSHCD_CAP_CLK_GATING	(1 << 0)
+	/* Allow hibern8 with clk gating */
+#define UFSHCD_CAP_HIBERN8_WITH_CLK_GATING (1 << 1)
+	/* Allow dynamic clk scaling */
+#define UFSHCD_CAP_CLK_SCALING	(1 << 2)
+	/* Allow auto bkops to be enabled during runtime suspend */
+#define UFSHCD_CAP_AUTO_BKOPS_SUSPEND (1 << 3)
+
+	struct devfreq *devfreq;
+	struct ufs_clk_scaling clk_scaling;
+	bool is_sys_suspended;
 };
 
+/* Returns true if clocks can be gated. Otherwise false */
+static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
+{
+	return hba->caps & UFSHCD_CAP_CLK_GATING;
+}
+static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
+{
+	return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+}
+static inline int ufshcd_is_clkscaling_enabled(struct ufs_hba *hba)
+{
+	return hba->caps & UFSHCD_CAP_CLK_SCALING;
+}
+static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
+{
+	return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
+}
+
 #define ufshcd_writel(hba, val, reg)	\
 	writel((val), (hba)->mmio_base + (reg))
 #define ufshcd_readl(hba, reg)	\
 	readl((hba)->mmio_base + (reg))
 
-int ufshcd_init(struct device *, struct ufs_hba **, void __iomem *,
-					unsigned int);
+/**
+ * ufshcd_rmwl - read modify write into a register
+ * @hba - per adapter instance
+ * @mask - mask to apply on read value
+ * @val - actual value to write
+ * @reg - register address
+ */
+static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
+{
+	u32 tmp;
+
+	tmp = ufshcd_readl(hba, reg);
+	tmp &= ~mask;
+	tmp |= (val & mask);
+	ufshcd_writel(hba, tmp, reg);
+}
+
+int ufshcd_alloc_host(struct device *, struct ufs_hba **);
+int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
 void ufshcd_remove(struct ufs_hba *);
 
 /**
@@ -275,11 +536,12 @@ static inline void check_upiu_size(void)
 			GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
 }
 
-extern int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state);
-extern int ufshcd_resume(struct ufs_hba *hba);
 extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
 extern int ufshcd_runtime_resume(struct ufs_hba *hba);
 extern int ufshcd_runtime_idle(struct ufs_hba *hba);
+extern int ufshcd_system_suspend(struct ufs_hba *hba);
+extern int ufshcd_system_resume(struct ufs_hba *hba);
+extern int ufshcd_shutdown(struct ufs_hba *hba);
 extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
 			       u8 attr_set, u32 mib_val, u8 peer);
 extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
@@ -331,4 +593,6 @@ static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
 	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
 }
 
+int ufshcd_hold(struct ufs_hba *hba, bool async);
+void ufshcd_release(struct ufs_hba *hba);
 #endif /* End of Header */
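
The new ufshcd_rmwl() helper above is a plain read-modify-write built on the ufshcd_readl()/ufshcd_writel() accessors defined next to it. A small usage sketch follows; DEMO_REG and DEMO_FIELD_MASK are invented for illustration (real offsets live in ufshci.h), and the sketch assumes ufshcd.h is included.

/*
 * Hypothetical example of ufshcd_rmwl(): update only the bits selected
 * by the mask, leaving the rest of the register untouched. Equivalent
 * to: reg = (reg & ~mask) | (val & mask).
 */
#define DEMO_REG		0x40		/* made-up register offset */
#define DEMO_FIELD_MASK		0x0000000F	/* made-up 4-bit field */

static void demo_set_field(struct ufs_hba *hba, u32 field_val)
{
	ufshcd_rmwl(hba, DEMO_FIELD_MASK, field_val, DEMO_REG);
}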
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index e1b844bc9460..d5721199e9cc 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -124,8 +124,11 @@ enum {
 #define CONTROLLER_FATAL_ERROR			UFS_BIT(16)
 #define SYSTEM_BUS_FATAL_ERROR			UFS_BIT(17)
 
-#define UFSHCD_UIC_MASK		(UIC_COMMAND_COMPL |\
-				 UIC_POWER_MODE)
+#define UFSHCD_UIC_PWR_MASK	(UIC_HIBERNATE_ENTER |\
+				UIC_HIBERNATE_EXIT |\
+				UIC_POWER_MODE)
+
+#define UFSHCD_UIC_MASK		(UIC_COMMAND_COMPL | UFSHCD_UIC_PWR_MASK)
 
 #define UFSHCD_ERROR_MASK	(UIC_ERROR |\
 				DEVICE_FATAL_ERROR |\
@@ -210,7 +213,7 @@ enum {
 #define UIC_GET_ATTR_ID(v)		(((v) >> 16) & 0xFFFF)
 
 /* UIC Commands */
-enum {
+enum uic_cmd_dme {
 	UIC_CMD_DME_GET			= 0x01,
 	UIC_CMD_DME_SET			= 0x02,
 	UIC_CMD_DME_PEER_GET		= 0x03,
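
The rework splits the power-mode related completion bits into UFSHCD_UIC_PWR_MASK so hibernate enter/exit and power mode change completions can be tested as a group, with UFSHCD_UIC_MASK now just the union of that group and UIC_COMMAND_COMPL. A sketch of how a handler might classify a status word under these masks; demo_classify_intr() is hypothetical and assumes the ufshci.h definitions are in scope.

/* Sketch only: split an interrupt status word under the new masks */
static void demo_classify_intr(u32 intr_status)
{
	if (intr_status & UIC_COMMAND_COMPL)
		pr_debug("DME command (enum uic_cmd_dme) completed\n");

	if (intr_status & UFSHCD_UIC_PWR_MASK)
		pr_debug("hibern8 entry/exit or power mode change done\n");
}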
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index 0bb8041c047a..3fc3e21b746b 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -13,6 +13,44 @@
 #define _UNIPRO_H_
 
 /*
+ * M-TX Configuration Attributes
+ */
+#define TX_MODE					0x0021
+#define TX_HSRATE_SERIES			0x0022
+#define TX_HSGEAR				0x0023
+#define TX_PWMGEAR				0x0024
+#define TX_AMPLITUDE				0x0025
+#define TX_HS_SLEWRATE				0x0026
+#define TX_SYNC_SOURCE				0x0027
+#define TX_HS_SYNC_LENGTH			0x0028
+#define TX_HS_PREPARE_LENGTH			0x0029
+#define TX_LS_PREPARE_LENGTH			0x002A
+#define TX_HIBERN8_CONTROL			0x002B
+#define TX_LCC_ENABLE				0x002C
+#define TX_PWM_BURST_CLOSURE_EXTENSION		0x002D
+#define TX_BYPASS_8B10B_ENABLE			0x002E
+#define TX_DRIVER_POLARITY			0x002F
+#define TX_HS_UNTERMINATED_LINE_DRIVE_ENABLE	0x0030
+#define TX_LS_TERMINATED_LINE_DRIVE_ENABLE	0x0031
+#define TX_LCC_SEQUENCER			0x0032
+#define TX_MIN_ACTIVATETIME			0x0033
+#define TX_PWM_G6_G7_SYNC_LENGTH		0x0034
+
+/*
+ * M-RX Configuration Attributes
+ */
+#define RX_MODE					0x00A1
+#define RX_HSRATE_SERIES			0x00A2
+#define RX_HSGEAR				0x00A3
+#define RX_PWMGEAR				0x00A4
+#define RX_LS_TERMINATED_ENABLE			0x00A5
+#define RX_HS_UNTERMINATED_ENABLE		0x00A6
+#define RX_ENTER_HIBERN8			0x00A7
+#define RX_BYPASS_8B10B_ENABLE			0x00A8
+#define RX_TERMINATION_FORCE_ENABLE		0x0089
+
+#define is_mphy_tx_attr(attr)			(attr < RX_MODE)
+/*
  * PHY Adapter attributes
  */
 #define PA_ACTIVETXDATALANES	0x1560
@@ -87,6 +125,24 @@ enum {
 	PA_HS_MODE_B = 2,
 };
 
+enum ufs_pwm_gear_tag {
+	UFS_PWM_DONT_CHANGE,	/* Don't change Gear */
+	UFS_PWM_G1,		/* PWM Gear 1 (default for reset) */
+	UFS_PWM_G2,		/* PWM Gear 2 */
+	UFS_PWM_G3,		/* PWM Gear 3 */
+	UFS_PWM_G4,		/* PWM Gear 4 */
+	UFS_PWM_G5,		/* PWM Gear 5 */
+	UFS_PWM_G6,		/* PWM Gear 6 */
+	UFS_PWM_G7,		/* PWM Gear 7 */
+};
+
+enum ufs_hs_gear_tag {
+	UFS_HS_DONT_CHANGE,	/* Don't change Gear */
+	UFS_HS_G1,		/* HS Gear 1 (default for reset) */
+	UFS_HS_G2,		/* HS Gear 2 */
+	UFS_HS_G3,		/* HS Gear 3 */
+};
+
 /*
  * Data Link Layer Attributes
  */
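
The is_mphy_tx_attr() macro simply exploits the fact that the TX attribute IDs (0x0021..0x0034) all sort below RX_MODE (0x00A1); note that RX_TERMINATION_FORCE_ENABLE (0x0089) also sorts below RX_MODE, so the comparison appears to misclassify that one RX attribute, a quirk worth knowing. A hedged sketch of typical use; demo_pick_bank() and the bank constants are invented for illustration.

/* Sketch only: route an M-PHY attribute to a TX or RX lane bank */
enum demo_lane_bank { DEMO_TX_BANK, DEMO_RX_BANK };

static enum demo_lane_bank demo_pick_bank(u32 attr)
{
	return is_mphy_tx_attr(attr) ? DEMO_TX_BANK : DEMO_RX_BANK;
}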
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index cedb29252a92..bf3f8e2de046 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -983,6 +983,14 @@ int usb_stor_probe2(struct us_data *us)
 	if (!(us->fflags & US_FL_SCM_MULT_TARG))
 		us_to_host(us)->max_id = 1;
 
+	/*
+	 * Like Windows, we won't store the LUN bits in CDB[1] for SCSI-2
+	 * devices using the Bulk-Only transport (even though this violates
+	 * the SCSI spec).
+	 */
+	if (us->transport == usb_stor_Bulk_transport)
+		us_to_host(us)->no_scsi2_lun_in_cdb = 1;
+
 	/* Find the endpoints and calculate pipe values */
 	result = get_pipes(us);
 	if (result)
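
For context on no_scsi2_lun_in_cdb: SCSI-2 required the LUN to be mirrored into the top three bits of CDB byte 1, which only fits LUNs 0-7 and is exactly what many Bulk-Only USB devices mishandle. A sketch of that legacy encoding, which the flag above tells the midlayer to skip; demo_encode_scsi2_lun() is a name invented here.

/* Legacy SCSI-2 LUN encoding: LUN goes into bits 7:5 of CDB[1] */
static void demo_encode_scsi2_lun(u8 *cdb, u8 lun)
{
	cdb[1] = (cdb[1] & 0x1f) | ((lun & 0x07) << 5);
}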
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 261e708010da..d17178e6fcdd 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -333,6 +333,7 @@ static inline int scsi_status_is_good(int status)
 #define TYPE_RBC	    0x0e
 #define TYPE_OSD            0x11
 #define TYPE_ZBC            0x14
+#define TYPE_WLUN           0x1e    /* well-known logical unit */
 #define TYPE_NO_LUN         0x7f
 
 /* SCSI protocols; these are taken from SPC-3 section 7.5 */
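
TYPE_WLUN is the SPC peripheral device type reported by well-known logical units; the UFS W-LUs (device, boot, RPMB) referenced in the ufshcd.h hunk above report this type. A sketch of the typical check, with a hypothetical helper name:

/* Sketch: upper layers usually skip well-known LUs when binding disks */
static bool demo_is_well_known_lu(struct scsi_device *sdev)
{
	return sdev->type == TYPE_WLUN;
}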
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 1a0d1842962e..27ecee73bd72 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -174,6 +174,7 @@ struct scsi_device {
 	unsigned wce_default_on:1;	/* Cache is ON by default */
 	unsigned no_dif:1;	/* T10 PI (DIF) should be disabled */
 	unsigned broken_fua:1;		/* Don't set FUA bit */
+	unsigned lun_in_cdb:1;		/* Store LUN bits in CDB[1] */
 
 	atomic_t disk_events_disable_depth; /* disable depth for disk events */
 
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index ba2034779961..5e362489ee88 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -606,7 +606,7 @@ struct Scsi_Host {
 	/*
 	 * These three parameters can be used to allow for wide scsi,
 	 * and for host adapters that support multiple busses
-	 * The first two should be set to 1 more than the actual max id
+	 * The last two should be set to 1 more than the actual max id
 	 * or lun (e.g. 8 for SCSI parallel systems).
 	 */
 	unsigned int max_channel;
@@ -680,6 +680,7 @@ struct Scsi_Host {
 	unsigned no_write_same:1;
 
 	unsigned use_blk_mq:1;
+	unsigned use_cmd_list:1;
 
 	/*
 	 * Optional work queue to be utilized by the transport
@@ -692,6 +693,9 @@ struct Scsi_Host {
 	 */
 	struct workqueue_struct *tmf_work_q;
 
+	/* The transport requires the LUN bits NOT to be stored in CDB[1] */
+	unsigned no_scsi2_lun_in_cdb:1;
+
 	/*
 	 * Value host_blocked counts down from
 
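
The corrected comment in the scsi_host.h hunk ("last two" rather than "first two") matters in practice: max_id and max_lun are exclusive upper bounds, while max_channel is the highest channel number itself. A sketch of how a driver fills them in; demo_fill_limits() and the values are illustrative only.

/* Illustrative: a parallel SCSI bus with target IDs 0-7 and LUNs 0-7 */
static void demo_fill_limits(struct Scsi_Host *shost)
{
	shost->max_channel = 0;	/* single bus: highest channel number */
	shost->max_id = 8;	/* one more than the highest target ID */
	shost->max_lun = 8;	/* one more than the highest LUN */
}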