212 files changed, 5294 insertions, 4660 deletions
diff --git a/Documentation/devicetree/bindings/ufs/ufs-hisi.txt b/Documentation/devicetree/bindings/ufs/ufs-hisi.txt new file mode 100644 index 000000000000..a48c44817367 --- /dev/null +++ b/Documentation/devicetree/bindings/ufs/ufs-hisi.txt | |||
@@ -0,0 +1,41 @@ | |||
1 | * Hisilicon Universal Flash Storage (UFS) Host Controller | ||
2 | |||
3 | UFS nodes are defined to describe on-chip UFS hardware macro. | ||
4 | Each UFS Host Controller should have its own node. | ||
5 | |||
6 | Required properties: | ||
7 | - compatible : compatible list, contains one of the following - | ||
8 | "hisilicon,hi3660-ufs", "jedec,ufs-1.1" for hisi ufs | ||
9 | host controller present on Hi36xx chipset. | ||
10 | - reg : should contain UFS register address space & UFS SYS CTRL register address, | ||
11 | - interrupt-parent : interrupt device | ||
12 | - interrupts : interrupt number | ||
13 | - clocks : List of phandle and clock specifier pairs | ||
14 | - clock-names : List of clock input name strings sorted in the same | ||
15 | order as the clocks property. "ref_clk", "phy_clk" is optional | ||
16 | - freq-table-hz : Array of <min max> operating frequencies stored in the same | ||
17 | order as the clocks property. If this property is not | ||
18 | defined or a value in the array is "0" then it is assumed | ||
19 | that the frequency is set by the parent clock or a | ||
20 | fixed rate clock source. | ||
21 | - resets : describe reset node register | ||
22 | - reset-names : reset node register, the "rst" corresponds to reset the whole UFS IP. | ||
23 | |||
24 | Example: | ||
25 | |||
26 | ufs: ufs@ff3b0000 { | ||
27 | compatible = "hisilicon,hi3660-ufs", "jedec,ufs-1.1"; | ||
28 | /* 0: HCI standard */ | ||
29 | /* 1: UFS SYS CTRL */ | ||
30 | reg = <0x0 0xff3b0000 0x0 0x1000>, | ||
31 | <0x0 0xff3b1000 0x0 0x1000>; | ||
32 | interrupt-parent = <&gic>; | ||
33 | interrupts = <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>; | ||
34 | clocks = <&crg_ctrl HI3660_CLK_GATE_UFSIO_REF>, | ||
35 | <&crg_ctrl HI3660_CLK_GATE_UFSPHY_CFG>; | ||
36 | clock-names = "ref_clk", "phy_clk"; | ||
37 | freq-table-hz = <0 0>, <0 0>; | ||
38 | /* offset: 0x84; bit: 12 */ | ||
39 | resets = <&crg_rst 0x84 12>; | ||
40 | reset-names = "rst"; | ||
41 | }; | ||
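Note on the binding above: a consuming platform driver would normally pick up the "rst" reset line and the "ref_clk"/"phy_clk" clocks through the standard devm_* helpers. The sketch below is illustrative only and not part of this series; the probe function and structure names are hypothetical, only the property names come from the binding.

#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

/* Hypothetical consumer of the ufs-hisi binding shown above. */
static int hisi_ufs_probe_sketch(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct reset_control *rst;
	struct clk *ref_clk, *phy_clk;
	int err;

	rst = devm_reset_control_get(dev, "rst");	/* resets the whole UFS IP */
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	ref_clk = devm_clk_get(dev, "ref_clk");
	if (IS_ERR(ref_clk))
		return PTR_ERR(ref_clk);

	phy_clk = devm_clk_get(dev, "phy_clk");	/* optional per the binding */
	if (IS_ERR(phy_clk))
		phy_clk = NULL;

	err = clk_prepare_enable(ref_clk);
	if (err)
		return err;

	return reset_control_deassert(rst);
}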
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt index c39dfef76a18..2df00524bd21 100644 --- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt +++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt | |||
@@ -41,6 +41,8 @@ Optional properties: | |||
41 | -lanes-per-direction : number of lanes available per direction - either 1 or 2. | 41 | -lanes-per-direction : number of lanes available per direction - either 1 or 2. |
42 | Note that it is assume same number of lanes is used both | 42 | Note that it is assume same number of lanes is used both |
43 | directions at once. If not specified, default is 2 lanes per direction. | 43 | directions at once. If not specified, default is 2 lanes per direction. |
44 | - resets : reset node register | ||
45 | - reset-names : describe reset node register, the "rst" corresponds to reset the whole UFS IP. | ||
44 | 46 | ||
45 | Note: If above properties are not defined it can be assumed that the supply | 47 | Note: If above properties are not defined it can be assumed that the supply |
46 | regulators or clocks are always on. | 48 | regulators or clocks are always on. |
@@ -61,9 +63,11 @@ Example: | |||
61 | vccq-max-microamp = 200000; | 63 | vccq-max-microamp = 200000; |
62 | vccq2-max-microamp = 200000; | 64 | vccq2-max-microamp = 200000; |
63 | 65 | ||
64 | clocks = <&core 0>, <&ref 0>, <&iface 0>; | 66 | clocks = <&core 0>, <&ref 0>, <&phy 0>, <&iface 0>; |
65 | clock-names = "core_clk", "ref_clk", "iface_clk"; | 67 | clock-names = "core_clk", "ref_clk", "phy_clk", "iface_clk"; |
66 | freq-table-hz = <100000000 200000000>, <0 0>, <0 0>; | 68 | freq-table-hz = <100000000 200000000>, <0 0>, <0 0>, <0 0>; |
69 | resets = <&reset 0 1>; | ||
70 | reset-names = "rst"; | ||
67 | phys = <&ufsphy1>; | 71 | phys = <&ufsphy1>; |
68 | phy-names = "ufsphy"; | 72 | phy-names = "ufsphy"; |
69 | }; | 73 | }; |
diff --git a/MAINTAINERS b/MAINTAINERS index 64ddcd5041ae..3d08725527aa 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -9847,12 +9847,6 @@ F: drivers/scsi/mac_scsi.* | |||
9847 | F: drivers/scsi/sun3_scsi.* | 9847 | F: drivers/scsi/sun3_scsi.* |
9848 | F: drivers/scsi/sun3_scsi_vme.c | 9848 | F: drivers/scsi/sun3_scsi_vme.c |
9849 | 9849 | ||
9850 | NCR DUAL 700 SCSI DRIVER (MICROCHANNEL) | ||
9851 | M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com> | ||
9852 | L: linux-scsi@vger.kernel.org | ||
9853 | S: Maintained | ||
9854 | F: drivers/scsi/NCR_D700.* | ||
9855 | |||
9856 | NCSI LIBRARY: | 9850 | NCSI LIBRARY: |
9857 | M: Samuel Mendoza-Jonas <sam@mendozajonas.com> | 9851 | M: Samuel Mendoza-Jonas <sam@mendozajonas.com> |
9858 | S: Maintained | 9852 | S: Maintained |
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi index 8d477dcbfa58..851190a719ea 100644 --- a/arch/arm64/boot/dts/hisilicon/hi3660.dtsi +++ b/arch/arm64/boot/dts/hisilicon/hi3660.dtsi | |||
@@ -1000,6 +1000,24 @@ | |||
1000 | reset-gpios = <&gpio11 1 0 >; | 1000 | reset-gpios = <&gpio11 1 0 >; |
1001 | }; | 1001 | }; |
1002 | 1002 | ||
1003 | /* UFS */ | ||
1004 | ufs: ufs@ff3b0000 { | ||
1005 | compatible = "hisilicon,hi3660-ufs", "jedec,ufs-1.1"; | ||
1006 | /* 0: HCI standard */ | ||
1007 | /* 1: UFS SYS CTRL */ | ||
1008 | reg = <0x0 0xff3b0000 0x0 0x1000>, | ||
1009 | <0x0 0xff3b1000 0x0 0x1000>; | ||
1010 | interrupt-parent = <&gic>; | ||
1011 | interrupts = <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>; | ||
1012 | clocks = <&crg_ctrl HI3660_CLK_GATE_UFSIO_REF>, | ||
1013 | <&crg_ctrl HI3660_CLK_GATE_UFSPHY_CFG>; | ||
1014 | clock-names = "ref_clk", "phy_clk"; | ||
1015 | freq-table-hz = <0 0>, <0 0>; | ||
1016 | /* offset: 0x84; bit: 12 */ | ||
1017 | resets = <&crg_rst 0x84 12>; | ||
1018 | reset-names = "rst"; | ||
1019 | }; | ||
1020 | |||
1003 | /* SD */ | 1021 | /* SD */ |
1004 | dwmmc1: dwmmc1@ff37f000 { | 1022 | dwmmc1: dwmmc1@ff37f000 { |
1005 | #address-cells = <1>; | 1023 | #address-cells = <1>; |
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index f9a186f6af8a..2c07e233012b 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig | |||
@@ -193,6 +193,7 @@ CONFIG_SCSI_HISI_SAS=y | |||
193 | CONFIG_SCSI_HISI_SAS_PCI=y | 193 | CONFIG_SCSI_HISI_SAS_PCI=y |
194 | CONFIG_SCSI_UFSHCD=m | 194 | CONFIG_SCSI_UFSHCD=m |
195 | CONFIG_SCSI_UFSHCD_PLATFORM=m | 195 | CONFIG_SCSI_UFSHCD_PLATFORM=m |
196 | CONFIG_SCSI_UFS_HISI=y | ||
196 | CONFIG_SCSI_UFS_QCOM=m | 197 | CONFIG_SCSI_UFS_QCOM=m |
197 | CONFIG_ATA=y | 198 | CONFIG_ATA=y |
198 | CONFIG_SATA_AHCI=y | 199 | CONFIG_SATA_AHCI=y |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index cc71c63df381..984b37647b2f 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -6424,6 +6424,7 @@ void ata_host_init(struct ata_host *host, struct device *dev, | |||
6424 | host->n_tags = ATA_MAX_QUEUE; | 6424 | host->n_tags = ATA_MAX_QUEUE; |
6425 | host->dev = dev; | 6425 | host->dev = dev; |
6426 | host->ops = ops; | 6426 | host->ops = ops; |
6427 | kref_init(&host->kref); | ||
6427 | } | 6428 | } |
6428 | 6429 | ||
6429 | void __ata_port_probe(struct ata_port *ap) | 6430 | void __ata_port_probe(struct ata_port *ap) |
@@ -7391,3 +7392,5 @@ EXPORT_SYMBOL_GPL(ata_cable_80wire); | |||
7391 | EXPORT_SYMBOL_GPL(ata_cable_unknown); | 7392 | EXPORT_SYMBOL_GPL(ata_cable_unknown); |
7392 | EXPORT_SYMBOL_GPL(ata_cable_ignore); | 7393 | EXPORT_SYMBOL_GPL(ata_cable_ignore); |
7393 | EXPORT_SYMBOL_GPL(ata_cable_sata); | 7394 | EXPORT_SYMBOL_GPL(ata_cable_sata); |
7395 | EXPORT_SYMBOL_GPL(ata_host_get); | ||
7396 | EXPORT_SYMBOL_GPL(ata_host_put); \ No newline at end of file | ||
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index 9e21c49cf6be..f953cb4bb1ba 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h | |||
@@ -100,8 +100,6 @@ extern int ata_port_probe(struct ata_port *ap); | |||
100 | extern void __ata_port_probe(struct ata_port *ap); | 100 | extern void __ata_port_probe(struct ata_port *ap); |
101 | extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log, | 101 | extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log, |
102 | u8 page, void *buf, unsigned int sectors); | 102 | u8 page, void *buf, unsigned int sectors); |
103 | extern void ata_host_get(struct ata_host *host); | ||
104 | extern void ata_host_put(struct ata_host *host); | ||
105 | 103 | ||
106 | #define to_ata_port(d) container_of(d, struct ata_port, tdev) | 104 | #define to_ata_port(d) container_of(d, struct ata_port, tdev) |
107 | 105 | ||
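The kref_init() added to ata_host_init() above, together with exporting ata_host_get()/ata_host_put(), gives the host structure an explicit reference count so SAS/libsas users can pin it. A minimal sketch of the kref pattern these helpers follow; the release callback name and body are assumptions for illustration, not copied from the patch.

#include <linux/kref.h>
#include <linux/slab.h>

/* Assumed release callback; the real one also tears down the ports. */
static void ata_host_release_sketch(struct kref *kref)
{
	struct ata_host *host = container_of(kref, struct ata_host, kref);

	kfree(host);
}

void ata_host_get(struct ata_host *host)
{
	kref_get(&host->kref);
}

void ata_host_put(struct ata_host *host)
{
	kref_put(&host->kref, ata_host_release_sketch);
}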
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 3081c629a7f7..1ae638b58b63 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c | |||
@@ -2029,8 +2029,7 @@ static void srpt_release_channel_work(struct work_struct *w) | |||
2029 | target_sess_cmd_list_set_waiting(se_sess); | 2029 | target_sess_cmd_list_set_waiting(se_sess); |
2030 | target_wait_for_sess_cmds(se_sess); | 2030 | target_wait_for_sess_cmds(se_sess); |
2031 | 2031 | ||
2032 | transport_deregister_session_configfs(se_sess); | 2032 | target_remove_session(se_sess); |
2033 | transport_deregister_session(se_sess); | ||
2034 | ch->sess = NULL; | 2033 | ch->sess = NULL; |
2035 | 2034 | ||
2036 | if (ch->using_rdma_cm) | 2035 | if (ch->using_rdma_cm) |
@@ -2221,16 +2220,16 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev, | |||
2221 | pr_debug("registering session %s\n", ch->sess_name); | 2220 | pr_debug("registering session %s\n", ch->sess_name); |
2222 | 2221 | ||
2223 | if (sport->port_guid_tpg.se_tpg_wwn) | 2222 | if (sport->port_guid_tpg.se_tpg_wwn) |
2224 | ch->sess = target_alloc_session(&sport->port_guid_tpg, 0, 0, | 2223 | ch->sess = target_setup_session(&sport->port_guid_tpg, 0, 0, |
2225 | TARGET_PROT_NORMAL, | 2224 | TARGET_PROT_NORMAL, |
2226 | ch->sess_name, ch, NULL); | 2225 | ch->sess_name, ch, NULL); |
2227 | if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess)) | 2226 | if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess)) |
2228 | ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0, | 2227 | ch->sess = target_setup_session(&sport->port_gid_tpg, 0, 0, |
2229 | TARGET_PROT_NORMAL, i_port_id, ch, | 2228 | TARGET_PROT_NORMAL, i_port_id, ch, |
2230 | NULL); | 2229 | NULL); |
2231 | /* Retry without leading "0x" */ | 2230 | /* Retry without leading "0x" */ |
2232 | if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess)) | 2231 | if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess)) |
2233 | ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0, | 2232 | ch->sess = target_setup_session(&sport->port_gid_tpg, 0, 0, |
2234 | TARGET_PROT_NORMAL, | 2233 | TARGET_PROT_NORMAL, |
2235 | i_port_id + 2, ch, NULL); | 2234 | i_port_id + 2, ch, NULL); |
2236 | if (IS_ERR_OR_NULL(ch->sess)) { | 2235 | if (IS_ERR_OR_NULL(ch->sess)) { |
@@ -3597,11 +3596,9 @@ static struct configfs_attribute *srpt_tpg_attrs[] = { | |||
3597 | /** | 3596 | /** |
3598 | * srpt_make_tpg - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port/$tpg | 3597 | * srpt_make_tpg - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port/$tpg |
3599 | * @wwn: Corresponds to $driver/$port. | 3598 | * @wwn: Corresponds to $driver/$port. |
3600 | * @group: Not used. | ||
3601 | * @name: $tpg. | 3599 | * @name: $tpg. |
3602 | */ | 3600 | */ |
3603 | static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn, | 3601 | static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn, |
3604 | struct config_group *group, | ||
3605 | const char *name) | 3602 | const char *name) |
3606 | { | 3603 | { |
3607 | struct srpt_port *sport = wwn->priv; | 3604 | struct srpt_port *sport = wwn->priv; |
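The ib_srpt change above replaces the target_alloc_session() / transport_deregister_session_configfs() + transport_deregister_session() combination with the consolidated target_setup_session() / target_remove_session() helpers. A hedged sketch of the resulting lifecycle in a generic fabric driver; everything except the two helpers and TARGET_PROT_NORMAL is hypothetical.

/* Hypothetical fabric driver illustrating the new pairing. */
static int my_fabric_login(struct my_fabric_conn *conn, const char *initiator)
{
	conn->sess = target_setup_session(conn->tpg, 0, 0, TARGET_PROT_NORMAL,
					  initiator, conn, NULL);
	if (IS_ERR_OR_NULL(conn->sess))
		return -ENOMEM;
	return 0;
}

static void my_fabric_logout(struct my_fabric_conn *conn)
{
	/* one call now covers the configfs and core deregistration */
	target_remove_session(conn->sess);
	conn->sess = NULL;
}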
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c index a625ac4e2872..e6b4ae558767 100644 --- a/drivers/message/fusion/mptbase.c +++ b/drivers/message/fusion/mptbase.c | |||
@@ -642,6 +642,7 @@ mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) | |||
642 | freereq = 0; | 642 | freereq = 0; |
643 | if (event != MPI_EVENT_EVENT_CHANGE) | 643 | if (event != MPI_EVENT_EVENT_CHANGE) |
644 | break; | 644 | break; |
645 | /* else: fall through */ | ||
645 | case MPI_FUNCTION_CONFIG: | 646 | case MPI_FUNCTION_CONFIG: |
646 | case MPI_FUNCTION_SAS_IO_UNIT_CONTROL: | 647 | case MPI_FUNCTION_SAS_IO_UNIT_CONTROL: |
647 | ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD; | 648 | ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD; |
@@ -1779,7 +1780,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1779 | struct proc_dir_entry *dent; | 1780 | struct proc_dir_entry *dent; |
1780 | #endif | 1781 | #endif |
1781 | 1782 | ||
1782 | ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_ATOMIC); | 1783 | ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_KERNEL); |
1783 | if (ioc == NULL) { | 1784 | if (ioc == NULL) { |
1784 | printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n"); | 1785 | printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n"); |
1785 | return -ENOMEM; | 1786 | return -ENOMEM; |
@@ -1886,6 +1887,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1886 | case MPI_MANUFACTPAGE_DEVICEID_FC939X: | 1887 | case MPI_MANUFACTPAGE_DEVICEID_FC939X: |
1887 | case MPI_MANUFACTPAGE_DEVICEID_FC949X: | 1888 | case MPI_MANUFACTPAGE_DEVICEID_FC949X: |
1888 | ioc->errata_flag_1064 = 1; | 1889 | ioc->errata_flag_1064 = 1; |
1890 | /* fall through */ | ||
1889 | case MPI_MANUFACTPAGE_DEVICEID_FC909: | 1891 | case MPI_MANUFACTPAGE_DEVICEID_FC909: |
1890 | case MPI_MANUFACTPAGE_DEVICEID_FC929: | 1892 | case MPI_MANUFACTPAGE_DEVICEID_FC929: |
1891 | case MPI_MANUFACTPAGE_DEVICEID_FC919: | 1893 | case MPI_MANUFACTPAGE_DEVICEID_FC919: |
@@ -1930,6 +1932,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1930 | pcixcmd &= 0x8F; | 1932 | pcixcmd &= 0x8F; |
1931 | pci_write_config_byte(pdev, 0x6a, pcixcmd); | 1933 | pci_write_config_byte(pdev, 0x6a, pcixcmd); |
1932 | } | 1934 | } |
1935 | /* fall through */ | ||
1933 | 1936 | ||
1934 | case MPI_MANUFACTPAGE_DEVID_1030_53C1035: | 1937 | case MPI_MANUFACTPAGE_DEVID_1030_53C1035: |
1935 | ioc->bus_type = SPI; | 1938 | ioc->bus_type = SPI; |
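The /* fall through */ comments added to mptbase.c above are not cosmetic: with -Wimplicit-fallthrough enabled, GCC accepts such a comment placed directly before the next case label as an annotation that the fall-through is intentional and suppresses the warning there. A minimal, self-contained illustration (the function is hypothetical):

/* Hypothetical example of the annotation style used in the patch. */
static int classify_event(int event)
{
	int flags = 0;

	switch (event) {
	case 1:
		flags |= 0x1;
		/* fall through */	/* silences -Wimplicit-fallthrough */
	case 2:
		flags |= 0x2;
		break;
	default:
		break;
	}
	return flags;
}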
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index 4470630dd545..8d22d6134a89 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c | |||
@@ -2514,8 +2514,8 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size) | |||
2514 | if (mpt_config(ioc, &cfg) == 0) { | 2514 | if (mpt_config(ioc, &cfg) == 0) { |
2515 | ManufacturingPage0_t *pdata = (ManufacturingPage0_t *) pbuf; | 2515 | ManufacturingPage0_t *pdata = (ManufacturingPage0_t *) pbuf; |
2516 | if (strlen(pdata->BoardTracerNumber) > 1) { | 2516 | if (strlen(pdata->BoardTracerNumber) > 1) { |
2517 | strncpy(karg.serial_number, pdata->BoardTracerNumber, 24); | 2517 | strlcpy(karg.serial_number, |
2518 | karg.serial_number[24-1]='\0'; | 2518 | pdata->BoardTracerNumber, 24); |
2519 | } | 2519 | } |
2520 | } | 2520 | } |
2521 | pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma); | 2521 | pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma); |
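The strlcpy() conversion above removes the manual terminator write: unlike strncpy(), strlcpy() always NUL-terminates within the given size (and does not zero-pad the rest of the buffer). A small sketch of the before/after pattern; the buffer and source names are hypothetical.

#include <linux/string.h>

static void copy_serial_example(char serial[24], const char *tracer)
{
	/* old pattern: strncpy() may leave the buffer unterminated, so a
	 * second statement has to force the terminator */
	strncpy(serial, tracer, 24);
	serial[24 - 1] = '\0';

	/* new pattern: one call, always NUL-terminated within the size */
	strlcpy(serial, tracer, 24);
}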
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c index 06b175420be9..b15fdc626fb8 100644 --- a/drivers/message/fusion/mptfc.c +++ b/drivers/message/fusion/mptfc.c | |||
@@ -1292,7 +1292,7 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1292 | /* SCSI needs scsi_cmnd lookup table! | 1292 | /* SCSI needs scsi_cmnd lookup table! |
1293 | * (with size equal to req_depth*PtrSz!) | 1293 | * (with size equal to req_depth*PtrSz!) |
1294 | */ | 1294 | */ |
1295 | ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_ATOMIC); | 1295 | ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_KERNEL); |
1296 | if (!ioc->ScsiLookup) { | 1296 | if (!ioc->ScsiLookup) { |
1297 | error = -ENOMEM; | 1297 | error = -ENOMEM; |
1298 | goto out_mptfc_probe; | 1298 | goto out_mptfc_probe; |
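Both the mptbase.c and mptfc.c hunks above switch probe-time allocations from GFP_ATOMIC to GFP_KERNEL: PCI probe runs in process context, so the allocator is allowed to sleep and reclaim, and the scarce atomic reserves are left for code that really cannot sleep. A short sketch of the distinction (function names hypothetical):

#include <linux/slab.h>

/* process context (e.g. probe): the allocation may sleep */
static void *alloc_in_probe(size_t n)
{
	return kcalloc(n, sizeof(void *), GFP_KERNEL);
}

/* atomic context (e.g. an interrupt handler): it must not sleep */
static void *alloc_in_irq_handler(size_t n)
{
	return kcalloc(n, sizeof(void *), GFP_ATOMIC);
}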
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 76a66da33996..b8cf2658649e 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c | |||
@@ -4327,6 +4327,7 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event, | |||
4327 | } | 4327 | } |
4328 | } | 4328 | } |
4329 | mpt_findImVolumes(ioc); | 4329 | mpt_findImVolumes(ioc); |
4330 | /* fall through */ | ||
4330 | 4331 | ||
4331 | case MPTSAS_ADD_DEVICE: | 4332 | case MPTSAS_ADD_DEVICE: |
4332 | memset(&sas_device, 0, sizeof(struct mptsas_devinfo)); | 4333 | memset(&sas_device, 0, sizeof(struct mptsas_devinfo)); |
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index 99ba4a770406..27521fc3ef5a 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c | |||
@@ -2038,6 +2038,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) | |||
2038 | 2038 | ||
2039 | if (twa_initialize_device_extension(tw_dev)) { | 2039 | if (twa_initialize_device_extension(tw_dev)) { |
2040 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension"); | 2040 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension"); |
2041 | retval = -ENOMEM; | ||
2041 | goto out_free_device_extension; | 2042 | goto out_free_device_extension; |
2042 | } | 2043 | } |
2043 | 2044 | ||
@@ -2060,6 +2061,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) | |||
2060 | tw_dev->base_addr = ioremap(mem_addr, mem_len); | 2061 | tw_dev->base_addr = ioremap(mem_addr, mem_len); |
2061 | if (!tw_dev->base_addr) { | 2062 | if (!tw_dev->base_addr) { |
2062 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap"); | 2063 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap"); |
2064 | retval = -ENOMEM; | ||
2063 | goto out_release_mem_region; | 2065 | goto out_release_mem_region; |
2064 | } | 2066 | } |
2065 | 2067 | ||
@@ -2067,8 +2069,10 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) | |||
2067 | TW_DISABLE_INTERRUPTS(tw_dev); | 2069 | TW_DISABLE_INTERRUPTS(tw_dev); |
2068 | 2070 | ||
2069 | /* Initialize the card */ | 2071 | /* Initialize the card */ |
2070 | if (twa_reset_sequence(tw_dev, 0)) | 2072 | if (twa_reset_sequence(tw_dev, 0)) { |
2073 | retval = -ENOMEM; | ||
2071 | goto out_iounmap; | 2074 | goto out_iounmap; |
2075 | } | ||
2072 | 2076 | ||
2073 | /* Set host specific parameters */ | 2077 | /* Set host specific parameters */ |
2074 | if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) || | 2078 | if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) || |
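The retval assignments added to twa_probe() (and to twl_probe()/tw_probe() below) matter because these probe functions unwind through goto labels and return retval at the end; without an explicit error code, a failure path would return whatever retval last held, typically 0, and the core would treat the broken probe as successful. A condensed, hypothetical sketch of the pattern:

#include <linux/io.h>
#include <linux/pci.h>

/* Hypothetical probe showing why each failure path sets retval. */
static int example_probe(struct pci_dev *pdev)
{
	int retval = 0;
	void __iomem *base;

	base = ioremap(pci_resource_start(pdev, 0), 0x1000);
	if (!base) {
		retval = -ENOMEM;	/* without this, the exit path returns 0 */
		goto out_err;
	}

	return 0;		/* success */

out_err:
	return retval;		/* error unwinding ends here */
}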
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index cf9f2a09b47d..40c1e6e64f58 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c | |||
@@ -1594,6 +1594,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) | |||
1594 | 1594 | ||
1595 | if (twl_initialize_device_extension(tw_dev)) { | 1595 | if (twl_initialize_device_extension(tw_dev)) { |
1596 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension"); | 1596 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension"); |
1597 | retval = -ENOMEM; | ||
1597 | goto out_free_device_extension; | 1598 | goto out_free_device_extension; |
1598 | } | 1599 | } |
1599 | 1600 | ||
@@ -1608,6 +1609,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) | |||
1608 | tw_dev->base_addr = pci_iomap(pdev, 1, 0); | 1609 | tw_dev->base_addr = pci_iomap(pdev, 1, 0); |
1609 | if (!tw_dev->base_addr) { | 1610 | if (!tw_dev->base_addr) { |
1610 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap"); | 1611 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap"); |
1612 | retval = -ENOMEM; | ||
1611 | goto out_release_mem_region; | 1613 | goto out_release_mem_region; |
1612 | } | 1614 | } |
1613 | 1615 | ||
@@ -1617,6 +1619,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) | |||
1617 | /* Initialize the card */ | 1619 | /* Initialize the card */ |
1618 | if (twl_reset_sequence(tw_dev, 0)) { | 1620 | if (twl_reset_sequence(tw_dev, 0)) { |
1619 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe"); | 1621 | TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe"); |
1622 | retval = -ENOMEM; | ||
1620 | goto out_iounmap; | 1623 | goto out_iounmap; |
1621 | } | 1624 | } |
1622 | 1625 | ||
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c index f6179e3d6953..471366945bd4 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c | |||
@@ -1925,7 +1925,7 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c | |||
1925 | if (test_bit(TW_IN_RESET, &tw_dev->flags)) | 1925 | if (test_bit(TW_IN_RESET, &tw_dev->flags)) |
1926 | return SCSI_MLQUEUE_HOST_BUSY; | 1926 | return SCSI_MLQUEUE_HOST_BUSY; |
1927 | 1927 | ||
1928 | /* Save done function into Scsi_Cmnd struct */ | 1928 | /* Save done function into struct scsi_cmnd */ |
1929 | SCpnt->scsi_done = done; | 1929 | SCpnt->scsi_done = done; |
1930 | 1930 | ||
1931 | /* Queue the command and get a request id */ | 1931 | /* Queue the command and get a request id */ |
@@ -2280,6 +2280,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) | |||
2280 | 2280 | ||
2281 | if (tw_initialize_device_extension(tw_dev)) { | 2281 | if (tw_initialize_device_extension(tw_dev)) { |
2282 | printk(KERN_WARNING "3w-xxxx: Failed to initialize device extension."); | 2282 | printk(KERN_WARNING "3w-xxxx: Failed to initialize device extension."); |
2283 | retval = -ENOMEM; | ||
2283 | goto out_free_device_extension; | 2284 | goto out_free_device_extension; |
2284 | } | 2285 | } |
2285 | 2286 | ||
@@ -2294,6 +2295,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id) | |||
2294 | tw_dev->base_addr = pci_resource_start(pdev, 0); | 2295 | tw_dev->base_addr = pci_resource_start(pdev, 0); |
2295 | if (!tw_dev->base_addr) { | 2296 | if (!tw_dev->base_addr) { |
2296 | printk(KERN_WARNING "3w-xxxx: Failed to get io address."); | 2297 | printk(KERN_WARNING "3w-xxxx: Failed to get io address."); |
2298 | retval = -ENOMEM; | ||
2297 | goto out_release_mem_region; | 2299 | goto out_release_mem_region; |
2298 | } | 2300 | } |
2299 | 2301 | ||
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 35c909bbf8ba..8fc851a9e116 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
@@ -49,6 +49,7 @@ config SCSI_NETLINK | |||
49 | 49 | ||
50 | config SCSI_MQ_DEFAULT | 50 | config SCSI_MQ_DEFAULT |
51 | bool "SCSI: use blk-mq I/O path by default" | 51 | bool "SCSI: use blk-mq I/O path by default" |
52 | default y | ||
52 | depends on SCSI | 53 | depends on SCSI |
53 | ---help--- | 54 | ---help--- |
54 | This option enables the new blk-mq based I/O path for SCSI | 55 | This option enables the new blk-mq based I/O path for SCSI |
@@ -841,18 +842,6 @@ config SCSI_IZIP_SLOW_CTR | |||
841 | 842 | ||
842 | Generally, saying N is fine. | 843 | Generally, saying N is fine. |
843 | 844 | ||
844 | config SCSI_NCR_D700 | ||
845 | tristate "NCR Dual 700 MCA SCSI support" | ||
846 | depends on MCA && SCSI | ||
847 | select SCSI_SPI_ATTRS | ||
848 | help | ||
849 | This is a driver for the MicroChannel Dual 700 card produced by | ||
850 | NCR and commonly used in 345x/35xx/4100 class machines. It always | ||
851 | tries to negotiate sync and uses tag command queueing. | ||
852 | |||
853 | Unless you have an NCR manufactured machine, the chances are that | ||
854 | you do not have this SCSI card, so say N. | ||
855 | |||
856 | config SCSI_LASI700 | 845 | config SCSI_LASI700 |
857 | tristate "HP Lasi SCSI support for 53c700/710" | 846 | tristate "HP Lasi SCSI support for 53c700/710" |
858 | depends on GSC && SCSI | 847 | depends on GSC && SCSI |
@@ -1000,21 +989,9 @@ config SCSI_ZALON | |||
1000 | used on the add-in Bluefish, Barracuda & Shrike SCSI cards. | 989 | used on the add-in Bluefish, Barracuda & Shrike SCSI cards. |
1001 | Say Y here if you have one of these machines or cards. | 990 | Say Y here if you have one of these machines or cards. |
1002 | 991 | ||
1003 | config SCSI_NCR_Q720 | ||
1004 | tristate "NCR Quad 720 MCA SCSI support" | ||
1005 | depends on MCA && SCSI | ||
1006 | select SCSI_SPI_ATTRS | ||
1007 | help | ||
1008 | This is a driver for the MicroChannel Quad 720 card produced by | ||
1009 | NCR and commonly used in 345x/35xx/4100 class machines. It always | ||
1010 | tries to negotiate sync and uses tag command queueing. | ||
1011 | |||
1012 | Unless you have an NCR manufactured machine, the chances are that | ||
1013 | you do not have this SCSI card, so say N. | ||
1014 | |||
1015 | config SCSI_NCR53C8XX_DEFAULT_TAGS | 992 | config SCSI_NCR53C8XX_DEFAULT_TAGS |
1016 | int "default tagged command queue depth" | 993 | int "default tagged command queue depth" |
1017 | depends on SCSI_ZALON || SCSI_NCR_Q720 | 994 | depends on SCSI_ZALON |
1018 | default "8" | 995 | default "8" |
1019 | ---help--- | 996 | ---help--- |
1020 | "Tagged command queuing" is a feature of SCSI-2 which improves | 997 | "Tagged command queuing" is a feature of SCSI-2 which improves |
@@ -1040,7 +1017,7 @@ config SCSI_NCR53C8XX_DEFAULT_TAGS | |||
1040 | 1017 | ||
1041 | config SCSI_NCR53C8XX_MAX_TAGS | 1018 | config SCSI_NCR53C8XX_MAX_TAGS |
1042 | int "maximum number of queued commands" | 1019 | int "maximum number of queued commands" |
1043 | depends on SCSI_ZALON || SCSI_NCR_Q720 | 1020 | depends on SCSI_ZALON |
1044 | default "32" | 1021 | default "32" |
1045 | ---help--- | 1022 | ---help--- |
1046 | This option allows you to specify the maximum number of commands | 1023 | This option allows you to specify the maximum number of commands |
@@ -1057,7 +1034,7 @@ config SCSI_NCR53C8XX_MAX_TAGS | |||
1057 | 1034 | ||
1058 | config SCSI_NCR53C8XX_SYNC | 1035 | config SCSI_NCR53C8XX_SYNC |
1059 | int "synchronous transfers frequency in MHz" | 1036 | int "synchronous transfers frequency in MHz" |
1060 | depends on SCSI_ZALON || SCSI_NCR_Q720 | 1037 | depends on SCSI_ZALON |
1061 | default "20" | 1038 | default "20" |
1062 | ---help--- | 1039 | ---help--- |
1063 | The SCSI Parallel Interface-2 Standard defines 5 classes of transfer | 1040 | The SCSI Parallel Interface-2 Standard defines 5 classes of transfer |
@@ -1091,7 +1068,7 @@ config SCSI_NCR53C8XX_SYNC | |||
1091 | 1068 | ||
1092 | config SCSI_NCR53C8XX_NO_DISCONNECT | 1069 | config SCSI_NCR53C8XX_NO_DISCONNECT |
1093 | bool "not allow targets to disconnect" | 1070 | bool "not allow targets to disconnect" |
1094 | depends on (SCSI_ZALON || SCSI_NCR_Q720) && SCSI_NCR53C8XX_DEFAULT_TAGS=0 | 1071 | depends on SCSI_ZALON && SCSI_NCR53C8XX_DEFAULT_TAGS=0 |
1095 | help | 1072 | help |
1096 | This option is only provided for safety if you suspect some SCSI | 1073 | This option is only provided for safety if you suspect some SCSI |
1097 | device of yours to not support properly the target-disconnect | 1074 | device of yours to not support properly the target-disconnect |
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 768953881c9e..6d71b2a9592b 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile | |||
@@ -77,8 +77,6 @@ obj-$(CONFIG_SCSI_PM8001) += pm8001/ | |||
77 | obj-$(CONFIG_SCSI_ISCI) += isci/ | 77 | obj-$(CONFIG_SCSI_ISCI) += isci/ |
78 | obj-$(CONFIG_SCSI_IPS) += ips.o | 78 | obj-$(CONFIG_SCSI_IPS) += ips.o |
79 | obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o | 79 | obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o |
80 | obj-$(CONFIG_SCSI_NCR_D700) += 53c700.o NCR_D700.o | ||
81 | obj-$(CONFIG_SCSI_NCR_Q720) += NCR_Q720_mod.o | ||
82 | obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o | 80 | obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o |
83 | obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o | 81 | obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o |
84 | obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o | 82 | obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o |
@@ -180,7 +178,6 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \ | |||
180 | -DCONFIG_SCSI_NCR53C8XX_NO_WORD_TRANSFERS | 178 | -DCONFIG_SCSI_NCR53C8XX_NO_WORD_TRANSFERS |
181 | CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m) | 179 | CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m) |
182 | zalon7xx-objs := zalon.o ncr53c8xx.o | 180 | zalon7xx-objs := zalon.o ncr53c8xx.o |
183 | NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o | ||
184 | 181 | ||
185 | # Files generated that shall be removed upon make clean | 182 | # Files generated that shall be removed upon make clean |
186 | clean-files := 53c700_d.h 53c700_u.h scsi_devinfo_tbl.c | 183 | clean-files := 53c700_d.h 53c700_u.h scsi_devinfo_tbl.c |
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c deleted file mode 100644 index b39a2409a507..000000000000 --- a/drivers/scsi/NCR_D700.c +++ /dev/null | |||
@@ -1,405 +0,0 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8 -*- */ | ||
2 | |||
3 | /* NCR Dual 700 MCA SCSI Driver | ||
4 | * | ||
5 | * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com | ||
6 | **----------------------------------------------------------------------------- | ||
7 | ** | ||
8 | ** This program is free software; you can redistribute it and/or modify | ||
9 | ** it under the terms of the GNU General Public License as published by | ||
10 | ** the Free Software Foundation; either version 2 of the License, or | ||
11 | ** (at your option) any later version. | ||
12 | ** | ||
13 | ** This program is distributed in the hope that it will be useful, | ||
14 | ** but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | ** GNU General Public License for more details. | ||
17 | ** | ||
18 | ** You should have received a copy of the GNU General Public License | ||
19 | ** along with this program; if not, write to the Free Software | ||
20 | ** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
21 | ** | ||
22 | **----------------------------------------------------------------------------- | ||
23 | */ | ||
24 | |||
25 | /* Notes: | ||
26 | * | ||
27 | * Most of the work is done in the chip specific module, 53c700.o | ||
28 | * | ||
29 | * TODO List: | ||
30 | * | ||
31 | * 1. Extract the SCSI ID from the voyager CMOS table (necessary to | ||
32 | * support multi-host environments. | ||
33 | * | ||
34 | * */ | ||
35 | |||
36 | |||
37 | /* CHANGELOG | ||
38 | * | ||
39 | * Version 2.2 | ||
40 | * | ||
41 | * Added mca_set_adapter_name(). | ||
42 | * | ||
43 | * Version 2.1 | ||
44 | * | ||
45 | * Modularise the driver into a Board piece (this file) and a chip | ||
46 | * piece 53c700.[ch] and 53c700.scr, added module options. You can | ||
47 | * now specify the scsi id by the parameters | ||
48 | * | ||
49 | * NCR_D700=slot:<n> [siop:<n>] id:<n> .... | ||
50 | * | ||
51 | * They need to be comma separated if compiled into the kernel | ||
52 | * | ||
53 | * Version 2.0 | ||
54 | * | ||
55 | * Initial implementation of TCQ (Tag Command Queueing). TCQ is full | ||
56 | * featured and uses the clock algorithm to keep track of outstanding | ||
57 | * tags and guard against individual tag starvation. Also fixed a bug | ||
58 | * in all of the 1.x versions where the D700_data_residue() function | ||
59 | * was returning results off by 32 bytes (and thus causing the same 32 | ||
60 | * bytes to be written twice corrupting the data block). It turns out | ||
61 | * the 53c700 only has a 6 bit DBC and DFIFO registers not 7 bit ones | ||
62 | * like the 53c710 (The 710 is the only data manual still available, | ||
63 | * which I'd been using to program the 700). | ||
64 | * | ||
65 | * Version 1.2 | ||
66 | * | ||
67 | * Much improved message handling engine | ||
68 | * | ||
69 | * Version 1.1 | ||
70 | * | ||
71 | * Add code to handle selection reasonably correctly. By the time we | ||
72 | * get the selection interrupt, we've already responded, but drop off the | ||
73 | * bus and hope the selector will go away. | ||
74 | * | ||
75 | * Version 1.0: | ||
76 | * | ||
77 | * Initial release. Fully functional except for procfs and tag | ||
78 | * command queueing. Has only been tested on cards with 53c700-66 | ||
79 | * chips and only single ended. Features are | ||
80 | * | ||
81 | * 1. Synchronous data transfers to offset 8 (limit of 700-66) and | ||
82 | * 100ns (10MHz) limit of SCSI-2 | ||
83 | * | ||
84 | * 2. Disconnection and reselection | ||
85 | * | ||
86 | * Testing: | ||
87 | * | ||
88 | * I've only really tested this with the 700-66 chip, but have done | ||
89 | * soak tests in multi-device environments to verify that | ||
90 | * disconnections and reselections are being processed correctly. | ||
91 | * */ | ||
92 | |||
93 | #define NCR_D700_VERSION "2.2" | ||
94 | |||
95 | #include <linux/blkdev.h> | ||
96 | #include <linux/interrupt.h> | ||
97 | #include <linux/kernel.h> | ||
98 | #include <linux/module.h> | ||
99 | #include <linux/mca.h> | ||
100 | #include <linux/slab.h> | ||
101 | #include <asm/io.h> | ||
102 | #include <scsi/scsi_host.h> | ||
103 | #include <scsi/scsi_device.h> | ||
104 | #include <scsi/scsi_transport.h> | ||
105 | #include <scsi/scsi_transport_spi.h> | ||
106 | |||
107 | #include "53c700.h" | ||
108 | #include "NCR_D700.h" | ||
109 | |||
110 | static char *NCR_D700; /* command line from insmod */ | ||
111 | |||
112 | MODULE_AUTHOR("James Bottomley"); | ||
113 | MODULE_DESCRIPTION("NCR Dual700 SCSI Driver"); | ||
114 | MODULE_LICENSE("GPL"); | ||
115 | module_param(NCR_D700, charp, 0); | ||
116 | |||
117 | static __u8 id_array[2*(MCA_MAX_SLOT_NR + 1)] = | ||
118 | { [0 ... 2*(MCA_MAX_SLOT_NR + 1)-1] = 7 }; | ||
119 | |||
120 | #ifdef MODULE | ||
121 | #define ARG_SEP ' ' | ||
122 | #else | ||
123 | #define ARG_SEP ',' | ||
124 | #endif | ||
125 | |||
126 | static int __init | ||
127 | param_setup(char *string) | ||
128 | { | ||
129 | char *pos = string, *next; | ||
130 | int slot = -1, siop = -1; | ||
131 | |||
132 | while(pos != NULL && (next = strchr(pos, ':')) != NULL) { | ||
133 | int val = (int)simple_strtoul(++next, NULL, 0); | ||
134 | |||
135 | if(!strncmp(pos, "slot:", 5)) | ||
136 | slot = val; | ||
137 | else if(!strncmp(pos, "siop:", 5)) | ||
138 | siop = val; | ||
139 | else if(!strncmp(pos, "id:", 3)) { | ||
140 | if(slot == -1) { | ||
141 | printk(KERN_WARNING "NCR D700: Must specify slot for id parameter\n"); | ||
142 | } else if(slot > MCA_MAX_SLOT_NR) { | ||
143 | printk(KERN_WARNING "NCR D700: Illegal slot %d for id %d\n", slot, val); | ||
144 | } else { | ||
145 | if(siop != 0 && siop != 1) { | ||
146 | id_array[slot*2] = val; | ||
147 | id_array[slot*2 + 1] =val; | ||
148 | } else { | ||
149 | id_array[slot*2 + siop] = val; | ||
150 | } | ||
151 | } | ||
152 | } | ||
153 | if((pos = strchr(pos, ARG_SEP)) != NULL) | ||
154 | pos++; | ||
155 | } | ||
156 | return 1; | ||
157 | } | ||
158 | |||
159 | /* Host template. The 53c700 routine NCR_700_detect will | ||
160 | * fill in all of the missing routines */ | ||
161 | static struct scsi_host_template NCR_D700_driver_template = { | ||
162 | .module = THIS_MODULE, | ||
163 | .name = "NCR Dual 700 MCA", | ||
164 | .proc_name = "NCR_D700", | ||
165 | .this_id = 7, | ||
166 | }; | ||
167 | |||
168 | /* We needs this helper because we have two hosts per struct device */ | ||
169 | struct NCR_D700_private { | ||
170 | struct device *dev; | ||
171 | struct Scsi_Host *hosts[2]; | ||
172 | char name[30]; | ||
173 | char pad; | ||
174 | }; | ||
175 | |||
176 | static int | ||
177 | NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq, | ||
178 | int slot, u32 region, int differential) | ||
179 | { | ||
180 | struct NCR_700_Host_Parameters *hostdata; | ||
181 | struct Scsi_Host *host; | ||
182 | int ret; | ||
183 | |||
184 | hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL); | ||
185 | if (!hostdata) { | ||
186 | printk(KERN_ERR "NCR D700: SIOP%d: Failed to allocate host" | ||
187 | "data, detatching\n", siop); | ||
188 | return -ENOMEM; | ||
189 | } | ||
190 | |||
191 | if (!request_region(region, 64, "NCR_D700")) { | ||
192 | printk(KERN_ERR "NCR D700: Failed to reserve IO region 0x%x\n", | ||
193 | region); | ||
194 | ret = -ENODEV; | ||
195 | goto region_failed; | ||
196 | } | ||
197 | |||
198 | /* Fill in the three required pieces of hostdata */ | ||
199 | hostdata->base = ioport_map(region, 64); | ||
200 | hostdata->differential = (((1<<siop) & differential) != 0); | ||
201 | hostdata->clock = NCR_D700_CLOCK_MHZ; | ||
202 | hostdata->burst_length = 8; | ||
203 | |||
204 | /* and register the siop */ | ||
205 | host = NCR_700_detect(&NCR_D700_driver_template, hostdata, p->dev); | ||
206 | if (!host) { | ||
207 | ret = -ENOMEM; | ||
208 | goto detect_failed; | ||
209 | } | ||
210 | |||
211 | p->hosts[siop] = host; | ||
212 | /* FIXME: read this from SUS */ | ||
213 | host->this_id = id_array[slot * 2 + siop]; | ||
214 | host->irq = irq; | ||
215 | host->base = region; | ||
216 | scsi_scan_host(host); | ||
217 | |||
218 | return 0; | ||
219 | |||
220 | detect_failed: | ||
221 | release_region(region, 64); | ||
222 | region_failed: | ||
223 | kfree(hostdata); | ||
224 | |||
225 | return ret; | ||
226 | } | ||
227 | |||
228 | static irqreturn_t | ||
229 | NCR_D700_intr(int irq, void *data) | ||
230 | { | ||
231 | struct NCR_D700_private *p = (struct NCR_D700_private *)data; | ||
232 | int i, found = 0; | ||
233 | |||
234 | for (i = 0; i < 2; i++) | ||
235 | if (p->hosts[i] && | ||
236 | NCR_700_intr(irq, p->hosts[i]) == IRQ_HANDLED) | ||
237 | found++; | ||
238 | |||
239 | return found ? IRQ_HANDLED : IRQ_NONE; | ||
240 | } | ||
241 | |||
242 | /* Detect a D700 card. Note, because of the setup --- the chips are | ||
243 | * essentially connectecd to the MCA bus independently, it is easier | ||
244 | * to set them up as two separate host adapters, rather than one | ||
245 | * adapter with two channels */ | ||
246 | static int | ||
247 | NCR_D700_probe(struct device *dev) | ||
248 | { | ||
249 | struct NCR_D700_private *p; | ||
250 | int differential; | ||
251 | static int banner = 1; | ||
252 | struct mca_device *mca_dev = to_mca_device(dev); | ||
253 | int slot = mca_dev->slot; | ||
254 | int found = 0; | ||
255 | int irq, i; | ||
256 | int pos3j, pos3k, pos3a, pos3b, pos4; | ||
257 | __u32 base_addr, offset_addr; | ||
258 | |||
259 | /* enable board interrupt */ | ||
260 | pos4 = mca_device_read_pos(mca_dev, 4); | ||
261 | pos4 |= 0x4; | ||
262 | mca_device_write_pos(mca_dev, 4, pos4); | ||
263 | |||
264 | mca_device_write_pos(mca_dev, 6, 9); | ||
265 | pos3j = mca_device_read_pos(mca_dev, 3); | ||
266 | mca_device_write_pos(mca_dev, 6, 10); | ||
267 | pos3k = mca_device_read_pos(mca_dev, 3); | ||
268 | mca_device_write_pos(mca_dev, 6, 0); | ||
269 | pos3a = mca_device_read_pos(mca_dev, 3); | ||
270 | mca_device_write_pos(mca_dev, 6, 1); | ||
271 | pos3b = mca_device_read_pos(mca_dev, 3); | ||
272 | |||
273 | base_addr = ((pos3j << 8) | pos3k) & 0xfffffff0; | ||
274 | offset_addr = ((pos3a << 8) | pos3b) & 0xffffff70; | ||
275 | |||
276 | irq = (pos4 & 0x3) + 11; | ||
277 | if(irq >= 13) | ||
278 | irq++; | ||
279 | if(banner) { | ||
280 | printk(KERN_NOTICE "NCR D700: Driver Version " NCR_D700_VERSION "\n" | ||
281 | "NCR D700: Copyright (c) 2001 by James.Bottomley@HansenPartnership.com\n" | ||
282 | "NCR D700:\n"); | ||
283 | banner = 0; | ||
284 | } | ||
285 | /* now do the bus related transforms */ | ||
286 | irq = mca_device_transform_irq(mca_dev, irq); | ||
287 | base_addr = mca_device_transform_ioport(mca_dev, base_addr); | ||
288 | offset_addr = mca_device_transform_ioport(mca_dev, offset_addr); | ||
289 | |||
290 | printk(KERN_NOTICE "NCR D700: found in slot %d irq = %d I/O base = 0x%x\n", slot, irq, offset_addr); | ||
291 | |||
292 | /*outb(BOARD_RESET, base_addr);*/ | ||
293 | |||
294 | /* clear any pending interrupts */ | ||
295 | (void)inb(base_addr + 0x08); | ||
296 | /* get modctl, used later for setting diff bits */ | ||
297 | switch(differential = (inb(base_addr + 0x08) >> 6)) { | ||
298 | case 0x00: | ||
299 | /* only SIOP1 differential */ | ||
300 | differential = 0x02; | ||
301 | break; | ||
302 | case 0x01: | ||
303 | /* Both SIOPs differential */ | ||
304 | differential = 0x03; | ||
305 | break; | ||
306 | case 0x03: | ||
307 | /* No SIOPs differential */ | ||
308 | differential = 0x00; | ||
309 | break; | ||
310 | default: | ||
311 | printk(KERN_ERR "D700: UNEXPECTED DIFFERENTIAL RESULT 0x%02x\n", | ||
312 | differential); | ||
313 | differential = 0x00; | ||
314 | break; | ||
315 | } | ||
316 | |||
317 | p = kzalloc(sizeof(*p), GFP_KERNEL); | ||
318 | if (!p) | ||
319 | return -ENOMEM; | ||
320 | |||
321 | p->dev = dev; | ||
322 | snprintf(p->name, sizeof(p->name), "D700(%s)", dev_name(dev)); | ||
323 | if (request_irq(irq, NCR_D700_intr, IRQF_SHARED, p->name, p)) { | ||
324 | printk(KERN_ERR "D700: request_irq failed\n"); | ||
325 | kfree(p); | ||
326 | return -EBUSY; | ||
327 | } | ||
328 | /* plumb in both 700 chips */ | ||
329 | for (i = 0; i < 2; i++) { | ||
330 | int err; | ||
331 | |||
332 | if ((err = NCR_D700_probe_one(p, i, irq, slot, | ||
333 | offset_addr + (0x80 * i), | ||
334 | differential)) != 0) | ||
335 | printk("D700: SIOP%d: probe failed, error = %d\n", | ||
336 | i, err); | ||
337 | else | ||
338 | found++; | ||
339 | } | ||
340 | |||
341 | if (!found) { | ||
342 | kfree(p); | ||
343 | return -ENODEV; | ||
344 | } | ||
345 | |||
346 | mca_device_set_claim(mca_dev, 1); | ||
347 | mca_device_set_name(mca_dev, "NCR_D700"); | ||
348 | dev_set_drvdata(dev, p); | ||
349 | return 0; | ||
350 | } | ||
351 | |||
352 | static void | ||
353 | NCR_D700_remove_one(struct Scsi_Host *host) | ||
354 | { | ||
355 | scsi_remove_host(host); | ||
356 | NCR_700_release(host); | ||
357 | kfree((struct NCR_700_Host_Parameters *)host->hostdata[0]); | ||
358 | free_irq(host->irq, host); | ||
359 | release_region(host->base, 64); | ||
360 | } | ||
361 | |||
362 | static int | ||
363 | NCR_D700_remove(struct device *dev) | ||
364 | { | ||
365 | struct NCR_D700_private *p = dev_get_drvdata(dev); | ||
366 | int i; | ||
367 | |||
368 | for (i = 0; i < 2; i++) | ||
369 | NCR_D700_remove_one(p->hosts[i]); | ||
370 | |||
371 | kfree(p); | ||
372 | return 0; | ||
373 | } | ||
374 | |||
375 | static short NCR_D700_id_table[] = { NCR_D700_MCA_ID, 0 }; | ||
376 | |||
377 | static struct mca_driver NCR_D700_driver = { | ||
378 | .id_table = NCR_D700_id_table, | ||
379 | .driver = { | ||
380 | .name = "NCR_D700", | ||
381 | .bus = &mca_bus_type, | ||
382 | .probe = NCR_D700_probe, | ||
383 | .remove = NCR_D700_remove, | ||
384 | }, | ||
385 | }; | ||
386 | |||
387 | static int __init NCR_D700_init(void) | ||
388 | { | ||
389 | #ifdef MODULE | ||
390 | if (NCR_D700) | ||
391 | param_setup(NCR_D700); | ||
392 | #endif | ||
393 | |||
394 | return mca_register_driver(&NCR_D700_driver); | ||
395 | } | ||
396 | |||
397 | static void __exit NCR_D700_exit(void) | ||
398 | { | ||
399 | mca_unregister_driver(&NCR_D700_driver); | ||
400 | } | ||
401 | |||
402 | module_init(NCR_D700_init); | ||
403 | module_exit(NCR_D700_exit); | ||
404 | |||
405 | __setup("NCR_D700=", param_setup); | ||
diff --git a/drivers/scsi/NCR_D700.h b/drivers/scsi/NCR_D700.h deleted file mode 100644 index eb675d782ef6..000000000000 --- a/drivers/scsi/NCR_D700.h +++ /dev/null | |||
@@ -1,30 +0,0 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* -*- mode: c; c-basic-offset: 8 -*- */ | ||
3 | |||
4 | /* NCR Dual 700 MCA SCSI Driver | ||
5 | * | ||
6 | * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com | ||
7 | */ | ||
8 | |||
9 | #ifndef _NCR_D700_H | ||
10 | #define _NCR_D700_H | ||
11 | |||
12 | /* Don't turn on debugging messages */ | ||
13 | #undef NCR_D700_DEBUG | ||
14 | |||
15 | /* The MCA identifier */ | ||
16 | #define NCR_D700_MCA_ID 0x0092 | ||
17 | |||
18 | /* Defines for the Board registers */ | ||
19 | #define BOARD_RESET 0x80 /* board level reset */ | ||
20 | #define ADD_PARENB 0x04 /* Address Parity Enabled */ | ||
21 | #define DAT_PARENB 0x01 /* Data Parity Enabled */ | ||
22 | #define SFBK_ENB 0x10 /* SFDBK Interrupt Enabled */ | ||
23 | #define LED0GREEN 0x20 /* Led 0 (red 0; green 1) */ | ||
24 | #define LED1GREEN 0x40 /* Led 1 (red 0; green 1) */ | ||
25 | #define LED0RED 0xDF /* Led 0 (red 0; green 1) */ | ||
26 | #define LED1RED 0xBF /* Led 1 (red 0; green 1) */ | ||
27 | |||
28 | #define NCR_D700_CLOCK_MHZ 50 | ||
29 | |||
30 | #endif | ||
diff --git a/drivers/scsi/NCR_Q720.c b/drivers/scsi/NCR_Q720.c deleted file mode 100644 index 54e7d26908ee..000000000000 --- a/drivers/scsi/NCR_Q720.c +++ /dev/null | |||
@@ -1,376 +0,0 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8 -*- */ | ||
2 | |||
3 | /* NCR Quad 720 MCA SCSI Driver | ||
4 | * | ||
5 | * Copyright (C) 2003 by James.Bottomley@HansenPartnership.com | ||
6 | */ | ||
7 | |||
8 | #include <linux/blkdev.h> | ||
9 | #include <linux/interrupt.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/mca.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/types.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/delay.h> | ||
17 | #include <asm/io.h> | ||
18 | |||
19 | #include "scsi.h" | ||
20 | #include <scsi/scsi_host.h> | ||
21 | |||
22 | #include "ncr53c8xx.h" | ||
23 | |||
24 | #include "NCR_Q720.h" | ||
25 | |||
26 | static struct ncr_chip q720_chip __initdata = { | ||
27 | .revision_id = 0x0f, | ||
28 | .burst_max = 3, | ||
29 | .offset_max = 8, | ||
30 | .nr_divisor = 4, | ||
31 | .features = FE_WIDE | FE_DIFF | FE_VARCLK, | ||
32 | }; | ||
33 | |||
34 | MODULE_AUTHOR("James Bottomley"); | ||
35 | MODULE_DESCRIPTION("NCR Quad 720 SCSI Driver"); | ||
36 | MODULE_LICENSE("GPL"); | ||
37 | |||
38 | #define NCR_Q720_VERSION "0.9" | ||
39 | |||
40 | /* We needs this helper because we have up to four hosts per struct device */ | ||
41 | struct NCR_Q720_private { | ||
42 | struct device *dev; | ||
43 | void __iomem * mem_base; | ||
44 | __u32 phys_mem_base; | ||
45 | __u32 mem_size; | ||
46 | __u8 irq; | ||
47 | __u8 siops; | ||
48 | __u8 irq_enable; | ||
49 | struct Scsi_Host *hosts[4]; | ||
50 | }; | ||
51 | |||
52 | static struct scsi_host_template NCR_Q720_tpnt = { | ||
53 | .module = THIS_MODULE, | ||
54 | .proc_name = "NCR_Q720", | ||
55 | }; | ||
56 | |||
57 | static irqreturn_t | ||
58 | NCR_Q720_intr(int irq, void *data) | ||
59 | { | ||
60 | struct NCR_Q720_private *p = (struct NCR_Q720_private *)data; | ||
61 | __u8 sir = (readb(p->mem_base + 0x0d) & 0xf0) >> 4; | ||
62 | __u8 siop; | ||
63 | |||
64 | sir |= ~p->irq_enable; | ||
65 | |||
66 | if(sir == 0xff) | ||
67 | return IRQ_NONE; | ||
68 | |||
69 | |||
70 | while((siop = ffz(sir)) < p->siops) { | ||
71 | sir |= 1<<siop; | ||
72 | ncr53c8xx_intr(irq, p->hosts[siop]); | ||
73 | } | ||
74 | return IRQ_HANDLED; | ||
75 | } | ||
76 | |||
77 | static int __init | ||
78 | NCR_Q720_probe_one(struct NCR_Q720_private *p, int siop, | ||
79 | int irq, int slot, __u32 paddr, void __iomem *vaddr) | ||
80 | { | ||
81 | struct ncr_device device; | ||
82 | __u8 scsi_id; | ||
83 | static int unit = 0; | ||
84 | __u8 scsr1 = readb(vaddr + NCR_Q720_SCSR_OFFSET + 1); | ||
85 | __u8 differential = readb(vaddr + NCR_Q720_SCSR_OFFSET) & 0x20; | ||
86 | __u8 version; | ||
87 | int error; | ||
88 | |||
89 | scsi_id = scsr1 >> 4; | ||
90 | /* enable burst length 16 (FIXME: should allow this) */ | ||
91 | scsr1 |= 0x02; | ||
92 | /* force a siop reset */ | ||
93 | scsr1 |= 0x04; | ||
94 | writeb(scsr1, vaddr + NCR_Q720_SCSR_OFFSET + 1); | ||
95 | udelay(10); | ||
96 | version = readb(vaddr + 0x18) >> 4; | ||
97 | |||
98 | memset(&device, 0, sizeof(struct ncr_device)); | ||
99 | /* Initialise ncr_device structure with items required by ncr_attach. */ | ||
100 | device.chip = q720_chip; | ||
101 | device.chip.revision_id = version; | ||
102 | device.host_id = scsi_id; | ||
103 | device.dev = p->dev; | ||
104 | device.slot.base = paddr; | ||
105 | device.slot.base_c = paddr; | ||
106 | device.slot.base_v = vaddr; | ||
107 | device.slot.irq = irq; | ||
108 | device.differential = differential ? 2 : 0; | ||
109 | printk("Q720 probe unit %d (siop%d) at 0x%lx, diff = %d, vers = %d\n", unit, siop, | ||
110 | (unsigned long)paddr, differential, version); | ||
111 | |||
112 | p->hosts[siop] = ncr_attach(&NCR_Q720_tpnt, unit++, &device); | ||
113 | |||
114 | if (!p->hosts[siop]) | ||
115 | goto fail; | ||
116 | |||
117 | p->irq_enable |= (1<<siop); | ||
118 | scsr1 = readb(vaddr + NCR_Q720_SCSR_OFFSET + 1); | ||
119 | /* clear the disable interrupt bit */ | ||
120 | scsr1 &= ~0x01; | ||
121 | writeb(scsr1, vaddr + NCR_Q720_SCSR_OFFSET + 1); | ||
122 | |||
123 | error = scsi_add_host(p->hosts[siop], p->dev); | ||
124 | if (error) | ||
125 | ncr53c8xx_release(p->hosts[siop]); | ||
126 | else | ||
127 | scsi_scan_host(p->hosts[siop]); | ||
128 | return error; | ||
129 | |||
130 | fail: | ||
131 | return -ENODEV; | ||
132 | } | ||
133 | |||
134 | /* Detect a Q720 card. Note, because of the setup --- the chips are | ||
135 | * essentially connectecd to the MCA bus independently, it is easier | ||
136 | * to set them up as two separate host adapters, rather than one | ||
137 | * adapter with two channels */ | ||
138 | static int __init | ||
139 | NCR_Q720_probe(struct device *dev) | ||
140 | { | ||
141 | struct NCR_Q720_private *p; | ||
142 | static int banner = 1; | ||
143 | struct mca_device *mca_dev = to_mca_device(dev); | ||
144 | int slot = mca_dev->slot; | ||
145 | int found = 0; | ||
146 | int irq, i, siops; | ||
147 | __u8 pos2, pos4, asr2, asr9, asr10; | ||
148 | __u16 io_base; | ||
149 | __u32 base_addr, mem_size; | ||
150 | void __iomem *mem_base; | ||
151 | |||
152 | p = kzalloc(sizeof(*p), GFP_KERNEL); | ||
153 | if (!p) | ||
154 | return -ENOMEM; | ||
155 | |||
156 | pos2 = mca_device_read_pos(mca_dev, 2); | ||
157 | /* enable device */ | ||
158 | pos2 |= NCR_Q720_POS2_BOARD_ENABLE | NCR_Q720_POS2_INTERRUPT_ENABLE; | ||
159 | mca_device_write_pos(mca_dev, 2, pos2); | ||
160 | |||
161 | io_base = (pos2 & NCR_Q720_POS2_IO_MASK) << NCR_Q720_POS2_IO_SHIFT; | ||
162 | |||
163 | |||
164 | if(banner) { | ||
165 | printk(KERN_NOTICE "NCR Q720: Driver Version " NCR_Q720_VERSION "\n" | ||
166 | "NCR Q720: Copyright (c) 2003 by James.Bottomley@HansenPartnership.com\n" | ||
167 | "NCR Q720:\n"); | ||
168 | banner = 0; | ||
169 | } | ||
170 | io_base = mca_device_transform_ioport(mca_dev, io_base); | ||
171 | |||
172 | /* OK, this is phase one of the bootstrap, we now know the | ||
173 | * I/O space base address. All the configuration registers | ||
174 | * are mapped here (including pos) */ | ||
175 | |||
176 | /* sanity check I/O mapping */ | ||
177 | i = inb(io_base) | (inb(io_base+1)<<8); | ||
178 | if(i != NCR_Q720_MCA_ID) { | ||
179 | printk(KERN_ERR "NCR_Q720, adapter failed to I/O map registers correctly at 0x%x(0x%x)\n", io_base, i); | ||
180 | kfree(p); | ||
181 | return -ENODEV; | ||
182 | } | ||
183 | |||
184 | /* Phase II, find the ram base and memory map the board register */ | ||
185 | pos4 = inb(io_base + 4); | ||
186 | /* enable streaming data */ | ||
187 | pos4 |= 0x01; | ||
188 | outb(pos4, io_base + 4); | ||
189 | base_addr = (pos4 & 0x7e) << 20; | ||
190 | base_addr += (pos4 & 0x80) << 23; | ||
191 | asr10 = inb(io_base + 0x12); | ||
192 | base_addr += (asr10 & 0x80) << 24; | ||
193 | base_addr += (asr10 & 0x70) << 23; | ||
194 | |||
195 | /* OK, got the base addr, now we need to find the ram size, | ||
196 | * enable and map it */ | ||
197 | asr9 = inb(io_base + 0x11); | ||
198 | i = (asr9 & 0xc0) >> 6; | ||
199 | if(i == 0) | ||
200 | mem_size = 1024; | ||
201 | else | ||
202 | mem_size = 1 << (19 + i); | ||
203 | |||
204 | /* enable the sram mapping */ | ||
205 | asr9 |= 0x20; | ||
206 | |||
207 | /* disable the rom mapping */ | ||
208 | asr9 &= ~0x10; | ||
209 | |||
210 | outb(asr9, io_base + 0x11); | ||
211 | |||
212 | if(!request_mem_region(base_addr, mem_size, "NCR_Q720")) { | ||
213 | printk(KERN_ERR "NCR_Q720: Failed to claim memory region 0x%lx\n-0x%lx", | ||
214 | (unsigned long)base_addr, | ||
215 | (unsigned long)(base_addr + mem_size)); | ||
216 | goto out_free; | ||
217 | } | ||
218 | |||
219 | if (dma_declare_coherent_memory(dev, base_addr, base_addr, | ||
220 | mem_size, 0)) { | ||
221 | printk(KERN_ERR "NCR_Q720: DMA declare memory failed\n"); | ||
222 | goto out_release_region; | ||
223 | } | ||
224 | |||
225 | /* The first 1k of the memory buffer is a memory map of the registers | ||
226 | */ | ||
227 | mem_base = dma_mark_declared_memory_occupied(dev, base_addr, | ||
228 | 1024); | ||
229 | if (IS_ERR(mem_base)) { | ||
230 | printk("NCR_Q720 failed to reserve memory mapped region\n"); | ||
231 | goto out_release; | ||
232 | } | ||
233 | |||
234 | /* now also enable accesses in asr 2 */ | ||
235 | asr2 = inb(io_base + 0x0a); | ||
236 | |||
237 | asr2 |= 0x01; | ||
238 | |||
239 | outb(asr2, io_base + 0x0a); | ||
240 | |||
241 | /* get the number of SIOPs (this should be 2 or 4) */ | ||
242 | siops = ((asr2 & 0xe0) >> 5) + 1; | ||
243 | |||
244 | /* sanity check mapping (again) */ | ||
245 | i = readw(mem_base); | ||
246 | if(i != NCR_Q720_MCA_ID) { | ||
247 | printk(KERN_ERR "NCR_Q720, adapter failed to memory map registers correctly at 0x%lx(0x%x)\n", (unsigned long)base_addr, i); | ||
248 | goto out_release; | ||
249 | } | ||
250 | |||
251 | irq = readb(mem_base + 5) & 0x0f; | ||
252 | |||
253 | |||
254 | /* now do the bus related transforms */ | ||
255 | irq = mca_device_transform_irq(mca_dev, irq); | ||
256 | |||
257 | printk(KERN_NOTICE "NCR Q720: found in slot %d irq = %d mem base = 0x%lx siops = %d\n", slot, irq, (unsigned long)base_addr, siops); | ||
258 | printk(KERN_NOTICE "NCR Q720: On board ram %dk\n", mem_size/1024); | ||
259 | |||
260 | p->dev = dev; | ||
261 | p->mem_base = mem_base; | ||
262 | p->phys_mem_base = base_addr; | ||
263 | p->mem_size = mem_size; | ||
264 | p->irq = irq; | ||
265 | p->siops = siops; | ||
266 | |||
267 | if (request_irq(irq, NCR_Q720_intr, IRQF_SHARED, "NCR_Q720", p)) { | ||
268 | printk(KERN_ERR "NCR_Q720: request irq %d failed\n", irq); | ||
269 | goto out_release; | ||
270 | } | ||
271 | /* disable all the siop interrupts */ | ||
272 | for(i = 0; i < siops; i++) { | ||
273 | void __iomem *reg_scsr1 = mem_base + NCR_Q720_CHIP_REGISTER_OFFSET | ||
274 | + i*NCR_Q720_SIOP_SHIFT + NCR_Q720_SCSR_OFFSET + 1; | ||
275 | __u8 scsr1 = readb(reg_scsr1); | ||
276 | scsr1 |= 0x01; | ||
277 | writeb(scsr1, reg_scsr1); | ||
278 | } | ||
279 | |||
280 | /* plumb in all 720 chips */ | ||
281 | for (i = 0; i < siops; i++) { | ||
282 | void __iomem *siop_v_base = mem_base + NCR_Q720_CHIP_REGISTER_OFFSET | ||
283 | + i*NCR_Q720_SIOP_SHIFT; | ||
284 | __u32 siop_p_base = base_addr + NCR_Q720_CHIP_REGISTER_OFFSET | ||
285 | + i*NCR_Q720_SIOP_SHIFT; | ||
286 | __u16 port = io_base + NCR_Q720_CHIP_REGISTER_OFFSET | ||
287 | + i*NCR_Q720_SIOP_SHIFT; | ||
288 | int err; | ||
289 | |||
290 | outb(0xff, port + 0x40); | ||
291 | outb(0x07, port + 0x41); | ||
292 | if ((err = NCR_Q720_probe_one(p, i, irq, slot, | ||
293 | siop_p_base, siop_v_base)) != 0) | ||
294 | printk("Q720: SIOP%d: probe failed, error = %d\n", | ||
295 | i, err); | ||
296 | else | ||
297 | found++; | ||
298 | } | ||
299 | |||
300 | if (!found) { | ||
301 | kfree(p); | ||
302 | return -ENODEV; | ||
303 | } | ||
304 | |||
305 | mca_device_set_claim(mca_dev, 1); | ||
306 | mca_device_set_name(mca_dev, "NCR_Q720"); | ||
307 | dev_set_drvdata(dev, p); | ||
308 | |||
309 | return 0; | ||
310 | |||
311 | out_release: | ||
312 | dma_release_declared_memory(dev); | ||
313 | out_release_region: | ||
314 | release_mem_region(base_addr, mem_size); | ||
315 | out_free: | ||
316 | kfree(p); | ||
317 | |||
318 | return -ENODEV; | ||
319 | } | ||
320 | |||
321 | static void __exit | ||
322 | NCR_Q720_remove_one(struct Scsi_Host *host) | ||
323 | { | ||
324 | scsi_remove_host(host); | ||
325 | ncr53c8xx_release(host); | ||
326 | } | ||
327 | |||
328 | static int __exit | ||
329 | NCR_Q720_remove(struct device *dev) | ||
330 | { | ||
331 | struct NCR_Q720_private *p = dev_get_drvdata(dev); | ||
332 | int i; | ||
333 | |||
334 | for (i = 0; i < p->siops; i++) | ||
335 | if(p->hosts[i]) | ||
336 | NCR_Q720_remove_one(p->hosts[i]); | ||
337 | |||
338 | dma_release_declared_memory(dev); | ||
339 | release_mem_region(p->phys_mem_base, p->mem_size); | ||
340 | free_irq(p->irq, p); | ||
341 | kfree(p); | ||
342 | return 0; | ||
343 | } | ||
344 | |||
345 | static short NCR_Q720_id_table[] = { NCR_Q720_MCA_ID, 0 }; | ||
346 | |||
347 | static struct mca_driver NCR_Q720_driver = { | ||
348 | .id_table = NCR_Q720_id_table, | ||
349 | .driver = { | ||
350 | .name = "NCR_Q720", | ||
351 | .bus = &mca_bus_type, | ||
352 | .probe = NCR_Q720_probe, | ||
353 | .remove = NCR_Q720_remove, | ||
354 | }, | ||
355 | }; | ||
356 | |||
357 | static int __init | ||
358 | NCR_Q720_init(void) | ||
359 | { | ||
360 | int ret = ncr53c8xx_init(); | ||
361 | if (!ret) | ||
362 | ret = mca_register_driver(&NCR_Q720_driver); | ||
363 | if (ret) | ||
364 | ncr53c8xx_exit(); | ||
365 | return ret; | ||
366 | } | ||
367 | |||
368 | static void __exit | ||
369 | NCR_Q720_exit(void) | ||
370 | { | ||
371 | mca_unregister_driver(&NCR_Q720_driver); | ||
372 | ncr53c8xx_exit(); | ||
373 | } | ||
374 | |||
375 | module_init(NCR_Q720_init); | ||
376 | module_exit(NCR_Q720_exit); | ||
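The probe function above tears down on failure with the usual cumulative-unwind labels: each goto target releases one resource and falls through to the labels below it, so everything acquired so far is released in reverse order of acquisition. A minimal sketch of that shape, with hypothetical acquire_*/release_* helpers standing in for request_mem_region(), dma_declare_coherent_memory() and request_irq():

	static int example_probe(void)
	{
		int err = -ENODEV;

		if (acquire_region())			/* e.g. request_mem_region() */
			goto out;
		if (acquire_dma_mem())			/* e.g. dma_declare_coherent_memory() */
			goto out_release_region;
		if (acquire_irq())			/* e.g. request_irq() */
			goto out_release_dma;

		return 0;				/* success: keep everything */

	out_release_dma:
		release_dma_mem();
	out_release_region:
		release_region();
	out:
		return err;
	}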
diff --git a/drivers/scsi/NCR_Q720.h b/drivers/scsi/NCR_Q720.h deleted file mode 100644 index d5f46cdb736e..000000000000 --- a/drivers/scsi/NCR_Q720.h +++ /dev/null | |||
@@ -1,29 +0,0 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* -*- mode: c; c-basic-offset: 8 -*- */ | ||
3 | |||
4 | /* NCR Quad 720 MCA SCSI Driver | ||
5 | * | ||
6 | * Copyright (C) 2003 by James.Bottomley@HansenPartnership.com | ||
7 | */ | ||
8 | |||
9 | #ifndef _NCR_Q720_H | ||
10 | #define _NCR_Q720_H | ||
11 | |||
12 | /* The MCA identifier */ | ||
13 | #define NCR_Q720_MCA_ID 0x0720 | ||
14 | |||
15 | #define NCR_Q720_CLOCK_MHZ 30 | ||
16 | |||
17 | #define NCR_Q720_POS2_BOARD_ENABLE 0x01 | ||
18 | #define NCR_Q720_POS2_INTERRUPT_ENABLE 0x02 | ||
19 | #define NCR_Q720_POS2_PARITY_DISABLE 0x04 | ||
20 | #define NCR_Q720_POS2_IO_MASK 0xf8 | ||
21 | #define NCR_Q720_POS2_IO_SHIFT 8 | ||
22 | |||
23 | #define NCR_Q720_CHIP_REGISTER_OFFSET 0x200 | ||
24 | #define NCR_Q720_SCSR_OFFSET 0x070 | ||
25 | #define NCR_Q720_SIOP_SHIFT 0x080 | ||
26 | |||
27 | #endif | ||
28 | |||
29 | |||
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c index b2942ec3d455..23b17621b6d2 100644 --- a/drivers/scsi/a100u2w.c +++ b/drivers/scsi/a100u2w.c | |||
@@ -143,7 +143,7 @@ static u8 wait_chip_ready(struct orc_host * host) | |||
143 | for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */ | 143 | for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */ |
144 | if (inb(host->base + ORC_HCTRL) & HOSTSTOP) /* Wait HOSTSTOP set */ | 144 | if (inb(host->base + ORC_HCTRL) & HOSTSTOP) /* Wait HOSTSTOP set */ |
145 | return 1; | 145 | return 1; |
146 | mdelay(100); | 146 | msleep(100); |
147 | } | 147 | } |
148 | return 0; | 148 | return 0; |
149 | } | 149 | } |
@@ -155,7 +155,7 @@ static u8 wait_firmware_ready(struct orc_host * host) | |||
155 | for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */ | 155 | for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */ |
156 | if (inb(host->base + ORC_HSTUS) & RREADY) /* Wait READY set */ | 156 | if (inb(host->base + ORC_HSTUS) & RREADY) /* Wait READY set */ |
157 | return 1; | 157 | return 1; |
158 | mdelay(100); /* wait 100ms before try again */ | 158 | msleep(100); /* wait 100ms before try again */ |
159 | } | 159 | } |
160 | return 0; | 160 | return 0; |
161 | } | 161 | } |
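The two a100u2w hunks above swap the busy-wait mdelay() for msleep() inside loops that poll a controller status bit; these paths run in process context, so sleeping frees the CPU instead of spinning for 100 ms per iteration (mdelay() remains necessary only in atomic context or for very short delays). A sketch of the resulting polling pattern, with read_status() and READY standing in for the inb()/HOSTSTOP test above:

	#include <linux/delay.h>
	#include <linux/errno.h>

	static int wait_ready(void)
	{
		int i;

		for (i = 0; i < 10; i++) {		/* about one second overall */
			if (read_status() & READY)
				return 0;
			msleep(100);			/* sleeps; mdelay(100) would spin */
		}
		return -ETIMEDOUT;
	}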
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index a57f3a7d4748..6e356325d8d9 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c | |||
@@ -115,8 +115,6 @@ | |||
115 | #define ASENCODE_LUN_FAILED_SELF_CONFIG 0x00 | 115 | #define ASENCODE_LUN_FAILED_SELF_CONFIG 0x00 |
116 | #define ASENCODE_OVERLAPPED_COMMAND 0x00 | 116 | #define ASENCODE_OVERLAPPED_COMMAND 0x00 |
117 | 117 | ||
118 | #define AAC_STAT_GOOD (DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD) | ||
119 | |||
120 | #define BYTE0(x) (unsigned char)(x) | 118 | #define BYTE0(x) (unsigned char)(x) |
121 | #define BYTE1(x) (unsigned char)((x) >> 8) | 119 | #define BYTE1(x) (unsigned char)((x) >> 8) |
122 | #define BYTE2(x) (unsigned char)((x) >> 16) | 120 | #define BYTE2(x) (unsigned char)((x) >> 16) |
@@ -2961,7 +2959,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) | |||
2961 | 2959 | ||
2962 | case SYNCHRONIZE_CACHE: | 2960 | case SYNCHRONIZE_CACHE: |
2963 | if (((aac_cache & 6) == 6) && dev->cache_protected) { | 2961 | if (((aac_cache & 6) == 6) && dev->cache_protected) { |
2964 | scsicmd->result = AAC_STAT_GOOD; | 2962 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | |
2963 | SAM_STAT_GOOD; | ||
2965 | break; | 2964 | break; |
2966 | } | 2965 | } |
2967 | /* Issue FIB to tell Firmware to flush it's cache */ | 2966 | /* Issue FIB to tell Firmware to flush it's cache */ |
@@ -2989,7 +2988,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) | |||
2989 | arr[1] = scsicmd->cmnd[2]; | 2988 | arr[1] = scsicmd->cmnd[2]; |
2990 | scsi_sg_copy_from_buffer(scsicmd, &inq_data, | 2989 | scsi_sg_copy_from_buffer(scsicmd, &inq_data, |
2991 | sizeof(inq_data)); | 2990 | sizeof(inq_data)); |
2992 | scsicmd->result = AAC_STAT_GOOD; | 2991 | scsicmd->result = DID_OK << 16 | |
2992 | COMMAND_COMPLETE << 8 | | ||
2993 | SAM_STAT_GOOD; | ||
2993 | } else if (scsicmd->cmnd[2] == 0x80) { | 2994 | } else if (scsicmd->cmnd[2] == 0x80) { |
2994 | /* unit serial number page */ | 2995 | /* unit serial number page */ |
2995 | arr[3] = setinqserial(dev, &arr[4], | 2996 | arr[3] = setinqserial(dev, &arr[4], |
@@ -3000,7 +3001,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) | |||
3000 | if (aac_wwn != 2) | 3001 | if (aac_wwn != 2) |
3001 | return aac_get_container_serial( | 3002 | return aac_get_container_serial( |
3002 | scsicmd); | 3003 | scsicmd); |
3003 | scsicmd->result = AAC_STAT_GOOD; | 3004 | scsicmd->result = DID_OK << 16 | |
3005 | COMMAND_COMPLETE << 8 | | ||
3006 | SAM_STAT_GOOD; | ||
3004 | } else if (scsicmd->cmnd[2] == 0x83) { | 3007 | } else if (scsicmd->cmnd[2] == 0x83) { |
3005 | /* vpd page 0x83 - Device Identification Page */ | 3008 | /* vpd page 0x83 - Device Identification Page */ |
3006 | char *sno = (char *)&inq_data; | 3009 | char *sno = (char *)&inq_data; |
@@ -3009,7 +3012,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) | |||
3009 | if (aac_wwn != 2) | 3012 | if (aac_wwn != 2) |
3010 | return aac_get_container_serial( | 3013 | return aac_get_container_serial( |
3011 | scsicmd); | 3014 | scsicmd); |
3012 | scsicmd->result = AAC_STAT_GOOD; | 3015 | scsicmd->result = DID_OK << 16 | |
3016 | COMMAND_COMPLETE << 8 | | ||
3017 | SAM_STAT_GOOD; | ||
3013 | } else { | 3018 | } else { |
3014 | /* vpd page not implemented */ | 3019 | /* vpd page not implemented */ |
3015 | scsicmd->result = DID_OK << 16 | | 3020 | scsicmd->result = DID_OK << 16 | |
@@ -3040,7 +3045,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) | |||
3040 | inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */ | 3045 | inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */ |
3041 | scsi_sg_copy_from_buffer(scsicmd, &inq_data, | 3046 | scsi_sg_copy_from_buffer(scsicmd, &inq_data, |
3042 | sizeof(inq_data)); | 3047 | sizeof(inq_data)); |
3043 | scsicmd->result = AAC_STAT_GOOD; | 3048 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | |
3049 | SAM_STAT_GOOD; | ||
3044 | break; | 3050 | break; |
3045 | } | 3051 | } |
3046 | if (dev->in_reset) | 3052 | if (dev->in_reset) |
@@ -3089,7 +3095,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) | |||
3089 | /* Do not cache partition table for arrays */ | 3095 | /* Do not cache partition table for arrays */ |
3090 | scsicmd->device->removable = 1; | 3096 | scsicmd->device->removable = 1; |
3091 | 3097 | ||
3092 | scsicmd->result = AAC_STAT_GOOD; | 3098 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | |
3099 | SAM_STAT_GOOD; | ||
3093 | break; | 3100 | break; |
3094 | } | 3101 | } |
3095 | 3102 | ||
@@ -3115,7 +3122,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) | |||
3115 | scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp)); | 3122 | scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp)); |
3116 | /* Do not cache partition table for arrays */ | 3123 | /* Do not cache partition table for arrays */ |
3117 | scsicmd->device->removable = 1; | 3124 | scsicmd->device->removable = 1; |
3118 | scsicmd->result = AAC_STAT_GOOD; | 3125 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | |
3126 | SAM_STAT_GOOD; | ||
3119 | break; | 3127 | break; |
3120 | } | 3128 | } |
3121 | 3129 | ||
@@ -3194,7 +3202,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) | |||
3194 | scsi_sg_copy_from_buffer(scsicmd, | 3202 | scsi_sg_copy_from_buffer(scsicmd, |
3195 | (char *)&mpd, | 3203 | (char *)&mpd, |
3196 | mode_buf_length); | 3204 | mode_buf_length); |
3197 | scsicmd->result = AAC_STAT_GOOD; | 3205 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | |
3206 | SAM_STAT_GOOD; | ||
3198 | break; | 3207 | break; |
3199 | } | 3208 | } |
3200 | case MODE_SENSE_10: | 3209 | case MODE_SENSE_10: |
@@ -3271,7 +3280,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) | |||
3271 | (char *)&mpd10, | 3280 | (char *)&mpd10, |
3272 | mode_buf_length); | 3281 | mode_buf_length); |
3273 | 3282 | ||
3274 | scsicmd->result = AAC_STAT_GOOD; | 3283 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | |
3284 | SAM_STAT_GOOD; | ||
3275 | break; | 3285 | break; |
3276 | } | 3286 | } |
3277 | case REQUEST_SENSE: | 3287 | case REQUEST_SENSE: |
@@ -3280,7 +3290,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) | |||
3280 | sizeof(struct sense_data)); | 3290 | sizeof(struct sense_data)); |
3281 | memset(&dev->fsa_dev[cid].sense_data, 0, | 3291 | memset(&dev->fsa_dev[cid].sense_data, 0, |
3282 | sizeof(struct sense_data)); | 3292 | sizeof(struct sense_data)); |
3283 | scsicmd->result = AAC_STAT_GOOD; | 3293 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | |
3294 | SAM_STAT_GOOD; | ||
3284 | break; | 3295 | break; |
3285 | 3296 | ||
3286 | case ALLOW_MEDIUM_REMOVAL: | 3297 | case ALLOW_MEDIUM_REMOVAL: |
@@ -3290,7 +3301,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) | |||
3290 | else | 3301 | else |
3291 | fsa_dev_ptr[cid].locked = 0; | 3302 | fsa_dev_ptr[cid].locked = 0; |
3292 | 3303 | ||
3293 | scsicmd->result = AAC_STAT_GOOD; | 3304 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | |
3305 | SAM_STAT_GOOD; | ||
3294 | break; | 3306 | break; |
3295 | /* | 3307 | /* |
3296 | * These commands are all No-Ops | 3308 | * These commands are all No-Ops |
@@ -3314,7 +3326,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd) | |||
3314 | case REZERO_UNIT: | 3326 | case REZERO_UNIT: |
3315 | case REASSIGN_BLOCKS: | 3327 | case REASSIGN_BLOCKS: |
3316 | case SEEK_10: | 3328 | case SEEK_10: |
3317 | scsicmd->result = AAC_STAT_GOOD; | 3329 | scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | |
3330 | SAM_STAT_GOOD; | ||
3318 | break; | 3331 | break; |
3319 | 3332 | ||
3320 | case START_STOP: | 3333 | case START_STOP: |
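Every aacraid hunk above expands the removed AAC_STAT_GOOD shorthand into the value it always stood for. The SCSI midlayer packs scsicmd->result as four bytes, status, message, host and driver byte from least to most significant, which is why the host and message values are shifted by 16 and 8. A sketch of the composition (DID_OK, COMMAND_COMPLETE and SAM_STAT_GOOD are the standard definitions pulled in via <scsi/scsi.h>, and all three happen to be zero):

	#include <scsi/scsi.h>

	/* result = driver_byte << 24 | host_byte << 16 | msg_byte << 8 | status_byte */
	u32 good = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;	/* == 0 */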
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index d62ddd63f4fe..6e1b022a823d 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c | |||
@@ -514,7 +514,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size, | |||
514 | * The only invalid cases are if the caller requests to wait and | 514 | * The only invalid cases are if the caller requests to wait and |
515 | * does not request a response and if the caller does not want a | 515 | * does not request a response and if the caller does not want a |
516 | * response and the Fib is not allocated from pool. If a response | 516 | * response and the Fib is not allocated from pool. If a response |
517 | * is not requesed the Fib will just be deallocaed by the DPC | 517 | * is not requested the Fib will just be deallocaed by the DPC |
518 | * routine when the response comes back from the adapter. No | 518 | * routine when the response comes back from the adapter. No |
519 | * further processing will be done besides deleting the Fib. We | 519 | * further processing will be done besides deleting the Fib. We |
520 | * will have a debug mode where the adapter can notify the host | 520 | * will have a debug mode where the adapter can notify the host |
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c index 417ba349e10e..ddc69738375f 100644 --- a/drivers/scsi/aacraid/dpcsup.c +++ b/drivers/scsi/aacraid/dpcsup.c | |||
@@ -65,7 +65,7 @@ unsigned int aac_response_normal(struct aac_queue * q) | |||
65 | /* | 65 | /* |
66 | * Keep pulling response QEs off the response queue and waking | 66 | * Keep pulling response QEs off the response queue and waking |
67 | * up the waiters until there are no more QEs. We then return | 67 | * up the waiters until there are no more QEs. We then return |
68 | * back to the system. If no response was requesed we just | 68 | * back to the system. If no response was requested we just |
69 | * deallocate the Fib here and continue. | 69 | * deallocate the Fib here and continue. |
70 | */ | 70 | */ |
71 | while(aac_consumer_get(dev, q, &entry)) | 71 | while(aac_consumer_get(dev, q, &entry)) |
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c index 620166694171..576cdf9cc120 100644 --- a/drivers/scsi/aacraid/rx.c +++ b/drivers/scsi/aacraid/rx.c | |||
@@ -319,7 +319,7 @@ static void aac_rx_start_adapter(struct aac_dev *dev) | |||
319 | union aac_init *init; | 319 | union aac_init *init; |
320 | 320 | ||
321 | init = dev->init; | 321 | init = dev->init; |
322 | init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds()); | 322 | init->r7.host_elapsed_seconds = cpu_to_le32(ktime_get_real_seconds()); |
323 | // We can only use a 32 bit address here | 323 | // We can only use a 32 bit address here |
324 | rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, | 324 | rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa, |
325 | 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); | 325 | 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL); |
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c index 882f40353b96..efa96c1c6aa3 100644 --- a/drivers/scsi/aacraid/sa.c +++ b/drivers/scsi/aacraid/sa.c | |||
@@ -251,7 +251,7 @@ static void aac_sa_start_adapter(struct aac_dev *dev) | |||
251 | * Fill in the remaining pieces of the init. | 251 | * Fill in the remaining pieces of the init. |
252 | */ | 252 | */ |
253 | init = dev->init; | 253 | init = dev->init; |
254 | init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds()); | 254 | init->r7.host_elapsed_seconds = cpu_to_le32(ktime_get_real_seconds()); |
255 | /* We can only use a 32 bit address here */ | 255 | /* We can only use a 32 bit address here */ |
256 | sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, | 256 | sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, |
257 | (u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0, | 257 | (u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0, |
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c index 4ebb35a29caa..7a51ccfa8662 100644 --- a/drivers/scsi/aacraid/src.c +++ b/drivers/scsi/aacraid/src.c | |||
@@ -409,7 +409,8 @@ static void aac_src_start_adapter(struct aac_dev *dev) | |||
409 | 409 | ||
410 | init = dev->init; | 410 | init = dev->init; |
411 | if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) { | 411 | if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) { |
412 | init->r8.host_elapsed_seconds = cpu_to_le32(get_seconds()); | 412 | init->r8.host_elapsed_seconds = |
413 | cpu_to_le32(ktime_get_real_seconds()); | ||
413 | src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, | 414 | src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, |
414 | lower_32_bits(dev->init_pa), | 415 | lower_32_bits(dev->init_pa), |
415 | upper_32_bits(dev->init_pa), | 416 | upper_32_bits(dev->init_pa), |
@@ -417,7 +418,8 @@ static void aac_src_start_adapter(struct aac_dev *dev) | |||
417 | (AAC_MAX_HRRQ - 1) * sizeof(struct _rrq), | 418 | (AAC_MAX_HRRQ - 1) * sizeof(struct _rrq), |
418 | 0, 0, 0, NULL, NULL, NULL, NULL, NULL); | 419 | 0, 0, 0, NULL, NULL, NULL, NULL, NULL); |
419 | } else { | 420 | } else { |
420 | init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds()); | 421 | init->r7.host_elapsed_seconds = |
422 | cpu_to_le32(ktime_get_real_seconds()); | ||
421 | // We can only use a 32 bit address here | 423 | // We can only use a 32 bit address here |
422 | src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, | 424 | src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, |
423 | (u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0, | 425 | (u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0, |
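The rx.c, sa.c and src.c hunks above are part of the y2038 cleanup: get_seconds() returns an unsigned long that is only 32 bits wide on 32-bit architectures, while ktime_get_real_seconds() returns a time64_t everywhere. The adapter field is still a 32-bit little-endian count, so the value is truncated at the cpu_to_le32() step either way; the gain is that the kernel-side clock read no longer wraps. A sketch of the same assignment, written with an explicit lower_32_bits() where the hunks rely on implicit truncation:

	#include <linux/ktime.h>

	time64_t now = ktime_get_real_seconds();	/* 64-bit wall-clock seconds */

	init->r7.host_elapsed_seconds = cpu_to_le32(lower_32_bits(now));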
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c index 24e57e770432..713f69033f20 100644 --- a/drivers/scsi/advansys.c +++ b/drivers/scsi/advansys.c | |||
@@ -2416,8 +2416,8 @@ static void asc_prt_scsi_host(struct Scsi_Host *s) | |||
2416 | struct asc_board *boardp = shost_priv(s); | 2416 | struct asc_board *boardp = shost_priv(s); |
2417 | 2417 | ||
2418 | printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev)); | 2418 | printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev)); |
2419 | printk(" host_busy %u, host_no %d,\n", | 2419 | printk(" host_busy %d, host_no %d,\n", |
2420 | atomic_read(&s->host_busy), s->host_no); | 2420 | scsi_host_busy(s), s->host_no); |
2421 | 2421 | ||
2422 | printk(" base 0x%lx, io_port 0x%lx, irq %d,\n", | 2422 | printk(" base 0x%lx, io_port 0x%lx, irq %d,\n", |
2423 | (ulong)s->base, (ulong)s->io_port, boardp->irq); | 2423 | (ulong)s->base, (ulong)s->io_port, boardp->irq); |
@@ -3182,8 +3182,8 @@ static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost) | |||
3182 | shost->host_no); | 3182 | shost->host_no); |
3183 | 3183 | ||
3184 | seq_printf(m, | 3184 | seq_printf(m, |
3185 | " host_busy %u, max_id %u, max_lun %llu, max_channel %u\n", | 3185 | " host_busy %d, max_id %u, max_lun %llu, max_channel %u\n", |
3186 | atomic_read(&shost->host_busy), shost->max_id, | 3186 | scsi_host_busy(shost), shost->max_id, |
3187 | shost->max_lun, shost->max_channel); | 3187 | shost->max_lun, shost->max_channel); |
3188 | 3188 | ||
3189 | seq_printf(m, | 3189 | seq_printf(m, |
@@ -8466,7 +8466,7 @@ static int AdvExeScsiQueue(ADV_DVC_VAR *asc_dvc, adv_req_t *reqp) | |||
8466 | } | 8466 | } |
8467 | 8467 | ||
8468 | /* | 8468 | /* |
8469 | * Execute a single 'Scsi_Cmnd'. | 8469 | * Execute a single 'struct scsi_cmnd'. |
8470 | */ | 8470 | */ |
8471 | static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp) | 8471 | static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp) |
8472 | { | 8472 | { |
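The advansys hunks above stop reading the Scsi_Host host_busy atomic directly and use the scsi_host_busy() accessor instead, which returns an int (hence the %u to %d format change) and leaves the midlayer free to change how the count is maintained internally. A sketch of the reporting call, assuming a valid struct Scsi_Host *shost and struct seq_file *m:

	#include <scsi/scsi_host.h>
	#include <linux/seq_file.h>

	seq_printf(m, " host_busy %d, host_no %d\n",
		   scsi_host_busy(shost), shost->host_no);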
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index bc0058df31c6..4d7b0e0adbf7 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c | |||
@@ -422,16 +422,16 @@ enum aha152x_state { | |||
422 | * | 422 | * |
423 | */ | 423 | */ |
424 | struct aha152x_hostdata { | 424 | struct aha152x_hostdata { |
425 | Scsi_Cmnd *issue_SC; | 425 | struct scsi_cmnd *issue_SC; |
426 | /* pending commands to issue */ | 426 | /* pending commands to issue */ |
427 | 427 | ||
428 | Scsi_Cmnd *current_SC; | 428 | struct scsi_cmnd *current_SC; |
429 | /* current command on the bus */ | 429 | /* current command on the bus */ |
430 | 430 | ||
431 | Scsi_Cmnd *disconnected_SC; | 431 | struct scsi_cmnd *disconnected_SC; |
432 | /* commands that disconnected */ | 432 | /* commands that disconnected */ |
433 | 433 | ||
434 | Scsi_Cmnd *done_SC; | 434 | struct scsi_cmnd *done_SC; |
435 | /* command that was completed */ | 435 | /* command that was completed */ |
436 | 436 | ||
437 | spinlock_t lock; | 437 | spinlock_t lock; |
@@ -510,7 +510,7 @@ struct aha152x_hostdata { | |||
510 | * | 510 | * |
511 | */ | 511 | */ |
512 | struct aha152x_scdata { | 512 | struct aha152x_scdata { |
513 | Scsi_Cmnd *next; /* next sc in queue */ | 513 | struct scsi_cmnd *next; /* next sc in queue */ |
514 | struct completion *done;/* semaphore to block on */ | 514 | struct completion *done;/* semaphore to block on */ |
515 | struct scsi_eh_save ses; | 515 | struct scsi_eh_save ses; |
516 | }; | 516 | }; |
@@ -633,7 +633,7 @@ static void aha152x_error(struct Scsi_Host *shpnt, char *msg); | |||
633 | static void done(struct Scsi_Host *shpnt, int error); | 633 | static void done(struct Scsi_Host *shpnt, int error); |
634 | 634 | ||
635 | /* diagnostics */ | 635 | /* diagnostics */ |
636 | static void show_command(Scsi_Cmnd * ptr); | 636 | static void show_command(struct scsi_cmnd * ptr); |
637 | static void show_queues(struct Scsi_Host *shpnt); | 637 | static void show_queues(struct Scsi_Host *shpnt); |
638 | static void disp_enintr(struct Scsi_Host *shpnt); | 638 | static void disp_enintr(struct Scsi_Host *shpnt); |
639 | 639 | ||
@@ -642,9 +642,9 @@ static void disp_enintr(struct Scsi_Host *shpnt); | |||
642 | * queue services: | 642 | * queue services: |
643 | * | 643 | * |
644 | */ | 644 | */ |
645 | static inline void append_SC(Scsi_Cmnd **SC, Scsi_Cmnd *new_SC) | 645 | static inline void append_SC(struct scsi_cmnd **SC, struct scsi_cmnd *new_SC) |
646 | { | 646 | { |
647 | Scsi_Cmnd *end; | 647 | struct scsi_cmnd *end; |
648 | 648 | ||
649 | SCNEXT(new_SC) = NULL; | 649 | SCNEXT(new_SC) = NULL; |
650 | if (!*SC) | 650 | if (!*SC) |
@@ -656,9 +656,9 @@ static inline void append_SC(Scsi_Cmnd **SC, Scsi_Cmnd *new_SC) | |||
656 | } | 656 | } |
657 | } | 657 | } |
658 | 658 | ||
659 | static inline Scsi_Cmnd *remove_first_SC(Scsi_Cmnd ** SC) | 659 | static inline struct scsi_cmnd *remove_first_SC(struct scsi_cmnd ** SC) |
660 | { | 660 | { |
661 | Scsi_Cmnd *ptr; | 661 | struct scsi_cmnd *ptr; |
662 | 662 | ||
663 | ptr = *SC; | 663 | ptr = *SC; |
664 | if (ptr) { | 664 | if (ptr) { |
@@ -668,9 +668,10 @@ static inline Scsi_Cmnd *remove_first_SC(Scsi_Cmnd ** SC) | |||
668 | return ptr; | 668 | return ptr; |
669 | } | 669 | } |
670 | 670 | ||
671 | static inline Scsi_Cmnd *remove_lun_SC(Scsi_Cmnd ** SC, int target, int lun) | 671 | static inline struct scsi_cmnd *remove_lun_SC(struct scsi_cmnd ** SC, |
672 | int target, int lun) | ||
672 | { | 673 | { |
673 | Scsi_Cmnd *ptr, *prev; | 674 | struct scsi_cmnd *ptr, *prev; |
674 | 675 | ||
675 | for (ptr = *SC, prev = NULL; | 676 | for (ptr = *SC, prev = NULL; |
676 | ptr && ((ptr->device->id != target) || (ptr->device->lun != lun)); | 677 | ptr && ((ptr->device->id != target) || (ptr->device->lun != lun)); |
@@ -689,9 +690,10 @@ static inline Scsi_Cmnd *remove_lun_SC(Scsi_Cmnd ** SC, int target, int lun) | |||
689 | return ptr; | 690 | return ptr; |
690 | } | 691 | } |
691 | 692 | ||
692 | static inline Scsi_Cmnd *remove_SC(Scsi_Cmnd **SC, Scsi_Cmnd *SCp) | 693 | static inline struct scsi_cmnd *remove_SC(struct scsi_cmnd **SC, |
694 | struct scsi_cmnd *SCp) | ||
693 | { | 695 | { |
694 | Scsi_Cmnd *ptr, *prev; | 696 | struct scsi_cmnd *ptr, *prev; |
695 | 697 | ||
696 | for (ptr = *SC, prev = NULL; | 698 | for (ptr = *SC, prev = NULL; |
697 | ptr && SCp!=ptr; | 699 | ptr && SCp!=ptr; |
@@ -912,8 +914,9 @@ static int setup_expected_interrupts(struct Scsi_Host *shpnt) | |||
912 | /* | 914 | /* |
913 | * Queue a command and setup interrupts for a free bus. | 915 | * Queue a command and setup interrupts for a free bus. |
914 | */ | 916 | */ |
915 | static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete, | 917 | static int aha152x_internal_queue(struct scsi_cmnd *SCpnt, |
916 | int phase, void (*done)(Scsi_Cmnd *)) | 918 | struct completion *complete, |
919 | int phase, void (*done)(struct scsi_cmnd *)) | ||
917 | { | 920 | { |
918 | struct Scsi_Host *shpnt = SCpnt->device->host; | 921 | struct Scsi_Host *shpnt = SCpnt->device->host; |
919 | unsigned long flags; | 922 | unsigned long flags; |
@@ -987,7 +990,8 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete, | |||
987 | * queue a command | 990 | * queue a command |
988 | * | 991 | * |
989 | */ | 992 | */ |
990 | static int aha152x_queue_lck(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) | 993 | static int aha152x_queue_lck(struct scsi_cmnd *SCpnt, |
994 | void (*done)(struct scsi_cmnd *)) | ||
991 | { | 995 | { |
992 | return aha152x_internal_queue(SCpnt, NULL, 0, done); | 996 | return aha152x_internal_queue(SCpnt, NULL, 0, done); |
993 | } | 997 | } |
@@ -998,7 +1002,7 @@ static DEF_SCSI_QCMD(aha152x_queue) | |||
998 | /* | 1002 | /* |
999 | * | 1003 | * |
1000 | */ | 1004 | */ |
1001 | static void reset_done(Scsi_Cmnd *SCpnt) | 1005 | static void reset_done(struct scsi_cmnd *SCpnt) |
1002 | { | 1006 | { |
1003 | if(SCSEM(SCpnt)) { | 1007 | if(SCSEM(SCpnt)) { |
1004 | complete(SCSEM(SCpnt)); | 1008 | complete(SCSEM(SCpnt)); |
@@ -1011,10 +1015,10 @@ static void reset_done(Scsi_Cmnd *SCpnt) | |||
1011 | * Abort a command | 1015 | * Abort a command |
1012 | * | 1016 | * |
1013 | */ | 1017 | */ |
1014 | static int aha152x_abort(Scsi_Cmnd *SCpnt) | 1018 | static int aha152x_abort(struct scsi_cmnd *SCpnt) |
1015 | { | 1019 | { |
1016 | struct Scsi_Host *shpnt = SCpnt->device->host; | 1020 | struct Scsi_Host *shpnt = SCpnt->device->host; |
1017 | Scsi_Cmnd *ptr; | 1021 | struct scsi_cmnd *ptr; |
1018 | unsigned long flags; | 1022 | unsigned long flags; |
1019 | 1023 | ||
1020 | DO_LOCK(flags); | 1024 | DO_LOCK(flags); |
@@ -1052,7 +1056,7 @@ static int aha152x_abort(Scsi_Cmnd *SCpnt) | |||
1052 | * Reset a device | 1056 | * Reset a device |
1053 | * | 1057 | * |
1054 | */ | 1058 | */ |
1055 | static int aha152x_device_reset(Scsi_Cmnd * SCpnt) | 1059 | static int aha152x_device_reset(struct scsi_cmnd * SCpnt) |
1056 | { | 1060 | { |
1057 | struct Scsi_Host *shpnt = SCpnt->device->host; | 1061 | struct Scsi_Host *shpnt = SCpnt->device->host; |
1058 | DECLARE_COMPLETION(done); | 1062 | DECLARE_COMPLETION(done); |
@@ -1110,13 +1114,14 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt) | |||
1110 | return ret; | 1114 | return ret; |
1111 | } | 1115 | } |
1112 | 1116 | ||
1113 | static void free_hard_reset_SCs(struct Scsi_Host *shpnt, Scsi_Cmnd **SCs) | 1117 | static void free_hard_reset_SCs(struct Scsi_Host *shpnt, |
1118 | struct scsi_cmnd **SCs) | ||
1114 | { | 1119 | { |
1115 | Scsi_Cmnd *ptr; | 1120 | struct scsi_cmnd *ptr; |
1116 | 1121 | ||
1117 | ptr=*SCs; | 1122 | ptr=*SCs; |
1118 | while(ptr) { | 1123 | while(ptr) { |
1119 | Scsi_Cmnd *next; | 1124 | struct scsi_cmnd *next; |
1120 | 1125 | ||
1121 | if(SCDATA(ptr)) { | 1126 | if(SCDATA(ptr)) { |
1122 | next = SCNEXT(ptr); | 1127 | next = SCNEXT(ptr); |
@@ -1171,7 +1176,7 @@ static int aha152x_bus_reset_host(struct Scsi_Host *shpnt) | |||
1171 | * Reset the bus | 1176 | * Reset the bus |
1172 | * | 1177 | * |
1173 | */ | 1178 | */ |
1174 | static int aha152x_bus_reset(Scsi_Cmnd *SCpnt) | 1179 | static int aha152x_bus_reset(struct scsi_cmnd *SCpnt) |
1175 | { | 1180 | { |
1176 | return aha152x_bus_reset_host(SCpnt->device->host); | 1181 | return aha152x_bus_reset_host(SCpnt->device->host); |
1177 | } | 1182 | } |
@@ -1436,7 +1441,7 @@ static void busfree_run(struct Scsi_Host *shpnt) | |||
1436 | 1441 | ||
1437 | if(!(DONE_SC->SCp.phase & not_issued)) { | 1442 | if(!(DONE_SC->SCp.phase & not_issued)) { |
1438 | struct aha152x_scdata *sc; | 1443 | struct aha152x_scdata *sc; |
1439 | Scsi_Cmnd *ptr = DONE_SC; | 1444 | struct scsi_cmnd *ptr = DONE_SC; |
1440 | DONE_SC=NULL; | 1445 | DONE_SC=NULL; |
1441 | 1446 | ||
1442 | sc = SCDATA(ptr); | 1447 | sc = SCDATA(ptr); |
@@ -1451,7 +1456,7 @@ static void busfree_run(struct Scsi_Host *shpnt) | |||
1451 | } | 1456 | } |
1452 | 1457 | ||
1453 | if(DONE_SC && DONE_SC->scsi_done) { | 1458 | if(DONE_SC && DONE_SC->scsi_done) { |
1454 | Scsi_Cmnd *ptr = DONE_SC; | 1459 | struct scsi_cmnd *ptr = DONE_SC; |
1455 | DONE_SC=NULL; | 1460 | DONE_SC=NULL; |
1456 | 1461 | ||
1457 | /* turn led off, when no commands are in the driver */ | 1462 | /* turn led off, when no commands are in the driver */ |
@@ -2247,13 +2252,13 @@ static void parerr_run(struct Scsi_Host *shpnt) | |||
2247 | */ | 2252 | */ |
2248 | static void rsti_run(struct Scsi_Host *shpnt) | 2253 | static void rsti_run(struct Scsi_Host *shpnt) |
2249 | { | 2254 | { |
2250 | Scsi_Cmnd *ptr; | 2255 | struct scsi_cmnd *ptr; |
2251 | 2256 | ||
2252 | shost_printk(KERN_NOTICE, shpnt, "scsi reset in\n"); | 2257 | shost_printk(KERN_NOTICE, shpnt, "scsi reset in\n"); |
2253 | 2258 | ||
2254 | ptr=DISCONNECTED_SC; | 2259 | ptr=DISCONNECTED_SC; |
2255 | while(ptr) { | 2260 | while(ptr) { |
2256 | Scsi_Cmnd *next = SCNEXT(ptr); | 2261 | struct scsi_cmnd *next = SCNEXT(ptr); |
2257 | 2262 | ||
2258 | if (!ptr->device->soft_reset) { | 2263 | if (!ptr->device->soft_reset) { |
2259 | remove_SC(&DISCONNECTED_SC, ptr); | 2264 | remove_SC(&DISCONNECTED_SC, ptr); |
@@ -2438,7 +2443,7 @@ static void disp_enintr(struct Scsi_Host *shpnt) | |||
2438 | /* | 2443 | /* |
2439 | * Show the command data of a command | 2444 | * Show the command data of a command |
2440 | */ | 2445 | */ |
2441 | static void show_command(Scsi_Cmnd *ptr) | 2446 | static void show_command(struct scsi_cmnd *ptr) |
2442 | { | 2447 | { |
2443 | scsi_print_command(ptr); | 2448 | scsi_print_command(ptr); |
2444 | scmd_printk(KERN_DEBUG, ptr, | 2449 | scmd_printk(KERN_DEBUG, ptr, |
@@ -2462,7 +2467,7 @@ static void show_command(Scsi_Cmnd *ptr) | |||
2462 | */ | 2467 | */ |
2463 | static void show_queues(struct Scsi_Host *shpnt) | 2468 | static void show_queues(struct Scsi_Host *shpnt) |
2464 | { | 2469 | { |
2465 | Scsi_Cmnd *ptr; | 2470 | struct scsi_cmnd *ptr; |
2466 | unsigned long flags; | 2471 | unsigned long flags; |
2467 | 2472 | ||
2468 | DO_LOCK(flags); | 2473 | DO_LOCK(flags); |
@@ -2484,7 +2489,7 @@ static void show_queues(struct Scsi_Host *shpnt) | |||
2484 | disp_enintr(shpnt); | 2489 | disp_enintr(shpnt); |
2485 | } | 2490 | } |
2486 | 2491 | ||
2487 | static void get_command(struct seq_file *m, Scsi_Cmnd * ptr) | 2492 | static void get_command(struct seq_file *m, struct scsi_cmnd * ptr) |
2488 | { | 2493 | { |
2489 | int i; | 2494 | int i; |
2490 | 2495 | ||
@@ -2813,7 +2818,7 @@ static int aha152x_set_info(struct Scsi_Host *shpnt, char *buffer, int length) | |||
2813 | static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt) | 2818 | static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt) |
2814 | { | 2819 | { |
2815 | int i; | 2820 | int i; |
2816 | Scsi_Cmnd *ptr; | 2821 | struct scsi_cmnd *ptr; |
2817 | unsigned long flags; | 2822 | unsigned long flags; |
2818 | 2823 | ||
2819 | seq_puts(m, AHA152X_REVID "\n"); | 2824 | seq_puts(m, AHA152X_REVID "\n"); |
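The aha152x changes above are a mechanical conversion: every use of the obsolete Scsi_Cmnd typedef is spelled out as struct scsi_cmnd, with prototypes re-wrapped where the longer name overflows the line. For context, the compatibility typedef being retired amounts to the single line below (it lived in a small scsi_typedefs.h header at the time, kept only for unconverted drivers):

	typedef struct scsi_cmnd Scsi_Cmnd;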
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c index b48d5436f094..786bf7f32c64 100644 --- a/drivers/scsi/aha1740.c +++ b/drivers/scsi/aha1740.c | |||
@@ -207,11 +207,11 @@ static int aha1740_test_port(unsigned int base) | |||
207 | static irqreturn_t aha1740_intr_handle(int irq, void *dev_id) | 207 | static irqreturn_t aha1740_intr_handle(int irq, void *dev_id) |
208 | { | 208 | { |
209 | struct Scsi_Host *host = (struct Scsi_Host *) dev_id; | 209 | struct Scsi_Host *host = (struct Scsi_Host *) dev_id; |
210 | void (*my_done)(Scsi_Cmnd *); | 210 | void (*my_done)(struct scsi_cmnd *); |
211 | int errstatus, adapstat; | 211 | int errstatus, adapstat; |
212 | int number_serviced; | 212 | int number_serviced; |
213 | struct ecb *ecbptr; | 213 | struct ecb *ecbptr; |
214 | Scsi_Cmnd *SCtmp; | 214 | struct scsi_cmnd *SCtmp; |
215 | unsigned int base; | 215 | unsigned int base; |
216 | unsigned long flags; | 216 | unsigned long flags; |
217 | int handled = 0; | 217 | int handled = 0; |
@@ -311,7 +311,8 @@ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id) | |||
311 | return IRQ_RETVAL(handled); | 311 | return IRQ_RETVAL(handled); |
312 | } | 312 | } |
313 | 313 | ||
314 | static int aha1740_queuecommand_lck(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *)) | 314 | static int aha1740_queuecommand_lck(struct scsi_cmnd * SCpnt, |
315 | void (*done)(struct scsi_cmnd *)) | ||
315 | { | 316 | { |
316 | unchar direction; | 317 | unchar direction; |
317 | unchar *cmd = (unchar *) SCpnt->cmnd; | 318 | unchar *cmd = (unchar *) SCpnt->cmnd; |
@@ -520,7 +521,7 @@ static int aha1740_biosparam(struct scsi_device *sdev, | |||
520 | return 0; | 521 | return 0; |
521 | } | 522 | } |
522 | 523 | ||
523 | static int aha1740_eh_abort_handler (Scsi_Cmnd *dummy) | 524 | static int aha1740_eh_abort_handler (struct scsi_cmnd *dummy) |
524 | { | 525 | { |
525 | /* | 526 | /* |
526 | * From Alan Cox : | 527 | * From Alan Cox : |
diff --git a/drivers/scsi/aha1740.h b/drivers/scsi/aha1740.h index dfdaa4d3ea4e..6eeed6da0b54 100644 --- a/drivers/scsi/aha1740.h +++ b/drivers/scsi/aha1740.h | |||
@@ -135,8 +135,8 @@ struct ecb { /* Enhanced Control Block 6.1 */ | |||
135 | /* Hardware defined portion ends here, rest is driver defined */ | 135 | /* Hardware defined portion ends here, rest is driver defined */ |
136 | u8 sense[MAX_SENSE]; /* Sense area */ | 136 | u8 sense[MAX_SENSE]; /* Sense area */ |
137 | u8 status[MAX_STATUS]; /* Status area */ | 137 | u8 status[MAX_STATUS]; /* Status area */ |
138 | Scsi_Cmnd *SCpnt; /* Link to the SCSI Command Block */ | 138 | struct scsi_cmnd *SCpnt; /* Link to the SCSI Command Block */ |
139 | void (*done) (Scsi_Cmnd *); /* Completion Function */ | 139 | void (*done) (struct scsi_cmnd *); /* Completion Function */ |
140 | }; | 140 | }; |
141 | 141 | ||
142 | #define AHA1740CMD_NOP 0x00 /* No OP */ | 142 | #define AHA1740CMD_NOP 0x00 /* No OP */ |
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index 80e5b283fd81..1391e5f35918 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c | |||
@@ -1030,8 +1030,10 @@ static int __init aic94xx_init(void) | |||
1030 | 1030 | ||
1031 | aic94xx_transport_template = | 1031 | aic94xx_transport_template = |
1032 | sas_domain_attach_transport(&aic94xx_transport_functions); | 1032 | sas_domain_attach_transport(&aic94xx_transport_functions); |
1033 | if (!aic94xx_transport_template) | 1033 | if (!aic94xx_transport_template) { |
1034 | err = -ENOMEM; | ||
1034 | goto out_destroy_caches; | 1035 | goto out_destroy_caches; |
1036 | } | ||
1035 | 1037 | ||
1036 | err = pci_register_driver(&aic94xx_pci_driver); | 1038 | err = pci_register_driver(&aic94xx_pci_driver); |
1037 | if (err) | 1039 | if (err) |
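The aic94xx_init() hunk above fixes a silent-failure path: sas_domain_attach_transport() signals failure by returning NULL rather than an ERR_PTR(), so without the explicit assignment err still holds the 0 left by the previous successful step and the module would claim to load while skipping the rest of its setup. A generic sketch of the pitfall, with hypothetical setup_* helpers:

	err = setup_caches();			/* returns 0 or a negative errno */
	if (err)
		goto out;

	tmpl = setup_transport();		/* returns a pointer, NULL on failure */
	if (!tmpl) {
		err = -ENOMEM;			/* otherwise err is still 0 here */
		goto out_destroy_caches;
	}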
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h index 2e51ccc510e8..9c397a2794d6 100644 --- a/drivers/scsi/arcmsr/arcmsr.h +++ b/drivers/scsi/arcmsr/arcmsr.h | |||
@@ -49,7 +49,7 @@ struct device_attribute; | |||
49 | #define ARCMSR_MAX_OUTSTANDING_CMD 1024 | 49 | #define ARCMSR_MAX_OUTSTANDING_CMD 1024 |
50 | #define ARCMSR_DEFAULT_OUTSTANDING_CMD 128 | 50 | #define ARCMSR_DEFAULT_OUTSTANDING_CMD 128 |
51 | #define ARCMSR_MIN_OUTSTANDING_CMD 32 | 51 | #define ARCMSR_MIN_OUTSTANDING_CMD 32 |
52 | #define ARCMSR_DRIVER_VERSION "v1.40.00.05-20180309" | 52 | #define ARCMSR_DRIVER_VERSION "v1.40.00.09-20180709" |
53 | #define ARCMSR_SCSI_INITIATOR_ID 255 | 53 | #define ARCMSR_SCSI_INITIATOR_ID 255 |
54 | #define ARCMSR_MAX_XFER_SECTORS 512 | 54 | #define ARCMSR_MAX_XFER_SECTORS 512 |
55 | #define ARCMSR_MAX_XFER_SECTORS_B 4096 | 55 | #define ARCMSR_MAX_XFER_SECTORS_B 4096 |
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 732b5d9242f1..12316ef4c893 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c | |||
@@ -1061,6 +1061,13 @@ static int arcmsr_resume(struct pci_dev *pdev) | |||
1061 | pci_set_master(pdev); | 1061 | pci_set_master(pdev); |
1062 | if (arcmsr_request_irq(pdev, acb) == FAILED) | 1062 | if (arcmsr_request_irq(pdev, acb) == FAILED) |
1063 | goto controller_stop; | 1063 | goto controller_stop; |
1064 | if (acb->adapter_type == ACB_ADAPTER_TYPE_E) { | ||
1065 | writel(0, &acb->pmuE->host_int_status); | ||
1066 | writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell); | ||
1067 | acb->in_doorbell = 0; | ||
1068 | acb->out_doorbell = 0; | ||
1069 | acb->doneq_index = 0; | ||
1070 | } | ||
1064 | arcmsr_iop_init(acb); | 1071 | arcmsr_iop_init(acb); |
1065 | arcmsr_init_get_devmap_timer(acb); | 1072 | arcmsr_init_get_devmap_timer(acb); |
1066 | if (set_date_time) | 1073 | if (set_date_time) |
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c index b46997cf77e2..8996d2329e11 100644 --- a/drivers/scsi/atp870u.c +++ b/drivers/scsi/atp870u.c | |||
@@ -1055,7 +1055,7 @@ static void tscam(struct Scsi_Host *host, bool wide_chip, u8 scam_on) | |||
1055 | udelay(2); /* 2 deskew delay(45ns*2=90ns) */ | 1055 | udelay(2); /* 2 deskew delay(45ns*2=90ns) */ |
1056 | val &= 0x007f; /* no bsy */ | 1056 | val &= 0x007f; /* no bsy */ |
1057 | atp_writew_io(dev, 0, 0x1c, val); | 1057 | atp_writew_io(dev, 0, 0x1c, val); |
1058 | mdelay(128); | 1058 | msleep(128); |
1059 | val &= 0x00fb; /* after 1ms no msg */ | 1059 | val &= 0x00fb; /* after 1ms no msg */ |
1060 | atp_writew_io(dev, 0, 0x1c, val); | 1060 | atp_writew_io(dev, 0, 0x1c, val); |
1061 | while ((atp_readb_io(dev, 0, 0x1c) & 0x04) != 0) | 1061 | while ((atp_readb_io(dev, 0, 0x1c) & 0x04) != 0) |
@@ -1286,9 +1286,9 @@ static void atp870_init(struct Scsi_Host *shpnt) | |||
1286 | k = (atp_readb_base(atpdev, 0x3a) & 0xf3) | 0x10; | 1286 | k = (atp_readb_base(atpdev, 0x3a) & 0xf3) | 0x10; |
1287 | atp_writeb_base(atpdev, 0x3a, k); | 1287 | atp_writeb_base(atpdev, 0x3a, k); |
1288 | atp_writeb_base(atpdev, 0x3a, k & 0xdf); | 1288 | atp_writeb_base(atpdev, 0x3a, k & 0xdf); |
1289 | mdelay(32); | 1289 | msleep(32); |
1290 | atp_writeb_base(atpdev, 0x3a, k); | 1290 | atp_writeb_base(atpdev, 0x3a, k); |
1291 | mdelay(32); | 1291 | msleep(32); |
1292 | atp_set_host_id(atpdev, 0, host_id); | 1292 | atp_set_host_id(atpdev, 0, host_id); |
1293 | 1293 | ||
1294 | tscam(shpnt, wide_chip, scam_on); | 1294 | tscam(shpnt, wide_chip, scam_on); |
@@ -1370,9 +1370,9 @@ static void atp880_init(struct Scsi_Host *shpnt) | |||
1370 | k = atp_readb_base(atpdev, 0x38) & 0x80; | 1370 | k = atp_readb_base(atpdev, 0x38) & 0x80; |
1371 | atp_writeb_base(atpdev, 0x38, k); | 1371 | atp_writeb_base(atpdev, 0x38, k); |
1372 | atp_writeb_base(atpdev, 0x3b, 0x20); | 1372 | atp_writeb_base(atpdev, 0x3b, 0x20); |
1373 | mdelay(32); | 1373 | msleep(32); |
1374 | atp_writeb_base(atpdev, 0x3b, 0); | 1374 | atp_writeb_base(atpdev, 0x3b, 0); |
1375 | mdelay(32); | 1375 | msleep(32); |
1376 | atp_readb_io(atpdev, 0, 0x1b); | 1376 | atp_readb_io(atpdev, 0, 0x1b); |
1377 | atp_readb_io(atpdev, 0, 0x17); | 1377 | atp_readb_io(atpdev, 0, 0x17); |
1378 | 1378 | ||
@@ -1454,10 +1454,10 @@ static void atp885_init(struct Scsi_Host *shpnt) | |||
1454 | atp_writeb_base(atpdev, 0x28, k); | 1454 | atp_writeb_base(atpdev, 0x28, k); |
1455 | atp_writeb_pci(atpdev, 0, 1, 0x80); | 1455 | atp_writeb_pci(atpdev, 0, 1, 0x80); |
1456 | atp_writeb_pci(atpdev, 1, 1, 0x80); | 1456 | atp_writeb_pci(atpdev, 1, 1, 0x80); |
1457 | mdelay(100); | 1457 | msleep(100); |
1458 | atp_writeb_pci(atpdev, 0, 1, 0); | 1458 | atp_writeb_pci(atpdev, 0, 1, 0); |
1459 | atp_writeb_pci(atpdev, 1, 1, 0); | 1459 | atp_writeb_pci(atpdev, 1, 1, 0); |
1460 | mdelay(1000); | 1460 | msleep(1000); |
1461 | atp_readb_io(atpdev, 0, 0x1b); | 1461 | atp_readb_io(atpdev, 0, 0x1b); |
1462 | atp_readb_io(atpdev, 0, 0x17); | 1462 | atp_readb_io(atpdev, 0, 0x17); |
1463 | atp_readb_io(atpdev, 1, 0x1b); | 1463 | atp_readb_io(atpdev, 1, 0x1b); |
@@ -1473,7 +1473,7 @@ static void atp885_init(struct Scsi_Host *shpnt) | |||
1473 | k = (k & 0x07) | 0x40; | 1473 | k = (k & 0x07) | 0x40; |
1474 | atp_set_host_id(atpdev, 1, k); | 1474 | atp_set_host_id(atpdev, 1, k); |
1475 | 1475 | ||
1476 | mdelay(600); /* this delay used to be called tscam_885() */ | 1476 | msleep(600); /* this delay used to be called tscam_885() */ |
1477 | dev_info(&pdev->dev, "Scanning Channel A SCSI Device ...\n"); | 1477 | dev_info(&pdev->dev, "Scanning Channel A SCSI Device ...\n"); |
1478 | atp_is(atpdev, 0, true, atp_readb_io(atpdev, 0, 0x1b) >> 7); | 1478 | atp_is(atpdev, 0, true, atp_readb_io(atpdev, 0, 0x1b) >> 7); |
1479 | atp_writeb_io(atpdev, 0, 0x16, 0x80); | 1479 | atp_writeb_io(atpdev, 0, 0x16, 0x80); |
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c index 2eb66df3e3d6..c10aac4dbc5e 100644 --- a/drivers/scsi/be2iscsi/be_cmds.c +++ b/drivers/scsi/be2iscsi/be_cmds.c | |||
@@ -1545,7 +1545,7 @@ int beiscsi_set_host_data(struct beiscsi_hba *phba) | |||
1545 | snprintf((char *)ioctl->param.req.param_data, | 1545 | snprintf((char *)ioctl->param.req.param_data, |
1546 | sizeof(ioctl->param.req.param_data), | 1546 | sizeof(ioctl->param.req.param_data), |
1547 | "Linux iSCSI v%s", BUILD_STR); | 1547 | "Linux iSCSI v%s", BUILD_STR); |
1548 | ioctl->param.req.param_len = ALIGN(ioctl->param.req.param_len, 4); | 1548 | ioctl->param.req.param_len = ALIGN(ioctl->param.req.param_len + 1, 4); |
1549 | if (ioctl->param.req.param_len > BE_CMD_MAX_DRV_VERSION) | 1549 | if (ioctl->param.req.param_len > BE_CMD_MAX_DRV_VERSION) |
1550 | ioctl->param.req.param_len = BE_CMD_MAX_DRV_VERSION; | 1550 | ioctl->param.req.param_len = BE_CMD_MAX_DRV_VERSION; |
1551 | ret = be_mbox_notify(ctrl); | 1551 | ret = be_mbox_notify(ctrl); |
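The be_cmds.c change above makes room for the NUL terminator that snprintf() writes but does not count in its return value: rounding the raw length up with ALIGN() can land exactly on the string length when it is already a multiple of four, clipping the terminator from the reported parameter length. A worked example with a hypothetical buffer:

	#include <linux/kernel.h>	/* ALIGN() */

	len = snprintf(buf, sizeof(buf), "Linux iSCSI v%s", build_str);
	/* If len == 16: ALIGN(len, 4) == 16 excludes the NUL,
	 * while ALIGN(len + 1, 4) == 20 keeps it inside the length. */
	len = ALIGN(len + 1, 4);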
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index a398c54139aa..c8f0a2144b44 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c | |||
@@ -1,11 +1,14 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright 2017 Broadcom. All Rights Reserved. | 2 | * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI |
3 | * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. | 3 | * Host Bus Adapters. Refer to the README file included with this package |
4 | * for driver version and adapter compatibility. | ||
4 | * | 5 | * |
5 | * This program is free software; you can redistribute it and/or | 6 | * Copyright (c) 2018 Broadcom. All Rights Reserved. |
6 | * modify it under the terms of the GNU General Public License version 2 | 7 | * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. |
7 | * as published by the Free Software Foundation. The full GNU General | 8 | * |
8 | * Public License is included in this distribution in the file called COPYING. | 9 | * This program is free software; you can redistribute it and/or modify it |
10 | * under the terms of version 2 of the GNU General Public License as published | ||
11 | * by the Free Software Foundation. | ||
9 | * | 12 | * |
10 | * Contact Information: | 13 | * Contact Information: |
11 | * linux-drivers@broadcom.com | 14 | * linux-drivers@broadcom.com |
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 818d185d63f0..3660059784f7 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c | |||
@@ -1,11 +1,22 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright 2017 Broadcom. All Rights Reserved. | 2 | * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI |
3 | * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. | 3 | * Host Bus Adapters. Refer to the README file included with this package |
4 | * for driver version and adapter compatibility. | ||
4 | * | 5 | * |
5 | * This program is free software; you can redistribute it and/or | 6 | * Copyright (c) 2018 Broadcom. All Rights Reserved. |
6 | * modify it under the terms of the GNU General Public License version 2 | 7 | * The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. |
7 | * as published by the Free Software Foundation. The full GNU General | 8 | * |
8 | * Public License is included in this distribution in the file called COPYING. | 9 | * This program is free software; you can redistribute it and/or modify it |
10 | * under the terms of version 2 of the GNU General Public License as published | ||
11 | * by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful. ALL EXPRESS | ||
14 | * OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY | ||
15 | * IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, | ||
16 | * OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH | ||
17 | * DISCLAIMERS ARE HELD TO BE LEGALLY INVALID. | ||
18 | * See the GNU General Public License for more details, a copy of which | ||
19 | * can be found in the file COPYING included with this package. | ||
9 | * | 20 | * |
10 | * Contact Information: | 21 | * Contact Information: |
11 | * linux-drivers@broadcom.com | 22 | * linux-drivers@broadcom.com |
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 66ca967f2850..8fdc07b6c686 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c | |||
@@ -1,11 +1,22 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright 2017 Broadcom. All Rights Reserved. | 2 | * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI |
3 | * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries. | 3 | * Host Bus Adapters. Refer to the README file included with this package |
4 | * for driver version and adapter compatibility. | ||
4 | * | 5 | * |
5 | * This program is free software; you can redistribute it and/or | 6 | * Copyright (c) 2018 Broadcom. All Rights Reserved. |
6 | * modify it under the terms of the GNU General Public License version 2 | 7 | * The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. |
7 | * as published by the Free Software Foundation. The full GNU General | 8 | * |
8 | * Public License is included in this distribution in the file called COPYING. | 9 | * This program is free software; you can redistribute it and/or modify it |
10 | * under the terms of version 2 of the GNU General Public License as published | ||
11 | * by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful. ALL EXPRESS | ||
14 | * OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY | ||
15 | * IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, | ||
16 | * OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH | ||
17 | * DISCLAIMERS ARE HELD TO BE LEGALLY INVALID. | ||
18 | * See the GNU General Public License for more details, a copy of which | ||
19 | * can be found in the file COPYING included with this package. | ||
9 | * | 20 | * |
10 | * Contact Information: | 21 | * Contact Information: |
11 | * linux-drivers@broadcom.com | 22 | * linux-drivers@broadcom.com |
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index c05d6e91e4bd..c4a33317d344 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c | |||
@@ -70,21 +70,18 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio, | |||
70 | host_status = DID_ERROR; | 70 | host_status = DID_ERROR; |
71 | } | 71 | } |
72 | } | 72 | } |
73 | cmnd->result = ScsiResult(host_status, scsi_status); | 73 | cmnd->result = host_status << 16 | scsi_status; |
74 | 74 | ||
75 | break; | 75 | break; |
76 | 76 | ||
77 | case BFI_IOIM_STS_TIMEDOUT: | 77 | case BFI_IOIM_STS_TIMEDOUT: |
78 | host_status = DID_TIME_OUT; | 78 | cmnd->result = DID_TIME_OUT << 16; |
79 | cmnd->result = ScsiResult(host_status, 0); | ||
80 | break; | 79 | break; |
81 | case BFI_IOIM_STS_PATHTOV: | 80 | case BFI_IOIM_STS_PATHTOV: |
82 | host_status = DID_TRANSPORT_DISRUPTED; | 81 | cmnd->result = DID_TRANSPORT_DISRUPTED << 16; |
83 | cmnd->result = ScsiResult(host_status, 0); | ||
84 | break; | 82 | break; |
85 | default: | 83 | default: |
86 | host_status = DID_ERROR; | 84 | cmnd->result = DID_ERROR << 16; |
87 | cmnd->result = ScsiResult(host_status, 0); | ||
88 | } | 85 | } |
89 | 86 | ||
90 | /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */ | 87 | /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */ |
@@ -117,7 +114,7 @@ bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio) | |||
117 | struct bfad_itnim_data_s *itnim_data; | 114 | struct bfad_itnim_data_s *itnim_data; |
118 | struct bfad_itnim_s *itnim; | 115 | struct bfad_itnim_s *itnim; |
119 | 116 | ||
120 | cmnd->result = ScsiResult(DID_OK, SCSI_STATUS_GOOD); | 117 | cmnd->result = DID_OK << 16 | SCSI_STATUS_GOOD; |
121 | 118 | ||
122 | /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */ | 119 | /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */ |
123 | if (cmnd->device->host != NULL) | 120 | if (cmnd->device->host != NULL) |
@@ -144,7 +141,7 @@ bfa_cb_ioim_abort(void *drv, struct bfad_ioim_s *dio) | |||
144 | struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; | 141 | struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio; |
145 | struct bfad_s *bfad = drv; | 142 | struct bfad_s *bfad = drv; |
146 | 143 | ||
147 | cmnd->result = ScsiResult(DID_ERROR, 0); | 144 | cmnd->result = DID_ERROR << 16; |
148 | 145 | ||
149 | /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */ | 146 | /* Unmap DMA, if host is NULL, it means a scsi passthru cmd */ |
150 | if (cmnd->device->host != NULL) | 147 | if (cmnd->device->host != NULL) |
@@ -1253,14 +1250,14 @@ bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd | |||
1253 | printk(KERN_WARNING | 1250 | printk(KERN_WARNING |
1254 | "bfad%d, queuecommand %p %x failed, BFA stopped\n", | 1251 | "bfad%d, queuecommand %p %x failed, BFA stopped\n", |
1255 | bfad->inst_no, cmnd, cmnd->cmnd[0]); | 1252 | bfad->inst_no, cmnd, cmnd->cmnd[0]); |
1256 | cmnd->result = ScsiResult(DID_NO_CONNECT, 0); | 1253 | cmnd->result = DID_NO_CONNECT << 16; |
1257 | goto out_fail_cmd; | 1254 | goto out_fail_cmd; |
1258 | } | 1255 | } |
1259 | 1256 | ||
1260 | 1257 | ||
1261 | itnim = itnim_data->itnim; | 1258 | itnim = itnim_data->itnim; |
1262 | if (!itnim) { | 1259 | if (!itnim) { |
1263 | cmnd->result = ScsiResult(DID_IMM_RETRY, 0); | 1260 | cmnd->result = DID_IMM_RETRY << 16; |
1264 | goto out_fail_cmd; | 1261 | goto out_fail_cmd; |
1265 | } | 1262 | } |
1266 | 1263 | ||
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h index af66275570c3..e61ed8dad0b4 100644 --- a/drivers/scsi/bfa/bfad_im.h +++ b/drivers/scsi/bfa/bfad_im.h | |||
@@ -44,7 +44,6 @@ u32 bfad_im_supported_speeds(struct bfa_s *bfa); | |||
44 | #define MAX_FCP_LUN 16384 | 44 | #define MAX_FCP_LUN 16384 |
45 | #define BFAD_TARGET_RESET_TMO 60 | 45 | #define BFAD_TARGET_RESET_TMO 60 |
46 | #define BFAD_LUN_RESET_TMO 60 | 46 | #define BFAD_LUN_RESET_TMO 60 |
47 | #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) | ||
48 | #define BFA_QUEUE_FULL_RAMP_UP_TIME 120 | 47 | #define BFA_QUEUE_FULL_RAMP_UP_TIME 120 |
49 | 48 | ||
50 | /* | 49 | /* |
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index 8f03a869ac98..e9e669a6c2bc 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c | |||
@@ -2727,6 +2727,8 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep) | |||
2727 | BNX2X_DOORBELL_PCI_BAR); | 2727 | BNX2X_DOORBELL_PCI_BAR); |
2728 | reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF); | 2728 | reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF); |
2729 | ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4); | 2729 | ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4); |
2730 | if (!ep->qp.ctx_base) | ||
2731 | return -ENOMEM; | ||
2730 | goto arm_cq; | 2732 | goto arm_cq; |
2731 | } | 2733 | } |
2732 | 2734 | ||
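The bnx2i hunk above adds the NULL check that ioremap_nocache() always needs; a failed mapping would otherwise be dereferenced later as the connection's doorbell address. A sketch of the general pattern, with phys_addr and doorbell_val as placeholder values:

	#include <linux/io.h>

	void __iomem *db;

	db = ioremap_nocache(phys_addr, 4);
	if (!db)
		return -ENOMEM;			/* nothing mapped yet, nothing to undo */

	writel(doorbell_val, db);		/* __iomem mappings are accessed via readl()/writel() */
	iounmap(db);				/* and released with iounmap() */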
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c index c535c52e72e5..1c5051b1c125 100644 --- a/drivers/scsi/ch.c +++ b/drivers/scsi/ch.c | |||
@@ -199,7 +199,7 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len, | |||
199 | buflength, &sshdr, timeout * HZ, | 199 | buflength, &sshdr, timeout * HZ, |
200 | MAX_RETRIES, NULL); | 200 | MAX_RETRIES, NULL); |
201 | 201 | ||
202 | if (driver_byte(result) & DRIVER_SENSE) { | 202 | if (driver_byte(result) == DRIVER_SENSE) { |
203 | if (debug) | 203 | if (debug) |
204 | scsi_print_sense_hdr(ch->device, ch->name, &sshdr); | 204 | scsi_print_sense_hdr(ch->device, ch->name, &sshdr); |
205 | errno = ch_find_errno(&sshdr); | 205 | errno = ch_find_errno(&sshdr); |
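The ch.c change above tightens the test on the driver byte: driver_byte() extracts bits 24-31 of the result word, and in practice DRIVER_SENSE (0x08) is the only value stored there when valid sense data is available, so comparing for equality states the intent more precisely than the old bitwise AND. A sketch of the check as it now reads:

	#include <scsi/scsi.h>

	if (driver_byte(result) == DRIVER_SENSE)	/* (result >> 24) == 0x08 */
		scsi_print_sense_hdr(ch->device, ch->name, &sshdr);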
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index a10cf25ee7f9..23d07e9f87d0 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c | |||
@@ -761,27 +761,116 @@ out: | |||
761 | static int | 761 | static int |
762 | csio_hw_get_flash_params(struct csio_hw *hw) | 762 | csio_hw_get_flash_params(struct csio_hw *hw) |
763 | { | 763 | { |
764 | /* Table for non-Numonix supported flash parts. Numonix parts are left | ||
765 | * to the preexisting code. All flash parts have 64KB sectors. | ||
766 | */ | ||
767 | static struct flash_desc { | ||
768 | u32 vendor_and_model_id; | ||
769 | u32 size_mb; | ||
770 | } supported_flash[] = { | ||
771 | { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */ | ||
772 | }; | ||
773 | |||
774 | u32 part, manufacturer; | ||
775 | u32 density, size = 0; | ||
776 | u32 flashid = 0; | ||
764 | int ret; | 777 | int ret; |
765 | uint32_t info = 0; | ||
766 | 778 | ||
767 | ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID); | 779 | ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID); |
768 | if (!ret) | 780 | if (!ret) |
769 | ret = csio_hw_sf1_read(hw, 3, 0, 1, &info); | 781 | ret = csio_hw_sf1_read(hw, 3, 0, 1, &flashid); |
770 | csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */ | 782 | csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */ |
771 | if (ret != 0) | 783 | if (ret) |
772 | return ret; | 784 | return ret; |
773 | 785 | ||
774 | if ((info & 0xff) != 0x20) /* not a Numonix flash */ | 786 | /* Check to see if it's one of our non-standard supported Flash parts. |
775 | return -EINVAL; | 787 | */ |
776 | info >>= 16; /* log2 of size */ | 788 | for (part = 0; part < ARRAY_SIZE(supported_flash); part++) |
777 | if (info >= 0x14 && info < 0x18) | 789 | if (supported_flash[part].vendor_and_model_id == flashid) { |
778 | hw->params.sf_nsec = 1 << (info - 16); | 790 | hw->params.sf_size = supported_flash[part].size_mb; |
779 | else if (info == 0x18) | 791 | hw->params.sf_nsec = |
780 | hw->params.sf_nsec = 64; | 792 | hw->params.sf_size / SF_SEC_SIZE; |
781 | else | 793 | goto found; |
782 | return -EINVAL; | 794 | } |
783 | hw->params.sf_size = 1 << info; | 795 | |
796 | /* Decode Flash part size. The code below looks repetitive with | ||
797 | * common encodings, but that's not guaranteed in the JEDEC | ||
798 | * specification for the Read JEDEC ID command. The only thing that | ||
799 | * we're guaranteed by the JEDEC specification is where the | ||
800 | * Manufacturer ID is in the returned result. After that each | ||
801 | * Manufacturer ~could~ encode things completely differently. | ||
802 | * Note, all Flash parts must have 64KB sectors. | ||
803 | */ | ||
804 | manufacturer = flashid & 0xff; | ||
805 | switch (manufacturer) { | ||
806 | case 0x20: { /* Micron/Numonix */ | ||
807 | /* This Density -> Size decoding table is taken from Micron | ||
808 | * Data Sheets. | ||
809 | */ | ||
810 | density = (flashid >> 16) & 0xff; | ||
811 | switch (density) { | ||
812 | case 0x14 ... 0x19: /* 1MB - 32MB */ | ||
813 | size = 1 << density; | ||
814 | break; | ||
815 | case 0x20: /* 64MB */ | ||
816 | size = 1 << 26; | ||
817 | break; | ||
818 | case 0x21: /* 128MB */ | ||
819 | size = 1 << 27; | ||
820 | break; | ||
821 | case 0x22: /* 256MB */ | ||
822 | size = 1 << 28; | ||
823 | } | ||
824 | break; | ||
825 | } | ||
826 | case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */ | ||
827 | /* This Density -> Size decoding table is taken from ISSI | ||
828 | * Data Sheets. | ||
829 | */ | ||
830 | density = (flashid >> 16) & 0xff; | ||
831 | switch (density) { | ||
832 | case 0x16: /* 32 MB */ | ||
833 | size = 1 << 25; | ||
834 | break; | ||
835 | case 0x17: /* 64MB */ | ||
836 | size = 1 << 26; | ||
837 | } | ||
838 | break; | ||
839 | } | ||
840 | case 0xc2: /* Macronix */ | ||
841 | case 0xef: /* Winbond */ { | ||
842 | /* This Density -> Size decoding table is taken from | ||
843 | * Macronix and Winbond Data Sheets. | ||
844 | */ | ||
845 | density = (flashid >> 16) & 0xff; | ||
846 | switch (density) { | ||
847 | case 0x17: /* 8MB */ | ||
848 | case 0x18: /* 16MB */ | ||
849 | size = 1 << density; | ||
850 | } | ||
851 | } | ||
852 | } | ||
853 | |||
854 | /* If we didn't recognize the FLASH part, that's no real issue: the | ||
855 | * Hardware/Software contract says that Hardware will _*ALWAYS*_ | ||
856 | * use a FLASH part which is at least 4MB in size and has 64KB | ||
857 | * sectors. The unrecognized FLASH part is likely to be much larger | ||
858 | * than 4MB, but that's all we really need. | ||
859 | */ | ||
860 | if (size == 0) { | ||
861 | csio_warn(hw, "Unknown Flash Part, ID = %#x, assuming 4MB\n", | ||
862 | flashid); | ||
863 | size = 1 << 22; | ||
864 | } | ||
865 | |||
866 | /* Store decoded Flash size */ | ||
867 | hw->params.sf_size = size; | ||
868 | hw->params.sf_nsec = size / SF_SEC_SIZE; | ||
784 | 869 | ||
870 | found: | ||
871 | if (hw->params.sf_size < FLASH_MIN_SIZE) | ||
872 | csio_warn(hw, "WARNING: Flash Part ID %#x, size %#x < %#x\n", | ||
873 | flashid, hw->params.sf_size, FLASH_MIN_SIZE); | ||
785 | return 0; | 874 | return 0; |
786 | } | 875 | } |
787 | 876 | ||
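For reference, the density decode above can be restated as one small helper. This is only an illustrative sketch, not driver code: flash_size_from_id() is a hypothetical name, it assumes the ID word is packed the way csio_hw_sf1_read() returns it (manufacturer in the low byte, density code in bits 23:16), and the special Spansion entry in supported_flash is handled separately by the lookup loop above.

    /* Hypothetical helper mirroring the density -> size decode above. */
    static unsigned int flash_size_from_id(unsigned int flashid)
    {
        unsigned int manufacturer = flashid & 0xff;     /* JEDEC manufacturer ID */
        unsigned int density = (flashid >> 16) & 0xff;  /* vendor-specific density code */

        switch (manufacturer) {
        case 0x20:                                      /* Micron/Numonix */
            if (density >= 0x14 && density <= 0x19)
                return 1u << density;                   /* 1MB - 32MB */
            if (density >= 0x20 && density <= 0x22)
                return 1u << (density - 6);             /* 64MB - 256MB */
            break;
        case 0x9d:                                      /* ISSI */
            if (density == 0x16)
                return 1u << 25;                        /* 32MB */
            if (density == 0x17)
                return 1u << 26;                        /* 64MB */
            break;
        case 0xc2:                                      /* Macronix */
        case 0xef:                                      /* Winbond */
            if (density == 0x17 || density == 0x18)
                return 1u << density;                   /* 8MB or 16MB */
            break;
        }
        return 1u << 22;                                /* unknown part: assume 4MB */
    }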
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c index faa357b62c61..5022e82ccc4f 100644 --- a/drivers/scsi/csiostor/csio_wr.c +++ b/drivers/scsi/csiostor/csio_wr.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <asm/page.h> | 39 | #include <asm/page.h> |
40 | #include <linux/cache.h> | 40 | #include <linux/cache.h> |
41 | 41 | ||
42 | #include "t4_values.h" | ||
42 | #include "csio_hw.h" | 43 | #include "csio_hw.h" |
43 | #include "csio_wr.h" | 44 | #include "csio_wr.h" |
44 | #include "csio_mb.h" | 45 | #include "csio_mb.h" |
@@ -1309,8 +1310,11 @@ csio_wr_fixup_host_params(struct csio_hw *hw) | |||
1309 | struct csio_sge *sge = &wrm->sge; | 1310 | struct csio_sge *sge = &wrm->sge; |
1310 | uint32_t clsz = L1_CACHE_BYTES; | 1311 | uint32_t clsz = L1_CACHE_BYTES; |
1311 | uint32_t s_hps = PAGE_SHIFT - 10; | 1312 | uint32_t s_hps = PAGE_SHIFT - 10; |
1312 | uint32_t ingpad = 0; | ||
1313 | uint32_t stat_len = clsz > 64 ? 128 : 64; | 1313 | uint32_t stat_len = clsz > 64 ? 128 : 64; |
1314 | u32 fl_align = clsz < 32 ? 32 : clsz; | ||
1315 | u32 pack_align; | ||
1316 | u32 ingpad, ingpack; | ||
1317 | int pcie_cap; | ||
1314 | 1318 | ||
1315 | csio_wr_reg32(hw, HOSTPAGESIZEPF0_V(s_hps) | HOSTPAGESIZEPF1_V(s_hps) | | 1319 | csio_wr_reg32(hw, HOSTPAGESIZEPF0_V(s_hps) | HOSTPAGESIZEPF1_V(s_hps) | |
1316 | HOSTPAGESIZEPF2_V(s_hps) | HOSTPAGESIZEPF3_V(s_hps) | | 1320 | HOSTPAGESIZEPF2_V(s_hps) | HOSTPAGESIZEPF3_V(s_hps) | |
@@ -1318,14 +1322,82 @@ csio_wr_fixup_host_params(struct csio_hw *hw) | |||
1318 | HOSTPAGESIZEPF6_V(s_hps) | HOSTPAGESIZEPF7_V(s_hps), | 1322 | HOSTPAGESIZEPF6_V(s_hps) | HOSTPAGESIZEPF7_V(s_hps), |
1319 | SGE_HOST_PAGE_SIZE_A); | 1323 | SGE_HOST_PAGE_SIZE_A); |
1320 | 1324 | ||
1321 | sge->csio_fl_align = clsz < 32 ? 32 : clsz; | 1325 | /* T5 introduced the separation of the Free List Padding and |
1322 | ingpad = ilog2(sge->csio_fl_align) - 5; | 1326 | * Packing Boundaries. Thus, we can select a smaller Padding |
1327 | * Boundary to avoid uselessly chewing up PCIe Link and Memory | ||
1328 | * Bandwidth, and use a Packing Boundary which is large enough | ||
1329 | * to avoid false sharing between CPUs, etc. | ||
1330 | * | ||
1331 | * For the PCI Link, the smaller the Padding Boundary the | ||
1332 | * better. For the Memory Controller, a smaller Padding | ||
1333 | * Boundary is better until we cross under the Memory Line | ||
1334 | * Size (the minimum unit of transfer to/from Memory). If we | ||
1335 | * have a Padding Boundary which is smaller than the Memory | ||
1336 | * Line Size, that'll involve a Read-Modify-Write cycle on the | ||
1337 | * Memory Controller which is never good. | ||
1338 | */ | ||
1339 | |||
1340 | /* We want the Packing Boundary to be based on the Cache Line | ||
1341 | * Size in order to help avoid False Sharing performance | ||
1342 | * issues between CPUs, etc. We also want the Packing | ||
1343 | * Boundary to incorporate the PCI-E Maximum Payload Size. We | ||
1344 | * get best performance when the Packing Boundary is a | ||
1345 | * multiple of the Maximum Payload Size. | ||
1346 | */ | ||
1347 | pack_align = fl_align; | ||
1348 | pcie_cap = pci_find_capability(hw->pdev, PCI_CAP_ID_EXP); | ||
1349 | if (pcie_cap) { | ||
1350 | u32 mps, mps_log; | ||
1351 | u16 devctl; | ||
1352 | |||
1353 | /* The PCIe Device Control Maximum Payload Size field | ||
1354 | * [bits 7:5] encodes sizes as powers of 2 starting at | ||
1355 | * 128 bytes. | ||
1356 | */ | ||
1357 | pci_read_config_word(hw->pdev, | ||
1358 | pcie_cap + PCI_EXP_DEVCTL, | ||
1359 | &devctl); | ||
1360 | mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7; | ||
1361 | mps = 1 << mps_log; | ||
1362 | if (mps > pack_align) | ||
1363 | pack_align = mps; | ||
1364 | } | ||
1365 | |||
1366 | /* T5/T6 have a special interpretation of the "0" | ||
1367 | * value for the Packing Boundary. This corresponds to 16 | ||
1368 | * bytes instead of the expected 32 bytes. | ||
1369 | */ | ||
1370 | if (pack_align <= 16) { | ||
1371 | ingpack = INGPACKBOUNDARY_16B_X; | ||
1372 | fl_align = 16; | ||
1373 | } else if (pack_align == 32) { | ||
1374 | ingpack = INGPACKBOUNDARY_64B_X; | ||
1375 | fl_align = 64; | ||
1376 | } else { | ||
1377 | u32 pack_align_log = fls(pack_align) - 1; | ||
1378 | |||
1379 | ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X; | ||
1380 | fl_align = pack_align; | ||
1381 | } | ||
1382 | |||
1383 | /* Use the smallest Ingress Padding which isn't smaller than | ||
1384 | * the Memory Controller Read/Write Size. We'll take that as | ||
1385 | * being 8 bytes since we don't know of any system with a | ||
1386 | * wider Memory Controller Bus Width. | ||
1387 | */ | ||
1388 | if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK)) | ||
1389 | ingpad = INGPADBOUNDARY_32B_X; | ||
1390 | else | ||
1391 | ingpad = T6_INGPADBOUNDARY_8B_X; | ||
1323 | 1392 | ||
1324 | csio_set_reg_field(hw, SGE_CONTROL_A, | 1393 | csio_set_reg_field(hw, SGE_CONTROL_A, |
1325 | INGPADBOUNDARY_V(INGPADBOUNDARY_M) | | 1394 | INGPADBOUNDARY_V(INGPADBOUNDARY_M) | |
1326 | EGRSTATUSPAGESIZE_F, | 1395 | EGRSTATUSPAGESIZE_F, |
1327 | INGPADBOUNDARY_V(ingpad) | | 1396 | INGPADBOUNDARY_V(ingpad) | |
1328 | EGRSTATUSPAGESIZE_V(stat_len != 64)); | 1397 | EGRSTATUSPAGESIZE_V(stat_len != 64)); |
1398 | csio_set_reg_field(hw, SGE_CONTROL2_A, | ||
1399 | INGPACKBOUNDARY_V(INGPACKBOUNDARY_M), | ||
1400 | INGPACKBOUNDARY_V(ingpack)); | ||
1329 | 1401 | ||
1330 | /* FL BUFFER SIZE#0 is Page size i.e. already aligned to cache line */ | 1402 | /* FL BUFFER SIZE#0 is Page size i.e. already aligned to cache line */ |
1331 | csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0_A); | 1403 | csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0_A); |
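The comment in the hunk above relies on the standard PCIe encoding of Max Payload Size: the Device Control field in bits 7:5 holds a value v meaning 2^(v + 7) bytes. A quick worked example of the decode, using an illustrative register value:

    /* PCI_EXP_DEVCTL_PAYLOAD masks bits 7:5 of the Device Control register. */
    u16 devctl  = 0x2830;                                       /* illustrative: field value 1 */
    u32 mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7; /* 1 + 7 = 8 */
    u32 mps     = 1u << mps_log;                                /* 256 bytes */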
@@ -1337,14 +1409,16 @@ csio_wr_fixup_host_params(struct csio_hw *hw) | |||
1337 | if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) { | 1409 | if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) { |
1338 | csio_wr_reg32(hw, | 1410 | csio_wr_reg32(hw, |
1339 | (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2_A) + | 1411 | (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2_A) + |
1340 | sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1), | 1412 | fl_align - 1) & ~(fl_align - 1), |
1341 | SGE_FL_BUFFER_SIZE2_A); | 1413 | SGE_FL_BUFFER_SIZE2_A); |
1342 | csio_wr_reg32(hw, | 1414 | csio_wr_reg32(hw, |
1343 | (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3_A) + | 1415 | (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3_A) + |
1344 | sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1), | 1416 | fl_align - 1) & ~(fl_align - 1), |
1345 | SGE_FL_BUFFER_SIZE3_A); | 1417 | SGE_FL_BUFFER_SIZE3_A); |
1346 | } | 1418 | } |
1347 | 1419 | ||
1420 | sge->csio_fl_align = fl_align; | ||
1421 | |||
1348 | csio_wr_reg32(hw, HPZ0_V(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ_A); | 1422 | csio_wr_reg32(hw, HPZ0_V(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ_A); |
1349 | 1423 | ||
1350 | /* default value of rx_dma_offset of the NIC driver */ | 1424 | /* default value of rx_dma_offset of the NIC driver */ |
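The packing-boundary selection in the csio_wr_fixup_host_params() changes above folds together two hardware quirks: the INGPACKBOUNDARY field value v normally means 2^(v + INGPACKBOUNDARY_SHIFT_X) bytes, but on T5/T6 the value 0 is reinterpreted as 16 bytes, so a 32-byte boundary cannot be encoded and is bumped to 64. A compact restatement of that mapping, assuming the usual t4_values.h constants (SHIFT_X = 5, 16B_X = 0, 64B_X = 1):

    /* Map a desired packing alignment (power of two) to the SGE field value. */
    static u32 ingpack_encode(u32 pack_align, u32 *fl_align)
    {
        if (pack_align <= 16) {
            *fl_align = 16;
            return 0;                       /* INGPACKBOUNDARY_16B_X: "0" means 16 bytes */
        }
        if (pack_align == 32) {
            *fl_align = 64;                 /* 32B is not encodable; round up to 64B */
            return 1;                       /* INGPACKBOUNDARY_64B_X */
        }
        *fl_align = pack_align;
        return (fls(pack_align) - 1) - 5;   /* log2(pack_align) - INGPACKBOUNDARY_SHIFT_X */
    }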
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c index a43d44e7e7dd..37b8dc60f5f6 100644 --- a/drivers/scsi/cxlflash/ocxl_hw.c +++ b/drivers/scsi/cxlflash/ocxl_hw.c | |||
@@ -1141,7 +1141,7 @@ static int afu_release(struct inode *inode, struct file *file) | |||
1141 | * | 1141 | * |
1142 | * Return: 0 on success, -errno on failure | 1142 | * Return: 0 on success, -errno on failure |
1143 | */ | 1143 | */ |
1144 | static int ocxlflash_mmap_fault(struct vm_fault *vmf) | 1144 | static vm_fault_t ocxlflash_mmap_fault(struct vm_fault *vmf) |
1145 | { | 1145 | { |
1146 | struct vm_area_struct *vma = vmf->vma; | 1146 | struct vm_area_struct *vma = vmf->vma; |
1147 | struct ocxlflash_context *ctx = vma->vm_file->private_data; | 1147 | struct ocxlflash_context *ctx = vma->vm_file->private_data; |
@@ -1164,8 +1164,7 @@ static int ocxlflash_mmap_fault(struct vm_fault *vmf) | |||
1164 | mmio_area = ctx->psn_phys; | 1164 | mmio_area = ctx->psn_phys; |
1165 | mmio_area += offset; | 1165 | mmio_area += offset; |
1166 | 1166 | ||
1167 | vm_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT); | 1167 | return vmf_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT); |
1168 | return VM_FAULT_NOPAGE; | ||
1169 | } | 1168 | } |
1170 | 1169 | ||
1171 | static const struct vm_operations_struct ocxlflash_vmops = { | 1170 | static const struct vm_operations_struct ocxlflash_vmops = { |
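The ocxl_hw.c change above is the standard conversion to the vm_fault_t API: vmf_insert_pfn() already returns VM_FAULT_NOPAGE (or an error fault code), so the handler can propagate its return value directly. A minimal sketch of the pattern, with hypothetical names for the device-specific parts:

    static vm_fault_t example_mmap_fault(struct vm_fault *vmf)
    {
        struct vm_area_struct *vma = vmf->vma;
        phys_addr_t mmio_base = example_ctx_mmio(vma->vm_file);    /* hypothetical lookup */
        u64 offset = vmf->pgoff << PAGE_SHIFT;

        /* vmf_insert_pfn() returns VM_FAULT_NOPAGE on success or a fault error code. */
        return vmf_insert_pfn(vma, vmf->address, (mmio_base + offset) >> PAGE_SHIFT);
    }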
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c index 379890c4500b..acac6152f50b 100644 --- a/drivers/scsi/cxlflash/superpipe.c +++ b/drivers/scsi/cxlflash/superpipe.c | |||
@@ -1104,7 +1104,7 @@ out: | |||
1104 | * | 1104 | * |
1105 | * Return: 0 on success, VM_FAULT_SIGBUS on failure | 1105 | * Return: 0 on success, VM_FAULT_SIGBUS on failure |
1106 | */ | 1106 | */ |
1107 | static int cxlflash_mmap_fault(struct vm_fault *vmf) | 1107 | static vm_fault_t cxlflash_mmap_fault(struct vm_fault *vmf) |
1108 | { | 1108 | { |
1109 | struct vm_area_struct *vma = vmf->vma; | 1109 | struct vm_area_struct *vma = vmf->vma; |
1110 | struct file *file = vma->vm_file; | 1110 | struct file *file = vma->vm_file; |
@@ -1115,7 +1115,7 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf) | |||
1115 | struct ctx_info *ctxi = NULL; | 1115 | struct ctx_info *ctxi = NULL; |
1116 | struct page *err_page = NULL; | 1116 | struct page *err_page = NULL; |
1117 | enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE; | 1117 | enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE; |
1118 | int rc = 0; | 1118 | vm_fault_t rc = 0; |
1119 | int ctxid; | 1119 | int ctxid; |
1120 | 1120 | ||
1121 | ctxid = cfg->ops->process_element(ctx); | 1121 | ctxid = cfg->ops->process_element(ctx); |
@@ -1155,7 +1155,7 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf) | |||
1155 | out: | 1155 | out: |
1156 | if (likely(ctxi)) | 1156 | if (likely(ctxi)) |
1157 | put_context(ctxi); | 1157 | put_context(ctxi); |
1158 | dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); | 1158 | dev_dbg(dev, "%s: returning rc=%x\n", __func__, rc); |
1159 | return rc; | 1159 | return rc; |
1160 | 1160 | ||
1161 | err: | 1161 | err: |
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index 60ef8df42b95..1ed2cd82129d 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c | |||
@@ -3473,9 +3473,8 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, | |||
3473 | 3473 | ||
3474 | /*if( srb->cmd->cmnd[0] == INQUIRY && */ | 3474 | /*if( srb->cmd->cmnd[0] == INQUIRY && */ |
3475 | /* (host_byte(cmd->result) == DID_OK || status_byte(cmd->result) & CHECK_CONDITION) ) */ | 3475 | /* (host_byte(cmd->result) == DID_OK || status_byte(cmd->result) & CHECK_CONDITION) ) */ |
3476 | if ((cmd->result == (DID_OK << 16) | 3476 | if ((cmd->result == (DID_OK << 16) || |
3477 | || status_byte(cmd->result) & | 3477 | status_byte(cmd->result) == CHECK_CONDITION)) { |
3478 | CHECK_CONDITION)) { | ||
3479 | if (!dcb->init_tcq_flag) { | 3478 | if (!dcb->init_tcq_flag) { |
3480 | add_dev(acb, dcb, ptr); | 3479 | add_dev(acb, dcb, ptr); |
3481 | dcb->init_tcq_flag = 1; | 3480 | dcb->init_tcq_flag = 1; |
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index ffec695e0bfb..54da3166da8d 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c | |||
@@ -2175,15 +2175,13 @@ static void fcoe_ctlr_disc_stop_locked(struct fc_lport *lport) | |||
2175 | { | 2175 | { |
2176 | struct fc_rport_priv *rdata; | 2176 | struct fc_rport_priv *rdata; |
2177 | 2177 | ||
2178 | rcu_read_lock(); | 2178 | mutex_lock(&lport->disc.disc_mutex); |
2179 | list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) { | 2179 | list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) { |
2180 | if (kref_get_unless_zero(&rdata->kref)) { | 2180 | if (kref_get_unless_zero(&rdata->kref)) { |
2181 | fc_rport_logoff(rdata); | 2181 | fc_rport_logoff(rdata); |
2182 | kref_put(&rdata->kref, fc_rport_destroy); | 2182 | kref_put(&rdata->kref, fc_rport_destroy); |
2183 | } | 2183 | } |
2184 | } | 2184 | } |
2185 | rcu_read_unlock(); | ||
2186 | mutex_lock(&lport->disc.disc_mutex); | ||
2187 | lport->disc.disc_callback = NULL; | 2185 | lport->disc.disc_callback = NULL; |
2188 | mutex_unlock(&lport->disc.disc_mutex); | 2186 | mutex_unlock(&lport->disc.disc_mutex); |
2189 | } | 2187 | } |
@@ -2712,7 +2710,7 @@ static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip) | |||
2712 | unsigned long deadline; | 2710 | unsigned long deadline; |
2713 | 2711 | ||
2714 | next_time = jiffies + msecs_to_jiffies(FIP_VN_BEACON_INT * 10); | 2712 | next_time = jiffies + msecs_to_jiffies(FIP_VN_BEACON_INT * 10); |
2715 | rcu_read_lock(); | 2713 | mutex_lock(&lport->disc.disc_mutex); |
2716 | list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) { | 2714 | list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) { |
2717 | if (!kref_get_unless_zero(&rdata->kref)) | 2715 | if (!kref_get_unless_zero(&rdata->kref)) |
2718 | continue; | 2716 | continue; |
@@ -2733,7 +2731,7 @@ static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip) | |||
2733 | next_time = deadline; | 2731 | next_time = deadline; |
2734 | kref_put(&rdata->kref, fc_rport_destroy); | 2732 | kref_put(&rdata->kref, fc_rport_destroy); |
2735 | } | 2733 | } |
2736 | rcu_read_unlock(); | 2734 | mutex_unlock(&lport->disc.disc_mutex); |
2737 | return next_time; | 2735 | return next_time; |
2738 | } | 2736 | } |
2739 | 2737 | ||
@@ -3080,8 +3078,6 @@ static void fcoe_ctlr_vn_disc(struct fcoe_ctlr *fip) | |||
3080 | mutex_lock(&disc->disc_mutex); | 3078 | mutex_lock(&disc->disc_mutex); |
3081 | callback = disc->pending ? disc->disc_callback : NULL; | 3079 | callback = disc->pending ? disc->disc_callback : NULL; |
3082 | disc->pending = 0; | 3080 | disc->pending = 0; |
3083 | mutex_unlock(&disc->disc_mutex); | ||
3084 | rcu_read_lock(); | ||
3085 | list_for_each_entry_rcu(rdata, &disc->rports, peers) { | 3081 | list_for_each_entry_rcu(rdata, &disc->rports, peers) { |
3086 | if (!kref_get_unless_zero(&rdata->kref)) | 3082 | if (!kref_get_unless_zero(&rdata->kref)) |
3087 | continue; | 3083 | continue; |
@@ -3090,7 +3086,7 @@ static void fcoe_ctlr_vn_disc(struct fcoe_ctlr *fip) | |||
3090 | fc_rport_login(rdata); | 3086 | fc_rport_login(rdata); |
3091 | kref_put(&rdata->kref, fc_rport_destroy); | 3087 | kref_put(&rdata->kref, fc_rport_destroy); |
3092 | } | 3088 | } |
3093 | rcu_read_unlock(); | 3089 | mutex_unlock(&disc->disc_mutex); |
3094 | if (callback) | 3090 | if (callback) |
3095 | callback(lport, DISC_EV_SUCCESS); | 3091 | callback(lport, DISC_EV_SUCCESS); |
3096 | } | 3092 | } |
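All of the fcoe_ctlr.c hunks above converge on the same traversal shape, with the discovery mutex held instead of an RCU read-side critical section while each rport is pinned by its kref:

    /* Sketch of the rport walk the patch standardizes on. */
    mutex_lock(&lport->disc.disc_mutex);
    list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
        if (!kref_get_unless_zero(&rdata->kref))
            continue;                       /* rport is already being torn down */
        /* ... act on rdata: logoff, login or age check ... */
        kref_put(&rdata->kref, fc_rport_destroy);
    }
    mutex_unlock(&lport->disc.disc_mutex);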
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index 85604795d8ee..16709735b546 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c | |||
@@ -146,14 +146,14 @@ static irqreturn_t gdth_interrupt(int irq, void *dev_id); | |||
146 | static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, | 146 | static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, |
147 | int gdth_from_wait, int* pIndex); | 147 | int gdth_from_wait, int* pIndex); |
148 | static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index, | 148 | static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index, |
149 | Scsi_Cmnd *scp); | 149 | struct scsi_cmnd *scp); |
150 | static int gdth_async_event(gdth_ha_str *ha); | 150 | static int gdth_async_event(gdth_ha_str *ha); |
151 | static void gdth_log_event(gdth_evt_data *dvr, char *buffer); | 151 | static void gdth_log_event(gdth_evt_data *dvr, char *buffer); |
152 | 152 | ||
153 | static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority); | 153 | static void gdth_putq(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 priority); |
154 | static void gdth_next(gdth_ha_str *ha); | 154 | static void gdth_next(gdth_ha_str *ha); |
155 | static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b); | 155 | static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b); |
156 | static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp); | 156 | static int gdth_special_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp); |
157 | static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source, | 157 | static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source, |
158 | u16 idx, gdth_evt_data *evt); | 158 | u16 idx, gdth_evt_data *evt); |
159 | static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr); | 159 | static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr); |
@@ -161,10 +161,11 @@ static void gdth_readapp_event(gdth_ha_str *ha, u8 application, | |||
161 | gdth_evt_str *estr); | 161 | gdth_evt_str *estr); |
162 | static void gdth_clear_events(void); | 162 | static void gdth_clear_events(void); |
163 | 163 | ||
164 | static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, | 164 | static void gdth_copy_internal_data(gdth_ha_str *ha, struct scsi_cmnd *scp, |
165 | char *buffer, u16 count); | 165 | char *buffer, u16 count); |
166 | static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp); | 166 | static int gdth_internal_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp); |
167 | static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive); | 167 | static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, |
168 | u16 hdrive); | ||
168 | 169 | ||
169 | static void gdth_enable_int(gdth_ha_str *ha); | 170 | static void gdth_enable_int(gdth_ha_str *ha); |
170 | static int gdth_test_busy(gdth_ha_str *ha); | 171 | static int gdth_test_busy(gdth_ha_str *ha); |
@@ -446,7 +447,7 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd, | |||
446 | int timeout, u32 *info) | 447 | int timeout, u32 *info) |
447 | { | 448 | { |
448 | gdth_ha_str *ha = shost_priv(sdev->host); | 449 | gdth_ha_str *ha = shost_priv(sdev->host); |
449 | Scsi_Cmnd *scp; | 450 | struct scsi_cmnd *scp; |
450 | struct gdth_cmndinfo cmndinfo; | 451 | struct gdth_cmndinfo cmndinfo; |
451 | DECLARE_COMPLETION_ONSTACK(wait); | 452 | DECLARE_COMPLETION_ONSTACK(wait); |
452 | int rval; | 453 | int rval; |
@@ -1982,11 +1983,11 @@ static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive) | |||
1982 | 1983 | ||
1983 | /* command queueing/sending functions */ | 1984 | /* command queueing/sending functions */ |
1984 | 1985 | ||
1985 | static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority) | 1986 | static void gdth_putq(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 priority) |
1986 | { | 1987 | { |
1987 | struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); | 1988 | struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); |
1988 | register Scsi_Cmnd *pscp; | 1989 | register struct scsi_cmnd *pscp; |
1989 | register Scsi_Cmnd *nscp; | 1990 | register struct scsi_cmnd *nscp; |
1990 | unsigned long flags; | 1991 | unsigned long flags; |
1991 | 1992 | ||
1992 | TRACE(("gdth_putq() priority %d\n",priority)); | 1993 | TRACE(("gdth_putq() priority %d\n",priority)); |
@@ -2000,11 +2001,11 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority) | |||
2000 | scp->SCp.ptr = NULL; | 2001 | scp->SCp.ptr = NULL; |
2001 | } else { /* queue not empty */ | 2002 | } else { /* queue not empty */ |
2002 | pscp = ha->req_first; | 2003 | pscp = ha->req_first; |
2003 | nscp = (Scsi_Cmnd *)pscp->SCp.ptr; | 2004 | nscp = (struct scsi_cmnd *)pscp->SCp.ptr; |
2004 | /* priority: 0-highest,..,0xff-lowest */ | 2005 | /* priority: 0-highest,..,0xff-lowest */ |
2005 | while (nscp && gdth_cmnd_priv(nscp)->priority <= priority) { | 2006 | while (nscp && gdth_cmnd_priv(nscp)->priority <= priority) { |
2006 | pscp = nscp; | 2007 | pscp = nscp; |
2007 | nscp = (Scsi_Cmnd *)pscp->SCp.ptr; | 2008 | nscp = (struct scsi_cmnd *)pscp->SCp.ptr; |
2008 | } | 2009 | } |
2009 | pscp->SCp.ptr = (char *)scp; | 2010 | pscp->SCp.ptr = (char *)scp; |
2010 | scp->SCp.ptr = (char *)nscp; | 2011 | scp->SCp.ptr = (char *)nscp; |
@@ -2013,7 +2014,7 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority) | |||
2013 | 2014 | ||
2014 | #ifdef GDTH_STATISTICS | 2015 | #ifdef GDTH_STATISTICS |
2015 | flags = 0; | 2016 | flags = 0; |
2016 | for (nscp=ha->req_first; nscp; nscp=(Scsi_Cmnd*)nscp->SCp.ptr) | 2017 | for (nscp=ha->req_first; nscp; nscp=(struct scsi_cmnd*)nscp->SCp.ptr) |
2017 | ++flags; | 2018 | ++flags; |
2018 | if (max_rq < flags) { | 2019 | if (max_rq < flags) { |
2019 | max_rq = flags; | 2020 | max_rq = flags; |
@@ -2024,8 +2025,8 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority) | |||
2024 | 2025 | ||
2025 | static void gdth_next(gdth_ha_str *ha) | 2026 | static void gdth_next(gdth_ha_str *ha) |
2026 | { | 2027 | { |
2027 | register Scsi_Cmnd *pscp; | 2028 | register struct scsi_cmnd *pscp; |
2028 | register Scsi_Cmnd *nscp; | 2029 | register struct scsi_cmnd *nscp; |
2029 | u8 b, t, l, firsttime; | 2030 | u8 b, t, l, firsttime; |
2030 | u8 this_cmd, next_cmd; | 2031 | u8 this_cmd, next_cmd; |
2031 | unsigned long flags = 0; | 2032 | unsigned long flags = 0; |
@@ -2040,10 +2041,10 @@ static void gdth_next(gdth_ha_str *ha) | |||
2040 | next_cmd = gdth_polling ? FALSE:TRUE; | 2041 | next_cmd = gdth_polling ? FALSE:TRUE; |
2041 | cmd_index = 0; | 2042 | cmd_index = 0; |
2042 | 2043 | ||
2043 | for (nscp = pscp = ha->req_first; nscp; nscp = (Scsi_Cmnd *)nscp->SCp.ptr) { | 2044 | for (nscp = pscp = ha->req_first; nscp; nscp = (struct scsi_cmnd *)nscp->SCp.ptr) { |
2044 | struct gdth_cmndinfo *nscp_cmndinfo = gdth_cmnd_priv(nscp); | 2045 | struct gdth_cmndinfo *nscp_cmndinfo = gdth_cmnd_priv(nscp); |
2045 | if (nscp != pscp && nscp != (Scsi_Cmnd *)pscp->SCp.ptr) | 2046 | if (nscp != pscp && nscp != (struct scsi_cmnd *)pscp->SCp.ptr) |
2046 | pscp = (Scsi_Cmnd *)pscp->SCp.ptr; | 2047 | pscp = (struct scsi_cmnd *)pscp->SCp.ptr; |
2047 | if (!nscp_cmndinfo->internal_command) { | 2048 | if (!nscp_cmndinfo->internal_command) { |
2048 | b = nscp->device->channel; | 2049 | b = nscp->device->channel; |
2049 | t = nscp->device->id; | 2050 | t = nscp->device->id; |
@@ -2250,7 +2251,7 @@ static void gdth_next(gdth_ha_str *ha) | |||
2250 | if (!this_cmd) | 2251 | if (!this_cmd) |
2251 | break; | 2252 | break; |
2252 | if (nscp == ha->req_first) | 2253 | if (nscp == ha->req_first) |
2253 | ha->req_first = pscp = (Scsi_Cmnd *)nscp->SCp.ptr; | 2254 | ha->req_first = pscp = (struct scsi_cmnd *)nscp->SCp.ptr; |
2254 | else | 2255 | else |
2255 | pscp->SCp.ptr = nscp->SCp.ptr; | 2256 | pscp->SCp.ptr = nscp->SCp.ptr; |
2256 | if (!next_cmd) | 2257 | if (!next_cmd) |
@@ -2275,7 +2276,7 @@ static void gdth_next(gdth_ha_str *ha) | |||
2275 | * gdth_copy_internal_data() - copy to/from a buffer onto a scsi_cmnd's | 2276 | * gdth_copy_internal_data() - copy to/from a buffer onto a scsi_cmnd's |
2276 | * buffers, kmap_atomic() as needed. | 2277 | * buffers, kmap_atomic() as needed. |
2277 | */ | 2278 | */ |
2278 | static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, | 2279 | static void gdth_copy_internal_data(gdth_ha_str *ha, struct scsi_cmnd *scp, |
2279 | char *buffer, u16 count) | 2280 | char *buffer, u16 count) |
2280 | { | 2281 | { |
2281 | u16 cpcount,i, max_sg = scsi_sg_count(scp); | 2282 | u16 cpcount,i, max_sg = scsi_sg_count(scp); |
@@ -2317,7 +2318,7 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, | |||
2317 | } | 2318 | } |
2318 | } | 2319 | } |
2319 | 2320 | ||
2320 | static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | 2321 | static int gdth_internal_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp) |
2321 | { | 2322 | { |
2322 | u8 t; | 2323 | u8 t; |
2323 | gdth_inq_data inq; | 2324 | gdth_inq_data inq; |
@@ -2419,7 +2420,8 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | |||
2419 | return 0; | 2420 | return 0; |
2420 | } | 2421 | } |
2421 | 2422 | ||
2422 | static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive) | 2423 | static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, |
2424 | u16 hdrive) | ||
2423 | { | 2425 | { |
2424 | register gdth_cmd_str *cmdp; | 2426 | register gdth_cmd_str *cmdp; |
2425 | struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); | 2427 | struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); |
@@ -2594,7 +2596,7 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive) | |||
2594 | return cmd_index; | 2596 | return cmd_index; |
2595 | } | 2597 | } |
2596 | 2598 | ||
2597 | static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b) | 2599 | static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b) |
2598 | { | 2600 | { |
2599 | register gdth_cmd_str *cmdp; | 2601 | register gdth_cmd_str *cmdp; |
2600 | u16 i; | 2602 | u16 i; |
@@ -2767,7 +2769,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b) | |||
2767 | return cmd_index; | 2769 | return cmd_index; |
2768 | } | 2770 | } |
2769 | 2771 | ||
2770 | static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | 2772 | static int gdth_special_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp) |
2771 | { | 2773 | { |
2772 | register gdth_cmd_str *cmdp; | 2774 | register gdth_cmd_str *cmdp; |
2773 | struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); | 2775 | struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); |
@@ -2958,7 +2960,7 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha, | |||
2958 | gdt6m_dpram_str __iomem *dp6m_ptr = NULL; | 2960 | gdt6m_dpram_str __iomem *dp6m_ptr = NULL; |
2959 | gdt6_dpram_str __iomem *dp6_ptr; | 2961 | gdt6_dpram_str __iomem *dp6_ptr; |
2960 | gdt2_dpram_str __iomem *dp2_ptr; | 2962 | gdt2_dpram_str __iomem *dp2_ptr; |
2961 | Scsi_Cmnd *scp; | 2963 | struct scsi_cmnd *scp; |
2962 | int rval, i; | 2964 | int rval, i; |
2963 | u8 IStatus; | 2965 | u8 IStatus; |
2964 | u16 Service; | 2966 | u16 Service; |
@@ -3217,7 +3219,7 @@ static irqreturn_t gdth_interrupt(int irq, void *dev_id) | |||
3217 | } | 3219 | } |
3218 | 3220 | ||
3219 | static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index, | 3221 | static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index, |
3220 | Scsi_Cmnd *scp) | 3222 | struct scsi_cmnd *scp) |
3221 | { | 3223 | { |
3222 | gdth_msg_str *msg; | 3224 | gdth_msg_str *msg; |
3223 | gdth_cmd_str *cmdp; | 3225 | gdth_cmd_str *cmdp; |
@@ -3708,7 +3710,7 @@ static u8 gdth_timer_running; | |||
3708 | static void gdth_timeout(struct timer_list *unused) | 3710 | static void gdth_timeout(struct timer_list *unused) |
3709 | { | 3711 | { |
3710 | u32 i; | 3712 | u32 i; |
3711 | Scsi_Cmnd *nscp; | 3713 | struct scsi_cmnd *nscp; |
3712 | gdth_ha_str *ha; | 3714 | gdth_ha_str *ha; |
3713 | unsigned long flags; | 3715 | unsigned long flags; |
3714 | 3716 | ||
@@ -3724,7 +3726,8 @@ static void gdth_timeout(struct timer_list *unused) | |||
3724 | if (ha->cmd_tab[i].cmnd != UNUSED_CMND) | 3726 | if (ha->cmd_tab[i].cmnd != UNUSED_CMND) |
3725 | ++act_stats; | 3727 | ++act_stats; |
3726 | 3728 | ||
3727 | for (act_rq=0,nscp=ha->req_first; nscp; nscp=(Scsi_Cmnd*)nscp->SCp.ptr) | 3729 | for (act_rq=0, |
3730 | nscp=ha->req_first; nscp; nscp=(struct scsi_cmnd*)nscp->SCp.ptr) | ||
3728 | ++act_rq; | 3731 | ++act_rq; |
3729 | 3732 | ||
3730 | TRACE2(("gdth_to(): ints %d, ios %d, act_stats %d, act_rq %d\n", | 3733 | TRACE2(("gdth_to(): ints %d, ios %d, act_stats %d, act_rq %d\n", |
@@ -3909,12 +3912,12 @@ static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp) | |||
3909 | } | 3912 | } |
3910 | 3913 | ||
3911 | 3914 | ||
3912 | static int gdth_eh_bus_reset(Scsi_Cmnd *scp) | 3915 | static int gdth_eh_bus_reset(struct scsi_cmnd *scp) |
3913 | { | 3916 | { |
3914 | gdth_ha_str *ha = shost_priv(scp->device->host); | 3917 | gdth_ha_str *ha = shost_priv(scp->device->host); |
3915 | int i; | 3918 | int i; |
3916 | unsigned long flags; | 3919 | unsigned long flags; |
3917 | Scsi_Cmnd *cmnd; | 3920 | struct scsi_cmnd *cmnd; |
3918 | u8 b; | 3921 | u8 b; |
3919 | 3922 | ||
3920 | TRACE2(("gdth_eh_bus_reset()\n")); | 3923 | TRACE2(("gdth_eh_bus_reset()\n")); |
@@ -4465,7 +4468,7 @@ free_fail: | |||
4465 | static int gdth_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) | 4468 | static int gdth_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) |
4466 | { | 4469 | { |
4467 | gdth_ha_str *ha; | 4470 | gdth_ha_str *ha; |
4468 | Scsi_Cmnd *scp; | 4471 | struct scsi_cmnd *scp; |
4469 | unsigned long flags; | 4472 | unsigned long flags; |
4470 | char cmnd[MAX_COMMAND_SIZE]; | 4473 | char cmnd[MAX_COMMAND_SIZE]; |
4471 | void __user *argp = (void __user *)arg; | 4474 | void __user *argp = (void __user *)arg; |
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h index e6e5ccb1e0f3..ee6ffcf388e8 100644 --- a/drivers/scsi/gdth.h +++ b/drivers/scsi/gdth.h | |||
@@ -162,9 +162,9 @@ | |||
162 | #define BIGSECS 63 /* mapping 255*63 */ | 162 | #define BIGSECS 63 /* mapping 255*63 */ |
163 | 163 | ||
164 | /* special command ptr. */ | 164 | /* special command ptr. */ |
165 | #define UNUSED_CMND ((Scsi_Cmnd *)-1) | 165 | #define UNUSED_CMND ((struct scsi_cmnd *)-1) |
166 | #define INTERNAL_CMND ((Scsi_Cmnd *)-2) | 166 | #define INTERNAL_CMND ((struct scsi_cmnd *)-2) |
167 | #define SCREEN_CMND ((Scsi_Cmnd *)-3) | 167 | #define SCREEN_CMND ((struct scsi_cmnd *)-3) |
168 | #define SPECIAL_SCP(p) (p==UNUSED_CMND || p==INTERNAL_CMND || p==SCREEN_CMND) | 168 | #define SPECIAL_SCP(p) (p==UNUSED_CMND || p==INTERNAL_CMND || p==SCREEN_CMND) |
169 | 169 | ||
170 | /* controller services */ | 170 | /* controller services */ |
@@ -867,7 +867,7 @@ typedef struct { | |||
867 | u16 service; /* service/firmware ver./.. */ | 867 | u16 service; /* service/firmware ver./.. */ |
868 | u32 info; | 868 | u32 info; |
869 | u32 info2; /* additional info */ | 869 | u32 info2; /* additional info */ |
870 | Scsi_Cmnd *req_first; /* top of request queue */ | 870 | struct scsi_cmnd *req_first; /* top of request queue */ |
871 | struct { | 871 | struct { |
872 | u8 present; /* Flag: host drive present? */ | 872 | u8 present; /* Flag: host drive present? */ |
873 | u8 is_logdrv; /* Flag: log. drive (master)? */ | 873 | u8 is_logdrv; /* Flag: log. drive (master)? */ |
@@ -896,7 +896,7 @@ typedef struct { | |||
896 | u32 id_list[MAXID]; /* IDs of the phys. devices */ | 896 | u32 id_list[MAXID]; /* IDs of the phys. devices */ |
897 | } raw[MAXBUS]; /* SCSI channels */ | 897 | } raw[MAXBUS]; /* SCSI channels */ |
898 | struct { | 898 | struct { |
899 | Scsi_Cmnd *cmnd; /* pending request */ | 899 | struct scsi_cmnd *cmnd; /* pending request */ |
900 | u16 service; /* service */ | 900 | u16 service; /* service */ |
901 | } cmd_tab[GDTH_MAXCMDS]; /* table of pend. requests */ | 901 | } cmd_tab[GDTH_MAXCMDS]; /* table of pend. requests */ |
902 | struct gdth_cmndinfo { /* per-command private info */ | 902 | struct gdth_cmndinfo { /* per-command private info */ |
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c index 20add49cdd32..3a9751a80225 100644 --- a/drivers/scsi/gdth_proc.c +++ b/drivers/scsi/gdth_proc.c | |||
@@ -626,7 +626,7 @@ static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id) | |||
626 | { | 626 | { |
627 | unsigned long flags; | 627 | unsigned long flags; |
628 | int i; | 628 | int i; |
629 | Scsi_Cmnd *scp; | 629 | struct scsi_cmnd *scp; |
630 | struct gdth_cmndinfo *cmndinfo; | 630 | struct gdth_cmndinfo *cmndinfo; |
631 | u8 b, t; | 631 | u8 b, t; |
632 | 632 | ||
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index 7052a5d45f7f..6c7d2e201abe 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/clk.h> | 16 | #include <linux/clk.h> |
17 | #include <linux/dmapool.h> | 17 | #include <linux/dmapool.h> |
18 | #include <linux/iopoll.h> | 18 | #include <linux/iopoll.h> |
19 | #include <linux/lcm.h> | ||
19 | #include <linux/mfd/syscon.h> | 20 | #include <linux/mfd/syscon.h> |
20 | #include <linux/module.h> | 21 | #include <linux/module.h> |
21 | #include <linux/of_address.h> | 22 | #include <linux/of_address.h> |
@@ -199,17 +200,17 @@ struct hisi_sas_slot { | |||
199 | int dlvry_queue_slot; | 200 | int dlvry_queue_slot; |
200 | int cmplt_queue; | 201 | int cmplt_queue; |
201 | int cmplt_queue_slot; | 202 | int cmplt_queue_slot; |
202 | int idx; | ||
203 | int abort; | 203 | int abort; |
204 | int ready; | 204 | int ready; |
205 | void *buf; | ||
206 | dma_addr_t buf_dma; | ||
207 | void *cmd_hdr; | 205 | void *cmd_hdr; |
208 | dma_addr_t cmd_hdr_dma; | 206 | dma_addr_t cmd_hdr_dma; |
209 | struct work_struct abort_slot; | ||
210 | struct timer_list internal_abort_timer; | 207 | struct timer_list internal_abort_timer; |
211 | bool is_internal; | 208 | bool is_internal; |
212 | struct hisi_sas_tmf_task *tmf; | 209 | struct hisi_sas_tmf_task *tmf; |
210 | /* Do not reorder/change members after here */ | ||
211 | void *buf; | ||
212 | dma_addr_t buf_dma; | ||
213 | int idx; | ||
213 | }; | 214 | }; |
214 | 215 | ||
215 | struct hisi_sas_hw { | 216 | struct hisi_sas_hw { |
@@ -277,6 +278,7 @@ struct hisi_hba { | |||
277 | 278 | ||
278 | int n_phy; | 279 | int n_phy; |
279 | spinlock_t lock; | 280 | spinlock_t lock; |
281 | struct semaphore sem; | ||
280 | 282 | ||
281 | struct timer_list timer; | 283 | struct timer_list timer; |
282 | struct workqueue_struct *wq; | 284 | struct workqueue_struct *wq; |
@@ -298,7 +300,6 @@ struct hisi_hba { | |||
298 | 300 | ||
299 | int queue_count; | 301 | int queue_count; |
300 | 302 | ||
301 | struct dma_pool *buffer_pool; | ||
302 | struct hisi_sas_device devices[HISI_SAS_MAX_DEVICES]; | 303 | struct hisi_sas_device devices[HISI_SAS_MAX_DEVICES]; |
303 | struct hisi_sas_cmd_hdr *cmd_hdr[HISI_SAS_MAX_QUEUES]; | 304 | struct hisi_sas_cmd_hdr *cmd_hdr[HISI_SAS_MAX_QUEUES]; |
304 | dma_addr_t cmd_hdr_dma[HISI_SAS_MAX_QUEUES]; | 305 | dma_addr_t cmd_hdr_dma[HISI_SAS_MAX_QUEUES]; |
@@ -319,6 +320,7 @@ struct hisi_hba { | |||
319 | const struct hisi_sas_hw *hw; /* Low level hw interface */ | 320 | const struct hisi_sas_hw *hw; /* Low level hw interface */ |
320 | unsigned long sata_dev_bitmap[BITS_TO_LONGS(HISI_SAS_MAX_DEVICES)]; | 321 | unsigned long sata_dev_bitmap[BITS_TO_LONGS(HISI_SAS_MAX_DEVICES)]; |
321 | struct work_struct rst_work; | 322 | struct work_struct rst_work; |
323 | u32 phy_state; | ||
322 | }; | 324 | }; |
323 | 325 | ||
324 | /* Generic HW DMA host memory structures */ | 326 | /* Generic HW DMA host memory structures */ |
@@ -479,4 +481,6 @@ extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, | |||
479 | enum hisi_sas_phy_event event); | 481 | enum hisi_sas_phy_event event); |
480 | extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba); | 482 | extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba); |
481 | extern u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max); | 483 | extern u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max); |
484 | extern void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba); | ||
485 | extern void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba); | ||
482 | #endif | 486 | #endif |
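The field reordering in hisi_sas.h above (buf, buf_dma and idx moved behind the "Do not reorder" marker) lets the main driver recycle a slot with one partial memset that preserves those members, as hisi_sas_main.c below does with memset(slot, 0, offsetof(struct hisi_sas_slot, buf)). A generic, self-contained sketch of the idiom:

    #include <stddef.h>
    #include <string.h>

    struct slot {
        int state;              /* transient: cleared on every reuse */
        void *task;             /* transient: cleared on every reuse */
        /* Do not reorder/change members after here */
        void *buf;              /* persistent: allocated once, kept across reuse */
        unsigned long buf_dma;  /* persistent */
        int idx;                /* persistent */
    };

    static void slot_recycle(struct slot *s)
    {
        /* Zero only the transient prefix; everything from buf onward survives. */
        memset(s, 0, offsetof(struct slot, buf));
    }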
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 6f562974f8f6..a4e2e6aa9a6b 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c | |||
@@ -242,20 +242,16 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, | |||
242 | task->data_dir); | 242 | task->data_dir); |
243 | } | 243 | } |
244 | 244 | ||
245 | if (slot->buf) | ||
246 | dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma); | ||
247 | 245 | ||
248 | spin_lock_irqsave(&dq->lock, flags); | 246 | spin_lock_irqsave(&dq->lock, flags); |
249 | list_del_init(&slot->entry); | 247 | list_del_init(&slot->entry); |
250 | spin_unlock_irqrestore(&dq->lock, flags); | 248 | spin_unlock_irqrestore(&dq->lock, flags); |
251 | slot->buf = NULL; | 249 | |
252 | slot->task = NULL; | 250 | memset(slot, 0, offsetof(struct hisi_sas_slot, buf)); |
253 | slot->port = NULL; | 251 | |
254 | spin_lock_irqsave(&hisi_hba->lock, flags); | 252 | spin_lock_irqsave(&hisi_hba->lock, flags); |
255 | hisi_sas_slot_index_free(hisi_hba, slot->idx); | 253 | hisi_sas_slot_index_free(hisi_hba, slot->idx); |
256 | spin_unlock_irqrestore(&hisi_hba->lock, flags); | 254 | spin_unlock_irqrestore(&hisi_hba->lock, flags); |
257 | |||
258 | /* slot memory is fully zeroed when it is reused */ | ||
259 | } | 255 | } |
260 | EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free); | 256 | EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free); |
261 | 257 | ||
@@ -285,40 +281,6 @@ static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba, | |||
285 | device_id, abort_flag, tag_to_abort); | 281 | device_id, abort_flag, tag_to_abort); |
286 | } | 282 | } |
287 | 283 | ||
288 | /* | ||
289 | * This function will issue an abort TMF regardless of whether the | ||
290 | * task is in the sdev or not. Then it will do the task complete | ||
291 | * cleanup and callbacks. | ||
292 | */ | ||
293 | static void hisi_sas_slot_abort(struct work_struct *work) | ||
294 | { | ||
295 | struct hisi_sas_slot *abort_slot = | ||
296 | container_of(work, struct hisi_sas_slot, abort_slot); | ||
297 | struct sas_task *task = abort_slot->task; | ||
298 | struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev); | ||
299 | struct scsi_cmnd *cmnd = task->uldd_task; | ||
300 | struct hisi_sas_tmf_task tmf_task; | ||
301 | struct scsi_lun lun; | ||
302 | struct device *dev = hisi_hba->dev; | ||
303 | int tag = abort_slot->idx; | ||
304 | |||
305 | if (!(task->task_proto & SAS_PROTOCOL_SSP)) { | ||
306 | dev_err(dev, "cannot abort slot for non-ssp task\n"); | ||
307 | goto out; | ||
308 | } | ||
309 | |||
310 | int_to_scsilun(cmnd->device->lun, &lun); | ||
311 | tmf_task.tmf = TMF_ABORT_TASK; | ||
312 | tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag); | ||
313 | |||
314 | hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task); | ||
315 | out: | ||
316 | /* Do cleanup for this task */ | ||
317 | hisi_sas_slot_task_free(hisi_hba, task, abort_slot); | ||
318 | if (task->task_done) | ||
319 | task->task_done(task); | ||
320 | } | ||
321 | |||
322 | static int hisi_sas_task_prep(struct sas_task *task, | 284 | static int hisi_sas_task_prep(struct sas_task *task, |
323 | struct hisi_sas_dq **dq_pointer, | 285 | struct hisi_sas_dq **dq_pointer, |
324 | bool is_tmf, struct hisi_sas_tmf_task *tmf, | 286 | bool is_tmf, struct hisi_sas_tmf_task *tmf, |
@@ -334,8 +296,8 @@ static int hisi_sas_task_prep(struct sas_task *task, | |||
334 | struct device *dev = hisi_hba->dev; | 296 | struct device *dev = hisi_hba->dev; |
335 | int dlvry_queue_slot, dlvry_queue, rc, slot_idx; | 297 | int dlvry_queue_slot, dlvry_queue, rc, slot_idx; |
336 | int n_elem = 0, n_elem_req = 0, n_elem_resp = 0; | 298 | int n_elem = 0, n_elem_req = 0, n_elem_resp = 0; |
337 | unsigned long flags, flags_dq; | ||
338 | struct hisi_sas_dq *dq; | 299 | struct hisi_sas_dq *dq; |
300 | unsigned long flags; | ||
339 | int wr_q_index; | 301 | int wr_q_index; |
340 | 302 | ||
341 | if (!sas_port) { | 303 | if (!sas_port) { |
@@ -430,30 +392,22 @@ static int hisi_sas_task_prep(struct sas_task *task, | |||
430 | goto err_out_dma_unmap; | 392 | goto err_out_dma_unmap; |
431 | 393 | ||
432 | slot = &hisi_hba->slot_info[slot_idx]; | 394 | slot = &hisi_hba->slot_info[slot_idx]; |
433 | memset(slot, 0, sizeof(struct hisi_sas_slot)); | ||
434 | |||
435 | slot->buf = dma_pool_alloc(hisi_hba->buffer_pool, | ||
436 | GFP_ATOMIC, &slot->buf_dma); | ||
437 | if (!slot->buf) { | ||
438 | rc = -ENOMEM; | ||
439 | goto err_out_tag; | ||
440 | } | ||
441 | 395 | ||
442 | spin_lock_irqsave(&dq->lock, flags_dq); | 396 | spin_lock_irqsave(&dq->lock, flags); |
443 | wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq); | 397 | wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq); |
444 | if (wr_q_index < 0) { | 398 | if (wr_q_index < 0) { |
445 | spin_unlock_irqrestore(&dq->lock, flags_dq); | 399 | spin_unlock_irqrestore(&dq->lock, flags); |
446 | rc = -EAGAIN; | 400 | rc = -EAGAIN; |
447 | goto err_out_buf; | 401 | goto err_out_tag; |
448 | } | 402 | } |
449 | 403 | ||
450 | list_add_tail(&slot->delivery, &dq->list); | 404 | list_add_tail(&slot->delivery, &dq->list); |
451 | spin_unlock_irqrestore(&dq->lock, flags_dq); | 405 | list_add_tail(&slot->entry, &sas_dev->list); |
406 | spin_unlock_irqrestore(&dq->lock, flags); | ||
452 | 407 | ||
453 | dlvry_queue = dq->id; | 408 | dlvry_queue = dq->id; |
454 | dlvry_queue_slot = wr_q_index; | 409 | dlvry_queue_slot = wr_q_index; |
455 | 410 | ||
456 | slot->idx = slot_idx; | ||
457 | slot->n_elem = n_elem; | 411 | slot->n_elem = n_elem; |
458 | slot->dlvry_queue = dlvry_queue; | 412 | slot->dlvry_queue = dlvry_queue; |
459 | slot->dlvry_queue_slot = dlvry_queue_slot; | 413 | slot->dlvry_queue_slot = dlvry_queue_slot; |
@@ -464,7 +418,6 @@ static int hisi_sas_task_prep(struct sas_task *task, | |||
464 | slot->tmf = tmf; | 418 | slot->tmf = tmf; |
465 | slot->is_internal = is_tmf; | 419 | slot->is_internal = is_tmf; |
466 | task->lldd_task = slot; | 420 | task->lldd_task = slot; |
467 | INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort); | ||
468 | 421 | ||
469 | memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); | 422 | memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); |
470 | memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); | 423 | memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); |
@@ -488,21 +441,15 @@ static int hisi_sas_task_prep(struct sas_task *task, | |||
488 | break; | 441 | break; |
489 | } | 442 | } |
490 | 443 | ||
491 | spin_lock_irqsave(&dq->lock, flags); | ||
492 | list_add_tail(&slot->entry, &sas_dev->list); | ||
493 | spin_unlock_irqrestore(&dq->lock, flags); | ||
494 | spin_lock_irqsave(&task->task_state_lock, flags); | 444 | spin_lock_irqsave(&task->task_state_lock, flags); |
495 | task->task_state_flags |= SAS_TASK_AT_INITIATOR; | 445 | task->task_state_flags |= SAS_TASK_AT_INITIATOR; |
496 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 446 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
497 | 447 | ||
498 | ++(*pass); | 448 | ++(*pass); |
499 | slot->ready = 1; | 449 | WRITE_ONCE(slot->ready, 1); |
500 | 450 | ||
501 | return 0; | 451 | return 0; |
502 | 452 | ||
503 | err_out_buf: | ||
504 | dma_pool_free(hisi_hba->buffer_pool, slot->buf, | ||
505 | slot->buf_dma); | ||
506 | err_out_tag: | 453 | err_out_tag: |
507 | spin_lock_irqsave(&hisi_hba->lock, flags); | 454 | spin_lock_irqsave(&hisi_hba->lock, flags); |
508 | hisi_sas_slot_index_free(hisi_hba, slot_idx); | 455 | hisi_sas_slot_index_free(hisi_hba, slot_idx); |
@@ -536,8 +483,13 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, | |||
536 | struct device *dev = hisi_hba->dev; | 483 | struct device *dev = hisi_hba->dev; |
537 | struct hisi_sas_dq *dq = NULL; | 484 | struct hisi_sas_dq *dq = NULL; |
538 | 485 | ||
539 | if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) | 486 | if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) { |
540 | return -EINVAL; | 487 | if (in_softirq()) |
488 | return -EINVAL; | ||
489 | |||
490 | down(&hisi_hba->sem); | ||
491 | up(&hisi_hba->sem); | ||
492 | } | ||
541 | 493 | ||
542 | /* protect task_prep and start_delivery sequence */ | 494 | /* protect task_prep and start_delivery sequence */ |
543 | rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass); | 495 | rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass); |
@@ -819,6 +771,8 @@ static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no) | |||
819 | 771 | ||
820 | for (i = 0; i < HISI_PHYES_NUM; i++) | 772 | for (i = 0; i < HISI_PHYES_NUM; i++) |
821 | INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]); | 773 | INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]); |
774 | |||
775 | spin_lock_init(&phy->lock); | ||
822 | } | 776 | } |
823 | 777 | ||
824 | static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) | 778 | static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) |
@@ -862,7 +816,6 @@ static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task | |||
862 | hisi_sas_slot_task_free(hisi_hba, task, slot); | 816 | hisi_sas_slot_task_free(hisi_hba, task, slot); |
863 | } | 817 | } |
864 | 818 | ||
865 | /* hisi_hba.lock should be locked */ | ||
866 | static void hisi_sas_release_task(struct hisi_hba *hisi_hba, | 819 | static void hisi_sas_release_task(struct hisi_hba *hisi_hba, |
867 | struct domain_device *device) | 820 | struct domain_device *device) |
868 | { | 821 | { |
@@ -914,7 +867,9 @@ static void hisi_sas_dev_gone(struct domain_device *device) | |||
914 | 867 | ||
915 | hisi_sas_dereg_device(hisi_hba, device); | 868 | hisi_sas_dereg_device(hisi_hba, device); |
916 | 869 | ||
870 | down(&hisi_hba->sem); | ||
917 | hisi_hba->hw->clear_itct(hisi_hba, sas_dev); | 871 | hisi_hba->hw->clear_itct(hisi_hba, sas_dev); |
872 | up(&hisi_hba->sem); | ||
918 | device->lldd_dev = NULL; | 873 | device->lldd_dev = NULL; |
919 | } | 874 | } |
920 | 875 | ||
@@ -1351,21 +1306,12 @@ static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba) | |||
1351 | } | 1306 | } |
1352 | } | 1307 | } |
1353 | 1308 | ||
1354 | static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) | 1309 | void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba) |
1355 | { | 1310 | { |
1356 | struct device *dev = hisi_hba->dev; | ||
1357 | struct Scsi_Host *shost = hisi_hba->shost; | 1311 | struct Scsi_Host *shost = hisi_hba->shost; |
1358 | u32 old_state, state; | ||
1359 | int rc; | ||
1360 | |||
1361 | if (!hisi_hba->hw->soft_reset) | ||
1362 | return -1; | ||
1363 | 1312 | ||
1364 | if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) | 1313 | down(&hisi_hba->sem); |
1365 | return -1; | 1314 | hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba); |
1366 | |||
1367 | dev_info(dev, "controller resetting...\n"); | ||
1368 | old_state = hisi_hba->hw->get_phys_state(hisi_hba); | ||
1369 | 1315 | ||
1370 | scsi_block_requests(shost); | 1316 | scsi_block_requests(shost); |
1371 | hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000); | 1317 | hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000); |
@@ -1374,34 +1320,61 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) | |||
1374 | del_timer_sync(&hisi_hba->timer); | 1320 | del_timer_sync(&hisi_hba->timer); |
1375 | 1321 | ||
1376 | set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); | 1322 | set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); |
1377 | rc = hisi_hba->hw->soft_reset(hisi_hba); | 1323 | } |
1378 | if (rc) { | 1324 | EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare); |
1379 | dev_warn(dev, "controller reset failed (%d)\n", rc); | ||
1380 | clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); | ||
1381 | scsi_unblock_requests(shost); | ||
1382 | goto out; | ||
1383 | } | ||
1384 | 1325 | ||
1385 | clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); | 1326 | void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba) |
1327 | { | ||
1328 | struct Scsi_Host *shost = hisi_hba->shost; | ||
1329 | u32 state; | ||
1386 | 1330 | ||
1387 | /* Init and wait for PHYs to come up and all libsas event finished. */ | 1331 | /* Init and wait for PHYs to come up and all libsas event finished. */ |
1388 | hisi_hba->hw->phys_init(hisi_hba); | 1332 | hisi_hba->hw->phys_init(hisi_hba); |
1389 | msleep(1000); | 1333 | msleep(1000); |
1390 | hisi_sas_refresh_port_id(hisi_hba); | 1334 | hisi_sas_refresh_port_id(hisi_hba); |
1335 | clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); | ||
1336 | up(&hisi_hba->sem); | ||
1391 | 1337 | ||
1392 | if (hisi_hba->reject_stp_links_msk) | 1338 | if (hisi_hba->reject_stp_links_msk) |
1393 | hisi_sas_terminate_stp_reject(hisi_hba); | 1339 | hisi_sas_terminate_stp_reject(hisi_hba); |
1394 | hisi_sas_reset_init_all_devices(hisi_hba); | 1340 | hisi_sas_reset_init_all_devices(hisi_hba); |
1395 | scsi_unblock_requests(shost); | 1341 | scsi_unblock_requests(shost); |
1342 | clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); | ||
1396 | 1343 | ||
1397 | state = hisi_hba->hw->get_phys_state(hisi_hba); | 1344 | state = hisi_hba->hw->get_phys_state(hisi_hba); |
1398 | hisi_sas_rescan_topology(hisi_hba, old_state, state); | 1345 | hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state, state); |
1399 | dev_info(dev, "controller reset complete\n"); | 1346 | } |
1347 | EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done); | ||
1400 | 1348 | ||
1401 | out: | 1349 | static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) |
1402 | clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); | 1350 | { |
1351 | struct device *dev = hisi_hba->dev; | ||
1352 | struct Scsi_Host *shost = hisi_hba->shost; | ||
1353 | int rc; | ||
1403 | 1354 | ||
1404 | return rc; | 1355 | if (!hisi_hba->hw->soft_reset) |
1356 | return -1; | ||
1357 | |||
1358 | if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) | ||
1359 | return -1; | ||
1360 | |||
1361 | dev_info(dev, "controller resetting...\n"); | ||
1362 | hisi_sas_controller_reset_prepare(hisi_hba); | ||
1363 | |||
1364 | rc = hisi_hba->hw->soft_reset(hisi_hba); | ||
1365 | if (rc) { | ||
1366 | dev_warn(dev, "controller reset failed (%d)\n", rc); | ||
1367 | clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); | ||
1368 | up(&hisi_hba->sem); | ||
1369 | scsi_unblock_requests(shost); | ||
1370 | clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); | ||
1371 | return rc; | ||
1372 | } | ||
1373 | |||
1374 | hisi_sas_controller_reset_done(hisi_hba); | ||
1375 | dev_info(dev, "controller reset complete\n"); | ||
1376 | |||
1377 | return 0; | ||
1405 | } | 1378 | } |
1406 | 1379 | ||
1407 | static int hisi_sas_abort_task(struct sas_task *task) | 1380 | static int hisi_sas_abort_task(struct sas_task *task) |
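Taken together, the hunks above implement a simple reset gate with hisi_hba->sem: hisi_sas_controller_reset_prepare() takes the semaphore before setting HISI_SAS_REJECT_CMD_BIT, and hisi_sas_controller_reset_done() releases it once the PHYs are back up. A process-context submitter that sees the reject flag can then park on the semaphore instead of failing outright, which is what the down()/up() pair added to hisi_sas_task_exec() does:

    /* Submitter side (process context only): wait out an in-progress reset. */
    if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
        if (in_softirq())
            return -EINVAL;         /* cannot sleep in softirq context */
        down(&hisi_hba->sem);       /* blocks while the reset path holds the semaphore */
        up(&hisi_hba->sem);
    }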
@@ -1644,14 +1617,32 @@ out: | |||
1644 | static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha) | 1617 | static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha) |
1645 | { | 1618 | { |
1646 | struct hisi_hba *hisi_hba = sas_ha->lldd_ha; | 1619 | struct hisi_hba *hisi_hba = sas_ha->lldd_ha; |
1620 | struct device *dev = hisi_hba->dev; | ||
1647 | HISI_SAS_DECLARE_RST_WORK_ON_STACK(r); | 1621 | HISI_SAS_DECLARE_RST_WORK_ON_STACK(r); |
1622 | int rc, i; | ||
1648 | 1623 | ||
1649 | queue_work(hisi_hba->wq, &r.work); | 1624 | queue_work(hisi_hba->wq, &r.work); |
1650 | wait_for_completion(r.completion); | 1625 | wait_for_completion(r.completion); |
1651 | if (r.done) | 1626 | if (!r.done) |
1652 | return TMF_RESP_FUNC_COMPLETE; | 1627 | return TMF_RESP_FUNC_FAILED; |
1628 | |||
1629 | for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { | ||
1630 | struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; | ||
1631 | struct domain_device *device = sas_dev->sas_device; | ||
1653 | 1632 | ||
1654 | return TMF_RESP_FUNC_FAILED; | 1633 | if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device || |
1634 | DEV_IS_EXPANDER(device->dev_type)) | ||
1635 | continue; | ||
1636 | |||
1637 | rc = hisi_sas_debug_I_T_nexus_reset(device); | ||
1638 | if (rc != TMF_RESP_FUNC_COMPLETE) | ||
1639 | dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n", | ||
1640 | sas_dev->device_id, rc); | ||
1641 | } | ||
1642 | |||
1643 | hisi_sas_release_tasks(hisi_hba); | ||
1644 | |||
1645 | return TMF_RESP_FUNC_COMPLETE; | ||
1655 | } | 1646 | } |
1656 | 1647 | ||
1657 | static int hisi_sas_query_task(struct sas_task *task) | 1648 | static int hisi_sas_query_task(struct sas_task *task) |
@@ -1723,21 +1714,13 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, | |||
1723 | spin_unlock_irqrestore(&hisi_hba->lock, flags); | 1714 | spin_unlock_irqrestore(&hisi_hba->lock, flags); |
1724 | 1715 | ||
1725 | slot = &hisi_hba->slot_info[slot_idx]; | 1716 | slot = &hisi_hba->slot_info[slot_idx]; |
1726 | memset(slot, 0, sizeof(struct hisi_sas_slot)); | ||
1727 | |||
1728 | slot->buf = dma_pool_alloc(hisi_hba->buffer_pool, | ||
1729 | GFP_ATOMIC, &slot->buf_dma); | ||
1730 | if (!slot->buf) { | ||
1731 | rc = -ENOMEM; | ||
1732 | goto err_out_tag; | ||
1733 | } | ||
1734 | 1717 | ||
1735 | spin_lock_irqsave(&dq->lock, flags_dq); | 1718 | spin_lock_irqsave(&dq->lock, flags_dq); |
1736 | wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq); | 1719 | wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq); |
1737 | if (wr_q_index < 0) { | 1720 | if (wr_q_index < 0) { |
1738 | spin_unlock_irqrestore(&dq->lock, flags_dq); | 1721 | spin_unlock_irqrestore(&dq->lock, flags_dq); |
1739 | rc = -EAGAIN; | 1722 | rc = -EAGAIN; |
1740 | goto err_out_buf; | 1723 | goto err_out_tag; |
1741 | } | 1724 | } |
1742 | list_add_tail(&slot->delivery, &dq->list); | 1725 | list_add_tail(&slot->delivery, &dq->list); |
1743 | spin_unlock_irqrestore(&dq->lock, flags_dq); | 1726 | spin_unlock_irqrestore(&dq->lock, flags_dq); |
@@ -1745,7 +1728,6 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, | |||
1745 | dlvry_queue = dq->id; | 1728 | dlvry_queue = dq->id; |
1746 | dlvry_queue_slot = wr_q_index; | 1729 | dlvry_queue_slot = wr_q_index; |
1747 | 1730 | ||
1748 | slot->idx = slot_idx; | ||
1749 | slot->n_elem = n_elem; | 1731 | slot->n_elem = n_elem; |
1750 | slot->dlvry_queue = dlvry_queue; | 1732 | slot->dlvry_queue = dlvry_queue; |
1751 | slot->dlvry_queue_slot = dlvry_queue_slot; | 1733 | slot->dlvry_queue_slot = dlvry_queue_slot; |
@@ -1767,7 +1749,7 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, | |||
1767 | task->task_state_flags |= SAS_TASK_AT_INITIATOR; | 1749 | task->task_state_flags |= SAS_TASK_AT_INITIATOR; |
1768 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 1750 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
1769 | 1751 | ||
1770 | slot->ready = 1; | 1752 | WRITE_ONCE(slot->ready, 1); |
1771 | /* send abort command to the chip */ | 1753 | /* send abort command to the chip */ |
1772 | spin_lock_irqsave(&dq->lock, flags); | 1754 | spin_lock_irqsave(&dq->lock, flags); |
1773 | list_add_tail(&slot->entry, &sas_dev->list); | 1755 | list_add_tail(&slot->entry, &sas_dev->list); |
@@ -1776,9 +1758,6 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, | |||
1776 | 1758 | ||
1777 | return 0; | 1759 | return 0; |
1778 | 1760 | ||
1779 | err_out_buf: | ||
1780 | dma_pool_free(hisi_hba->buffer_pool, slot->buf, | ||
1781 | slot->buf_dma); | ||
1782 | err_out_tag: | 1761 | err_out_tag: |
1783 | spin_lock_irqsave(&hisi_hba->lock, flags); | 1762 | spin_lock_irqsave(&hisi_hba->lock, flags); |
1784 | hisi_sas_slot_index_free(hisi_hba, slot_idx); | 1763 | hisi_sas_slot_index_free(hisi_hba, slot_idx); |
@@ -1919,7 +1898,8 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy) | |||
1919 | } else { | 1898 | } else { |
1920 | struct hisi_sas_port *port = phy->port; | 1899 | struct hisi_sas_port *port = phy->port; |
1921 | 1900 | ||
1922 | if (phy->in_reset) { | 1901 | if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) || |
1902 | phy->in_reset) { | ||
1923 | dev_info(dev, "ignore flutter phy%d down\n", phy_no); | 1903 | dev_info(dev, "ignore flutter phy%d down\n", phy_no); |
1924 | return; | 1904 | return; |
1925 | } | 1905 | } |
@@ -2014,8 +1994,11 @@ EXPORT_SYMBOL_GPL(hisi_sas_init_mem); | |||
2014 | int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) | 1994 | int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) |
2015 | { | 1995 | { |
2016 | struct device *dev = hisi_hba->dev; | 1996 | struct device *dev = hisi_hba->dev; |
2017 | int i, s, max_command_entries = hisi_hba->hw->max_command_entries; | 1997 | int i, j, s, max_command_entries = hisi_hba->hw->max_command_entries; |
1998 | int max_command_entries_ru, sz_slot_buf_ru; | ||
1999 | int blk_cnt, slots_per_blk; | ||
2018 | 2000 | ||
2001 | sema_init(&hisi_hba->sem, 1); | ||
2019 | spin_lock_init(&hisi_hba->lock); | 2002 | spin_lock_init(&hisi_hba->lock); |
2020 | for (i = 0; i < hisi_hba->n_phy; i++) { | 2003 | for (i = 0; i < hisi_hba->n_phy; i++) { |
2021 | hisi_sas_phy_init(hisi_hba, i); | 2004 | hisi_sas_phy_init(hisi_hba, i); |
@@ -2045,29 +2028,27 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) | |||
2045 | 2028 | ||
2046 | /* Delivery queue */ | 2029 | /* Delivery queue */ |
2047 | s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; | 2030 | s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; |
2048 | hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s, | 2031 | hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s, |
2049 | &hisi_hba->cmd_hdr_dma[i], GFP_KERNEL); | 2032 | &hisi_hba->cmd_hdr_dma[i], |
2033 | GFP_KERNEL); | ||
2050 | if (!hisi_hba->cmd_hdr[i]) | 2034 | if (!hisi_hba->cmd_hdr[i]) |
2051 | goto err_out; | 2035 | goto err_out; |
2052 | 2036 | ||
2053 | /* Completion queue */ | 2037 | /* Completion queue */ |
2054 | s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; | 2038 | s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; |
2055 | hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s, | 2039 | hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s, |
2056 | &hisi_hba->complete_hdr_dma[i], GFP_KERNEL); | 2040 | &hisi_hba->complete_hdr_dma[i], |
2041 | GFP_KERNEL); | ||
2057 | if (!hisi_hba->complete_hdr[i]) | 2042 | if (!hisi_hba->complete_hdr[i]) |
2058 | goto err_out; | 2043 | goto err_out; |
2059 | } | 2044 | } |
2060 | 2045 | ||
2061 | s = sizeof(struct hisi_sas_slot_buf_table); | ||
2062 | hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0); | ||
2063 | if (!hisi_hba->buffer_pool) | ||
2064 | goto err_out; | ||
2065 | |||
2066 | s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); | 2046 | s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); |
2067 | hisi_hba->itct = dma_zalloc_coherent(dev, s, &hisi_hba->itct_dma, | 2047 | hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma, |
2068 | GFP_KERNEL); | 2048 | GFP_KERNEL); |
2069 | if (!hisi_hba->itct) | 2049 | if (!hisi_hba->itct) |
2070 | goto err_out; | 2050 | goto err_out; |
2051 | memset(hisi_hba->itct, 0, s); | ||
2071 | 2052 | ||
2072 | hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries, | 2053 | hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries, |
2073 | sizeof(struct hisi_sas_slot), | 2054 | sizeof(struct hisi_sas_slot), |
@@ -2075,15 +2056,45 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) | |||
2075 | if (!hisi_hba->slot_info) | 2056 | if (!hisi_hba->slot_info) |
2076 | goto err_out; | 2057 | goto err_out; |
2077 | 2058 | ||
2059 | /* roundup to avoid overly large block size */ | ||
2060 | max_command_entries_ru = roundup(max_command_entries, 64); | ||
2061 | sz_slot_buf_ru = roundup(sizeof(struct hisi_sas_slot_buf_table), 64); | ||
2062 | s = lcm(max_command_entries_ru, sz_slot_buf_ru); | ||
2063 | blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s; | ||
2064 | slots_per_blk = s / sz_slot_buf_ru; | ||
2065 | for (i = 0; i < blk_cnt; i++) { | ||
2066 | struct hisi_sas_slot_buf_table *buf; | ||
2067 | dma_addr_t buf_dma; | ||
2068 | int slot_index = i * slots_per_blk; | ||
2069 | |||
2070 | buf = dmam_alloc_coherent(dev, s, &buf_dma, GFP_KERNEL); | ||
2071 | if (!buf) | ||
2072 | goto err_out; | ||
2073 | memset(buf, 0, s); | ||
2074 | |||
2075 | for (j = 0; j < slots_per_blk; j++, slot_index++) { | ||
2076 | struct hisi_sas_slot *slot; | ||
2077 | |||
2078 | slot = &hisi_hba->slot_info[slot_index]; | ||
2079 | slot->buf = buf; | ||
2080 | slot->buf_dma = buf_dma; | ||
2081 | slot->idx = slot_index; | ||
2082 | |||
2083 | buf++; | ||
2084 | buf_dma += sizeof(*buf); | ||
2085 | } | ||
2086 | } | ||
2087 | |||
2078 | s = max_command_entries * sizeof(struct hisi_sas_iost); | 2088 | s = max_command_entries * sizeof(struct hisi_sas_iost); |
2079 | hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma, | 2089 | hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma, |
2080 | GFP_KERNEL); | 2090 | GFP_KERNEL); |
2081 | if (!hisi_hba->iost) | 2091 | if (!hisi_hba->iost) |
2082 | goto err_out; | 2092 | goto err_out; |
2083 | 2093 | ||
2084 | s = max_command_entries * sizeof(struct hisi_sas_breakpoint); | 2094 | s = max_command_entries * sizeof(struct hisi_sas_breakpoint); |
2085 | hisi_hba->breakpoint = dma_alloc_coherent(dev, s, | 2095 | hisi_hba->breakpoint = dmam_alloc_coherent(dev, s, |
2086 | &hisi_hba->breakpoint_dma, GFP_KERNEL); | 2096 | &hisi_hba->breakpoint_dma, |
2097 | GFP_KERNEL); | ||
2087 | if (!hisi_hba->breakpoint) | 2098 | if (!hisi_hba->breakpoint) |
2088 | goto err_out; | 2099 | goto err_out; |
2089 | 2100 | ||
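The roundup()/lcm() arithmetic in the hunk above sizes each coherent DMA block so that it holds a whole number of 64-byte-aligned slot buffer tables, and the resulting blocks together cover every command slot (blk_cnt * slots_per_blk == max_command_entries_ru). A minimal standalone sketch of the same arithmetic; the 9408-byte buffer size is an assumed, illustrative value, not the real sizeof(struct hisi_sas_slot_buf_table):

#include <stdio.h>

/* Hypothetical numbers for illustration only. */
#define MAX_CMDS_RU	4096	/* roundup(max_command_entries, 64) */
#define SLOT_BUF_RU	9408	/* roundup(sizeof(slot buf table), 64), assumed */

static unsigned long gcd(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;
		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	unsigned long s = MAX_CMDS_RU / gcd(MAX_CMDS_RU, SLOT_BUF_RU) * SLOT_BUF_RU; /* lcm */
	unsigned long blk_cnt = (unsigned long)MAX_CMDS_RU * SLOT_BUF_RU / s;
	unsigned long slots_per_blk = s / SLOT_BUF_RU;

	/* blk_cnt * slots_per_blk always equals MAX_CMDS_RU, so every
	 * command slot gets a buffer and each block starts on a slot
	 * boundary. */
	printf("block size %lu, %lu blocks, %lu slots/block\n",
	       s, blk_cnt, slots_per_blk);
	return 0;
}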
@@ -2094,14 +2105,16 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost) | |||
2094 | goto err_out; | 2105 | goto err_out; |
2095 | 2106 | ||
2096 | s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS; | 2107 | s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS; |
2097 | hisi_hba->initial_fis = dma_alloc_coherent(dev, s, | 2108 | hisi_hba->initial_fis = dmam_alloc_coherent(dev, s, |
2098 | &hisi_hba->initial_fis_dma, GFP_KERNEL); | 2109 | &hisi_hba->initial_fis_dma, |
2110 | GFP_KERNEL); | ||
2099 | if (!hisi_hba->initial_fis) | 2111 | if (!hisi_hba->initial_fis) |
2100 | goto err_out; | 2112 | goto err_out; |
2101 | 2113 | ||
2102 | s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint); | 2114 | s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint); |
2103 | hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s, | 2115 | hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s, |
2104 | &hisi_hba->sata_breakpoint_dma, GFP_KERNEL); | 2116 | &hisi_hba->sata_breakpoint_dma, |
2117 | GFP_KERNEL); | ||
2105 | if (!hisi_hba->sata_breakpoint) | 2118 | if (!hisi_hba->sata_breakpoint) |
2106 | goto err_out; | 2119 | goto err_out; |
2107 | hisi_sas_init_mem(hisi_hba); | 2120 | hisi_sas_init_mem(hisi_hba); |
@@ -2122,54 +2135,6 @@ EXPORT_SYMBOL_GPL(hisi_sas_alloc); | |||
2122 | 2135 | ||
2123 | void hisi_sas_free(struct hisi_hba *hisi_hba) | 2136 | void hisi_sas_free(struct hisi_hba *hisi_hba) |
2124 | { | 2137 | { |
2125 | struct device *dev = hisi_hba->dev; | ||
2126 | int i, s, max_command_entries = hisi_hba->hw->max_command_entries; | ||
2127 | |||
2128 | for (i = 0; i < hisi_hba->queue_count; i++) { | ||
2129 | s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; | ||
2130 | if (hisi_hba->cmd_hdr[i]) | ||
2131 | dma_free_coherent(dev, s, | ||
2132 | hisi_hba->cmd_hdr[i], | ||
2133 | hisi_hba->cmd_hdr_dma[i]); | ||
2134 | |||
2135 | s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; | ||
2136 | if (hisi_hba->complete_hdr[i]) | ||
2137 | dma_free_coherent(dev, s, | ||
2138 | hisi_hba->complete_hdr[i], | ||
2139 | hisi_hba->complete_hdr_dma[i]); | ||
2140 | } | ||
2141 | |||
2142 | dma_pool_destroy(hisi_hba->buffer_pool); | ||
2143 | |||
2144 | s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); | ||
2145 | if (hisi_hba->itct) | ||
2146 | dma_free_coherent(dev, s, | ||
2147 | hisi_hba->itct, hisi_hba->itct_dma); | ||
2148 | |||
2149 | s = max_command_entries * sizeof(struct hisi_sas_iost); | ||
2150 | if (hisi_hba->iost) | ||
2151 | dma_free_coherent(dev, s, | ||
2152 | hisi_hba->iost, hisi_hba->iost_dma); | ||
2153 | |||
2154 | s = max_command_entries * sizeof(struct hisi_sas_breakpoint); | ||
2155 | if (hisi_hba->breakpoint) | ||
2156 | dma_free_coherent(dev, s, | ||
2157 | hisi_hba->breakpoint, | ||
2158 | hisi_hba->breakpoint_dma); | ||
2159 | |||
2160 | |||
2161 | s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS; | ||
2162 | if (hisi_hba->initial_fis) | ||
2163 | dma_free_coherent(dev, s, | ||
2164 | hisi_hba->initial_fis, | ||
2165 | hisi_hba->initial_fis_dma); | ||
2166 | |||
2167 | s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint); | ||
2168 | if (hisi_hba->sata_breakpoint) | ||
2169 | dma_free_coherent(dev, s, | ||
2170 | hisi_hba->sata_breakpoint, | ||
2171 | hisi_hba->sata_breakpoint_dma); | ||
2172 | |||
2173 | if (hisi_hba->wq) | 2138 | if (hisi_hba->wq) |
2174 | destroy_workqueue(hisi_hba->wq); | 2139 | destroy_workqueue(hisi_hba->wq); |
2175 | } | 2140 | } |
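The dma_alloc_coherent() to dmam_alloc_coherent() conversions are what allow the explicit dma_free_coherent() calls in hisi_sas_free() to be dropped: managed (devres) DMA allocations are released automatically when the device is unbound. A minimal sketch of the pattern, with illustrative names:

#include <linux/device.h>
#include <linux/dma-mapping.h>

struct demo_ctx {
	void *ring;
	dma_addr_t ring_dma;
};

/* Illustrative probe-time allocation: no matching free is needed in the
 * remove path because the buffer's lifetime is tied to the device. */
static int demo_alloc_ring(struct device *dev, struct demo_ctx *ctx, size_t sz)
{
	ctx->ring = dmam_alloc_coherent(dev, sz, &ctx->ring_dma, GFP_KERNEL);
	if (!ctx->ring)
		return -ENOMEM;
	return 0;
}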
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index 89ab18c1959c..8f60f0e04599 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | |||
@@ -903,23 +903,28 @@ get_free_slot_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq) | |||
903 | static void start_delivery_v1_hw(struct hisi_sas_dq *dq) | 903 | static void start_delivery_v1_hw(struct hisi_sas_dq *dq) |
904 | { | 904 | { |
905 | struct hisi_hba *hisi_hba = dq->hisi_hba; | 905 | struct hisi_hba *hisi_hba = dq->hisi_hba; |
906 | struct hisi_sas_slot *s, *s1; | 906 | struct hisi_sas_slot *s, *s1, *s2 = NULL; |
907 | struct list_head *dq_list; | 907 | struct list_head *dq_list; |
908 | int dlvry_queue = dq->id; | 908 | int dlvry_queue = dq->id; |
909 | int wp, count = 0; | 909 | int wp; |
910 | 910 | ||
911 | dq_list = &dq->list; | 911 | dq_list = &dq->list; |
912 | list_for_each_entry_safe(s, s1, &dq->list, delivery) { | 912 | list_for_each_entry_safe(s, s1, &dq->list, delivery) { |
913 | if (!s->ready) | 913 | if (!s->ready) |
914 | break; | 914 | break; |
915 | count++; | 915 | s2 = s; |
916 | wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS; | ||
917 | list_del(&s->delivery); | 916 | list_del(&s->delivery); |
918 | } | 917 | } |
919 | 918 | ||
920 | if (!count) | 919 | if (!s2) |
921 | return; | 920 | return; |
922 | 921 | ||
922 | /* | ||
923 | * Ensure that memory for slots built on other CPUs is observed. | ||
924 | */ | ||
925 | smp_rmb(); | ||
926 | wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS; | ||
927 | |||
923 | hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp); | 928 | hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp); |
924 | } | 929 | } |
925 | 930 | ||
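The delivery path now remembers only the last slot whose ready flag is set and issues smp_rmb() before reading its dlvry_queue_slot, so slot fields written on another CPU before WRITE_ONCE(slot->ready, 1) are observed before the write pointer is computed. A simplified sketch of that publish/consume pattern; in the driver itself the producer-side ordering comes from the dq->lock rather than an explicit smp_wmb():

#include <linux/compiler.h>
#include <asm/barrier.h>

struct demo_slot {
	int dlvry_queue_slot;
	int ready;
};

/* Producer side (illustrative): fill the slot, then publish it. */
static void demo_publish(struct demo_slot *slot, int idx)
{
	slot->dlvry_queue_slot = idx;	/* payload written first */
	smp_wmb();			/* order payload before the flag */
	WRITE_ONCE(slot->ready, 1);
}

/* Consumer side (illustrative): act only on published slots. */
static int demo_consume(struct demo_slot *slot)
{
	if (!READ_ONCE(slot->ready))
		return -1;
	smp_rmb();			/* pairs with the producer's barrier */
	return slot->dlvry_queue_slot;
}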
@@ -1296,11 +1301,8 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba, | |||
1296 | !(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK)) { | 1301 | !(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK)) { |
1297 | 1302 | ||
1298 | slot_err_v1_hw(hisi_hba, task, slot); | 1303 | slot_err_v1_hw(hisi_hba, task, slot); |
1299 | if (unlikely(slot->abort)) { | 1304 | if (unlikely(slot->abort)) |
1300 | queue_work(hisi_hba->wq, &slot->abort_slot); | ||
1301 | /* immediately return and do not complete */ | ||
1302 | return ts->stat; | 1305 | return ts->stat; |
1303 | } | ||
1304 | goto out; | 1306 | goto out; |
1305 | } | 1307 | } |
1306 | 1308 | ||
@@ -1469,7 +1471,8 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p) | |||
1469 | goto end; | 1471 | goto end; |
1470 | } | 1472 | } |
1471 | 1473 | ||
1472 | sha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); | 1474 | if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) |
1475 | sha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); | ||
1473 | 1476 | ||
1474 | end: | 1477 | end: |
1475 | hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, | 1478 | hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, |
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 213c530e63f2..9c5c5a601332 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | |||
@@ -1665,23 +1665,28 @@ get_free_slot_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq) | |||
1665 | static void start_delivery_v2_hw(struct hisi_sas_dq *dq) | 1665 | static void start_delivery_v2_hw(struct hisi_sas_dq *dq) |
1666 | { | 1666 | { |
1667 | struct hisi_hba *hisi_hba = dq->hisi_hba; | 1667 | struct hisi_hba *hisi_hba = dq->hisi_hba; |
1668 | struct hisi_sas_slot *s, *s1; | 1668 | struct hisi_sas_slot *s, *s1, *s2 = NULL; |
1669 | struct list_head *dq_list; | 1669 | struct list_head *dq_list; |
1670 | int dlvry_queue = dq->id; | 1670 | int dlvry_queue = dq->id; |
1671 | int wp, count = 0; | 1671 | int wp; |
1672 | 1672 | ||
1673 | dq_list = &dq->list; | 1673 | dq_list = &dq->list; |
1674 | list_for_each_entry_safe(s, s1, &dq->list, delivery) { | 1674 | list_for_each_entry_safe(s, s1, &dq->list, delivery) { |
1675 | if (!s->ready) | 1675 | if (!s->ready) |
1676 | break; | 1676 | break; |
1677 | count++; | 1677 | s2 = s; |
1678 | wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS; | ||
1679 | list_del(&s->delivery); | 1678 | list_del(&s->delivery); |
1680 | } | 1679 | } |
1681 | 1680 | ||
1682 | if (!count) | 1681 | if (!s2) |
1683 | return; | 1682 | return; |
1684 | 1683 | ||
1684 | /* | ||
1685 | * Ensure that memory for slots built on other CPUs is observed. | ||
1686 | */ | ||
1687 | smp_rmb(); | ||
1688 | wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS; | ||
1689 | |||
1685 | hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp); | 1690 | hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp); |
1686 | } | 1691 | } |
1687 | 1692 | ||
@@ -2840,7 +2845,8 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba) | |||
2840 | 2845 | ||
2841 | hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1); | 2846 | hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1); |
2842 | bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS); | 2847 | bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS); |
2843 | if (bcast_status & RX_BCAST_CHG_MSK) | 2848 | if ((bcast_status & RX_BCAST_CHG_MSK) && |
2849 | !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) | ||
2844 | sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); | 2850 | sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); |
2845 | hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, | 2851 | hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, |
2846 | CHL_INT0_SL_RX_BCST_ACK_MSK); | 2852 | CHL_INT0_SL_RX_BCST_ACK_MSK); |
@@ -3234,8 +3240,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p) | |||
3234 | if (fis->status & ATA_ERR) { | 3240 | if (fis->status & ATA_ERR) { |
3235 | dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n", phy_no, | 3241 | dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n", phy_no, |
3236 | fis->status); | 3242 | fis->status); |
3237 | disable_phy_v2_hw(hisi_hba, phy_no); | 3243 | hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); |
3238 | enable_phy_v2_hw(hisi_hba, phy_no); | ||
3239 | res = IRQ_NONE; | 3244 | res = IRQ_NONE; |
3240 | goto end; | 3245 | goto end; |
3241 | } | 3246 | } |
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 9f1e2d03f914..08b503e274b8 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | |||
@@ -51,7 +51,6 @@ | |||
51 | #define CFG_ABT_SET_IPTT_DONE 0xd8 | 51 | #define CFG_ABT_SET_IPTT_DONE 0xd8 |
52 | #define CFG_ABT_SET_IPTT_DONE_OFF 0 | 52 | #define CFG_ABT_SET_IPTT_DONE_OFF 0 |
53 | #define HGC_IOMB_PROC1_STATUS 0x104 | 53 | #define HGC_IOMB_PROC1_STATUS 0x104 |
54 | #define CFG_1US_TIMER_TRSH 0xcc | ||
55 | #define CHNL_INT_STATUS 0x148 | 54 | #define CHNL_INT_STATUS 0x148 |
56 | #define HGC_AXI_FIFO_ERR_INFO 0x154 | 55 | #define HGC_AXI_FIFO_ERR_INFO 0x154 |
57 | #define AXI_ERR_INFO_OFF 0 | 56 | #define AXI_ERR_INFO_OFF 0 |
@@ -121,6 +120,8 @@ | |||
121 | #define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF) | 120 | #define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF) |
122 | #define PHY_CFG_DC_OPT_OFF 2 | 121 | #define PHY_CFG_DC_OPT_OFF 2 |
123 | #define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF) | 122 | #define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF) |
123 | #define PHY_CFG_PHY_RST_OFF 3 | ||
124 | #define PHY_CFG_PHY_RST_MSK (0x1 << PHY_CFG_PHY_RST_OFF) | ||
124 | #define PROG_PHY_LINK_RATE (PORT_BASE + 0x8) | 125 | #define PROG_PHY_LINK_RATE (PORT_BASE + 0x8) |
125 | #define PHY_CTRL (PORT_BASE + 0x14) | 126 | #define PHY_CTRL (PORT_BASE + 0x14) |
126 | #define PHY_CTRL_RESET_OFF 0 | 127 | #define PHY_CTRL_RESET_OFF 0 |
@@ -131,6 +132,9 @@ | |||
131 | #define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF) | 132 | #define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF) |
132 | #define SL_CTA_OFF 17 | 133 | #define SL_CTA_OFF 17 |
133 | #define SL_CTA_MSK (0x1 << SL_CTA_OFF) | 134 | #define SL_CTA_MSK (0x1 << SL_CTA_OFF) |
135 | #define RX_PRIMS_STATUS (PORT_BASE + 0x98) | ||
136 | #define RX_BCAST_CHG_OFF 1 | ||
137 | #define RX_BCAST_CHG_MSK (0x1 << RX_BCAST_CHG_OFF) | ||
134 | #define TX_ID_DWORD0 (PORT_BASE + 0x9c) | 138 | #define TX_ID_DWORD0 (PORT_BASE + 0x9c) |
135 | #define TX_ID_DWORD1 (PORT_BASE + 0xa0) | 139 | #define TX_ID_DWORD1 (PORT_BASE + 0xa0) |
136 | #define TX_ID_DWORD2 (PORT_BASE + 0xa4) | 140 | #define TX_ID_DWORD2 (PORT_BASE + 0xa4) |
@@ -206,6 +210,8 @@ | |||
206 | 210 | ||
207 | #define AXI_MASTER_CFG_BASE (0x5000) | 211 | #define AXI_MASTER_CFG_BASE (0x5000) |
208 | #define AM_CTRL_GLOBAL (0x0) | 212 | #define AM_CTRL_GLOBAL (0x0) |
213 | #define AM_CTRL_SHUTDOWN_REQ_OFF 0 | ||
214 | #define AM_CTRL_SHUTDOWN_REQ_MSK (0x1 << AM_CTRL_SHUTDOWN_REQ_OFF) | ||
209 | #define AM_CURR_TRANS_RETURN (0x150) | 215 | #define AM_CURR_TRANS_RETURN (0x150) |
210 | 216 | ||
211 | #define AM_CFG_MAX_TRANS (0x5010) | 217 | #define AM_CFG_MAX_TRANS (0x5010) |
@@ -425,7 +431,6 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) | |||
425 | (u32)((1ULL << hisi_hba->queue_count) - 1)); | 431 | (u32)((1ULL << hisi_hba->queue_count) - 1)); |
426 | hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400); | 432 | hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400); |
427 | hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108); | 433 | hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108); |
428 | hisi_sas_write32(hisi_hba, CFG_1US_TIMER_TRSH, 0xd); | ||
429 | hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); | 434 | hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1); |
430 | hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); | 435 | hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1); |
431 | hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); | 436 | hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1); |
@@ -486,6 +491,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba) | |||
486 | hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0); | 491 | hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0); |
487 | hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1); | 492 | hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1); |
488 | hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120); | 493 | hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120); |
494 | hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01); | ||
489 | 495 | ||
490 | /* used for 12G negotiate */ | 496 | /* used for 12G negotiate */ |
491 | hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e); | 497 | hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e); |
@@ -758,15 +764,25 @@ static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) | |||
758 | u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); | 764 | u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); |
759 | 765 | ||
760 | cfg |= PHY_CFG_ENA_MSK; | 766 | cfg |= PHY_CFG_ENA_MSK; |
767 | cfg &= ~PHY_CFG_PHY_RST_MSK; | ||
761 | hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); | 768 | hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); |
762 | } | 769 | } |
763 | 770 | ||
764 | static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) | 771 | static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) |
765 | { | 772 | { |
766 | u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); | 773 | u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG); |
774 | u32 state; | ||
767 | 775 | ||
768 | cfg &= ~PHY_CFG_ENA_MSK; | 776 | cfg &= ~PHY_CFG_ENA_MSK; |
769 | hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); | 777 | hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); |
778 | |||
779 | mdelay(50); | ||
780 | |||
781 | state = hisi_sas_read32(hisi_hba, PHY_STATE); | ||
782 | if (state & BIT(phy_no)) { | ||
783 | cfg |= PHY_CFG_PHY_RST_MSK; | ||
784 | hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg); | ||
785 | } | ||
770 | } | 786 | } |
771 | 787 | ||
772 | static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) | 788 | static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no) |
@@ -866,23 +882,28 @@ get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq) | |||
866 | static void start_delivery_v3_hw(struct hisi_sas_dq *dq) | 882 | static void start_delivery_v3_hw(struct hisi_sas_dq *dq) |
867 | { | 883 | { |
868 | struct hisi_hba *hisi_hba = dq->hisi_hba; | 884 | struct hisi_hba *hisi_hba = dq->hisi_hba; |
869 | struct hisi_sas_slot *s, *s1; | 885 | struct hisi_sas_slot *s, *s1, *s2 = NULL; |
870 | struct list_head *dq_list; | 886 | struct list_head *dq_list; |
871 | int dlvry_queue = dq->id; | 887 | int dlvry_queue = dq->id; |
872 | int wp, count = 0; | 888 | int wp; |
873 | 889 | ||
874 | dq_list = &dq->list; | 890 | dq_list = &dq->list; |
875 | list_for_each_entry_safe(s, s1, &dq->list, delivery) { | 891 | list_for_each_entry_safe(s, s1, &dq->list, delivery) { |
876 | if (!s->ready) | 892 | if (!s->ready) |
877 | break; | 893 | break; |
878 | count++; | 894 | s2 = s; |
879 | wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS; | ||
880 | list_del(&s->delivery); | 895 | list_del(&s->delivery); |
881 | } | 896 | } |
882 | 897 | ||
883 | if (!count) | 898 | if (!s2) |
884 | return; | 899 | return; |
885 | 900 | ||
901 | /* | ||
902 | * Ensure that memory for slots built on other CPUs is observed. | ||
903 | */ | ||
904 | smp_rmb(); | ||
905 | wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS; | ||
906 | |||
886 | hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp); | 907 | hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp); |
887 | } | 908 | } |
888 | 909 | ||
@@ -1170,6 +1191,16 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) | |||
1170 | dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate); | 1191 | dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate); |
1171 | initial_fis = &hisi_hba->initial_fis[phy_no]; | 1192 | initial_fis = &hisi_hba->initial_fis[phy_no]; |
1172 | fis = &initial_fis->fis; | 1193 | fis = &initial_fis->fis; |
1194 | |||
1195 | /* check ERR bit of Status Register */ | ||
1196 | if (fis->status & ATA_ERR) { | ||
1197 | dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n", | ||
1198 | phy_no, fis->status); | ||
1199 | hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); | ||
1200 | res = IRQ_NONE; | ||
1201 | goto end; | ||
1202 | } | ||
1203 | |||
1173 | sas_phy->oob_mode = SATA_OOB_MODE; | 1204 | sas_phy->oob_mode = SATA_OOB_MODE; |
1174 | attached_sas_addr[0] = 0x50; | 1205 | attached_sas_addr[0] = 0x50; |
1175 | attached_sas_addr[7] = phy_no; | 1206 | attached_sas_addr[7] = phy_no; |
@@ -1256,9 +1287,13 @@ static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba) | |||
1256 | struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; | 1287 | struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; |
1257 | struct asd_sas_phy *sas_phy = &phy->sas_phy; | 1288 | struct asd_sas_phy *sas_phy = &phy->sas_phy; |
1258 | struct sas_ha_struct *sas_ha = &hisi_hba->sha; | 1289 | struct sas_ha_struct *sas_ha = &hisi_hba->sha; |
1290 | u32 bcast_status; | ||
1259 | 1291 | ||
1260 | hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1); | 1292 | hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1); |
1261 | sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); | 1293 | bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS); |
1294 | if ((bcast_status & RX_BCAST_CHG_MSK) && | ||
1295 | !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) | ||
1296 | sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); | ||
1262 | hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, | 1297 | hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, |
1263 | CHL_INT0_SL_RX_BCST_ACK_MSK); | 1298 | CHL_INT0_SL_RX_BCST_ACK_MSK); |
1264 | hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0); | 1299 | hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0); |
@@ -1327,11 +1362,77 @@ static const struct hisi_sas_hw_error port_axi_error[] = { | |||
1327 | }, | 1362 | }, |
1328 | }; | 1363 | }; |
1329 | 1364 | ||
1330 | static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) | 1365 | static void handle_chl_int1_v3_hw(struct hisi_hba *hisi_hba, int phy_no) |
1331 | { | 1366 | { |
1332 | struct hisi_hba *hisi_hba = p; | 1367 | u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1); |
1368 | u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1_MSK); | ||
1333 | struct device *dev = hisi_hba->dev; | 1369 | struct device *dev = hisi_hba->dev; |
1370 | int i; | ||
1371 | |||
1372 | irq_value &= ~irq_msk; | ||
1373 | if (!irq_value) | ||
1374 | return; | ||
1375 | |||
1376 | for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) { | ||
1377 | const struct hisi_sas_hw_error *error = &port_axi_error[i]; | ||
1378 | |||
1379 | if (!(irq_value & error->irq_msk)) | ||
1380 | continue; | ||
1381 | |||
1382 | dev_err(dev, "%s error (phy%d 0x%x) found!\n", | ||
1383 | error->msg, phy_no, irq_value); | ||
1384 | queue_work(hisi_hba->wq, &hisi_hba->rst_work); | ||
1385 | } | ||
1386 | |||
1387 | hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT1, irq_value); | ||
1388 | } | ||
1389 | |||
1390 | static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no) | ||
1391 | { | ||
1392 | u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK); | ||
1393 | u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2); | ||
1394 | struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; | ||
1334 | struct pci_dev *pci_dev = hisi_hba->pci_dev; | 1395 | struct pci_dev *pci_dev = hisi_hba->pci_dev; |
1396 | struct device *dev = hisi_hba->dev; | ||
1397 | |||
1398 | irq_value &= ~irq_msk; | ||
1399 | if (!irq_value) | ||
1400 | return; | ||
1401 | |||
1402 | if (irq_value & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) { | ||
1403 | dev_warn(dev, "phy%d identify timeout\n", phy_no); | ||
1404 | hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); | ||
1405 | } | ||
1406 | |||
1407 | if (irq_value & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) { | ||
1408 | u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, | ||
1409 | STP_LINK_TIMEOUT_STATE); | ||
1410 | |||
1411 | dev_warn(dev, "phy%d stp link timeout (0x%x)\n", | ||
1412 | phy_no, reg_value); | ||
1413 | if (reg_value & BIT(4)) | ||
1414 | hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); | ||
1415 | } | ||
1416 | |||
1417 | if ((irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) && | ||
1418 | (pci_dev->revision == 0x20)) { | ||
1419 | u32 reg_value; | ||
1420 | int rc; | ||
1421 | |||
1422 | rc = hisi_sas_read32_poll_timeout_atomic( | ||
1423 | HILINK_ERR_DFX, reg_value, | ||
1424 | !((reg_value >> 8) & BIT(phy_no)), | ||
1425 | 1000, 10000); | ||
1426 | if (rc) | ||
1427 | hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); | ||
1428 | } | ||
1429 | |||
1430 | hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, irq_value); | ||
1431 | } | ||
1432 | |||
1433 | static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) | ||
1434 | { | ||
1435 | struct hisi_hba *hisi_hba = p; | ||
1335 | u32 irq_msk; | 1436 | u32 irq_msk; |
1336 | int phy_no = 0; | 1437 | int phy_no = 0; |
1337 | 1438 | ||
@@ -1341,84 +1442,12 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) | |||
1341 | while (irq_msk) { | 1442 | while (irq_msk) { |
1342 | u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, | 1443 | u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, |
1343 | CHL_INT0); | 1444 | CHL_INT0); |
1344 | u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no, | ||
1345 | CHL_INT1); | ||
1346 | u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no, | ||
1347 | CHL_INT2); | ||
1348 | u32 irq_msk1 = hisi_sas_phy_read32(hisi_hba, phy_no, | ||
1349 | CHL_INT1_MSK); | ||
1350 | u32 irq_msk2 = hisi_sas_phy_read32(hisi_hba, phy_no, | ||
1351 | CHL_INT2_MSK); | ||
1352 | |||
1353 | irq_value1 &= ~irq_msk1; | ||
1354 | irq_value2 &= ~irq_msk2; | ||
1355 | |||
1356 | if ((irq_msk & (4 << (phy_no * 4))) && | ||
1357 | irq_value1) { | ||
1358 | int i; | ||
1359 | |||
1360 | for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) { | ||
1361 | const struct hisi_sas_hw_error *error = | ||
1362 | &port_axi_error[i]; | ||
1363 | |||
1364 | if (!(irq_value1 & error->irq_msk)) | ||
1365 | continue; | ||
1366 | |||
1367 | dev_err(dev, "%s error (phy%d 0x%x) found!\n", | ||
1368 | error->msg, phy_no, irq_value1); | ||
1369 | queue_work(hisi_hba->wq, &hisi_hba->rst_work); | ||
1370 | } | ||
1371 | |||
1372 | hisi_sas_phy_write32(hisi_hba, phy_no, | ||
1373 | CHL_INT1, irq_value1); | ||
1374 | } | ||
1375 | 1445 | ||
1376 | if (irq_msk & (8 << (phy_no * 4)) && irq_value2) { | 1446 | if (irq_msk & (4 << (phy_no * 4))) |
1377 | struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; | 1447 | handle_chl_int1_v3_hw(hisi_hba, phy_no); |
1378 | 1448 | ||
1379 | if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) { | 1449 | if (irq_msk & (8 << (phy_no * 4))) |
1380 | dev_warn(dev, "phy%d identify timeout\n", | 1450 | handle_chl_int2_v3_hw(hisi_hba, phy_no); |
1381 | phy_no); | ||
1382 | hisi_sas_notify_phy_event(phy, | ||
1383 | HISI_PHYE_LINK_RESET); | ||
1384 | |||
1385 | } | ||
1386 | |||
1387 | if (irq_value2 & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) { | ||
1388 | u32 reg_value = hisi_sas_phy_read32(hisi_hba, | ||
1389 | phy_no, STP_LINK_TIMEOUT_STATE); | ||
1390 | |||
1391 | dev_warn(dev, "phy%d stp link timeout (0x%x)\n", | ||
1392 | phy_no, reg_value); | ||
1393 | if (reg_value & BIT(4)) | ||
1394 | hisi_sas_notify_phy_event(phy, | ||
1395 | HISI_PHYE_LINK_RESET); | ||
1396 | } | ||
1397 | |||
1398 | hisi_sas_phy_write32(hisi_hba, phy_no, | ||
1399 | CHL_INT2, irq_value2); | ||
1400 | |||
1401 | if ((irq_value2 & BIT(CHL_INT2_RX_INVLD_DW_OFF)) && | ||
1402 | (pci_dev->revision == 0x20)) { | ||
1403 | u32 reg_value; | ||
1404 | int rc; | ||
1405 | |||
1406 | rc = hisi_sas_read32_poll_timeout_atomic( | ||
1407 | HILINK_ERR_DFX, reg_value, | ||
1408 | !((reg_value >> 8) & BIT(phy_no)), | ||
1409 | 1000, 10000); | ||
1410 | if (rc) { | ||
1411 | disable_phy_v3_hw(hisi_hba, phy_no); | ||
1412 | hisi_sas_phy_write32(hisi_hba, phy_no, | ||
1413 | CHL_INT2, | ||
1414 | BIT(CHL_INT2_RX_INVLD_DW_OFF)); | ||
1415 | hisi_sas_phy_read32(hisi_hba, phy_no, | ||
1416 | ERR_CNT_INVLD_DW); | ||
1417 | mdelay(1); | ||
1418 | enable_phy_v3_hw(hisi_hba, phy_no); | ||
1419 | } | ||
1420 | } | ||
1421 | } | ||
1422 | 1451 | ||
1423 | if (irq_msk & (2 << (phy_no * 4)) && irq_value0) { | 1452 | if (irq_msk & (2 << (phy_no * 4)) && irq_value0) { |
1424 | hisi_sas_phy_write32(hisi_hba, phy_no, | 1453 | hisi_sas_phy_write32(hisi_hba, phy_no, |
@@ -1964,11 +1993,11 @@ static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no) | |||
1964 | 1993 | ||
1965 | } | 1994 | } |
1966 | 1995 | ||
1967 | static int soft_reset_v3_hw(struct hisi_hba *hisi_hba) | 1996 | static int disable_host_v3_hw(struct hisi_hba *hisi_hba) |
1968 | { | 1997 | { |
1969 | struct device *dev = hisi_hba->dev; | 1998 | struct device *dev = hisi_hba->dev; |
1999 | u32 status, reg_val; | ||
1970 | int rc; | 2000 | int rc; |
1971 | u32 status; | ||
1972 | 2001 | ||
1973 | interrupt_disable_v3_hw(hisi_hba); | 2002 | interrupt_disable_v3_hw(hisi_hba); |
1974 | hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0); | 2003 | hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0); |
@@ -1978,14 +2007,32 @@ static int soft_reset_v3_hw(struct hisi_hba *hisi_hba) | |||
1978 | 2007 | ||
1979 | mdelay(10); | 2008 | mdelay(10); |
1980 | 2009 | ||
1981 | hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, 0x1); | 2010 | reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE + |
2011 | AM_CTRL_GLOBAL); | ||
2012 | reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK; | ||
2013 | hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + | ||
2014 | AM_CTRL_GLOBAL, reg_val); | ||
1982 | 2015 | ||
1983 | /* wait until bus idle */ | 2016 | /* wait until bus idle */ |
1984 | rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE + | 2017 | rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE + |
1985 | AM_CURR_TRANS_RETURN, status, | 2018 | AM_CURR_TRANS_RETURN, status, |
1986 | status == 0x3, 10, 100); | 2019 | status == 0x3, 10, 100); |
1987 | if (rc) { | 2020 | if (rc) { |
1988 | dev_err(dev, "axi bus is not idle, rc = %d\n", rc); | 2021 | dev_err(dev, "axi bus is not idle, rc=%d\n", rc); |
2022 | return rc; | ||
2023 | } | ||
2024 | |||
2025 | return 0; | ||
2026 | } | ||
2027 | |||
2028 | static int soft_reset_v3_hw(struct hisi_hba *hisi_hba) | ||
2029 | { | ||
2030 | struct device *dev = hisi_hba->dev; | ||
2031 | int rc; | ||
2032 | |||
2033 | rc = disable_host_v3_hw(hisi_hba); | ||
2034 | if (rc) { | ||
2035 | dev_err(dev, "soft reset: disable host failed rc=%d\n", rc); | ||
1989 | return rc; | 2036 | return rc; |
1990 | } | 2037 | } |
1991 | 2038 | ||
@@ -2433,6 +2480,41 @@ static pci_ers_result_t hisi_sas_slot_reset_v3_hw(struct pci_dev *pdev) | |||
2433 | return PCI_ERS_RESULT_DISCONNECT; | 2480 | return PCI_ERS_RESULT_DISCONNECT; |
2434 | } | 2481 | } |
2435 | 2482 | ||
2483 | static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev) | ||
2484 | { | ||
2485 | struct sas_ha_struct *sha = pci_get_drvdata(pdev); | ||
2486 | struct hisi_hba *hisi_hba = sha->lldd_ha; | ||
2487 | struct device *dev = hisi_hba->dev; | ||
2488 | int rc; | ||
2489 | |||
2490 | dev_info(dev, "FLR prepare\n"); | ||
2491 | set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); | ||
2492 | hisi_sas_controller_reset_prepare(hisi_hba); | ||
2493 | |||
2494 | rc = disable_host_v3_hw(hisi_hba); | ||
2495 | if (rc) | ||
2496 | dev_err(dev, "FLR: disable host failed rc=%d\n", rc); | ||
2497 | } | ||
2498 | |||
2499 | static void hisi_sas_reset_done_v3_hw(struct pci_dev *pdev) | ||
2500 | { | ||
2501 | struct sas_ha_struct *sha = pci_get_drvdata(pdev); | ||
2502 | struct hisi_hba *hisi_hba = sha->lldd_ha; | ||
2503 | struct device *dev = hisi_hba->dev; | ||
2504 | int rc; | ||
2505 | |||
2506 | hisi_sas_init_mem(hisi_hba); | ||
2507 | |||
2508 | rc = hw_init_v3_hw(hisi_hba); | ||
2509 | if (rc) { | ||
2510 | dev_err(dev, "FLR: hw init failed rc=%d\n", rc); | ||
2511 | return; | ||
2512 | } | ||
2513 | |||
2514 | hisi_sas_controller_reset_done(hisi_hba); | ||
2515 | dev_info(dev, "FLR done\n"); | ||
2516 | } | ||
2517 | |||
2436 | enum { | 2518 | enum { |
2437 | /* instances of the controller */ | 2519 | /* instances of the controller */ |
2438 | hip08, | 2520 | hip08, |
@@ -2444,38 +2526,24 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state) | |||
2444 | struct hisi_hba *hisi_hba = sha->lldd_ha; | 2526 | struct hisi_hba *hisi_hba = sha->lldd_ha; |
2445 | struct device *dev = hisi_hba->dev; | 2527 | struct device *dev = hisi_hba->dev; |
2446 | struct Scsi_Host *shost = hisi_hba->shost; | 2528 | struct Scsi_Host *shost = hisi_hba->shost; |
2447 | u32 device_state, status; | 2529 | u32 device_state; |
2448 | int rc; | 2530 | int rc; |
2449 | u32 reg_val; | ||
2450 | 2531 | ||
2451 | if (!pdev->pm_cap) { | 2532 | if (!pdev->pm_cap) { |
2452 | dev_err(dev, "PCI PM not supported\n"); | 2533 | dev_err(dev, "PCI PM not supported\n"); |
2453 | return -ENODEV; | 2534 | return -ENODEV; |
2454 | } | 2535 | } |
2455 | 2536 | ||
2456 | set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); | 2537 | if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) |
2538 | return -1; | ||
2539 | |||
2457 | scsi_block_requests(shost); | 2540 | scsi_block_requests(shost); |
2458 | set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); | 2541 | set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); |
2459 | flush_workqueue(hisi_hba->wq); | 2542 | flush_workqueue(hisi_hba->wq); |
2460 | /* disable DQ/PHY/bus */ | ||
2461 | interrupt_disable_v3_hw(hisi_hba); | ||
2462 | hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0); | ||
2463 | hisi_sas_kill_tasklets(hisi_hba); | ||
2464 | |||
2465 | hisi_sas_stop_phys(hisi_hba); | ||
2466 | 2543 | ||
2467 | reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE + | 2544 | rc = disable_host_v3_hw(hisi_hba); |
2468 | AM_CTRL_GLOBAL); | ||
2469 | reg_val |= 0x1; | ||
2470 | hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + | ||
2471 | AM_CTRL_GLOBAL, reg_val); | ||
2472 | |||
2473 | /* wait until bus idle */ | ||
2474 | rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE + | ||
2475 | AM_CURR_TRANS_RETURN, status, | ||
2476 | status == 0x3, 10, 100); | ||
2477 | if (rc) { | 2545 | if (rc) { |
2478 | dev_err(dev, "axi bus is not idle, rc = %d\n", rc); | 2546 | dev_err(dev, "PM suspend: disable host failed rc=%d\n", rc); |
2479 | clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); | 2547 | clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); |
2480 | clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); | 2548 | clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); |
2481 | scsi_unblock_requests(shost); | 2549 | scsi_unblock_requests(shost); |
@@ -2538,6 +2606,8 @@ static const struct pci_error_handlers hisi_sas_err_handler = { | |||
2538 | .error_detected = hisi_sas_error_detected_v3_hw, | 2606 | .error_detected = hisi_sas_error_detected_v3_hw, |
2539 | .mmio_enabled = hisi_sas_mmio_enabled_v3_hw, | 2607 | .mmio_enabled = hisi_sas_mmio_enabled_v3_hw, |
2540 | .slot_reset = hisi_sas_slot_reset_v3_hw, | 2608 | .slot_reset = hisi_sas_slot_reset_v3_hw, |
2609 | .reset_prepare = hisi_sas_reset_prepare_v3_hw, | ||
2610 | .reset_done = hisi_sas_reset_done_v3_hw, | ||
2541 | }; | 2611 | }; |
2542 | 2612 | ||
2543 | static struct pci_driver sas_v3_pci_driver = { | 2613 | static struct pci_driver sas_v3_pci_driver = { |
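Several of the v3 hunks rely on the same guard: HISI_SAS_RESET_BIT is set (with test_and_set_bit() in the suspend path so only one reset runs at a time) before the controller is torn down, and broadcast/phy events are ignored while it is set. A compact sketch of that flag-guard idiom, with illustrative names:

#include <linux/types.h>
#include <linux/bitops.h>

#define DEMO_RESET_BIT	0

/* Only one reset may run at a time; later callers back off. */
static int demo_begin_reset(unsigned long *flags)
{
	if (test_and_set_bit(DEMO_RESET_BIT, flags))
		return -EBUSY;	/* a reset is already in progress */
	return 0;
}

/* Event paths drop work while a reset is in flight. */
static bool demo_should_ignore_event(unsigned long *flags)
{
	return test_bit(DEMO_RESET_BIT, flags);
}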
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 3771e59a9fae..f02dcc875a09 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c | |||
@@ -563,6 +563,38 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost) | |||
563 | } | 563 | } |
564 | EXPORT_SYMBOL(scsi_host_get); | 564 | EXPORT_SYMBOL(scsi_host_get); |
565 | 565 | ||
566 | struct scsi_host_mq_in_flight { | ||
567 | int cnt; | ||
568 | }; | ||
569 | |||
570 | static void scsi_host_check_in_flight(struct request *rq, void *data, | ||
571 | bool reserved) | ||
572 | { | ||
573 | struct scsi_host_mq_in_flight *in_flight = data; | ||
574 | |||
575 | if (blk_mq_request_started(rq)) | ||
576 | in_flight->cnt++; | ||
577 | } | ||
578 | |||
579 | /** | ||
580 | * scsi_host_busy - Return the host busy counter | ||
581 | * @shost: Pointer to Scsi_Host to check. | ||
582 | **/ | ||
583 | int scsi_host_busy(struct Scsi_Host *shost) | ||
584 | { | ||
585 | struct scsi_host_mq_in_flight in_flight = { | ||
586 | .cnt = 0, | ||
587 | }; | ||
588 | |||
589 | if (!shost->use_blk_mq) | ||
590 | return atomic_read(&shost->host_busy); | ||
591 | |||
592 | blk_mq_tagset_busy_iter(&shost->tag_set, scsi_host_check_in_flight, | ||
593 | &in_flight); | ||
594 | return in_flight.cnt; | ||
595 | } | ||
596 | EXPORT_SYMBOL(scsi_host_busy); | ||
597 | |||
566 | /** | 598 | /** |
567 | * scsi_host_put - dec a Scsi_Host ref count | 599 | * scsi_host_put - dec a Scsi_Host ref count |
568 | * @shost: Pointer to Scsi_Host to dec. | 600 | * @shost: Pointer to Scsi_Host to dec. |
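scsi_host_busy() gives callers a single way to ask how many commands a host is currently processing: the legacy path reads the atomic host_busy counter, while the blk-mq path counts started requests by iterating the tag set. A hedged example of a caller; the helper name is made up for illustration:

#include <scsi/scsi_host.h>

/* Illustrative helper: log the current host load, e.g. from an
 * error-handling or debug path. */
static void demo_log_host_load(struct Scsi_Host *shost)
{
	shost_printk(KERN_INFO, shost, "%d command(s) in flight\n",
		     scsi_host_busy(shost));
}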
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index daefe8172b04..b64ca977825d 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c | |||
@@ -1322,7 +1322,7 @@ static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg, | |||
1322 | 1322 | ||
1323 | /** | 1323 | /** |
1324 | * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields | 1324 | * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields |
1325 | * @scmd: Scsi_Cmnd with the scatterlist | 1325 | * @scmd: struct scsi_cmnd with the scatterlist |
1326 | * @evt: ibmvfc event struct | 1326 | * @evt: ibmvfc event struct |
1327 | * @vfc_cmd: vfc_cmd that contains the memory descriptor | 1327 | * @vfc_cmd: vfc_cmd that contains the memory descriptor |
1328 | * @dev: device for which to map dma memory | 1328 | * @dev: device for which to map dma memory |
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 17df76f0be3c..9df8a1a2299c 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c | |||
@@ -93,7 +93,7 @@ static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT; | |||
93 | static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2; | 93 | static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2; |
94 | static int fast_fail = 1; | 94 | static int fast_fail = 1; |
95 | static int client_reserve = 1; | 95 | static int client_reserve = 1; |
96 | static char partition_name[97] = "UNKNOWN"; | 96 | static char partition_name[96] = "UNKNOWN"; |
97 | static unsigned int partition_number = -1; | 97 | static unsigned int partition_number = -1; |
98 | static LIST_HEAD(ibmvscsi_head); | 98 | static LIST_HEAD(ibmvscsi_head); |
99 | 99 | ||
@@ -262,7 +262,7 @@ static void gather_partition_info(void) | |||
262 | 262 | ||
263 | ppartition_name = of_get_property(of_root, "ibm,partition-name", NULL); | 263 | ppartition_name = of_get_property(of_root, "ibm,partition-name", NULL); |
264 | if (ppartition_name) | 264 | if (ppartition_name) |
265 | strncpy(partition_name, ppartition_name, | 265 | strlcpy(partition_name, ppartition_name, |
266 | sizeof(partition_name)); | 266 | sizeof(partition_name)); |
267 | p_number_ptr = of_get_property(of_root, "ibm,partition-no", NULL); | 267 | p_number_ptr = of_get_property(of_root, "ibm,partition-no", NULL); |
268 | if (p_number_ptr) | 268 | if (p_number_ptr) |
@@ -681,7 +681,7 @@ static int map_sg_list(struct scsi_cmnd *cmd, int nseg, | |||
681 | 681 | ||
682 | /** | 682 | /** |
683 | * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields | 683 | * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields |
684 | * @cmd: Scsi_Cmnd with the scatterlist | 684 | * @cmd: struct scsi_cmnd with the scatterlist |
685 | * @srp_cmd: srp_cmd that contains the memory descriptor | 685 | * @srp_cmd: srp_cmd that contains the memory descriptor |
686 | * @dev: device for which to map dma memory | 686 | * @dev: device for which to map dma memory |
687 | * | 687 | * |
@@ -1274,14 +1274,12 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata) | |||
1274 | if (hostdata->client_migrated) | 1274 | if (hostdata->client_migrated) |
1275 | hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED); | 1275 | hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED); |
1276 | 1276 | ||
1277 | strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev), | 1277 | strlcpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev), |
1278 | sizeof(hostdata->caps.name)); | 1278 | sizeof(hostdata->caps.name)); |
1279 | hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0'; | ||
1280 | 1279 | ||
1281 | location = of_get_property(of_node, "ibm,loc-code", NULL); | 1280 | location = of_get_property(of_node, "ibm,loc-code", NULL); |
1282 | location = location ? location : dev_name(hostdata->dev); | 1281 | location = location ? location : dev_name(hostdata->dev); |
1283 | strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc)); | 1282 | strlcpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc)); |
1284 | hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0'; | ||
1285 | 1283 | ||
1286 | req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE); | 1284 | req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE); |
1287 | req->buffer = cpu_to_be64(hostdata->caps_addr); | 1285 | req->buffer = cpu_to_be64(hostdata->caps_addr); |
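The strncpy() to strlcpy() conversions also remove the manual NUL-termination lines, because strlcpy() always terminates the destination (truncating long sources). A small illustrative sketch:

#include <linux/string.h>

/* Illustrative only: with an 8-byte destination and a longer source,
 * strlcpy() copies 7 characters and writes the terminating NUL itself,
 * so no "dst[dst_len - 1] = '\0';" follow-up is required. */
static void demo_copy_name(char *dst, size_t dst_len, const char *src)
{
	strlcpy(dst, src, dst_len);	/* always NUL-terminated */
}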
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index c3a76af9f5fa..fac377320158 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | |||
@@ -2233,7 +2233,7 @@ static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport) | |||
2233 | return -ENOMEM; | 2233 | return -ENOMEM; |
2234 | } | 2234 | } |
2235 | 2235 | ||
2236 | nexus->se_sess = target_alloc_session(&tport->se_tpg, 0, 0, | 2236 | nexus->se_sess = target_setup_session(&tport->se_tpg, 0, 0, |
2237 | TARGET_PROT_NORMAL, name, nexus, | 2237 | TARGET_PROT_NORMAL, name, nexus, |
2238 | NULL); | 2238 | NULL); |
2239 | if (IS_ERR(nexus->se_sess)) { | 2239 | if (IS_ERR(nexus->se_sess)) { |
@@ -2267,8 +2267,7 @@ static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport) | |||
2267 | * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port | 2267 | * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port |
2268 | */ | 2268 | */ |
2269 | target_wait_for_sess_cmds(se_sess); | 2269 | target_wait_for_sess_cmds(se_sess); |
2270 | transport_deregister_session_configfs(se_sess); | 2270 | target_remove_session(se_sess); |
2271 | transport_deregister_session(se_sess); | ||
2272 | tport->ibmv_nexus = NULL; | 2271 | tport->ibmv_nexus = NULL; |
2273 | kfree(nexus); | 2272 | kfree(nexus); |
2274 | 2273 | ||
@@ -3928,7 +3927,6 @@ static void ibmvscsis_drop_tport(struct se_wwn *wwn) | |||
3928 | } | 3927 | } |
3929 | 3928 | ||
3930 | static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn, | 3929 | static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn, |
3931 | struct config_group *group, | ||
3932 | const char *name) | 3930 | const char *name) |
3933 | { | 3931 | { |
3934 | struct ibmvscsis_tport *tport = | 3932 | struct ibmvscsis_tport *tport = |
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c index 87c94191033b..8c6627bc8a39 100644 --- a/drivers/scsi/imm.c +++ b/drivers/scsi/imm.c | |||
@@ -892,7 +892,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd) | |||
892 | /* Check for optional message byte */ | 892 | /* Check for optional message byte */ |
893 | if (imm_wait(dev) == (unsigned char) 0xb8) | 893 | if (imm_wait(dev) == (unsigned char) 0xb8) |
894 | imm_in(dev, &h, 1); | 894 | imm_in(dev, &h, 1); |
895 | cmd->result = (DID_OK << 16) + (l & STATUS_MASK); | 895 | cmd->result = (DID_OK << 16) | (l & STATUS_MASK); |
896 | } | 896 | } |
897 | if ((dev->mode == IMM_NIBBLE) || (dev->mode == IMM_PS2)) { | 897 | if ((dev->mode == IMM_NIBBLE) || (dev->mode == IMM_PS2)) { |
898 | w_ctr(ppb, 0x4); | 898 | w_ctr(ppb, 0x4); |
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 02d65dce74e5..f2ec80b0ffc0 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
@@ -2412,6 +2412,28 @@ static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg, | |||
2412 | } | 2412 | } |
2413 | 2413 | ||
2414 | /** | 2414 | /** |
2415 | * ipr_log_sis64_service_required_error - Log a sis64 service required error. | ||
2416 | * @ioa_cfg: ioa config struct | ||
2417 | * @hostrcb: hostrcb struct | ||
2418 | * | ||
2419 | * Return value: | ||
2420 | * none | ||
2421 | **/ | ||
2422 | static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg, | ||
2423 | struct ipr_hostrcb *hostrcb) | ||
2424 | { | ||
2425 | struct ipr_hostrcb_type_41_error *error; | ||
2426 | |||
2427 | error = &hostrcb->hcam.u.error64.u.type_41_error; | ||
2428 | |||
2429 | error->failure_reason[sizeof(error->failure_reason) - 1] = '\0'; | ||
2430 | ipr_err("Primary Failure Reason: %s\n", error->failure_reason); | ||
2431 | ipr_log_hex_data(ioa_cfg, error->data, | ||
2432 | be32_to_cpu(hostrcb->hcam.length) - | ||
2433 | (offsetof(struct ipr_hostrcb_error, u) + | ||
2434 | offsetof(struct ipr_hostrcb_type_41_error, data))); | ||
2435 | } | ||
2436 | /** | ||
2415 | * ipr_log_generic_error - Log an adapter error. | 2437 | * ipr_log_generic_error - Log an adapter error. |
2416 | * @ioa_cfg: ioa config struct | 2438 | * @ioa_cfg: ioa config struct |
2417 | * @hostrcb: hostrcb struct | 2439 | * @hostrcb: hostrcb struct |
@@ -2586,6 +2608,9 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg, | |||
2586 | case IPR_HOST_RCB_OVERLAY_ID_30: | 2608 | case IPR_HOST_RCB_OVERLAY_ID_30: |
2587 | ipr_log_sis64_fabric_error(ioa_cfg, hostrcb); | 2609 | ipr_log_sis64_fabric_error(ioa_cfg, hostrcb); |
2588 | break; | 2610 | break; |
2611 | case IPR_HOST_RCB_OVERLAY_ID_41: | ||
2612 | ipr_log_sis64_service_required_error(ioa_cfg, hostrcb); | ||
2613 | break; | ||
2589 | case IPR_HOST_RCB_OVERLAY_ID_1: | 2614 | case IPR_HOST_RCB_OVERLAY_ID_1: |
2590 | case IPR_HOST_RCB_OVERLAY_ID_DEFAULT: | 2615 | case IPR_HOST_RCB_OVERLAY_ID_DEFAULT: |
2591 | default: | 2616 | default: |
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index 93570734cbfb..68afbbde54d3 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h | |||
@@ -1135,6 +1135,11 @@ struct ipr_hostrcb_type_30_error { | |||
1135 | struct ipr_hostrcb64_fabric_desc desc[1]; | 1135 | struct ipr_hostrcb64_fabric_desc desc[1]; |
1136 | }__attribute__((packed, aligned (4))); | 1136 | }__attribute__((packed, aligned (4))); |
1137 | 1137 | ||
1138 | struct ipr_hostrcb_type_41_error { | ||
1139 | u8 failure_reason[64]; | ||
1140 | __be32 data[200]; | ||
1141 | }__attribute__((packed, aligned (4))); | ||
1142 | |||
1138 | struct ipr_hostrcb_error { | 1143 | struct ipr_hostrcb_error { |
1139 | __be32 fd_ioasc; | 1144 | __be32 fd_ioasc; |
1140 | struct ipr_res_addr fd_res_addr; | 1145 | struct ipr_res_addr fd_res_addr; |
@@ -1173,6 +1178,7 @@ struct ipr_hostrcb64_error { | |||
1173 | struct ipr_hostrcb_type_23_error type_23_error; | 1178 | struct ipr_hostrcb_type_23_error type_23_error; |
1174 | struct ipr_hostrcb_type_24_error type_24_error; | 1179 | struct ipr_hostrcb_type_24_error type_24_error; |
1175 | struct ipr_hostrcb_type_30_error type_30_error; | 1180 | struct ipr_hostrcb_type_30_error type_30_error; |
1181 | struct ipr_hostrcb_type_41_error type_41_error; | ||
1176 | } u; | 1182 | } u; |
1177 | }__attribute__((packed, aligned (8))); | 1183 | }__attribute__((packed, aligned (8))); |
1178 | 1184 | ||
@@ -1218,6 +1224,7 @@ struct ipr_hcam { | |||
1218 | #define IPR_HOST_RCB_OVERLAY_ID_24 0x24 | 1224 | #define IPR_HOST_RCB_OVERLAY_ID_24 0x24 |
1219 | #define IPR_HOST_RCB_OVERLAY_ID_26 0x26 | 1225 | #define IPR_HOST_RCB_OVERLAY_ID_26 0x26 |
1220 | #define IPR_HOST_RCB_OVERLAY_ID_30 0x30 | 1226 | #define IPR_HOST_RCB_OVERLAY_ID_30 0x30 |
1227 | #define IPR_HOST_RCB_OVERLAY_ID_41 0x41 | ||
1221 | #define IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF | 1228 | #define IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF |
1222 | 1229 | ||
1223 | u8 reserved1[3]; | 1230 | u8 reserved1[3]; |
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 3f3569ec5ce3..f969a71348ef 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c | |||
@@ -59,34 +59,25 @@ static void fc_disc_restart(struct fc_disc *); | |||
59 | /** | 59 | /** |
60 | * fc_disc_stop_rports() - Delete all the remote ports associated with the lport | 60 | * fc_disc_stop_rports() - Delete all the remote ports associated with the lport |
61 | * @disc: The discovery job to stop remote ports on | 61 | * @disc: The discovery job to stop remote ports on |
62 | * | ||
63 | * Locking Note: This function expects that the lport mutex is locked before | ||
64 | * calling it. | ||
65 | */ | 62 | */ |
66 | static void fc_disc_stop_rports(struct fc_disc *disc) | 63 | static void fc_disc_stop_rports(struct fc_disc *disc) |
67 | { | 64 | { |
68 | struct fc_lport *lport; | ||
69 | struct fc_rport_priv *rdata; | 65 | struct fc_rport_priv *rdata; |
70 | 66 | ||
71 | lport = fc_disc_lport(disc); | 67 | lockdep_assert_held(&disc->disc_mutex); |
72 | 68 | ||
73 | rcu_read_lock(); | 69 | list_for_each_entry(rdata, &disc->rports, peers) { |
74 | list_for_each_entry_rcu(rdata, &disc->rports, peers) { | ||
75 | if (kref_get_unless_zero(&rdata->kref)) { | 70 | if (kref_get_unless_zero(&rdata->kref)) { |
76 | fc_rport_logoff(rdata); | 71 | fc_rport_logoff(rdata); |
77 | kref_put(&rdata->kref, fc_rport_destroy); | 72 | kref_put(&rdata->kref, fc_rport_destroy); |
78 | } | 73 | } |
79 | } | 74 | } |
80 | rcu_read_unlock(); | ||
81 | } | 75 | } |
82 | 76 | ||
83 | /** | 77 | /** |
84 | * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN) | 78 | * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN) |
85 | * @disc: The discovery object to which the RSCN applies | 79 | * @disc: The discovery object to which the RSCN applies |
86 | * @fp: The RSCN frame | 80 | * @fp: The RSCN frame |
87 | * | ||
88 | * Locking Note: This function expects that the disc_mutex is locked | ||
89 | * before it is called. | ||
90 | */ | 81 | */ |
91 | static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp) | 82 | static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp) |
92 | { | 83 | { |
@@ -101,6 +92,8 @@ static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp) | |||
101 | LIST_HEAD(disc_ports); | 92 | LIST_HEAD(disc_ports); |
102 | struct fc_disc_port *dp, *next; | 93 | struct fc_disc_port *dp, *next; |
103 | 94 | ||
95 | lockdep_assert_held(&disc->disc_mutex); | ||
96 | |||
104 | lport = fc_disc_lport(disc); | 97 | lport = fc_disc_lport(disc); |
105 | 98 | ||
106 | FC_DISC_DBG(disc, "Received an RSCN event\n"); | 99 | FC_DISC_DBG(disc, "Received an RSCN event\n"); |
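Replacing the "Locking Note:" comments with lockdep_assert_held() turns a documented precondition into a runtime check: with lockdep enabled, calling these helpers without disc_mutex held now warns instead of silently racing. The same pattern, sketched on a made-up helper:

#include <linux/lockdep.h>
#include <linux/mutex.h>

struct demo_disc {
	struct mutex disc_mutex;
	int pending;
};

/* Illustrative: the caller must hold disc_mutex; lockdep verifies it. */
static void demo_disc_restart(struct demo_disc *disc)
{
	lockdep_assert_held(&disc->disc_mutex);
	disc->pending = 1;
	/* ... work that relies on the mutex being held ... */
}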
@@ -220,12 +213,11 @@ static void fc_disc_recv_req(struct fc_lport *lport, struct fc_frame *fp) | |||
220 | /** | 213 | /** |
221 | * fc_disc_restart() - Restart discovery | 214 | * fc_disc_restart() - Restart discovery |
222 | * @disc: The discovery object to be restarted | 215 | * @disc: The discovery object to be restarted |
223 | * | ||
224 | * Locking Note: This function expects that the disc mutex | ||
225 | * is already locked. | ||
226 | */ | 216 | */ |
227 | static void fc_disc_restart(struct fc_disc *disc) | 217 | static void fc_disc_restart(struct fc_disc *disc) |
228 | { | 218 | { |
219 | lockdep_assert_held(&disc->disc_mutex); | ||
220 | |||
229 | if (!disc->disc_callback) | 221 | if (!disc->disc_callback) |
230 | return; | 222 | return; |
231 | 223 | ||
@@ -271,16 +263,13 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *, | |||
271 | * fc_disc_done() - Discovery has been completed | 263 | * fc_disc_done() - Discovery has been completed |
272 | * @disc: The discovery context | 264 | * @disc: The discovery context |
273 | * @event: The discovery completion status | 265 | * @event: The discovery completion status |
274 | * | ||
275 | * Locking Note: This function expects that the disc mutex is locked before | ||
276 | * it is called. The discovery callback is then made with the lock released, | ||
277 | * and the lock is re-taken before returning from this function | ||
278 | */ | 266 | */ |
279 | static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event) | 267 | static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event) |
280 | { | 268 | { |
281 | struct fc_lport *lport = fc_disc_lport(disc); | 269 | struct fc_lport *lport = fc_disc_lport(disc); |
282 | struct fc_rport_priv *rdata; | 270 | struct fc_rport_priv *rdata; |
283 | 271 | ||
272 | lockdep_assert_held(&disc->disc_mutex); | ||
284 | FC_DISC_DBG(disc, "Discovery complete\n"); | 273 | FC_DISC_DBG(disc, "Discovery complete\n"); |
285 | 274 | ||
286 | disc->pending = 0; | 275 | disc->pending = 0; |
@@ -294,9 +283,11 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event) | |||
294 | * discovery, reverify or log them in. Otherwise, log them out. | 283 | * discovery, reverify or log them in. Otherwise, log them out. |
295 | * Skip ports which were never discovered. These are the dNS port | 284 | * Skip ports which were never discovered. These are the dNS port |
296 | * and ports which were created by PLOGI. | 285 | * and ports which were created by PLOGI. |
286 | * | ||
287 | * We don't need to use the _rcu variant here as the rport list | ||
288 | * is protected by the disc mutex which is already held on entry. | ||
297 | */ | 289 | */ |
298 | rcu_read_lock(); | 290 | list_for_each_entry(rdata, &disc->rports, peers) { |
299 | list_for_each_entry_rcu(rdata, &disc->rports, peers) { | ||
300 | if (!kref_get_unless_zero(&rdata->kref)) | 291 | if (!kref_get_unless_zero(&rdata->kref)) |
301 | continue; | 292 | continue; |
302 | if (rdata->disc_id) { | 293 | if (rdata->disc_id) { |
@@ -307,7 +298,6 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event) | |||
307 | } | 298 | } |
308 | kref_put(&rdata->kref, fc_rport_destroy); | 299 | kref_put(&rdata->kref, fc_rport_destroy); |
309 | } | 300 | } |
310 | rcu_read_unlock(); | ||
311 | mutex_unlock(&disc->disc_mutex); | 301 | mutex_unlock(&disc->disc_mutex); |
312 | disc->disc_callback(lport, event); | 302 | disc->disc_callback(lport, event); |
313 | mutex_lock(&disc->disc_mutex); | 303 | mutex_lock(&disc->disc_mutex); |
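The fc_disc_done() hunk above also simplifies the rport walk: with disc_mutex held for the whole loop, and disc_mutex being what writers of disc->rports take, the rcu_read_lock()/list_for_each_entry_rcu() pair adds nothing and a plain list_for_each_entry() suffices. A sketch of that reasoning under the same assumption; the types and helpers below are placeholders rather than libfc code.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct example_rport {
	struct list_head peers;
	struct kref kref;
	int disc_id;
};

static void example_rport_release(struct kref *kref)
{
	kfree(container_of(kref, struct example_rport, kref));
}

/* Walk @rports with @disc_mutex held; the mutex excludes every list writer. */
static void example_scan_rports(struct list_head *rports, struct mutex *disc_mutex)
{
	struct example_rport *rdata;

	lockdep_assert_held(disc_mutex);

	list_for_each_entry(rdata, rports, peers) {
		if (!kref_get_unless_zero(&rdata->kref))
			continue;	/* entry is already being torn down */
		/* ... act on rdata->disc_id ... */
		kref_put(&rdata->kref, example_rport_release);
	}
}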
@@ -360,15 +350,14 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp) | |||
360 | /** | 350 | /** |
361 | * fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request | 351 | * fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request |
362 | * @lport: The discovery context | 352 | * @lport: The discovery context |
363 | * | ||
364 | * Locking Note: This function expects that the disc_mutex is locked | ||
365 | * before it is called. | ||
366 | */ | 353 | */ |
367 | static void fc_disc_gpn_ft_req(struct fc_disc *disc) | 354 | static void fc_disc_gpn_ft_req(struct fc_disc *disc) |
368 | { | 355 | { |
369 | struct fc_frame *fp; | 356 | struct fc_frame *fp; |
370 | struct fc_lport *lport = fc_disc_lport(disc); | 357 | struct fc_lport *lport = fc_disc_lport(disc); |
371 | 358 | ||
359 | lockdep_assert_held(&disc->disc_mutex); | ||
360 | |||
372 | WARN_ON(!fc_lport_test_ready(lport)); | 361 | WARN_ON(!fc_lport_test_ready(lport)); |
373 | 362 | ||
374 | disc->pending = 1; | 363 | disc->pending = 1; |
@@ -658,8 +647,6 @@ out: | |||
658 | * @lport: The local port to initiate discovery on | 647 | * @lport: The local port to initiate discovery on |
659 | * @rdata: remote port private data | 648 | * @rdata: remote port private data |
660 | * | 649 | * |
661 | * Locking Note: This function expects that the disc_mutex is locked | ||
662 | * before it is called. | ||
663 | * On failure, an error code is returned. | 650 | * On failure, an error code is returned. |
664 | */ | 651 | */ |
665 | static int fc_disc_gpn_id_req(struct fc_lport *lport, | 652 | static int fc_disc_gpn_id_req(struct fc_lport *lport, |
@@ -667,6 +654,7 @@ static int fc_disc_gpn_id_req(struct fc_lport *lport, | |||
667 | { | 654 | { |
668 | struct fc_frame *fp; | 655 | struct fc_frame *fp; |
669 | 656 | ||
657 | lockdep_assert_held(&lport->disc.disc_mutex); | ||
670 | fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + | 658 | fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + |
671 | sizeof(struct fc_ns_fid)); | 659 | sizeof(struct fc_ns_fid)); |
672 | if (!fp) | 660 | if (!fp) |
@@ -683,14 +671,13 @@ static int fc_disc_gpn_id_req(struct fc_lport *lport, | |||
683 | * fc_disc_single() - Discover the directory information for a single target | 671 | * fc_disc_single() - Discover the directory information for a single target |
684 | * @lport: The local port the remote port is associated with | 672 | * @lport: The local port the remote port is associated with |
685 | * @dp: The port to rediscover | 673 | * @dp: The port to rediscover |
686 | * | ||
687 | * Locking Note: This function expects that the disc_mutex is locked | ||
688 | * before it is called. | ||
689 | */ | 674 | */ |
690 | static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp) | 675 | static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp) |
691 | { | 676 | { |
692 | struct fc_rport_priv *rdata; | 677 | struct fc_rport_priv *rdata; |
693 | 678 | ||
679 | lockdep_assert_held(&lport->disc.disc_mutex); | ||
680 | |||
694 | rdata = fc_rport_create(lport, dp->port_id); | 681 | rdata = fc_rport_create(lport, dp->port_id); |
695 | if (!rdata) | 682 | if (!rdata) |
696 | return -ENOMEM; | 683 | return -ENOMEM; |
@@ -708,7 +695,9 @@ static void fc_disc_stop(struct fc_lport *lport) | |||
708 | 695 | ||
709 | if (disc->pending) | 696 | if (disc->pending) |
710 | cancel_delayed_work_sync(&disc->disc_work); | 697 | cancel_delayed_work_sync(&disc->disc_work); |
698 | mutex_lock(&disc->disc_mutex); | ||
711 | fc_disc_stop_rports(disc); | 699 | fc_disc_stop_rports(disc); |
700 | mutex_unlock(&disc->disc_mutex); | ||
712 | } | 701 | } |
713 | 702 | ||
714 | /** | 703 | /** |
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 21be672679fb..be83590ed955 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c | |||
@@ -237,14 +237,13 @@ static const char *fc_lport_state(struct fc_lport *lport) | |||
237 | * @remote_fid: The FID of the ptp rport | 237 | * @remote_fid: The FID of the ptp rport |
238 | * @remote_wwpn: The WWPN of the ptp rport | 238 | * @remote_wwpn: The WWPN of the ptp rport |
239 | * @remote_wwnn: The WWNN of the ptp rport | 239 | * @remote_wwnn: The WWNN of the ptp rport |
240 | * | ||
241 | * Locking Note: The lport lock is expected to be held before calling | ||
242 | * this routine. | ||
243 | */ | 240 | */ |
244 | static void fc_lport_ptp_setup(struct fc_lport *lport, | 241 | static void fc_lport_ptp_setup(struct fc_lport *lport, |
245 | u32 remote_fid, u64 remote_wwpn, | 242 | u32 remote_fid, u64 remote_wwpn, |
246 | u64 remote_wwnn) | 243 | u64 remote_wwnn) |
247 | { | 244 | { |
245 | lockdep_assert_held(&lport->lp_mutex); | ||
246 | |||
248 | if (lport->ptp_rdata) { | 247 | if (lport->ptp_rdata) { |
249 | fc_rport_logoff(lport->ptp_rdata); | 248 | fc_rport_logoff(lport->ptp_rdata); |
250 | kref_put(&lport->ptp_rdata->kref, fc_rport_destroy); | 249 | kref_put(&lport->ptp_rdata->kref, fc_rport_destroy); |
@@ -403,12 +402,11 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type) | |||
403 | * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report. | 402 | * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report. |
404 | * @lport: Fibre Channel local port receiving the RLIR | 403 | * @lport: Fibre Channel local port receiving the RLIR |
405 | * @fp: The RLIR request frame | 404 | * @fp: The RLIR request frame |
406 | * | ||
407 | * Locking Note: The lport lock is expected to be held before calling | ||
408 | * this function. | ||
409 | */ | 405 | */ |
410 | static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp) | 406 | static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp) |
411 | { | 407 | { |
408 | lockdep_assert_held(&lport->lp_mutex); | ||
409 | |||
412 | FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n", | 410 | FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n", |
413 | fc_lport_state(lport)); | 411 | fc_lport_state(lport)); |
414 | 412 | ||
@@ -420,9 +418,6 @@ static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp) | |||
420 | * fc_lport_recv_echo_req() - Handle received ECHO request | 418 | * fc_lport_recv_echo_req() - Handle received ECHO request |
421 | * @lport: The local port receiving the ECHO | 419 | * @lport: The local port receiving the ECHO |
422 | * @fp: ECHO request frame | 420 | * @fp: ECHO request frame |
423 | * | ||
424 | * Locking Note: The lport lock is expected to be held before calling | ||
425 | * this function. | ||
426 | */ | 421 | */ |
427 | static void fc_lport_recv_echo_req(struct fc_lport *lport, | 422 | static void fc_lport_recv_echo_req(struct fc_lport *lport, |
428 | struct fc_frame *in_fp) | 423 | struct fc_frame *in_fp) |
@@ -432,6 +427,8 @@ static void fc_lport_recv_echo_req(struct fc_lport *lport, | |||
432 | void *pp; | 427 | void *pp; |
433 | void *dp; | 428 | void *dp; |
434 | 429 | ||
430 | lockdep_assert_held(&lport->lp_mutex); | ||
431 | |||
435 | FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n", | 432 | FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n", |
436 | fc_lport_state(lport)); | 433 | fc_lport_state(lport)); |
437 | 434 | ||
@@ -456,9 +453,6 @@ static void fc_lport_recv_echo_req(struct fc_lport *lport, | |||
456 | * fc_lport_recv_rnid_req() - Handle received Request Node ID data request | 453 | * fc_lport_recv_rnid_req() - Handle received Request Node ID data request |
457 | * @lport: The local port receiving the RNID | 454 | * @lport: The local port receiving the RNID |
458 | * @fp: The RNID request frame | 455 | * @fp: The RNID request frame |
459 | * | ||
460 | * Locking Note: The lport lock is expected to be held before calling | ||
461 | * this function. | ||
462 | */ | 456 | */ |
463 | static void fc_lport_recv_rnid_req(struct fc_lport *lport, | 457 | static void fc_lport_recv_rnid_req(struct fc_lport *lport, |
464 | struct fc_frame *in_fp) | 458 | struct fc_frame *in_fp) |
@@ -474,6 +468,8 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport, | |||
474 | u8 fmt; | 468 | u8 fmt; |
475 | size_t len; | 469 | size_t len; |
476 | 470 | ||
471 | lockdep_assert_held(&lport->lp_mutex); | ||
472 | |||
477 | FC_LPORT_DBG(lport, "Received RNID request while in state %s\n", | 473 | FC_LPORT_DBG(lport, "Received RNID request while in state %s\n", |
478 | fc_lport_state(lport)); | 474 | fc_lport_state(lport)); |
479 | 475 | ||
@@ -515,12 +511,11 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport, | |||
515 | * fc_lport_recv_logo_req() - Handle received fabric LOGO request | 511 | * fc_lport_recv_logo_req() - Handle received fabric LOGO request |
516 | * @lport: The local port receiving the LOGO | 512 | * @lport: The local port receiving the LOGO |
517 | * @fp: The LOGO request frame | 513 | * @fp: The LOGO request frame |
518 | * | ||
519 | * Locking Note: The lport lock is expected to be held before calling | ||
520 | * this function. | ||
521 | */ | 514 | */ |
522 | static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) | 515 | static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) |
523 | { | 516 | { |
517 | lockdep_assert_held(&lport->lp_mutex); | ||
518 | |||
524 | fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL); | 519 | fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL); |
525 | fc_lport_enter_reset(lport); | 520 | fc_lport_enter_reset(lport); |
526 | fc_frame_free(fp); | 521 | fc_frame_free(fp); |
@@ -553,11 +548,11 @@ EXPORT_SYMBOL(fc_fabric_login); | |||
553 | /** | 548 | /** |
554 | * __fc_linkup() - Handler for transport linkup events | 549 | * __fc_linkup() - Handler for transport linkup events |
555 | * @lport: The lport whose link is up | 550 | * @lport: The lport whose link is up |
556 | * | ||
557 | * Locking: must be called with the lp_mutex held | ||
558 | */ | 551 | */ |
559 | void __fc_linkup(struct fc_lport *lport) | 552 | void __fc_linkup(struct fc_lport *lport) |
560 | { | 553 | { |
554 | lockdep_assert_held(&lport->lp_mutex); | ||
555 | |||
561 | if (!lport->link_up) { | 556 | if (!lport->link_up) { |
562 | lport->link_up = 1; | 557 | lport->link_up = 1; |
563 | 558 | ||
@@ -584,11 +579,11 @@ EXPORT_SYMBOL(fc_linkup); | |||
584 | /** | 579 | /** |
585 | * __fc_linkdown() - Handler for transport linkdown events | 580 | * __fc_linkdown() - Handler for transport linkdown events |
586 | * @lport: The lport whose link is down | 581 | * @lport: The lport whose link is down |
587 | * | ||
588 | * Locking: must be called with the lp_mutex held | ||
589 | */ | 582 | */ |
590 | void __fc_linkdown(struct fc_lport *lport) | 583 | void __fc_linkdown(struct fc_lport *lport) |
591 | { | 584 | { |
585 | lockdep_assert_held(&lport->lp_mutex); | ||
586 | |||
592 | if (lport->link_up) { | 587 | if (lport->link_up) { |
593 | lport->link_up = 0; | 588 | lport->link_up = 0; |
594 | fc_lport_enter_reset(lport); | 589 | fc_lport_enter_reset(lport); |
@@ -722,12 +717,11 @@ static void fc_lport_disc_callback(struct fc_lport *lport, | |||
722 | /** | 717 | /** |
723 | * fc_rport_enter_ready() - Enter the ready state and start discovery | 718 | * fc_rport_enter_ready() - Enter the ready state and start discovery |
724 | * @lport: The local port that is ready | 719 | * @lport: The local port that is ready |
725 | * | ||
726 | * Locking Note: The lport lock is expected to be held before calling | ||
727 | * this routine. | ||
728 | */ | 720 | */ |
729 | static void fc_lport_enter_ready(struct fc_lport *lport) | 721 | static void fc_lport_enter_ready(struct fc_lport *lport) |
730 | { | 722 | { |
723 | lockdep_assert_held(&lport->lp_mutex); | ||
724 | |||
731 | FC_LPORT_DBG(lport, "Entered READY from state %s\n", | 725 | FC_LPORT_DBG(lport, "Entered READY from state %s\n", |
732 | fc_lport_state(lport)); | 726 | fc_lport_state(lport)); |
733 | 727 | ||
@@ -745,13 +739,12 @@ static void fc_lport_enter_ready(struct fc_lport *lport) | |||
745 | * @lport: The local port which will have its Port ID set. | 739 | * @lport: The local port which will have its Port ID set. |
746 | * @port_id: The new port ID. | 740 | * @port_id: The new port ID. |
747 | * @fp: The frame containing the incoming request, or NULL. | 741 | * @fp: The frame containing the incoming request, or NULL. |
748 | * | ||
749 | * Locking Note: The lport lock is expected to be held before calling | ||
750 | * this function. | ||
751 | */ | 742 | */ |
752 | static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id, | 743 | static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id, |
753 | struct fc_frame *fp) | 744 | struct fc_frame *fp) |
754 | { | 745 | { |
746 | lockdep_assert_held(&lport->lp_mutex); | ||
747 | |||
755 | if (port_id) | 748 | if (port_id) |
756 | printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n", | 749 | printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n", |
757 | lport->host->host_no, port_id); | 750 | lport->host->host_no, port_id); |
@@ -801,9 +794,6 @@ EXPORT_SYMBOL(fc_lport_set_local_id); | |||
801 | * A received FLOGI request indicates a point-to-point connection. | 794 | * A received FLOGI request indicates a point-to-point connection. |
802 | * Accept it with the common service parameters indicating our N port. | 795 | * Accept it with the common service parameters indicating our N port. |
803 | * Set up to do a PLOGI if we have the higher-number WWPN. | 796 | * Set up to do a PLOGI if we have the higher-number WWPN. |
804 | * | ||
805 | * Locking Note: The lport lock is expected to be held before calling | ||
806 | * this function. | ||
807 | */ | 797 | */ |
808 | static void fc_lport_recv_flogi_req(struct fc_lport *lport, | 798 | static void fc_lport_recv_flogi_req(struct fc_lport *lport, |
809 | struct fc_frame *rx_fp) | 799 | struct fc_frame *rx_fp) |
@@ -816,6 +806,8 @@ static void fc_lport_recv_flogi_req(struct fc_lport *lport, | |||
816 | u32 remote_fid; | 806 | u32 remote_fid; |
817 | u32 local_fid; | 807 | u32 local_fid; |
818 | 808 | ||
809 | lockdep_assert_held(&lport->lp_mutex); | ||
810 | |||
819 | FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n", | 811 | FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n", |
820 | fc_lport_state(lport)); | 812 | fc_lport_state(lport)); |
821 | 813 | ||
@@ -1006,12 +998,11 @@ EXPORT_SYMBOL(fc_lport_reset); | |||
1006 | /** | 998 | /** |
1007 | * fc_lport_reset_locked() - Reset the local port w/ the lport lock held | 999 | * fc_lport_reset_locked() - Reset the local port w/ the lport lock held |
1008 | * @lport: The local port to be reset | 1000 | * @lport: The local port to be reset |
1009 | * | ||
1010 | * Locking Note: The lport lock is expected to be held before calling | ||
1011 | * this routine. | ||
1012 | */ | 1001 | */ |
1013 | static void fc_lport_reset_locked(struct fc_lport *lport) | 1002 | static void fc_lport_reset_locked(struct fc_lport *lport) |
1014 | { | 1003 | { |
1004 | lockdep_assert_held(&lport->lp_mutex); | ||
1005 | |||
1015 | if (lport->dns_rdata) { | 1006 | if (lport->dns_rdata) { |
1016 | fc_rport_logoff(lport->dns_rdata); | 1007 | fc_rport_logoff(lport->dns_rdata); |
1017 | lport->dns_rdata = NULL; | 1008 | lport->dns_rdata = NULL; |
@@ -1035,12 +1026,11 @@ static void fc_lport_reset_locked(struct fc_lport *lport) | |||
1035 | /** | 1026 | /** |
1036 | * fc_lport_enter_reset() - Reset the local port | 1027 | * fc_lport_enter_reset() - Reset the local port |
1037 | * @lport: The local port to be reset | 1028 | * @lport: The local port to be reset |
1038 | * | ||
1039 | * Locking Note: The lport lock is expected to be held before calling | ||
1040 | * this routine. | ||
1041 | */ | 1029 | */ |
1042 | static void fc_lport_enter_reset(struct fc_lport *lport) | 1030 | static void fc_lport_enter_reset(struct fc_lport *lport) |
1043 | { | 1031 | { |
1032 | lockdep_assert_held(&lport->lp_mutex); | ||
1033 | |||
1044 | FC_LPORT_DBG(lport, "Entered RESET state from %s state\n", | 1034 | FC_LPORT_DBG(lport, "Entered RESET state from %s state\n", |
1045 | fc_lport_state(lport)); | 1035 | fc_lport_state(lport)); |
1046 | 1036 | ||
@@ -1065,12 +1055,11 @@ static void fc_lport_enter_reset(struct fc_lport *lport) | |||
1065 | /** | 1055 | /** |
1066 | * fc_lport_enter_disabled() - Disable the local port | 1056 | * fc_lport_enter_disabled() - Disable the local port |
1067 | * @lport: The local port to be reset | 1057 | * @lport: The local port to be reset |
1068 | * | ||
1069 | * Locking Note: The lport lock is expected to be held before calling | ||
1070 | * this routine. | ||
1071 | */ | 1058 | */ |
1072 | static void fc_lport_enter_disabled(struct fc_lport *lport) | 1059 | static void fc_lport_enter_disabled(struct fc_lport *lport) |
1073 | { | 1060 | { |
1061 | lockdep_assert_held(&lport->lp_mutex); | ||
1062 | |||
1074 | FC_LPORT_DBG(lport, "Entered disabled state from %s state\n", | 1063 | FC_LPORT_DBG(lport, "Entered disabled state from %s state\n", |
1075 | fc_lport_state(lport)); | 1064 | fc_lport_state(lport)); |
1076 | 1065 | ||
@@ -1321,14 +1310,13 @@ err: | |||
1321 | /** | 1310 | /** |
1322 | * fc_lport_enter_scr() - Send a SCR (State Change Register) request | 1311 | * fc_lport_enter_scr() - Send a SCR (State Change Register) request |
1323 | * @lport: The local port to register for state changes | 1312 | * @lport: The local port to register for state changes |
1324 | * | ||
1325 | * Locking Note: The lport lock is expected to be held before calling | ||
1326 | * this routine. | ||
1327 | */ | 1313 | */ |
1328 | static void fc_lport_enter_scr(struct fc_lport *lport) | 1314 | static void fc_lport_enter_scr(struct fc_lport *lport) |
1329 | { | 1315 | { |
1330 | struct fc_frame *fp; | 1316 | struct fc_frame *fp; |
1331 | 1317 | ||
1318 | lockdep_assert_held(&lport->lp_mutex); | ||
1319 | |||
1332 | FC_LPORT_DBG(lport, "Entered SCR state from %s state\n", | 1320 | FC_LPORT_DBG(lport, "Entered SCR state from %s state\n", |
1333 | fc_lport_state(lport)); | 1321 | fc_lport_state(lport)); |
1334 | 1322 | ||
@@ -1349,9 +1337,6 @@ static void fc_lport_enter_scr(struct fc_lport *lport) | |||
1349 | /** | 1337 | /** |
1350 | * fc_lport_enter_ns() - register some object with the name server | 1338 | * fc_lport_enter_ns() - register some object with the name server |
1351 | * @lport: Fibre Channel local port to register | 1339 | * @lport: Fibre Channel local port to register |
1352 | * | ||
1353 | * Locking Note: The lport lock is expected to be held before calling | ||
1354 | * this routine. | ||
1355 | */ | 1340 | */ |
1356 | static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state) | 1341 | static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state) |
1357 | { | 1342 | { |
@@ -1360,6 +1345,8 @@ static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state) | |||
1360 | int size = sizeof(struct fc_ct_hdr); | 1345 | int size = sizeof(struct fc_ct_hdr); |
1361 | size_t len; | 1346 | size_t len; |
1362 | 1347 | ||
1348 | lockdep_assert_held(&lport->lp_mutex); | ||
1349 | |||
1363 | FC_LPORT_DBG(lport, "Entered %s state from %s state\n", | 1350 | FC_LPORT_DBG(lport, "Entered %s state from %s state\n", |
1364 | fc_lport_state_names[state], | 1351 | fc_lport_state_names[state], |
1365 | fc_lport_state(lport)); | 1352 | fc_lport_state(lport)); |
@@ -1419,14 +1406,13 @@ static struct fc_rport_operations fc_lport_rport_ops = { | |||
1419 | /** | 1406 | /** |
1420 | * fc_rport_enter_dns() - Create a fc_rport for the name server | 1407 | * fc_rport_enter_dns() - Create a fc_rport for the name server |
1421 | * @lport: The local port requesting a remote port for the name server | 1408 | * @lport: The local port requesting a remote port for the name server |
1422 | * | ||
1423 | * Locking Note: The lport lock is expected to be held before calling | ||
1424 | * this routine. | ||
1425 | */ | 1409 | */ |
1426 | static void fc_lport_enter_dns(struct fc_lport *lport) | 1410 | static void fc_lport_enter_dns(struct fc_lport *lport) |
1427 | { | 1411 | { |
1428 | struct fc_rport_priv *rdata; | 1412 | struct fc_rport_priv *rdata; |
1429 | 1413 | ||
1414 | lockdep_assert_held(&lport->lp_mutex); | ||
1415 | |||
1430 | FC_LPORT_DBG(lport, "Entered DNS state from %s state\n", | 1416 | FC_LPORT_DBG(lport, "Entered DNS state from %s state\n", |
1431 | fc_lport_state(lport)); | 1417 | fc_lport_state(lport)); |
1432 | 1418 | ||
@@ -1449,9 +1435,6 @@ err: | |||
1449 | /** | 1435 | /** |
1450 | * fc_lport_enter_ms() - management server commands | 1436 | * fc_lport_enter_ms() - management server commands |
1451 | * @lport: Fibre Channel local port to register | 1437 | * @lport: Fibre Channel local port to register |
1452 | * | ||
1453 | * Locking Note: The lport lock is expected to be held before calling | ||
1454 | * this routine. | ||
1455 | */ | 1438 | */ |
1456 | static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state) | 1439 | static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state) |
1457 | { | 1440 | { |
@@ -1461,6 +1444,8 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state) | |||
1461 | size_t len; | 1444 | size_t len; |
1462 | int numattrs; | 1445 | int numattrs; |
1463 | 1446 | ||
1447 | lockdep_assert_held(&lport->lp_mutex); | ||
1448 | |||
1464 | FC_LPORT_DBG(lport, "Entered %s state from %s state\n", | 1449 | FC_LPORT_DBG(lport, "Entered %s state from %s state\n", |
1465 | fc_lport_state_names[state], | 1450 | fc_lport_state_names[state], |
1466 | fc_lport_state(lport)); | 1451 | fc_lport_state(lport)); |
@@ -1536,14 +1521,13 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state) | |||
1536 | /** | 1521 | /** |
1537 | * fc_rport_enter_fdmi() - Create a fc_rport for the management server | 1522 | * fc_rport_enter_fdmi() - Create a fc_rport for the management server |
1538 | * @lport: The local port requesting a remote port for the management server | 1523 | * @lport: The local port requesting a remote port for the management server |
1539 | * | ||
1540 | * Locking Note: The lport lock is expected to be held before calling | ||
1541 | * this routine. | ||
1542 | */ | 1524 | */ |
1543 | static void fc_lport_enter_fdmi(struct fc_lport *lport) | 1525 | static void fc_lport_enter_fdmi(struct fc_lport *lport) |
1544 | { | 1526 | { |
1545 | struct fc_rport_priv *rdata; | 1527 | struct fc_rport_priv *rdata; |
1546 | 1528 | ||
1529 | lockdep_assert_held(&lport->lp_mutex); | ||
1530 | |||
1547 | FC_LPORT_DBG(lport, "Entered FDMI state from %s state\n", | 1531 | FC_LPORT_DBG(lport, "Entered FDMI state from %s state\n", |
1548 | fc_lport_state(lport)); | 1532 | fc_lport_state(lport)); |
1549 | 1533 | ||
@@ -1668,15 +1652,14 @@ EXPORT_SYMBOL(fc_lport_logo_resp); | |||
1668 | /** | 1652 | /** |
1669 | * fc_rport_enter_logo() - Logout of the fabric | 1653 | * fc_rport_enter_logo() - Logout of the fabric |
1670 | * @lport: The local port to be logged out | 1654 | * @lport: The local port to be logged out |
1671 | * | ||
1672 | * Locking Note: The lport lock is expected to be held before calling | ||
1673 | * this routine. | ||
1674 | */ | 1655 | */ |
1675 | static void fc_lport_enter_logo(struct fc_lport *lport) | 1656 | static void fc_lport_enter_logo(struct fc_lport *lport) |
1676 | { | 1657 | { |
1677 | struct fc_frame *fp; | 1658 | struct fc_frame *fp; |
1678 | struct fc_els_logo *logo; | 1659 | struct fc_els_logo *logo; |
1679 | 1660 | ||
1661 | lockdep_assert_held(&lport->lp_mutex); | ||
1662 | |||
1680 | FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n", | 1663 | FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n", |
1681 | fc_lport_state(lport)); | 1664 | fc_lport_state(lport)); |
1682 | 1665 | ||
@@ -1811,14 +1794,13 @@ EXPORT_SYMBOL(fc_lport_flogi_resp); | |||
1811 | /** | 1794 | /** |
1812 | * fc_rport_enter_flogi() - Send a FLOGI request to the fabric manager | 1795 | * fc_rport_enter_flogi() - Send a FLOGI request to the fabric manager |
1813 | * @lport: Fibre Channel local port to be logged in to the fabric | 1796 | * @lport: Fibre Channel local port to be logged in to the fabric |
1814 | * | ||
1815 | * Locking Note: The lport lock is expected to be held before calling | ||
1816 | * this routine. | ||
1817 | */ | 1797 | */ |
1818 | static void fc_lport_enter_flogi(struct fc_lport *lport) | 1798 | static void fc_lport_enter_flogi(struct fc_lport *lport) |
1819 | { | 1799 | { |
1820 | struct fc_frame *fp; | 1800 | struct fc_frame *fp; |
1821 | 1801 | ||
1802 | lockdep_assert_held(&lport->lp_mutex); | ||
1803 | |||
1822 | FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n", | 1804 | FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n", |
1823 | fc_lport_state(lport)); | 1805 | fc_lport_state(lport)); |
1824 | 1806 | ||
@@ -1962,9 +1944,6 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1962 | * @job: The BSG Passthrough job | 1944 | * @job: The BSG Passthrough job |
1963 | * @lport: The local port sending the request | 1945 | * @lport: The local port sending the request |
1964 | * @did: The destination port id | 1946 | * @did: The destination port id |
1965 | * | ||
1966 | * Locking Note: The lport lock is expected to be held before calling | ||
1967 | * this routine. | ||
1968 | */ | 1947 | */ |
1969 | static int fc_lport_els_request(struct bsg_job *job, | 1948 | static int fc_lport_els_request(struct bsg_job *job, |
1970 | struct fc_lport *lport, | 1949 | struct fc_lport *lport, |
@@ -1976,6 +1955,8 @@ static int fc_lport_els_request(struct bsg_job *job, | |||
1976 | char *pp; | 1955 | char *pp; |
1977 | int len; | 1956 | int len; |
1978 | 1957 | ||
1958 | lockdep_assert_held(&lport->lp_mutex); | ||
1959 | |||
1979 | fp = fc_frame_alloc(lport, job->request_payload.payload_len); | 1960 | fp = fc_frame_alloc(lport, job->request_payload.payload_len); |
1980 | if (!fp) | 1961 | if (!fp) |
1981 | return -ENOMEM; | 1962 | return -ENOMEM; |
@@ -2023,9 +2004,6 @@ static int fc_lport_els_request(struct bsg_job *job, | |||
2023 | * @lport: The local port sending the request | 2004 | * @lport: The local port sending the request |
2024 | * @did: The destination FC-ID | 2005 | * @did: The destination FC-ID |
2025 | * @tov: The timeout period to wait for the response | 2006 | * @tov: The timeout period to wait for the response |
2026 | * | ||
2027 | * Locking Note: The lport lock is expected to be held before calling | ||
2028 | * this routine. | ||
2029 | */ | 2007 | */ |
2030 | static int fc_lport_ct_request(struct bsg_job *job, | 2008 | static int fc_lport_ct_request(struct bsg_job *job, |
2031 | struct fc_lport *lport, u32 did, u32 tov) | 2009 | struct fc_lport *lport, u32 did, u32 tov) |
@@ -2036,6 +2014,8 @@ static int fc_lport_ct_request(struct bsg_job *job, | |||
2036 | struct fc_ct_req *ct; | 2014 | struct fc_ct_req *ct; |
2037 | size_t len; | 2015 | size_t len; |
2038 | 2016 | ||
2017 | lockdep_assert_held(&lport->lp_mutex); | ||
2018 | |||
2039 | fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + | 2019 | fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) + |
2040 | job->request_payload.payload_len); | 2020 | job->request_payload.payload_len); |
2041 | if (!fp) | 2021 | if (!fp) |
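One property worth keeping in mind for the lp_mutex conversions above: lockdep_assert_held() expands to nothing unless lockdep is configured, so these checks cost nothing in production kernels. If an always-on check were wanted instead, a heavier and weaker alternative is sketched below; this is not what the patch does, and mutex_is_locked() only reports that some task holds the mutex, not that the current one does. Names are illustrative.

#include <linux/bug.h>
#include <linux/mutex.h>

struct example_port {
	struct mutex lp_mutex;
};

static void example_enter_reset(struct example_port *port)
{
	/* Unconditional but imprecise: fires only if nobody holds the mutex at all. */
	WARN_ON_ONCE(!mutex_is_locked(&port->lp_mutex));
	/* ... reset the port ... */
}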
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 89b1f1af2fd4..372387a450df 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c | |||
@@ -136,13 +136,13 @@ EXPORT_SYMBOL(fc_rport_lookup); | |||
136 | * @ids: The identifiers for the new remote port | 136 | * @ids: The identifiers for the new remote port |
137 | * | 137 | * |
138 | * The remote port will start in the INIT state. | 138 | * The remote port will start in the INIT state. |
139 | * | ||
140 | * Locking note: must be called with the disc_mutex held. | ||
141 | */ | 139 | */ |
142 | struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id) | 140 | struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id) |
143 | { | 141 | { |
144 | struct fc_rport_priv *rdata; | 142 | struct fc_rport_priv *rdata; |
145 | 143 | ||
144 | lockdep_assert_held(&lport->disc.disc_mutex); | ||
145 | |||
146 | rdata = fc_rport_lookup(lport, port_id); | 146 | rdata = fc_rport_lookup(lport, port_id); |
147 | if (rdata) | 147 | if (rdata) |
148 | return rdata; | 148 | return rdata; |
@@ -184,6 +184,7 @@ void fc_rport_destroy(struct kref *kref) | |||
184 | struct fc_rport_priv *rdata; | 184 | struct fc_rport_priv *rdata; |
185 | 185 | ||
186 | rdata = container_of(kref, struct fc_rport_priv, kref); | 186 | rdata = container_of(kref, struct fc_rport_priv, kref); |
187 | WARN_ON(!list_empty(&rdata->peers)); | ||
187 | kfree_rcu(rdata, rcu); | 188 | kfree_rcu(rdata, rcu); |
188 | } | 189 | } |
189 | EXPORT_SYMBOL(fc_rport_destroy); | 190 | EXPORT_SYMBOL(fc_rport_destroy); |
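The fc_rport_destroy() hunk above adds a sanity check rather than an assertion: by the time the last reference drops and kfree_rcu() queues the memory for freeing, the rport must already have been unlinked from its discovery list, otherwise a later list walk could reach freed memory. A sketch of that check with illustrative names, not the libfc structures.

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_rport {
	struct kref kref;
	struct list_head peers;
	struct rcu_head rcu;
};

/* kref release callback: runs when the final reference is dropped. */
static void example_rport_destroy(struct kref *kref)
{
	struct example_rport *rdata = container_of(kref, struct example_rport, kref);

	/* The object must have left every list before it can be freed. */
	WARN_ON(!list_empty(&rdata->peers));
	kfree_rcu(rdata, rcu);
}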
@@ -245,12 +246,12 @@ static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp, | |||
245 | * fc_rport_state_enter() - Change the state of a remote port | 246 | * fc_rport_state_enter() - Change the state of a remote port |
246 | * @rdata: The remote port whose state should change | 247 | * @rdata: The remote port whose state should change |
247 | * @new: The new state | 248 | * @new: The new state |
248 | * | ||
249 | * Locking Note: Called with the rport lock held | ||
250 | */ | 249 | */ |
251 | static void fc_rport_state_enter(struct fc_rport_priv *rdata, | 250 | static void fc_rport_state_enter(struct fc_rport_priv *rdata, |
252 | enum fc_rport_state new) | 251 | enum fc_rport_state new) |
253 | { | 252 | { |
253 | lockdep_assert_held(&rdata->rp_mutex); | ||
254 | |||
254 | if (rdata->rp_state != new) | 255 | if (rdata->rp_state != new) |
255 | rdata->retries = 0; | 256 | rdata->retries = 0; |
256 | rdata->rp_state = new; | 257 | rdata->rp_state = new; |
@@ -469,8 +470,6 @@ EXPORT_SYMBOL(fc_rport_login); | |||
469 | * @rdata: The remote port to be deleted | 470 | * @rdata: The remote port to be deleted |
470 | * @event: The event to report as the reason for deletion | 471 | * @event: The event to report as the reason for deletion |
471 | * | 472 | * |
472 | * Locking Note: Called with the rport lock held. | ||
473 | * | ||
474 | * Allow state change into DELETE only once. | 473 | * Allow state change into DELETE only once. |
475 | * | 474 | * |
476 | * Call queue_work only if there's no event already pending. | 475 | * Call queue_work only if there's no event already pending. |
@@ -483,6 +482,8 @@ EXPORT_SYMBOL(fc_rport_login); | |||
483 | static void fc_rport_enter_delete(struct fc_rport_priv *rdata, | 482 | static void fc_rport_enter_delete(struct fc_rport_priv *rdata, |
484 | enum fc_rport_event event) | 483 | enum fc_rport_event event) |
485 | { | 484 | { |
485 | lockdep_assert_held(&rdata->rp_mutex); | ||
486 | |||
486 | if (rdata->rp_state == RPORT_ST_DELETE) | 487 | if (rdata->rp_state == RPORT_ST_DELETE) |
487 | return; | 488 | return; |
488 | 489 | ||
@@ -546,13 +547,12 @@ EXPORT_SYMBOL(fc_rport_logoff); | |||
546 | * fc_rport_enter_ready() - Transition to the RPORT_ST_READY state | 547 | * fc_rport_enter_ready() - Transition to the RPORT_ST_READY state |
547 | * @rdata: The remote port that is ready | 548 | * @rdata: The remote port that is ready |
548 | * | 549 | * |
549 | * Locking Note: The rport lock is expected to be held before calling | ||
550 | * this routine. | ||
551 | * | ||
552 | * Reference counting: schedules workqueue, does not modify kref | 550 | * Reference counting: schedules workqueue, does not modify kref |
553 | */ | 551 | */ |
554 | static void fc_rport_enter_ready(struct fc_rport_priv *rdata) | 552 | static void fc_rport_enter_ready(struct fc_rport_priv *rdata) |
555 | { | 553 | { |
554 | lockdep_assert_held(&rdata->rp_mutex); | ||
555 | |||
556 | fc_rport_state_enter(rdata, RPORT_ST_READY); | 556 | fc_rport_state_enter(rdata, RPORT_ST_READY); |
557 | 557 | ||
558 | FC_RPORT_DBG(rdata, "Port is Ready\n"); | 558 | FC_RPORT_DBG(rdata, "Port is Ready\n"); |
@@ -615,15 +615,14 @@ static void fc_rport_timeout(struct work_struct *work) | |||
615 | * @rdata: The remote port the error is happened on | 615 | * @rdata: The remote port the error is happened on |
616 | * @err: The error code | 616 | * @err: The error code |
617 | * | 617 | * |
618 | * Locking Note: The rport lock is expected to be held before | ||
619 | * calling this routine | ||
620 | * | ||
621 | * Reference counting: does not modify kref | 618 | * Reference counting: does not modify kref |
622 | */ | 619 | */ |
623 | static void fc_rport_error(struct fc_rport_priv *rdata, int err) | 620 | static void fc_rport_error(struct fc_rport_priv *rdata, int err) |
624 | { | 621 | { |
625 | struct fc_lport *lport = rdata->local_port; | 622 | struct fc_lport *lport = rdata->local_port; |
626 | 623 | ||
624 | lockdep_assert_held(&rdata->rp_mutex); | ||
625 | |||
627 | FC_RPORT_DBG(rdata, "Error %d in state %s, retries %d\n", | 626 | FC_RPORT_DBG(rdata, "Error %d in state %s, retries %d\n", |
628 | -err, fc_rport_state(rdata), rdata->retries); | 627 | -err, fc_rport_state(rdata), rdata->retries); |
629 | 628 | ||
@@ -662,15 +661,14 @@ static void fc_rport_error(struct fc_rport_priv *rdata, int err) | |||
662 | * If the error was an exchange timeout retry immediately, | 661 | * If the error was an exchange timeout retry immediately, |
663 | * otherwise wait for E_D_TOV. | 662 | * otherwise wait for E_D_TOV. |
664 | * | 663 | * |
665 | * Locking Note: The rport lock is expected to be held before | ||
666 | * calling this routine | ||
667 | * | ||
668 | * Reference counting: increments kref when scheduling retry_work | 664 | * Reference counting: increments kref when scheduling retry_work |
669 | */ | 665 | */ |
670 | static void fc_rport_error_retry(struct fc_rport_priv *rdata, int err) | 666 | static void fc_rport_error_retry(struct fc_rport_priv *rdata, int err) |
671 | { | 667 | { |
672 | unsigned long delay = msecs_to_jiffies(rdata->e_d_tov); | 668 | unsigned long delay = msecs_to_jiffies(rdata->e_d_tov); |
673 | 669 | ||
670 | lockdep_assert_held(&rdata->rp_mutex); | ||
671 | |||
674 | /* make sure this isn't an FC_EX_CLOSED error, never retry those */ | 672 | /* make sure this isn't an FC_EX_CLOSED error, never retry those */ |
675 | if (err == -FC_EX_CLOSED) | 673 | if (err == -FC_EX_CLOSED) |
676 | goto out; | 674 | goto out; |
@@ -822,9 +820,6 @@ bad: | |||
822 | * fc_rport_enter_flogi() - Send a FLOGI request to the remote port for p-mp | 820 | * fc_rport_enter_flogi() - Send a FLOGI request to the remote port for p-mp |
823 | * @rdata: The remote port to send a FLOGI to | 821 | * @rdata: The remote port to send a FLOGI to |
824 | * | 822 | * |
825 | * Locking Note: The rport lock is expected to be held before calling | ||
826 | * this routine. | ||
827 | * | ||
828 | * Reference counting: increments kref when sending ELS | 823 | * Reference counting: increments kref when sending ELS |
829 | */ | 824 | */ |
830 | static void fc_rport_enter_flogi(struct fc_rport_priv *rdata) | 825 | static void fc_rport_enter_flogi(struct fc_rport_priv *rdata) |
@@ -832,6 +827,8 @@ static void fc_rport_enter_flogi(struct fc_rport_priv *rdata) | |||
832 | struct fc_lport *lport = rdata->local_port; | 827 | struct fc_lport *lport = rdata->local_port; |
833 | struct fc_frame *fp; | 828 | struct fc_frame *fp; |
834 | 829 | ||
830 | lockdep_assert_held(&rdata->rp_mutex); | ||
831 | |||
835 | if (!lport->point_to_multipoint) | 832 | if (!lport->point_to_multipoint) |
836 | return fc_rport_enter_plogi(rdata); | 833 | return fc_rport_enter_plogi(rdata); |
837 | 834 | ||
@@ -1071,9 +1068,6 @@ fc_rport_compatible_roles(struct fc_lport *lport, struct fc_rport_priv *rdata) | |||
1071 | * fc_rport_enter_plogi() - Send Port Login (PLOGI) request | 1068 | * fc_rport_enter_plogi() - Send Port Login (PLOGI) request |
1072 | * @rdata: The remote port to send a PLOGI to | 1069 | * @rdata: The remote port to send a PLOGI to |
1073 | * | 1070 | * |
1074 | * Locking Note: The rport lock is expected to be held before calling | ||
1075 | * this routine. | ||
1076 | * | ||
1077 | * Reference counting: increments kref when sending ELS | 1071 | * Reference counting: increments kref when sending ELS |
1078 | */ | 1072 | */ |
1079 | static void fc_rport_enter_plogi(struct fc_rport_priv *rdata) | 1073 | static void fc_rport_enter_plogi(struct fc_rport_priv *rdata) |
@@ -1081,6 +1075,8 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata) | |||
1081 | struct fc_lport *lport = rdata->local_port; | 1075 | struct fc_lport *lport = rdata->local_port; |
1082 | struct fc_frame *fp; | 1076 | struct fc_frame *fp; |
1083 | 1077 | ||
1078 | lockdep_assert_held(&rdata->rp_mutex); | ||
1079 | |||
1084 | if (!fc_rport_compatible_roles(lport, rdata)) { | 1080 | if (!fc_rport_compatible_roles(lport, rdata)) { |
1085 | FC_RPORT_DBG(rdata, "PLOGI suppressed for incompatible role\n"); | 1081 | FC_RPORT_DBG(rdata, "PLOGI suppressed for incompatible role\n"); |
1086 | fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT); | 1082 | fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT); |
@@ -1232,9 +1228,6 @@ put: | |||
1232 | * fc_rport_enter_prli() - Send Process Login (PRLI) request | 1228 | * fc_rport_enter_prli() - Send Process Login (PRLI) request |
1233 | * @rdata: The remote port to send the PRLI request to | 1229 | * @rdata: The remote port to send the PRLI request to |
1234 | * | 1230 | * |
1235 | * Locking Note: The rport lock is expected to be held before calling | ||
1236 | * this routine. | ||
1237 | * | ||
1238 | * Reference counting: increments kref when sending ELS | 1231 | * Reference counting: increments kref when sending ELS |
1239 | */ | 1232 | */ |
1240 | static void fc_rport_enter_prli(struct fc_rport_priv *rdata) | 1233 | static void fc_rport_enter_prli(struct fc_rport_priv *rdata) |
@@ -1247,6 +1240,8 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata) | |||
1247 | struct fc_frame *fp; | 1240 | struct fc_frame *fp; |
1248 | struct fc4_prov *prov; | 1241 | struct fc4_prov *prov; |
1249 | 1242 | ||
1243 | lockdep_assert_held(&rdata->rp_mutex); | ||
1244 | |||
1250 | /* | 1245 | /* |
1251 | * If the rport is one of the well known addresses | 1246 | * If the rport is one of the well known addresses |
1252 | * we skip PRLI and RTV and go straight to READY. | 1247 | * we skip PRLI and RTV and go straight to READY. |
@@ -1372,9 +1367,6 @@ put: | |||
1372 | * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request | 1367 | * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request |
1373 | * @rdata: The remote port to send the RTV request to | 1368 | * @rdata: The remote port to send the RTV request to |
1374 | * | 1369 | * |
1375 | * Locking Note: The rport lock is expected to be held before calling | ||
1376 | * this routine. | ||
1377 | * | ||
1378 | * Reference counting: increments kref when sending ELS | 1370 | * Reference counting: increments kref when sending ELS |
1379 | */ | 1371 | */ |
1380 | static void fc_rport_enter_rtv(struct fc_rport_priv *rdata) | 1372 | static void fc_rport_enter_rtv(struct fc_rport_priv *rdata) |
@@ -1382,6 +1374,8 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata) | |||
1382 | struct fc_frame *fp; | 1374 | struct fc_frame *fp; |
1383 | struct fc_lport *lport = rdata->local_port; | 1375 | struct fc_lport *lport = rdata->local_port; |
1384 | 1376 | ||
1377 | lockdep_assert_held(&rdata->rp_mutex); | ||
1378 | |||
1385 | FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n", | 1379 | FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n", |
1386 | fc_rport_state(rdata)); | 1380 | fc_rport_state(rdata)); |
1387 | 1381 | ||
@@ -1406,8 +1400,6 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata) | |||
1406 | * fc_rport_recv_rtv_req() - Handler for Read Timeout Value (RTV) requests | 1400 | * fc_rport_recv_rtv_req() - Handler for Read Timeout Value (RTV) requests |
1407 | * @rdata: The remote port that sent the RTV request | 1401 | * @rdata: The remote port that sent the RTV request |
1408 | * @in_fp: The RTV request frame | 1402 | * @in_fp: The RTV request frame |
1409 | * | ||
1410 | * Locking Note: Called with the lport and rport locks held. | ||
1411 | */ | 1403 | */ |
1412 | static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata, | 1404 | static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata, |
1413 | struct fc_frame *in_fp) | 1405 | struct fc_frame *in_fp) |
@@ -1417,6 +1409,9 @@ static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata, | |||
1417 | struct fc_els_rtv_acc *rtv; | 1409 | struct fc_els_rtv_acc *rtv; |
1418 | struct fc_seq_els_data rjt_data; | 1410 | struct fc_seq_els_data rjt_data; |
1419 | 1411 | ||
1412 | lockdep_assert_held(&rdata->rp_mutex); | ||
1413 | lockdep_assert_held(&lport->lp_mutex); | ||
1414 | |||
1420 | FC_RPORT_DBG(rdata, "Received RTV request\n"); | 1415 | FC_RPORT_DBG(rdata, "Received RTV request\n"); |
1421 | 1416 | ||
1422 | fp = fc_frame_alloc(lport, sizeof(*rtv)); | 1417 | fp = fc_frame_alloc(lport, sizeof(*rtv)); |
@@ -1460,9 +1455,6 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1460 | * fc_rport_enter_logo() - Send a logout (LOGO) request | 1455 | * fc_rport_enter_logo() - Send a logout (LOGO) request |
1461 | * @rdata: The remote port to send the LOGO request to | 1456 | * @rdata: The remote port to send the LOGO request to |
1462 | * | 1457 | * |
1463 | * Locking Note: The rport lock is expected to be held before calling | ||
1464 | * this routine. | ||
1465 | * | ||
1466 | * Reference counting: increments kref when sending ELS | 1458 | * Reference counting: increments kref when sending ELS |
1467 | */ | 1459 | */ |
1468 | static void fc_rport_enter_logo(struct fc_rport_priv *rdata) | 1460 | static void fc_rport_enter_logo(struct fc_rport_priv *rdata) |
@@ -1470,6 +1462,8 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata) | |||
1470 | struct fc_lport *lport = rdata->local_port; | 1462 | struct fc_lport *lport = rdata->local_port; |
1471 | struct fc_frame *fp; | 1463 | struct fc_frame *fp; |
1472 | 1464 | ||
1465 | lockdep_assert_held(&rdata->rp_mutex); | ||
1466 | |||
1473 | FC_RPORT_DBG(rdata, "Port sending LOGO from %s state\n", | 1467 | FC_RPORT_DBG(rdata, "Port sending LOGO from %s state\n", |
1474 | fc_rport_state(rdata)); | 1468 | fc_rport_state(rdata)); |
1475 | 1469 | ||
@@ -1548,9 +1542,6 @@ put: | |||
1548 | * fc_rport_enter_adisc() - Send Address Discover (ADISC) request | 1542 | * fc_rport_enter_adisc() - Send Address Discover (ADISC) request |
1549 | * @rdata: The remote port to send the ADISC request to | 1543 | * @rdata: The remote port to send the ADISC request to |
1550 | * | 1544 | * |
1551 | * Locking Note: The rport lock is expected to be held before calling | ||
1552 | * this routine. | ||
1553 | * | ||
1554 | * Reference counting: increments kref when sending ELS | 1545 | * Reference counting: increments kref when sending ELS |
1555 | */ | 1546 | */ |
1556 | static void fc_rport_enter_adisc(struct fc_rport_priv *rdata) | 1547 | static void fc_rport_enter_adisc(struct fc_rport_priv *rdata) |
@@ -1558,6 +1549,8 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata) | |||
1558 | struct fc_lport *lport = rdata->local_port; | 1549 | struct fc_lport *lport = rdata->local_port; |
1559 | struct fc_frame *fp; | 1550 | struct fc_frame *fp; |
1560 | 1551 | ||
1552 | lockdep_assert_held(&rdata->rp_mutex); | ||
1553 | |||
1561 | FC_RPORT_DBG(rdata, "sending ADISC from %s state\n", | 1554 | FC_RPORT_DBG(rdata, "sending ADISC from %s state\n", |
1562 | fc_rport_state(rdata)); | 1555 | fc_rport_state(rdata)); |
1563 | 1556 | ||
@@ -1581,8 +1574,6 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata) | |||
1581 | * fc_rport_recv_adisc_req() - Handler for Address Discovery (ADISC) requests | 1574 | * fc_rport_recv_adisc_req() - Handler for Address Discovery (ADISC) requests |
1582 | * @rdata: The remote port that sent the ADISC request | 1575 | * @rdata: The remote port that sent the ADISC request |
1583 | * @in_fp: The ADISC request frame | 1576 | * @in_fp: The ADISC request frame |
1584 | * | ||
1585 | * Locking Note: Called with the lport and rport locks held. | ||
1586 | */ | 1577 | */ |
1587 | static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata, | 1578 | static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata, |
1588 | struct fc_frame *in_fp) | 1579 | struct fc_frame *in_fp) |
@@ -1592,6 +1583,9 @@ static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata, | |||
1592 | struct fc_els_adisc *adisc; | 1583 | struct fc_els_adisc *adisc; |
1593 | struct fc_seq_els_data rjt_data; | 1584 | struct fc_seq_els_data rjt_data; |
1594 | 1585 | ||
1586 | lockdep_assert_held(&rdata->rp_mutex); | ||
1587 | lockdep_assert_held(&lport->lp_mutex); | ||
1588 | |||
1595 | FC_RPORT_DBG(rdata, "Received ADISC request\n"); | 1589 | FC_RPORT_DBG(rdata, "Received ADISC request\n"); |
1596 | 1590 | ||
1597 | adisc = fc_frame_payload_get(in_fp, sizeof(*adisc)); | 1591 | adisc = fc_frame_payload_get(in_fp, sizeof(*adisc)); |
@@ -1618,9 +1612,6 @@ drop: | |||
1618 | * fc_rport_recv_rls_req() - Handle received Read Link Status request | 1612 | * fc_rport_recv_rls_req() - Handle received Read Link Status request |
1619 | * @rdata: The remote port that sent the RLS request | 1613 | * @rdata: The remote port that sent the RLS request |
1620 | * @rx_fp: The PRLI request frame | 1614 | * @rx_fp: The PRLI request frame |
1621 | * | ||
1622 | * Locking Note: The rport lock is expected to be held before calling | ||
1623 | * this function. | ||
1624 | */ | 1615 | */ |
1625 | static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata, | 1616 | static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata, |
1626 | struct fc_frame *rx_fp) | 1617 | struct fc_frame *rx_fp) |
@@ -1634,6 +1625,8 @@ static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata, | |||
1634 | struct fc_seq_els_data rjt_data; | 1625 | struct fc_seq_els_data rjt_data; |
1635 | struct fc_host_statistics *hst; | 1626 | struct fc_host_statistics *hst; |
1636 | 1627 | ||
1628 | lockdep_assert_held(&rdata->rp_mutex); | ||
1629 | |||
1637 | FC_RPORT_DBG(rdata, "Received RLS request while in state %s\n", | 1630 | FC_RPORT_DBG(rdata, "Received RLS request while in state %s\n", |
1638 | fc_rport_state(rdata)); | 1631 | fc_rport_state(rdata)); |
1639 | 1632 | ||
@@ -1687,8 +1680,6 @@ out: | |||
1687 | * Handle incoming ELS requests that require port login. | 1680 | * Handle incoming ELS requests that require port login. |
1688 | * The ELS opcode has already been validated by the caller. | 1681 | * The ELS opcode has already been validated by the caller. |
1689 | * | 1682 | * |
1690 | * Locking Note: Called with the lport lock held. | ||
1691 | * | ||
1692 | * Reference counting: does not modify kref | 1683 | * Reference counting: does not modify kref |
1693 | */ | 1684 | */ |
1694 | static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp) | 1685 | static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp) |
@@ -1696,6 +1687,8 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp) | |||
1696 | struct fc_rport_priv *rdata; | 1687 | struct fc_rport_priv *rdata; |
1697 | struct fc_seq_els_data els_data; | 1688 | struct fc_seq_els_data els_data; |
1698 | 1689 | ||
1690 | lockdep_assert_held(&lport->lp_mutex); | ||
1691 | |||
1699 | rdata = fc_rport_lookup(lport, fc_frame_sid(fp)); | 1692 | rdata = fc_rport_lookup(lport, fc_frame_sid(fp)); |
1700 | if (!rdata) { | 1693 | if (!rdata) { |
1701 | FC_RPORT_ID_DBG(lport, fc_frame_sid(fp), | 1694 | FC_RPORT_ID_DBG(lport, fc_frame_sid(fp), |
@@ -1783,14 +1776,14 @@ busy: | |||
1783 | * @lport: The local port that received the request | 1776 | * @lport: The local port that received the request |
1784 | * @fp: The request frame | 1777 | * @fp: The request frame |
1785 | * | 1778 | * |
1786 | * Locking Note: Called with the lport lock held. | ||
1787 | * | ||
1788 | * Reference counting: does not modify kref | 1779 | * Reference counting: does not modify kref |
1789 | */ | 1780 | */ |
1790 | void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp) | 1781 | void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp) |
1791 | { | 1782 | { |
1792 | struct fc_seq_els_data els_data; | 1783 | struct fc_seq_els_data els_data; |
1793 | 1784 | ||
1785 | lockdep_assert_held(&lport->lp_mutex); | ||
1786 | |||
1794 | /* | 1787 | /* |
1795 | * Handle FLOGI, PLOGI and LOGO requests separately, since they | 1788 | * Handle FLOGI, PLOGI and LOGO requests separately, since they |
1796 | * don't require prior login. | 1789 | * don't require prior login. |
@@ -1831,8 +1824,6 @@ EXPORT_SYMBOL(fc_rport_recv_req); | |||
1831 | * @lport: The local port that received the PLOGI request | 1824 | * @lport: The local port that received the PLOGI request |
1832 | * @rx_fp: The PLOGI request frame | 1825 | * @rx_fp: The PLOGI request frame |
1833 | * | 1826 | * |
1834 | * Locking Note: The rport lock is held before calling this function. | ||
1835 | * | ||
1836 | * Reference counting: increments kref on return | 1827 | * Reference counting: increments kref on return |
1837 | */ | 1828 | */ |
1838 | static void fc_rport_recv_plogi_req(struct fc_lport *lport, | 1829 | static void fc_rport_recv_plogi_req(struct fc_lport *lport, |
@@ -1845,6 +1836,8 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport, | |||
1845 | struct fc_seq_els_data rjt_data; | 1836 | struct fc_seq_els_data rjt_data; |
1846 | u32 sid; | 1837 | u32 sid; |
1847 | 1838 | ||
1839 | lockdep_assert_held(&lport->lp_mutex); | ||
1840 | |||
1848 | sid = fc_frame_sid(fp); | 1841 | sid = fc_frame_sid(fp); |
1849 | 1842 | ||
1850 | FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n"); | 1843 | FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n"); |
@@ -1955,9 +1948,6 @@ reject: | |||
1955 | * fc_rport_recv_prli_req() - Handler for process login (PRLI) requests | 1948 | * fc_rport_recv_prli_req() - Handler for process login (PRLI) requests |
1956 | * @rdata: The remote port that sent the PRLI request | 1949 | * @rdata: The remote port that sent the PRLI request |
1957 | * @rx_fp: The PRLI request frame | 1950 | * @rx_fp: The PRLI request frame |
1958 | * | ||
1959 | * Locking Note: The rport lock is expected to be held before calling | ||
1960 | * this function. | ||
1961 | */ | 1951 | */ |
1962 | static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, | 1952 | static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, |
1963 | struct fc_frame *rx_fp) | 1953 | struct fc_frame *rx_fp) |
@@ -1976,6 +1966,8 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata, | |||
1976 | struct fc_seq_els_data rjt_data; | 1966 | struct fc_seq_els_data rjt_data; |
1977 | struct fc4_prov *prov; | 1967 | struct fc4_prov *prov; |
1978 | 1968 | ||
1969 | lockdep_assert_held(&rdata->rp_mutex); | ||
1970 | |||
1979 | FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n", | 1971 | FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n", |
1980 | fc_rport_state(rdata)); | 1972 | fc_rport_state(rdata)); |
1981 | 1973 | ||
@@ -2072,9 +2064,6 @@ drop: | |||
2072 | * fc_rport_recv_prlo_req() - Handler for process logout (PRLO) requests | 2064 | * fc_rport_recv_prlo_req() - Handler for process logout (PRLO) requests |
2073 | * @rdata: The remote port that sent the PRLO request | 2065 | * @rdata: The remote port that sent the PRLO request |
2074 | * @rx_fp: The PRLO request frame | 2066 | * @rx_fp: The PRLO request frame |
2075 | * | ||
2076 | * Locking Note: The rport lock is expected to be held before calling | ||
2077 | * this function. | ||
2078 | */ | 2067 | */ |
2079 | static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, | 2068 | static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, |
2080 | struct fc_frame *rx_fp) | 2069 | struct fc_frame *rx_fp) |
@@ -2091,6 +2080,8 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata, | |||
2091 | unsigned int plen; | 2080 | unsigned int plen; |
2092 | struct fc_seq_els_data rjt_data; | 2081 | struct fc_seq_els_data rjt_data; |
2093 | 2082 | ||
2083 | lockdep_assert_held(&rdata->rp_mutex); | ||
2084 | |||
2094 | FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n", | 2085 | FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n", |
2095 | fc_rport_state(rdata)); | 2086 | fc_rport_state(rdata)); |
2096 | 2087 | ||
@@ -2144,9 +2135,6 @@ drop: | |||
2144 | * @lport: The local port that received the LOGO request | 2135 | * @lport: The local port that received the LOGO request |
2145 | * @fp: The LOGO request frame | 2136 | * @fp: The LOGO request frame |
2146 | * | 2137 | * |
2147 | * Locking Note: The rport lock is expected to be held before calling | ||
2148 | * this function. | ||
2149 | * | ||
2150 | * Reference counting: drops kref on return | 2138 | * Reference counting: drops kref on return |
2151 | */ | 2139 | */ |
2152 | static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) | 2140 | static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) |
@@ -2154,6 +2142,8 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp) | |||
2154 | struct fc_rport_priv *rdata; | 2142 | struct fc_rport_priv *rdata; |
2155 | u32 sid; | 2143 | u32 sid; |
2156 | 2144 | ||
2145 | lockdep_assert_held(&lport->lp_mutex); | ||
2146 | |||
2157 | fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL); | 2147 | fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL); |
2158 | 2148 | ||
2159 | sid = fc_frame_sid(fp); | 2149 | sid = fc_frame_sid(fp); |
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index c972cc2b3d5b..93c66ebad907 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -1705,6 +1705,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) | |||
1705 | sc->result = DID_NO_CONNECT << 16; | 1705 | sc->result = DID_NO_CONNECT << 16; |
1706 | break; | 1706 | break; |
1707 | } | 1707 | } |
1708 | /* fall through */ | ||
1708 | case ISCSI_STATE_IN_RECOVERY: | 1709 | case ISCSI_STATE_IN_RECOVERY: |
1709 | reason = FAILURE_SESSION_IN_RECOVERY; | 1710 | reason = FAILURE_SESSION_IN_RECOVERY; |
1710 | sc->result = DID_IMM_RETRY << 16; | 1711 | sc->result = DID_IMM_RETRY << 16; |
@@ -1832,6 +1833,7 @@ static void iscsi_tmf_timedout(struct timer_list *t) | |||
1832 | static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, | 1833 | static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, |
1833 | struct iscsi_tm *hdr, int age, | 1834 | struct iscsi_tm *hdr, int age, |
1834 | int timeout) | 1835 | int timeout) |
1836 | __must_hold(&session->frwd_lock) | ||
1835 | { | 1837 | { |
1836 | struct iscsi_session *session = conn->session; | 1838 | struct iscsi_session *session = conn->session; |
1837 | struct iscsi_task *task; | 1839 | struct iscsi_task *task; |
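The two libiscsi.c hunks add annotations rather than behaviour: a "/* fall through */" comment marks the missing break as intentional so -Wimplicit-fallthrough stays quiet, and __must_hold() tells sparse that iscsi_exec_task_mgmt_fn() runs with the lock already taken. A condensed sketch of both, with made-up names and the lock passed as a parameter purely for illustration.

#include <linux/compiler.h>
#include <linux/spinlock.h>

/* Sparse checks that callers enter and leave this function with @lock held. */
static int example_classify(int state, spinlock_t *lock)
	__must_hold(lock)
{
	int reason = 0;

	switch (state) {
	case 1:
		/* state 1 needs an extra flag on top of what state 2 sets */
		reason |= 0x10;
		/* fall through */
	case 2:
		reason |= 0x01;
		break;
	default:
		break;
	}
	return reason;
}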
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index 369ef8f23b24..4fcb9e65be57 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c | |||
@@ -695,7 +695,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr) | |||
695 | struct scsi_data_buffer *sdb = scsi_in(task->sc); | 695 | struct scsi_data_buffer *sdb = scsi_in(task->sc); |
696 | 696 | ||
697 | /* | 697 | /* |
698 | * Setup copy of Data-In into the Scsi_Cmnd | 698 | * Setup copy of Data-In into the struct scsi_cmnd |
699 | * Scatterlist case: | 699 | * Scatterlist case: |
700 | * We set up the iscsi_segment to point to the next | 700 | * We set up the iscsi_segment to point to the next |
701 | * scatterlist entry to copy to. As we go along, | 701 | * scatterlist entry to copy to. As we go along, |
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index ff1d612f6fb9..64a958a99f6a 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c | |||
@@ -176,7 +176,6 @@ qc_already_gone: | |||
176 | 176 | ||
177 | static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) | 177 | static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) |
178 | { | 178 | { |
179 | unsigned long flags; | ||
180 | struct sas_task *task; | 179 | struct sas_task *task; |
181 | struct scatterlist *sg; | 180 | struct scatterlist *sg; |
182 | int ret = AC_ERR_SYSTEM; | 181 | int ret = AC_ERR_SYSTEM; |
@@ -187,10 +186,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) | |||
187 | struct Scsi_Host *host = sas_ha->core.shost; | 186 | struct Scsi_Host *host = sas_ha->core.shost; |
188 | struct sas_internal *i = to_sas_internal(host->transportt); | 187 | struct sas_internal *i = to_sas_internal(host->transportt); |
189 | 188 | ||
190 | /* TODO: audit callers to ensure they are ready for qc_issue to | 189 | /* TODO: we should try to remove that unlock */ |
191 | * unconditionally re-enable interrupts | ||
192 | */ | ||
193 | local_irq_save(flags); | ||
194 | spin_unlock(ap->lock); | 190 | spin_unlock(ap->lock); |
195 | 191 | ||
196 | /* If the device fell off, no sense in issuing commands */ | 192 | /* If the device fell off, no sense in issuing commands */ |
@@ -252,7 +248,6 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) | |||
252 | 248 | ||
253 | out: | 249 | out: |
254 | spin_lock(ap->lock); | 250 | spin_lock(ap->lock); |
255 | local_irq_restore(flags); | ||
256 | return ret; | 251 | return ret; |
257 | } | 252 | } |
258 | 253 | ||
@@ -557,34 +552,46 @@ int sas_ata_init(struct domain_device *found_dev) | |||
557 | { | 552 | { |
558 | struct sas_ha_struct *ha = found_dev->port->ha; | 553 | struct sas_ha_struct *ha = found_dev->port->ha; |
559 | struct Scsi_Host *shost = ha->core.shost; | 554 | struct Scsi_Host *shost = ha->core.shost; |
555 | struct ata_host *ata_host; | ||
560 | struct ata_port *ap; | 556 | struct ata_port *ap; |
561 | int rc; | 557 | int rc; |
562 | 558 | ||
563 | ata_host_init(&found_dev->sata_dev.ata_host, ha->dev, &sas_sata_ops); | 559 | ata_host = kzalloc(sizeof(*ata_host), GFP_KERNEL); |
564 | ap = ata_sas_port_alloc(&found_dev->sata_dev.ata_host, | 560 | if (!ata_host) { |
565 | &sata_port_info, | 561 | SAS_DPRINTK("ata host alloc failed.\n"); |
566 | shost); | 562 | return -ENOMEM; |
563 | } | ||
564 | |||
565 | ata_host_init(ata_host, ha->dev, &sas_sata_ops); | ||
566 | |||
567 | ap = ata_sas_port_alloc(ata_host, &sata_port_info, shost); | ||
567 | if (!ap) { | 568 | if (!ap) { |
568 | SAS_DPRINTK("ata_sas_port_alloc failed.\n"); | 569 | SAS_DPRINTK("ata_sas_port_alloc failed.\n"); |
569 | return -ENODEV; | 570 | rc = -ENODEV; |
571 | goto free_host; | ||
570 | } | 572 | } |
571 | 573 | ||
572 | ap->private_data = found_dev; | 574 | ap->private_data = found_dev; |
573 | ap->cbl = ATA_CBL_SATA; | 575 | ap->cbl = ATA_CBL_SATA; |
574 | ap->scsi_host = shost; | 576 | ap->scsi_host = shost; |
575 | rc = ata_sas_port_init(ap); | 577 | rc = ata_sas_port_init(ap); |
576 | if (rc) { | 578 | if (rc) |
577 | ata_sas_port_destroy(ap); | 579 | goto destroy_port; |
578 | return rc; | 580 | |
579 | } | 581 | rc = ata_sas_tport_add(ata_host->dev, ap); |
580 | rc = ata_sas_tport_add(found_dev->sata_dev.ata_host.dev, ap); | 582 | if (rc) |
581 | if (rc) { | 583 | goto destroy_port; |
582 | ata_sas_port_destroy(ap); | 584 | |
583 | return rc; | 585 | found_dev->sata_dev.ata_host = ata_host; |
584 | } | ||
585 | found_dev->sata_dev.ap = ap; | 586 | found_dev->sata_dev.ap = ap; |
586 | 587 | ||
587 | return 0; | 588 | return 0; |
589 | |||
590 | destroy_port: | ||
591 | ata_sas_port_destroy(ap); | ||
592 | free_host: | ||
593 | ata_host_put(ata_host); | ||
594 | return rc; | ||
588 | } | 595 | } |
589 | 596 | ||
590 | void sas_ata_task_abort(struct sas_task *task) | 597 | void sas_ata_task_abort(struct sas_task *task) |
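
Editor's note on the sas_ata_init() rework above: the ata_host moves from being embedded in the domain_device to a kzalloc()'d, refcounted object, and the early-return error handling becomes a single goto-based unwind (destroy_port, then free_host via ata_host_put()) so each failure releases only what was already set up, in reverse order. A standalone sketch of that unwind pattern follows; device_init, struct host and struct port are illustrative stand-ins, not the libsas API.

#include <stdio.h>
#include <stdlib.h>

struct host { int id; };
struct port { struct host *h; };

static int device_init(struct port **out, int fail_late)
{
	struct host *h;
	struct port *p;
	int rc;

	h = calloc(1, sizeof(*h));	/* like kzalloc() of the ata_host */
	if (!h)
		return -1;

	p = calloc(1, sizeof(*p));	/* like ata_sas_port_alloc() */
	if (!p) {
		rc = -2;
		goto free_host;
	}
	p->h = h;

	if (fail_late) {		/* like ata_sas_tport_add() failing */
		rc = -3;
		goto destroy_port;
	}

	*out = p;
	return 0;

destroy_port:
	free(p);
free_host:
	free(h);			/* like ata_host_put() dropping the ref */
	return rc;
}

int main(void)
{
	struct port *p = NULL;

	if (device_init(&p, 0) == 0) {
		printf("initialized port backed by host %p\n", (void *)p->h);
		free(p->h);
		free(p);
	}
	return 0;
}
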
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index 1ffca28fe6a8..0148ae62a52a 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c | |||
@@ -316,6 +316,8 @@ void sas_free_device(struct kref *kref) | |||
316 | if (dev_is_sata(dev) && dev->sata_dev.ap) { | 316 | if (dev_is_sata(dev) && dev->sata_dev.ap) { |
317 | ata_sas_tport_delete(dev->sata_dev.ap); | 317 | ata_sas_tport_delete(dev->sata_dev.ap); |
318 | ata_sas_port_destroy(dev->sata_dev.ap); | 318 | ata_sas_port_destroy(dev->sata_dev.ap); |
319 | ata_host_put(dev->sata_dev.ata_host); | ||
320 | dev->sata_dev.ata_host = NULL; | ||
319 | dev->sata_dev.ap = NULL; | 321 | dev->sata_dev.ap = NULL; |
320 | } | 322 | } |
321 | 323 | ||
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index ceab5e5c41c2..33229348dcb6 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c | |||
@@ -759,7 +759,7 @@ retry: | |||
759 | spin_unlock_irq(shost->host_lock); | 759 | spin_unlock_irq(shost->host_lock); |
760 | 760 | ||
761 | SAS_DPRINTK("Enter %s busy: %d failed: %d\n", | 761 | SAS_DPRINTK("Enter %s busy: %d failed: %d\n", |
762 | __func__, atomic_read(&shost->host_busy), shost->host_failed); | 762 | __func__, scsi_host_busy(shost), shost->host_failed); |
763 | /* | 763 | /* |
764 | * Deal with commands that still have SAS tasks (i.e. they didn't | 764 | * Deal with commands that still have SAS tasks (i.e. they didn't |
765 | * complete via the normal sas_task completion mechanism), | 765 | * complete via the normal sas_task completion mechanism), |
@@ -801,7 +801,7 @@ out: | |||
801 | goto retry; | 801 | goto retry; |
802 | 802 | ||
803 | SAS_DPRINTK("--- Exit %s: busy: %d failed: %d tries: %d\n", | 803 | SAS_DPRINTK("--- Exit %s: busy: %d failed: %d tries: %d\n", |
804 | __func__, atomic_read(&shost->host_busy), | 804 | __func__, scsi_host_busy(shost), |
805 | shost->host_failed, tries); | 805 | shost->host_failed, tries); |
806 | } | 806 | } |
807 | 807 | ||
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile index cb6aa802c48e..092a971d066b 100644 --- a/drivers/scsi/lpfc/Makefile +++ b/drivers/scsi/lpfc/Makefile | |||
@@ -1,8 +1,8 @@ | |||
1 | #/******************************************************************* | 1 | #/******************************************************************* |
2 | # * This file is part of the Emulex Linux Device Driver for * | 2 | # * This file is part of the Emulex Linux Device Driver for * |
3 | # * Fibre Channel Host Bus Adapters. * | 3 | # * Fibre Channel Host Bus Adapters. * |
4 | # * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * | 4 | # * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | # * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * | 5 | # * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | # * Copyright (C) 2004-2012 Emulex. All rights reserved. * | 6 | # * Copyright (C) 2004-2012 Emulex. All rights reserved. * |
7 | # * EMULEX and SLI are trademarks of Emulex. * | 7 | # * EMULEX and SLI are trademarks of Emulex. * |
8 | # * www.broadcom.com * | 8 | # * www.broadcom.com * |
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 20b249a649dd..e0d0da5f43d6 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h | |||
@@ -2,7 +2,7 @@ | |||
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * | 4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * | 6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | 7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * | 8 | * www.broadcom.com * |
@@ -840,8 +840,7 @@ struct lpfc_hba { | |||
840 | #define LPFC_ENABLE_FCP 1 | 840 | #define LPFC_ENABLE_FCP 1 |
841 | #define LPFC_ENABLE_NVME 2 | 841 | #define LPFC_ENABLE_NVME 2 |
842 | #define LPFC_ENABLE_BOTH 3 | 842 | #define LPFC_ENABLE_BOTH 3 |
843 | uint32_t nvme_embed_pbde; | 843 | uint32_t cfg_enable_pbde; |
844 | uint32_t fcp_embed_pbde; | ||
845 | uint32_t io_channel_irqs; /* number of irqs for io channels */ | 844 | uint32_t io_channel_irqs; /* number of irqs for io channels */ |
846 | struct nvmet_fc_target_port *targetport; | 845 | struct nvmet_fc_target_port *targetport; |
847 | lpfc_vpd_t vpd; /* vital product data */ | 846 | lpfc_vpd_t vpd; /* vital product data */ |
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 729d343861f4..5a25553415f8 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
@@ -64,6 +64,9 @@ | |||
64 | #define LPFC_MIN_MRQ_POST 512 | 64 | #define LPFC_MIN_MRQ_POST 512 |
65 | #define LPFC_MAX_MRQ_POST 2048 | 65 | #define LPFC_MAX_MRQ_POST 2048 |
66 | 66 | ||
67 | #define LPFC_MAX_NVME_INFO_TMP_LEN 100 | ||
68 | #define LPFC_NVME_INFO_MORE_STR "\nCould be more info...\n" | ||
69 | |||
67 | /* | 70 | /* |
68 | * Write key size should be multiple of 4. If write key is changed | 71 | * Write key size should be multiple of 4. If write key is changed |
69 | * make sure that library write key is also changed. | 72 | * make sure that library write key is also changed. |
@@ -158,14 +161,15 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, | |||
158 | char *statep; | 161 | char *statep; |
159 | int i; | 162 | int i; |
160 | int len = 0; | 163 | int len = 0; |
164 | char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0}; | ||
161 | 165 | ||
162 | if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { | 166 | if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { |
163 | len += snprintf(buf, PAGE_SIZE, "NVME Disabled\n"); | 167 | len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n"); |
164 | return len; | 168 | return len; |
165 | } | 169 | } |
166 | if (phba->nvmet_support) { | 170 | if (phba->nvmet_support) { |
167 | if (!phba->targetport) { | 171 | if (!phba->targetport) { |
168 | len = snprintf(buf, PAGE_SIZE, | 172 | len = scnprintf(buf, PAGE_SIZE, |
169 | "NVME Target: x%llx is not allocated\n", | 173 | "NVME Target: x%llx is not allocated\n", |
170 | wwn_to_u64(vport->fc_portname.u.wwn)); | 174 | wwn_to_u64(vport->fc_portname.u.wwn)); |
171 | return len; | 175 | return len; |
@@ -175,135 +179,169 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, | |||
175 | statep = "REGISTERED"; | 179 | statep = "REGISTERED"; |
176 | else | 180 | else |
177 | statep = "INIT"; | 181 | statep = "INIT"; |
178 | len += snprintf(buf + len, PAGE_SIZE - len, | 182 | scnprintf(tmp, sizeof(tmp), |
179 | "NVME Target Enabled State %s\n", | 183 | "NVME Target Enabled State %s\n", |
180 | statep); | 184 | statep); |
181 | len += snprintf(buf + len, PAGE_SIZE - len, | 185 | if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) |
182 | "%s%d WWPN x%llx WWNN x%llx DID x%06x\n", | 186 | goto buffer_done; |
183 | "NVME Target: lpfc", | 187 | |
184 | phba->brd_no, | 188 | scnprintf(tmp, sizeof(tmp), |
185 | wwn_to_u64(vport->fc_portname.u.wwn), | 189 | "%s%d WWPN x%llx WWNN x%llx DID x%06x\n", |
186 | wwn_to_u64(vport->fc_nodename.u.wwn), | 190 | "NVME Target: lpfc", |
187 | phba->targetport->port_id); | 191 | phba->brd_no, |
188 | 192 | wwn_to_u64(vport->fc_portname.u.wwn), | |
189 | len += snprintf(buf + len, PAGE_SIZE - len, | 193 | wwn_to_u64(vport->fc_nodename.u.wwn), |
190 | "\nNVME Target: Statistics\n"); | 194 | phba->targetport->port_id); |
195 | if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) | ||
196 | goto buffer_done; | ||
197 | |||
198 | if (strlcat(buf, "\nNVME Target: Statistics\n", PAGE_SIZE) | ||
199 | >= PAGE_SIZE) | ||
200 | goto buffer_done; | ||
201 | |||
191 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; | 202 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
192 | len += snprintf(buf+len, PAGE_SIZE-len, | 203 | scnprintf(tmp, sizeof(tmp), |
193 | "LS: Rcv %08x Drop %08x Abort %08x\n", | 204 | "LS: Rcv %08x Drop %08x Abort %08x\n", |
194 | atomic_read(&tgtp->rcv_ls_req_in), | 205 | atomic_read(&tgtp->rcv_ls_req_in), |
195 | atomic_read(&tgtp->rcv_ls_req_drop), | 206 | atomic_read(&tgtp->rcv_ls_req_drop), |
196 | atomic_read(&tgtp->xmt_ls_abort)); | 207 | atomic_read(&tgtp->xmt_ls_abort)); |
208 | if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) | ||
209 | goto buffer_done; | ||
210 | |||
197 | if (atomic_read(&tgtp->rcv_ls_req_in) != | 211 | if (atomic_read(&tgtp->rcv_ls_req_in) != |
198 | atomic_read(&tgtp->rcv_ls_req_out)) { | 212 | atomic_read(&tgtp->rcv_ls_req_out)) { |
199 | len += snprintf(buf+len, PAGE_SIZE-len, | 213 | scnprintf(tmp, sizeof(tmp), |
200 | "Rcv LS: in %08x != out %08x\n", | 214 | "Rcv LS: in %08x != out %08x\n", |
201 | atomic_read(&tgtp->rcv_ls_req_in), | 215 | atomic_read(&tgtp->rcv_ls_req_in), |
202 | atomic_read(&tgtp->rcv_ls_req_out)); | 216 | atomic_read(&tgtp->rcv_ls_req_out)); |
217 | if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) | ||
218 | goto buffer_done; | ||
203 | } | 219 | } |
204 | 220 | ||
205 | len += snprintf(buf+len, PAGE_SIZE-len, | 221 | scnprintf(tmp, sizeof(tmp), |
206 | "LS: Xmt %08x Drop %08x Cmpl %08x\n", | 222 | "LS: Xmt %08x Drop %08x Cmpl %08x\n", |
207 | atomic_read(&tgtp->xmt_ls_rsp), | 223 | atomic_read(&tgtp->xmt_ls_rsp), |
208 | atomic_read(&tgtp->xmt_ls_drop), | 224 | atomic_read(&tgtp->xmt_ls_drop), |
209 | atomic_read(&tgtp->xmt_ls_rsp_cmpl)); | 225 | atomic_read(&tgtp->xmt_ls_rsp_cmpl)); |
210 | 226 | if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) | |
211 | len += snprintf(buf + len, PAGE_SIZE - len, | 227 | goto buffer_done; |
212 | "LS: RSP Abort %08x xb %08x Err %08x\n", | 228 | |
213 | atomic_read(&tgtp->xmt_ls_rsp_aborted), | 229 | scnprintf(tmp, sizeof(tmp), |
214 | atomic_read(&tgtp->xmt_ls_rsp_xb_set), | 230 | "LS: RSP Abort %08x xb %08x Err %08x\n", |
215 | atomic_read(&tgtp->xmt_ls_rsp_error)); | 231 | atomic_read(&tgtp->xmt_ls_rsp_aborted), |
216 | 232 | atomic_read(&tgtp->xmt_ls_rsp_xb_set), | |
217 | len += snprintf(buf+len, PAGE_SIZE-len, | 233 | atomic_read(&tgtp->xmt_ls_rsp_error)); |
218 | "FCP: Rcv %08x Defer %08x Release %08x " | 234 | if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) |
219 | "Drop %08x\n", | 235 | goto buffer_done; |
220 | atomic_read(&tgtp->rcv_fcp_cmd_in), | 236 | |
221 | atomic_read(&tgtp->rcv_fcp_cmd_defer), | 237 | scnprintf(tmp, sizeof(tmp), |
222 | atomic_read(&tgtp->xmt_fcp_release), | 238 | "FCP: Rcv %08x Defer %08x Release %08x " |
223 | atomic_read(&tgtp->rcv_fcp_cmd_drop)); | 239 | "Drop %08x\n", |
240 | atomic_read(&tgtp->rcv_fcp_cmd_in), | ||
241 | atomic_read(&tgtp->rcv_fcp_cmd_defer), | ||
242 | atomic_read(&tgtp->xmt_fcp_release), | ||
243 | atomic_read(&tgtp->rcv_fcp_cmd_drop)); | ||
244 | if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) | ||
245 | goto buffer_done; | ||
224 | 246 | ||
225 | if (atomic_read(&tgtp->rcv_fcp_cmd_in) != | 247 | if (atomic_read(&tgtp->rcv_fcp_cmd_in) != |
226 | atomic_read(&tgtp->rcv_fcp_cmd_out)) { | 248 | atomic_read(&tgtp->rcv_fcp_cmd_out)) { |
227 | len += snprintf(buf+len, PAGE_SIZE-len, | 249 | scnprintf(tmp, sizeof(tmp), |
228 | "Rcv FCP: in %08x != out %08x\n", | 250 | "Rcv FCP: in %08x != out %08x\n", |
229 | atomic_read(&tgtp->rcv_fcp_cmd_in), | 251 | atomic_read(&tgtp->rcv_fcp_cmd_in), |
230 | atomic_read(&tgtp->rcv_fcp_cmd_out)); | 252 | atomic_read(&tgtp->rcv_fcp_cmd_out)); |
253 | if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) | ||
254 | goto buffer_done; | ||
231 | } | 255 | } |
232 | 256 | ||
233 | len += snprintf(buf+len, PAGE_SIZE-len, | 257 | scnprintf(tmp, sizeof(tmp), |
234 | "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x " | 258 | "FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x " |
235 | "drop %08x\n", | 259 | "drop %08x\n", |
236 | atomic_read(&tgtp->xmt_fcp_read), | 260 | atomic_read(&tgtp->xmt_fcp_read), |
237 | atomic_read(&tgtp->xmt_fcp_read_rsp), | 261 | atomic_read(&tgtp->xmt_fcp_read_rsp), |
238 | atomic_read(&tgtp->xmt_fcp_write), | 262 | atomic_read(&tgtp->xmt_fcp_write), |
239 | atomic_read(&tgtp->xmt_fcp_rsp), | 263 | atomic_read(&tgtp->xmt_fcp_rsp), |
240 | atomic_read(&tgtp->xmt_fcp_drop)); | 264 | atomic_read(&tgtp->xmt_fcp_drop)); |
241 | 265 | if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) | |
242 | len += snprintf(buf+len, PAGE_SIZE-len, | 266 | goto buffer_done; |
243 | "FCP Rsp Cmpl: %08x err %08x drop %08x\n", | 267 | |
244 | atomic_read(&tgtp->xmt_fcp_rsp_cmpl), | 268 | scnprintf(tmp, sizeof(tmp), |
245 | atomic_read(&tgtp->xmt_fcp_rsp_error), | 269 | "FCP Rsp Cmpl: %08x err %08x drop %08x\n", |
246 | atomic_read(&tgtp->xmt_fcp_rsp_drop)); | 270 | atomic_read(&tgtp->xmt_fcp_rsp_cmpl), |
247 | 271 | atomic_read(&tgtp->xmt_fcp_rsp_error), | |
248 | len += snprintf(buf+len, PAGE_SIZE-len, | 272 | atomic_read(&tgtp->xmt_fcp_rsp_drop)); |
249 | "FCP Rsp Abort: %08x xb %08x xricqe %08x\n", | 273 | if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) |
250 | atomic_read(&tgtp->xmt_fcp_rsp_aborted), | 274 | goto buffer_done; |
251 | atomic_read(&tgtp->xmt_fcp_rsp_xb_set), | 275 | |
252 | atomic_read(&tgtp->xmt_fcp_xri_abort_cqe)); | 276 | scnprintf(tmp, sizeof(tmp), |
253 | 277 | "FCP Rsp Abort: %08x xb %08x xricqe %08x\n", | |
254 | len += snprintf(buf + len, PAGE_SIZE - len, | 278 | atomic_read(&tgtp->xmt_fcp_rsp_aborted), |
255 | "ABORT: Xmt %08x Cmpl %08x\n", | 279 | atomic_read(&tgtp->xmt_fcp_rsp_xb_set), |
256 | atomic_read(&tgtp->xmt_fcp_abort), | 280 | atomic_read(&tgtp->xmt_fcp_xri_abort_cqe)); |
257 | atomic_read(&tgtp->xmt_fcp_abort_cmpl)); | 281 | if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) |
258 | 282 | goto buffer_done; | |
259 | len += snprintf(buf + len, PAGE_SIZE - len, | 283 | |
260 | "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x", | 284 | scnprintf(tmp, sizeof(tmp), |
261 | atomic_read(&tgtp->xmt_abort_sol), | 285 | "ABORT: Xmt %08x Cmpl %08x\n", |
262 | atomic_read(&tgtp->xmt_abort_unsol), | 286 | atomic_read(&tgtp->xmt_fcp_abort), |
263 | atomic_read(&tgtp->xmt_abort_rsp), | 287 | atomic_read(&tgtp->xmt_fcp_abort_cmpl)); |
264 | atomic_read(&tgtp->xmt_abort_rsp_error)); | 288 | if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) |
265 | 289 | goto buffer_done; | |
266 | len += snprintf(buf + len, PAGE_SIZE - len, | 290 | |
267 | "DELAY: ctx %08x fod %08x wqfull %08x\n", | 291 | scnprintf(tmp, sizeof(tmp), |
268 | atomic_read(&tgtp->defer_ctx), | 292 | "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x\n", |
269 | atomic_read(&tgtp->defer_fod), | 293 | atomic_read(&tgtp->xmt_abort_sol), |
270 | atomic_read(&tgtp->defer_wqfull)); | 294 | atomic_read(&tgtp->xmt_abort_unsol), |
295 | atomic_read(&tgtp->xmt_abort_rsp), | ||
296 | atomic_read(&tgtp->xmt_abort_rsp_error)); | ||
297 | if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) | ||
298 | goto buffer_done; | ||
299 | |||
300 | scnprintf(tmp, sizeof(tmp), | ||
301 | "DELAY: ctx %08x fod %08x wqfull %08x\n", | ||
302 | atomic_read(&tgtp->defer_ctx), | ||
303 | atomic_read(&tgtp->defer_fod), | ||
304 | atomic_read(&tgtp->defer_wqfull)); | ||
305 | if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) | ||
306 | goto buffer_done; | ||
271 | 307 | ||
272 | /* Calculate outstanding IOs */ | 308 | /* Calculate outstanding IOs */ |
273 | tot = atomic_read(&tgtp->rcv_fcp_cmd_drop); | 309 | tot = atomic_read(&tgtp->rcv_fcp_cmd_drop); |
274 | tot += atomic_read(&tgtp->xmt_fcp_release); | 310 | tot += atomic_read(&tgtp->xmt_fcp_release); |
275 | tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot; | 311 | tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot; |
276 | 312 | ||
277 | len += snprintf(buf + len, PAGE_SIZE - len, | 313 | scnprintf(tmp, sizeof(tmp), |
278 | "IO_CTX: %08x WAIT: cur %08x tot %08x\n" | 314 | "IO_CTX: %08x WAIT: cur %08x tot %08x\n" |
279 | "CTX Outstanding %08llx\n", | 315 | "CTX Outstanding %08llx\n\n", |
280 | phba->sli4_hba.nvmet_xri_cnt, | 316 | phba->sli4_hba.nvmet_xri_cnt, |
281 | phba->sli4_hba.nvmet_io_wait_cnt, | 317 | phba->sli4_hba.nvmet_io_wait_cnt, |
282 | phba->sli4_hba.nvmet_io_wait_total, | 318 | phba->sli4_hba.nvmet_io_wait_total, |
283 | tot); | 319 | tot); |
284 | 320 | strlcat(buf, tmp, PAGE_SIZE); | |
285 | len += snprintf(buf+len, PAGE_SIZE-len, "\n"); | 321 | goto buffer_done; |
286 | return len; | ||
287 | } | 322 | } |
288 | 323 | ||
289 | localport = vport->localport; | 324 | localport = vport->localport; |
290 | if (!localport) { | 325 | if (!localport) { |
291 | len = snprintf(buf, PAGE_SIZE, | 326 | len = scnprintf(buf, PAGE_SIZE, |
292 | "NVME Initiator x%llx is not allocated\n", | 327 | "NVME Initiator x%llx is not allocated\n", |
293 | wwn_to_u64(vport->fc_portname.u.wwn)); | 328 | wwn_to_u64(vport->fc_portname.u.wwn)); |
294 | return len; | 329 | return len; |
295 | } | 330 | } |
296 | lport = (struct lpfc_nvme_lport *)localport->private; | 331 | lport = (struct lpfc_nvme_lport *)localport->private; |
297 | len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n"); | 332 | if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE) |
298 | 333 | goto buffer_done; | |
299 | spin_lock_irq(shost->host_lock); | 334 | |
300 | len += snprintf(buf + len, PAGE_SIZE - len, | 335 | rcu_read_lock(); |
301 | "XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n", | 336 | scnprintf(tmp, sizeof(tmp), |
302 | phba->brd_no, | 337 | "XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n", |
303 | phba->sli4_hba.max_cfg_param.max_xri, | 338 | phba->brd_no, |
304 | phba->sli4_hba.nvme_xri_max, | 339 | phba->sli4_hba.max_cfg_param.max_xri, |
305 | phba->sli4_hba.scsi_xri_max, | 340 | phba->sli4_hba.nvme_xri_max, |
306 | lpfc_sli4_get_els_iocb_cnt(phba)); | 341 | phba->sli4_hba.scsi_xri_max, |
342 | lpfc_sli4_get_els_iocb_cnt(phba)); | ||
343 | if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) | ||
344 | goto buffer_done; | ||
307 | 345 | ||
308 | /* Port state is only one of two values for now. */ | 346 | /* Port state is only one of two values for now. */ |
309 | if (localport->port_id) | 347 | if (localport->port_id) |
@@ -311,13 +349,15 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, | |||
311 | else | 349 | else |
312 | statep = "UNKNOWN "; | 350 | statep = "UNKNOWN "; |
313 | 351 | ||
314 | len += snprintf(buf + len, PAGE_SIZE - len, | 352 | scnprintf(tmp, sizeof(tmp), |
315 | "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n", | 353 | "%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n", |
316 | "NVME LPORT lpfc", | 354 | "NVME LPORT lpfc", |
317 | phba->brd_no, | 355 | phba->brd_no, |
318 | wwn_to_u64(vport->fc_portname.u.wwn), | 356 | wwn_to_u64(vport->fc_portname.u.wwn), |
319 | wwn_to_u64(vport->fc_nodename.u.wwn), | 357 | wwn_to_u64(vport->fc_nodename.u.wwn), |
320 | localport->port_id, statep); | 358 | localport->port_id, statep); |
359 | if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) | ||
360 | goto buffer_done; | ||
321 | 361 | ||
322 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { | 362 | list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { |
323 | rport = lpfc_ndlp_get_nrport(ndlp); | 363 | rport = lpfc_ndlp_get_nrport(ndlp); |
@@ -343,56 +383,77 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, | |||
343 | } | 383 | } |
344 | 384 | ||
345 | /* Tab in to show lport ownership. */ | 385 | /* Tab in to show lport ownership. */ |
346 | len += snprintf(buf + len, PAGE_SIZE - len, | 386 | if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE) |
347 | "NVME RPORT "); | 387 | goto buffer_done; |
348 | if (phba->brd_no >= 10) | 388 | if (phba->brd_no >= 10) { |
349 | len += snprintf(buf + len, PAGE_SIZE - len, " "); | 389 | if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE) |
350 | 390 | goto buffer_done; | |
351 | len += snprintf(buf + len, PAGE_SIZE - len, "WWPN x%llx ", | 391 | } |
352 | nrport->port_name); | 392 | |
353 | len += snprintf(buf + len, PAGE_SIZE - len, "WWNN x%llx ", | 393 | scnprintf(tmp, sizeof(tmp), "WWPN x%llx ", |
354 | nrport->node_name); | 394 | nrport->port_name); |
355 | len += snprintf(buf + len, PAGE_SIZE - len, "DID x%06x ", | 395 | if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) |
356 | nrport->port_id); | 396 | goto buffer_done; |
397 | |||
398 | scnprintf(tmp, sizeof(tmp), "WWNN x%llx ", | ||
399 | nrport->node_name); | ||
400 | if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) | ||
401 | goto buffer_done; | ||
402 | |||
403 | scnprintf(tmp, sizeof(tmp), "DID x%06x ", | ||
404 | nrport->port_id); | ||
405 | if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) | ||
406 | goto buffer_done; | ||
357 | 407 | ||
358 | /* An NVME rport can have multiple roles. */ | 408 | /* An NVME rport can have multiple roles. */ |
359 | if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) | 409 | if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) { |
360 | len += snprintf(buf + len, PAGE_SIZE - len, | 410 | if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE) |
361 | "INITIATOR "); | 411 | goto buffer_done; |
362 | if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) | 412 | } |
363 | len += snprintf(buf + len, PAGE_SIZE - len, | 413 | if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) { |
364 | "TARGET "); | 414 | if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE) |
365 | if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) | 415 | goto buffer_done; |
366 | len += snprintf(buf + len, PAGE_SIZE - len, | 416 | } |
367 | "DISCSRVC "); | 417 | if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) { |
418 | if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE) | ||
419 | goto buffer_done; | ||
420 | } | ||
368 | if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR | | 421 | if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR | |
369 | FC_PORT_ROLE_NVME_TARGET | | 422 | FC_PORT_ROLE_NVME_TARGET | |
370 | FC_PORT_ROLE_NVME_DISCOVERY)) | 423 | FC_PORT_ROLE_NVME_DISCOVERY)) { |
371 | len += snprintf(buf + len, PAGE_SIZE - len, | 424 | scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x", |
372 | "UNKNOWN ROLE x%x", | 425 | nrport->port_role); |
373 | nrport->port_role); | 426 | if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) |
374 | 427 | goto buffer_done; | |
375 | len += snprintf(buf + len, PAGE_SIZE - len, "%s ", statep); | 428 | } |
376 | /* Terminate the string. */ | 429 | |
377 | len += snprintf(buf + len, PAGE_SIZE - len, "\n"); | 430 | scnprintf(tmp, sizeof(tmp), "%s\n", statep); |
431 | if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) | ||
432 | goto buffer_done; | ||
378 | } | 433 | } |
379 | spin_unlock_irq(shost->host_lock); | 434 | rcu_read_unlock(); |
380 | 435 | ||
381 | if (!lport) | 436 | if (!lport) |
382 | return len; | 437 | goto buffer_done; |
383 | 438 | ||
384 | len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n"); | 439 | if (strlcat(buf, "\nNVME Statistics\n", PAGE_SIZE) >= PAGE_SIZE) |
385 | len += snprintf(buf+len, PAGE_SIZE-len, | 440 | goto buffer_done; |
386 | "LS: Xmt %010x Cmpl %010x Abort %08x\n", | 441 | |
387 | atomic_read(&lport->fc4NvmeLsRequests), | 442 | scnprintf(tmp, sizeof(tmp), |
388 | atomic_read(&lport->fc4NvmeLsCmpls), | 443 | "LS: Xmt %010x Cmpl %010x Abort %08x\n", |
389 | atomic_read(&lport->xmt_ls_abort)); | 444 | atomic_read(&lport->fc4NvmeLsRequests), |
390 | 445 | atomic_read(&lport->fc4NvmeLsCmpls), | |
391 | len += snprintf(buf + len, PAGE_SIZE - len, | 446 | atomic_read(&lport->xmt_ls_abort)); |
392 | "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n", | 447 | if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) |
393 | atomic_read(&lport->xmt_ls_err), | 448 | goto buffer_done; |
394 | atomic_read(&lport->cmpl_ls_xb), | 449 | |
395 | atomic_read(&lport->cmpl_ls_err)); | 450 | scnprintf(tmp, sizeof(tmp), |
451 | "LS XMIT: Err %08x CMPL: xb %08x Err %08x\n", | ||
452 | atomic_read(&lport->xmt_ls_err), | ||
453 | atomic_read(&lport->cmpl_ls_xb), | ||
454 | atomic_read(&lport->cmpl_ls_err)); | ||
455 | if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) | ||
456 | goto buffer_done; | ||
396 | 457 | ||
397 | totin = 0; | 458 | totin = 0; |
398 | totout = 0; | 459 | totout = 0; |
@@ -405,25 +466,46 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, | |||
405 | data3 = atomic_read(&cstat->fc4NvmeControlRequests); | 466 | data3 = atomic_read(&cstat->fc4NvmeControlRequests); |
406 | totout += (data1 + data2 + data3); | 467 | totout += (data1 + data2 + data3); |
407 | } | 468 | } |
408 | len += snprintf(buf+len, PAGE_SIZE-len, | 469 | scnprintf(tmp, sizeof(tmp), |
409 | "Total FCP Cmpl %016llx Issue %016llx " | 470 | "Total FCP Cmpl %016llx Issue %016llx " |
410 | "OutIO %016llx\n", | 471 | "OutIO %016llx\n", |
411 | totin, totout, totout - totin); | 472 | totin, totout, totout - totin); |
412 | 473 | if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) | |
413 | len += snprintf(buf+len, PAGE_SIZE-len, | 474 | goto buffer_done; |
414 | " abort %08x noxri %08x nondlp %08x qdepth %08x " | 475 | |
415 | "wqerr %08x err %08x\n", | 476 | scnprintf(tmp, sizeof(tmp), |
416 | atomic_read(&lport->xmt_fcp_abort), | 477 | "\tabort %08x noxri %08x nondlp %08x qdepth %08x " |
417 | atomic_read(&lport->xmt_fcp_noxri), | 478 | "wqerr %08x err %08x\n", |
418 | atomic_read(&lport->xmt_fcp_bad_ndlp), | 479 | atomic_read(&lport->xmt_fcp_abort), |
419 | atomic_read(&lport->xmt_fcp_qdepth), | 480 | atomic_read(&lport->xmt_fcp_noxri), |
420 | atomic_read(&lport->xmt_fcp_err), | 481 | atomic_read(&lport->xmt_fcp_bad_ndlp), |
421 | atomic_read(&lport->xmt_fcp_wqerr)); | 482 | atomic_read(&lport->xmt_fcp_qdepth), |
422 | 483 | atomic_read(&lport->xmt_fcp_err), | |
423 | len += snprintf(buf + len, PAGE_SIZE - len, | 484 | atomic_read(&lport->xmt_fcp_wqerr)); |
424 | "FCP CMPL: xb %08x Err %08x\n", | 485 | if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) |
425 | atomic_read(&lport->cmpl_fcp_xb), | 486 | goto buffer_done; |
426 | atomic_read(&lport->cmpl_fcp_err)); | 487 | |
488 | scnprintf(tmp, sizeof(tmp), | ||
489 | "FCP CMPL: xb %08x Err %08x\n", | ||
490 | atomic_read(&lport->cmpl_fcp_xb), | ||
491 | atomic_read(&lport->cmpl_fcp_err)); | ||
492 | strlcat(buf, tmp, PAGE_SIZE); | ||
493 | |||
494 | buffer_done: | ||
495 | len = strnlen(buf, PAGE_SIZE); | ||
496 | |||
497 | if (unlikely(len >= (PAGE_SIZE - 1))) { | ||
498 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME, | ||
499 | "6314 Catching potential buffer " | ||
500 | "overflow > PAGE_SIZE = %lu bytes\n", | ||
501 | PAGE_SIZE); | ||
502 | strlcpy(buf + PAGE_SIZE - 1 - | ||
503 | strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1), | ||
504 | LPFC_NVME_INFO_MORE_STR, | ||
505 | strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1) | ||
506 | + 1); | ||
507 | } | ||
508 | |||
427 | return len; | 509 | return len; |
428 | } | 510 | } |
429 | 511 | ||
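
Editor's note on the lpfc_nvme_info_show() rework above: the old code did "len += snprintf(buf + len, PAGE_SIZE - len, ...)", and because snprintf() returns the length it would have written, len could grow past PAGE_SIZE and corrupt the arithmetic. The new code formats each fragment into a small tmp with scnprintf() (which returns only what was actually stored) and appends it with strlcat(), bailing to buffer_done as soon as the return value reports truncation. The standalone sketch below mirrors that append-and-check pattern; snprintf() stands in for the kernel's scnprintf(), and xstrlcat is a local BSD-style strlcat since glibc lacks one.

#include <stdio.h>
#include <string.h>

#define PAGE 64		/* stand-in for PAGE_SIZE, tiny on purpose */

/* BSD-style strlcat: returns the length it tried to create */
static size_t xstrlcat(char *dst, const char *src, size_t size)
{
	size_t dlen = strnlen(dst, size);
	size_t slen = strlen(src);

	if (dlen == size)
		return size + slen;
	snprintf(dst + dlen, size - dlen, "%s", src);
	return dlen + slen;
}

int main(void)
{
	char buf[PAGE] = "";
	char tmp[32];
	int i;

	for (i = 0; i < 10; i++) {
		snprintf(tmp, sizeof(tmp), "counter %d = %08x\n", i, i * 7);
		if (xstrlcat(buf, tmp, sizeof(buf)) >= sizeof(buf))
			break;		/* like "goto buffer_done" */
	}
	printf("%s", buf);
	printf("kept %zu of %d bytes\n", strlen(buf), PAGE);
	return 0;
}
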
@@ -5836,6 +5918,24 @@ lpfc_get_host_speed(struct Scsi_Host *shost) | |||
5836 | fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; | 5918 | fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; |
5837 | break; | 5919 | break; |
5838 | } | 5920 | } |
5921 | } else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) { | ||
5922 | switch (phba->fc_linkspeed) { | ||
5923 | case LPFC_ASYNC_LINK_SPEED_10GBPS: | ||
5924 | fc_host_speed(shost) = FC_PORTSPEED_10GBIT; | ||
5925 | break; | ||
5926 | case LPFC_ASYNC_LINK_SPEED_25GBPS: | ||
5927 | fc_host_speed(shost) = FC_PORTSPEED_25GBIT; | ||
5928 | break; | ||
5929 | case LPFC_ASYNC_LINK_SPEED_40GBPS: | ||
5930 | fc_host_speed(shost) = FC_PORTSPEED_40GBIT; | ||
5931 | break; | ||
5932 | case LPFC_ASYNC_LINK_SPEED_100GBPS: | ||
5933 | fc_host_speed(shost) = FC_PORTSPEED_100GBIT; | ||
5934 | break; | ||
5935 | default: | ||
5936 | fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; | ||
5937 | break; | ||
5938 | } | ||
5839 | } else | 5939 | } else |
5840 | fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; | 5940 | fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; |
5841 | 5941 | ||
@@ -5891,7 +5991,6 @@ lpfc_get_stats(struct Scsi_Host *shost) | |||
5891 | struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; | 5991 | struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets; |
5892 | LPFC_MBOXQ_t *pmboxq; | 5992 | LPFC_MBOXQ_t *pmboxq; |
5893 | MAILBOX_t *pmb; | 5993 | MAILBOX_t *pmb; |
5894 | unsigned long seconds; | ||
5895 | int rc = 0; | 5994 | int rc = 0; |
5896 | 5995 | ||
5897 | /* | 5996 | /* |
@@ -5992,12 +6091,7 @@ lpfc_get_stats(struct Scsi_Host *shost) | |||
5992 | 6091 | ||
5993 | hs->dumped_frames = -1; | 6092 | hs->dumped_frames = -1; |
5994 | 6093 | ||
5995 | seconds = get_seconds(); | 6094 | hs->seconds_since_last_reset = ktime_get_seconds() - psli->stats_start; |
5996 | if (seconds < psli->stats_start) | ||
5997 | hs->seconds_since_last_reset = seconds + | ||
5998 | ((unsigned long)-1 - psli->stats_start); | ||
5999 | else | ||
6000 | hs->seconds_since_last_reset = seconds - psli->stats_start; | ||
6001 | 6095 | ||
6002 | mempool_free(pmboxq, phba->mbox_mem_pool); | 6096 | mempool_free(pmboxq, phba->mbox_mem_pool); |
6003 | 6097 | ||
@@ -6076,7 +6170,7 @@ lpfc_reset_stats(struct Scsi_Host *shost) | |||
6076 | else | 6170 | else |
6077 | lso->link_events = (phba->fc_eventTag >> 1); | 6171 | lso->link_events = (phba->fc_eventTag >> 1); |
6078 | 6172 | ||
6079 | psli->stats_start = get_seconds(); | 6173 | psli->stats_start = ktime_get_seconds(); |
6080 | 6174 | ||
6081 | mempool_free(pmboxq, phba->mbox_mem_pool); | 6175 | mempool_free(pmboxq, phba->mbox_mem_pool); |
6082 | 6176 | ||
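
Editor's note on the lpfc_get_stats()/lpfc_reset_stats() hunks above: get_seconds() returned a wall-clock unsigned long, so the driver carried a manual wrap check; ktime_get_seconds() returns a monotonic 64-bit count, making "now - stats_start" the elapsed time unconditionally. A standalone sketch follows, with CLOCK_MONOTONIC standing in for ktime_get_seconds().

#include <stdio.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>

static int64_t mono_seconds(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec;
}

int main(void)
{
	int64_t stats_start = mono_seconds();

	sleep(1);

	/* no wrap handling needed: this clock never goes backwards */
	printf("seconds since last reset: %lld\n",
	       (long long)(mono_seconds() - stats_start));
	return 0;
}
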
@@ -6454,6 +6548,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) | |||
6454 | phba->cfg_auto_imax = 0; | 6548 | phba->cfg_auto_imax = 0; |
6455 | phba->initial_imax = phba->cfg_fcp_imax; | 6549 | phba->initial_imax = phba->cfg_fcp_imax; |
6456 | 6550 | ||
6551 | phba->cfg_enable_pbde = 0; | ||
6552 | |||
6457 | /* A value of 0 means use the number of CPUs found in the system */ | 6553 | /* A value of 0 means use the number of CPUs found in the system */ |
6458 | if (phba->cfg_fcp_io_channel == 0) | 6554 | if (phba->cfg_fcp_io_channel == 0) |
6459 | phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu; | 6555 | phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu; |
diff --git a/drivers/scsi/lpfc/lpfc_attr.h b/drivers/scsi/lpfc/lpfc_attr.h index 931db52692f5..9659a8fff971 100644 --- a/drivers/scsi/lpfc/lpfc_attr.h +++ b/drivers/scsi/lpfc/lpfc_attr.h | |||
@@ -1,8 +1,8 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * | 4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * | 6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | 7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * | 8 | * www.broadcom.com * |
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index edb1a18a6414..90745feca808 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * | 4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | * Copyright (C) 2009-2015 Emulex. All rights reserved. * | 6 | * Copyright (C) 2009-2015 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | 7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * | 8 | * www.broadcom.com * |
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h index e7d95a4e8042..32347c87e3b4 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.h +++ b/drivers/scsi/lpfc/lpfc_bsg.h | |||
@@ -1,8 +1,8 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * | 4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | * Copyright (C) 2010-2015 Emulex. All rights reserved. * | 6 | * Copyright (C) 2010-2015 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | 7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * | 8 | * www.broadcom.com * |
diff --git a/drivers/scsi/lpfc/lpfc_compat.h b/drivers/scsi/lpfc/lpfc_compat.h index 6b32b0ae7506..43cf46a3a71f 100644 --- a/drivers/scsi/lpfc/lpfc_compat.h +++ b/drivers/scsi/lpfc/lpfc_compat.h | |||
@@ -1,8 +1,8 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * | 4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | * Copyright (C) 2004-2011 Emulex. All rights reserved. * | 6 | * Copyright (C) 2004-2011 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | 7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * | 8 | * www.broadcom.com * |
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 4ae9ba425e78..bea24bc4410a 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h | |||
@@ -2,7 +2,7 @@ | |||
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * | 4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * | 6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | 7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * | 8 | * www.broadcom.com * |
@@ -469,7 +469,6 @@ int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int); | |||
469 | void lpfc_start_fdiscs(struct lpfc_hba *phba); | 469 | void lpfc_start_fdiscs(struct lpfc_hba *phba); |
470 | struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t); | 470 | struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t); |
471 | struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t); | 471 | struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t); |
472 | #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code) | ||
473 | #define HBA_EVENT_RSCN 5 | 472 | #define HBA_EVENT_RSCN 5 |
474 | #define HBA_EVENT_LINK_UP 2 | 473 | #define HBA_EVENT_LINK_UP 2 |
475 | #define HBA_EVENT_LINK_DOWN 3 | 474 | #define HBA_EVENT_LINK_DOWN 3 |
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index d4a200ae5a6f..1cbdc892ff95 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * | 4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * | 6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | 7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * | 8 | * www.broadcom.com * |
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h index f32eaeb2225a..30efc7bf91bd 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.h +++ b/drivers/scsi/lpfc/lpfc_debugfs.h | |||
@@ -2,7 +2,7 @@ | |||
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * | 4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | * Copyright (C) 2007-2011 Emulex. All rights reserved. * | 6 | * Copyright (C) 2007-2011 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | 7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * | 8 | * www.broadcom.com * |
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index 5a7547f9d8d8..28e2b60fc5c0 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h | |||
@@ -1,8 +1,8 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * | 4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | * Copyright (C) 2004-2013 Emulex. All rights reserved. * | 6 | * Copyright (C) 2004-2013 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | 7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * | 8 | * www.broadcom.com * |
@@ -150,6 +150,9 @@ struct lpfc_node_rrq { | |||
150 | unsigned long rrq_stop_time; | 150 | unsigned long rrq_stop_time; |
151 | }; | 151 | }; |
152 | 152 | ||
153 | #define lpfc_ndlp_check_qdepth(phba, ndlp) \ | ||
154 | (ndlp->cmd_qdepth < phba->sli4_hba.max_cfg_param.max_xri) | ||
155 | |||
153 | /* Defines for nlp_flag (uint32) */ | 156 | /* Defines for nlp_flag (uint32) */ |
154 | #define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */ | 157 | #define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */ |
155 | #define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */ | 158 | #define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */ |
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 6d84a10fef07..4dda969e947c 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * | 4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * | 6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | 7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * | 8 | * www.broadcom.com * |
@@ -5640,8 +5640,9 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
5640 | " mbx status x%x\n", | 5640 | " mbx status x%x\n", |
5641 | shdr_status, shdr_add_status, mb->mbxStatus); | 5641 | shdr_status, shdr_add_status, mb->mbxStatus); |
5642 | 5642 | ||
5643 | if (mb->mbxStatus && !(shdr_status && | 5643 | if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status || |
5644 | shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)) { | 5644 | (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) || |
5645 | (shdr_add_status == ADD_STATUS_INVALID_REQUEST)) { | ||
5645 | mempool_free(pmb, phba->mbox_mem_pool); | 5646 | mempool_free(pmb, phba->mbox_mem_pool); |
5646 | goto error; | 5647 | goto error; |
5647 | } | 5648 | } |
@@ -5661,6 +5662,7 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
5661 | lcb_res = (struct fc_lcb_res_frame *) | 5662 | lcb_res = (struct fc_lcb_res_frame *) |
5662 | (((struct lpfc_dmabuf *)elsiocb->context2)->virt); | 5663 | (((struct lpfc_dmabuf *)elsiocb->context2)->virt); |
5663 | 5664 | ||
5665 | memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame)); | ||
5664 | icmd = &elsiocb->iocb; | 5666 | icmd = &elsiocb->iocb; |
5665 | icmd->ulpContext = lcb_context->rx_id; | 5667 | icmd->ulpContext = lcb_context->rx_id; |
5666 | icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; | 5668 | icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id; |
@@ -5669,7 +5671,9 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
5669 | *((uint32_t *)(pcmd)) = ELS_CMD_ACC; | 5671 | *((uint32_t *)(pcmd)) = ELS_CMD_ACC; |
5670 | lcb_res->lcb_sub_command = lcb_context->sub_command; | 5672 | lcb_res->lcb_sub_command = lcb_context->sub_command; |
5671 | lcb_res->lcb_type = lcb_context->type; | 5673 | lcb_res->lcb_type = lcb_context->type; |
5674 | lcb_res->capability = lcb_context->capability; | ||
5672 | lcb_res->lcb_frequency = lcb_context->frequency; | 5675 | lcb_res->lcb_frequency = lcb_context->frequency; |
5676 | lcb_res->lcb_duration = lcb_context->duration; | ||
5673 | elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; | 5677 | elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; |
5674 | phba->fc_stat.elsXmitACC++; | 5678 | phba->fc_stat.elsXmitACC++; |
5675 | rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); | 5679 | rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); |
@@ -5712,6 +5716,7 @@ lpfc_sli4_set_beacon(struct lpfc_vport *vport, | |||
5712 | uint32_t beacon_state) | 5716 | uint32_t beacon_state) |
5713 | { | 5717 | { |
5714 | struct lpfc_hba *phba = vport->phba; | 5718 | struct lpfc_hba *phba = vport->phba; |
5719 | union lpfc_sli4_cfg_shdr *cfg_shdr; | ||
5715 | LPFC_MBOXQ_t *mbox = NULL; | 5720 | LPFC_MBOXQ_t *mbox = NULL; |
5716 | uint32_t len; | 5721 | uint32_t len; |
5717 | int rc; | 5722 | int rc; |
@@ -5720,6 +5725,7 @@ lpfc_sli4_set_beacon(struct lpfc_vport *vport, | |||
5720 | if (!mbox) | 5725 | if (!mbox) |
5721 | return 1; | 5726 | return 1; |
5722 | 5727 | ||
5728 | cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; | ||
5723 | len = sizeof(struct lpfc_mbx_set_beacon_config) - | 5729 | len = sizeof(struct lpfc_mbx_set_beacon_config) - |
5724 | sizeof(struct lpfc_sli4_cfg_mhdr); | 5730 | sizeof(struct lpfc_sli4_cfg_mhdr); |
5725 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, | 5731 | lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON, |
@@ -5732,8 +5738,40 @@ lpfc_sli4_set_beacon(struct lpfc_vport *vport, | |||
5732 | phba->sli4_hba.physical_port); | 5738 | phba->sli4_hba.physical_port); |
5733 | bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, | 5739 | bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config, |
5734 | beacon_state); | 5740 | beacon_state); |
5735 | bf_set(lpfc_mbx_set_beacon_port_type, &mbox->u.mqe.un.beacon_config, 1); | 5741 | mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */ |
5736 | bf_set(lpfc_mbx_set_beacon_duration, &mbox->u.mqe.un.beacon_config, 0); | 5742 | |
5743 | /* | ||
5744 | * Check bv1s bit before issuing the mailbox | ||
5745 | * if bv1s == 1, LCB V1 supported | ||
5746 | * else, LCB V0 supported | ||
5747 | */ | ||
5748 | |||
5749 | if (phba->sli4_hba.pc_sli4_params.bv1s) { | ||
5750 | /* COMMON_SET_BEACON_CONFIG_V1 */ | ||
5751 | cfg_shdr->request.word9 = BEACON_VERSION_V1; | ||
5752 | lcb_context->capability |= LCB_CAPABILITY_DURATION; | ||
5753 | bf_set(lpfc_mbx_set_beacon_port_type, | ||
5754 | &mbox->u.mqe.un.beacon_config, 0); | ||
5755 | bf_set(lpfc_mbx_set_beacon_duration_v1, | ||
5756 | &mbox->u.mqe.un.beacon_config, | ||
5757 | be16_to_cpu(lcb_context->duration)); | ||
5758 | } else { | ||
5759 | /* COMMON_SET_BEACON_CONFIG_V0 */ | ||
5760 | if (be16_to_cpu(lcb_context->duration) != 0) { | ||
5761 | mempool_free(mbox, phba->mbox_mem_pool); | ||
5762 | return 1; | ||
5763 | } | ||
5764 | cfg_shdr->request.word9 = BEACON_VERSION_V0; | ||
5765 | lcb_context->capability &= ~(LCB_CAPABILITY_DURATION); | ||
5766 | bf_set(lpfc_mbx_set_beacon_state, | ||
5767 | &mbox->u.mqe.un.beacon_config, beacon_state); | ||
5768 | bf_set(lpfc_mbx_set_beacon_port_type, | ||
5769 | &mbox->u.mqe.un.beacon_config, 1); | ||
5770 | bf_set(lpfc_mbx_set_beacon_duration, | ||
5771 | &mbox->u.mqe.un.beacon_config, | ||
5772 | be16_to_cpu(lcb_context->duration)); | ||
5773 | } | ||
5774 | |||
5737 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); | 5775 | rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); |
5738 | if (rc == MBX_NOT_FINISHED) { | 5776 | if (rc == MBX_NOT_FINISHED) { |
5739 | mempool_free(mbox, phba->mbox_mem_pool); | 5777 | mempool_free(mbox, phba->mbox_mem_pool); |
@@ -5784,24 +5822,16 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
5784 | beacon->lcb_frequency, | 5822 | beacon->lcb_frequency, |
5785 | be16_to_cpu(beacon->lcb_duration)); | 5823 | be16_to_cpu(beacon->lcb_duration)); |
5786 | 5824 | ||
5787 | if (phba->sli_rev < LPFC_SLI_REV4 || | ||
5788 | (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != | ||
5789 | LPFC_SLI_INTF_IF_TYPE_2)) { | ||
5790 | rjt_err = LSRJT_CMD_UNSUPPORTED; | ||
5791 | goto rjt; | ||
5792 | } | ||
5793 | |||
5794 | if (phba->hba_flag & HBA_FCOE_MODE) { | ||
5795 | rjt_err = LSRJT_CMD_UNSUPPORTED; | ||
5796 | goto rjt; | ||
5797 | } | ||
5798 | if (beacon->lcb_sub_command != LPFC_LCB_ON && | 5825 | if (beacon->lcb_sub_command != LPFC_LCB_ON && |
5799 | beacon->lcb_sub_command != LPFC_LCB_OFF) { | 5826 | beacon->lcb_sub_command != LPFC_LCB_OFF) { |
5800 | rjt_err = LSRJT_CMD_UNSUPPORTED; | 5827 | rjt_err = LSRJT_CMD_UNSUPPORTED; |
5801 | goto rjt; | 5828 | goto rjt; |
5802 | } | 5829 | } |
5803 | if (beacon->lcb_sub_command == LPFC_LCB_ON && | 5830 | |
5804 | be16_to_cpu(beacon->lcb_duration) != 0) { | 5831 | if (phba->sli_rev < LPFC_SLI_REV4 || |
5832 | phba->hba_flag & HBA_FCOE_MODE || | ||
5833 | (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) < | ||
5834 | LPFC_SLI_INTF_IF_TYPE_2)) { | ||
5805 | rjt_err = LSRJT_CMD_UNSUPPORTED; | 5835 | rjt_err = LSRJT_CMD_UNSUPPORTED; |
5806 | goto rjt; | 5836 | goto rjt; |
5807 | } | 5837 | } |
@@ -5814,8 +5844,10 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, | |||
5814 | 5844 | ||
5815 | state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0; | 5845 | state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0; |
5816 | lcb_context->sub_command = beacon->lcb_sub_command; | 5846 | lcb_context->sub_command = beacon->lcb_sub_command; |
5847 | lcb_context->capability = 0; | ||
5817 | lcb_context->type = beacon->lcb_type; | 5848 | lcb_context->type = beacon->lcb_type; |
5818 | lcb_context->frequency = beacon->lcb_frequency; | 5849 | lcb_context->frequency = beacon->lcb_frequency; |
5850 | lcb_context->duration = beacon->lcb_duration; | ||
5819 | lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id; | 5851 | lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id; |
5820 | lcb_context->rx_id = cmdiocb->iocb.ulpContext; | 5852 | lcb_context->rx_id = cmdiocb->iocb.ulpContext; |
5821 | lcb_context->ndlp = lpfc_nlp_get(ndlp); | 5853 | lcb_context->ndlp = lpfc_nlp_get(ndlp); |
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 2fef54fab86d..eb71877f12f8 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * | 4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * | 6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | 7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * | 8 | * www.broadcom.com * |
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 08a3f1520159..009aa0eee040 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h | |||
@@ -2,7 +2,7 @@ | |||
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * | 4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * | 6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | 7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * | 8 | * www.broadcom.com * |
@@ -1065,14 +1065,17 @@ typedef struct _ELS_PKT { /* Structure is in Big Endian format */ | |||
1065 | struct fc_lcb_request_frame { | 1065 | struct fc_lcb_request_frame { |
1066 | uint32_t lcb_command; /* ELS command opcode (0x81) */ | 1066 | uint32_t lcb_command; /* ELS command opcode (0x81) */ |
1067 | uint8_t lcb_sub_command;/* LCB Payload Word 1, bit 24:31 */ | 1067 | uint8_t lcb_sub_command;/* LCB Payload Word 1, bit 24:31 */ |
1068 | #define LPFC_LCB_ON 0x1 | 1068 | #define LPFC_LCB_ON 0x1 |
1069 | #define LPFC_LCB_OFF 0x2 | 1069 | #define LPFC_LCB_OFF 0x2 |
1070 | uint8_t reserved[3]; | 1070 | uint8_t reserved[2]; |
1071 | 1071 | uint8_t capability; /* LCB Payload Word 1, bit 0:7 */ | |
1072 | uint8_t lcb_type; /* LCB Payload Word 2, bit 24:31 */ | 1072 | uint8_t lcb_type; /* LCB Payload Word 2, bit 24:31 */ |
1073 | #define LPFC_LCB_GREEN 0x1 | 1073 | #define LPFC_LCB_GREEN 0x1 |
1074 | #define LPFC_LCB_AMBER 0x2 | 1074 | #define LPFC_LCB_AMBER 0x2 |
1075 | uint8_t lcb_frequency; /* LCB Payload Word 2, bit 16:23 */ | 1075 | uint8_t lcb_frequency; /* LCB Payload Word 2, bit 16:23 */ |
1076 | #define LCB_CAPABILITY_DURATION 1 | ||
1077 | #define BEACON_VERSION_V1 1 | ||
1078 | #define BEACON_VERSION_V0 0 | ||
1076 | uint16_t lcb_duration; /* LCB Payload Word 2, bit 15:0 */ | 1079 | uint16_t lcb_duration; /* LCB Payload Word 2, bit 15:0 */ |
1077 | }; | 1080 | }; |
1078 | 1081 | ||
@@ -1082,7 +1085,8 @@ struct fc_lcb_request_frame { | |||
1082 | struct fc_lcb_res_frame { | 1085 | struct fc_lcb_res_frame { |
1083 | uint32_t lcb_ls_acc; /* Acceptance of LCB request (0x02) */ | 1086 | uint32_t lcb_ls_acc; /* Acceptance of LCB request (0x02) */ |
1084 | uint8_t lcb_sub_command;/* LCB Payload Word 1, bit 24:31 */ | 1087 | uint8_t lcb_sub_command;/* LCB Payload Word 1, bit 24:31 */ |
1085 | uint8_t reserved[3]; | 1088 | uint8_t reserved[2]; |
1089 | uint8_t capability; /* LCB Payload Word 1, bit 0:7 */ | ||
1086 | uint8_t lcb_type; /* LCB Payload Word 2, bit 24:31 */ | 1090 | uint8_t lcb_type; /* LCB Payload Word 2, bit 24:31 */ |
1087 | uint8_t lcb_frequency; /* LCB Payload Word 2, bit 16:23 */ | 1091 | uint8_t lcb_frequency; /* LCB Payload Word 2, bit 16:23 */ |
1088 | uint16_t lcb_duration; /* LCB Payload Word 2, bit 15:0 */ | 1092 | uint16_t lcb_duration; /* LCB Payload Word 2, bit 15:0 */ |
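Editor's note on the lpfc_hw.h change above: the three reserved bytes in the LCB request and response frames shrink to two so that word 1 can also carry a capability byte, and the new LCB_CAPABILITY_DURATION / BEACON_VERSION_V0 / BEACON_VERSION_V1 defines distinguish a V1 beacon (which honours the 16-bit duration) from V0. The snippet below is only an illustrative, standalone sketch of how such a payload might be interpreted; the struct and symbol names are invented for the example and endianness handling is omitted, so this is not the lpfc driver's code.

#include <stdint.h>
#include <stdio.h>

/* Illustrative mirror of an LCB request payload (field order only;
 * wire byte-swapping is deliberately left out of this sketch). */
struct lcb_request {
	uint32_t command;	/* ELS opcode word */
	uint8_t  sub_command;	/* word 1, bits 31:24 */
	uint8_t  reserved[2];
	uint8_t  capability;	/* word 1, bits 7:0 */
	uint8_t  type;		/* word 2, bits 31:24 */
	uint8_t  frequency;	/* word 2, bits 23:16 */
	uint16_t duration;	/* word 2, bits 15:0 */
};

#define EX_CAPABILITY_DURATION 1	/* assumed meaning of capability bit 0 */

/* Return the beacon duration to use: 0 (beacon until switched off) when
 * the peer does not advertise duration support, the requested value when
 * it does.  Purely a sketch of the V0/V1 distinction. */
static uint16_t effective_duration(const struct lcb_request *req)
{
	if (req->capability & EX_CAPABILITY_DURATION)
		return req->duration;
	return 0;
}

int main(void)
{
	struct lcb_request req = {
		.sub_command = 0x1,	/* beacon on */
		.capability  = EX_CAPABILITY_DURATION,
		.duration    = 60,
	};

	printf("effective duration: %u\n", effective_duration(&req));
	return 0;
}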
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index f43f0bacb77a..083f8c8706e5 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h | |||
@@ -1790,9 +1790,12 @@ struct lpfc_mbx_set_beacon_config { | |||
1790 | #define lpfc_mbx_set_beacon_duration_SHIFT 16 | 1790 | #define lpfc_mbx_set_beacon_duration_SHIFT 16 |
1791 | #define lpfc_mbx_set_beacon_duration_MASK 0x000000FF | 1791 | #define lpfc_mbx_set_beacon_duration_MASK 0x000000FF |
1792 | #define lpfc_mbx_set_beacon_duration_WORD word4 | 1792 | #define lpfc_mbx_set_beacon_duration_WORD word4 |
1793 | #define lpfc_mbx_set_beacon_status_duration_SHIFT 24 | 1793 | |
1794 | #define lpfc_mbx_set_beacon_status_duration_MASK 0x000000FF | 1794 | /* COMMON_SET_BEACON_CONFIG_V1 */ |
1795 | #define lpfc_mbx_set_beacon_status_duration_WORD word4 | 1795 | #define lpfc_mbx_set_beacon_duration_v1_SHIFT 16 |
1796 | #define lpfc_mbx_set_beacon_duration_v1_MASK 0x0000FFFF | ||
1797 | #define lpfc_mbx_set_beacon_duration_v1_WORD word4 | ||
1798 | uint32_t word5; /* RESERVED */ | ||
1796 | }; | 1799 | }; |
1797 | 1800 | ||
1798 | struct lpfc_id_range { | 1801 | struct lpfc_id_range { |
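The *_SHIFT/*_MASK/*_WORD triplets added in this header follow the driver's usual bit-field convention: a field named X in mailbox word N is read with bf_get(X, ptr) and written with bf_set(X, ptr, val), which expand to shift-and-mask operations on the named word. The fragment below is a simplified, self-contained re-creation of that convention, used here to show how a 16-bit duration field at bits 31:16 of word4 (the V1 layout added above) would be accessed; the macro bodies are stand-ins, not the driver's actual bf_get/bf_set definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the driver's bf_set()/bf_get() helpers. */
#define bf_set(name, ptr, value)					\
	((ptr)->name##_WORD = ((ptr)->name##_WORD &			\
		~(name##_MASK << name##_SHIFT)) |			\
		(((value) & name##_MASK) << name##_SHIFT))

#define bf_get(name, ptr)						\
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

struct beacon_config {
	uint32_t word4;
	uint32_t word5;
};

/* 16-bit duration field at bits 31:16 of word4 (V1-style layout). */
#define beacon_duration_v1_SHIFT 16
#define beacon_duration_v1_MASK  0x0000FFFF
#define beacon_duration_v1_WORD  word4

int main(void)
{
	struct beacon_config cfg = { 0, 0 };

	bf_set(beacon_duration_v1, &cfg, 300);
	printf("word4 = 0x%08x, duration = %u\n",
	       cfg.word4, bf_get(beacon_duration_v1, &cfg));
	return 0;
}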
@@ -2243,6 +2246,7 @@ struct lpfc_mbx_redisc_fcf_tbl { | |||
2243 | */ | 2246 | */ |
2244 | #define ADD_STATUS_OPERATION_ALREADY_ACTIVE 0x67 | 2247 | #define ADD_STATUS_OPERATION_ALREADY_ACTIVE 0x67 |
2245 | #define ADD_STATUS_FW_NOT_SUPPORTED 0xEB | 2248 | #define ADD_STATUS_FW_NOT_SUPPORTED 0xEB |
2249 | #define ADD_STATUS_INVALID_REQUEST 0x4B | ||
2246 | 2250 | ||
2247 | struct lpfc_mbx_sli4_config { | 2251 | struct lpfc_mbx_sli4_config { |
2248 | struct mbox_header header; | 2252 | struct mbox_header header; |
@@ -3392,7 +3396,41 @@ struct lpfc_sli4_parameters { | |||
3392 | #define cfg_nosr_SHIFT 9 | 3396 | #define cfg_nosr_SHIFT 9 |
3393 | #define cfg_nosr_MASK 0x00000001 | 3397 | #define cfg_nosr_MASK 0x00000001 |
3394 | #define cfg_nosr_WORD word19 | 3398 | #define cfg_nosr_WORD word19 |
3395 | #define LPFC_NODELAY_MAX_IO 32 | 3399 | |
3400 | #define cfg_bv1s_SHIFT 10 | ||
3401 | #define cfg_bv1s_MASK 0x00000001 | ||
3402 | #define cfg_bv1s_WORD word19 | ||
3403 | |||
3404 | uint32_t word20; | ||
3405 | #define cfg_max_tow_xri_SHIFT 0 | ||
3406 | #define cfg_max_tow_xri_MASK 0x0000ffff | ||
3407 | #define cfg_max_tow_xri_WORD word20 | ||
3408 | |||
3409 | uint32_t word21; /* RESERVED */ | ||
3410 | uint32_t word22; /* RESERVED */ | ||
3411 | uint32_t word23; /* RESERVED */ | ||
3412 | |||
3413 | uint32_t word24; | ||
3414 | #define cfg_frag_field_offset_SHIFT 0 | ||
3415 | #define cfg_frag_field_offset_MASK 0x0000ffff | ||
3416 | #define cfg_frag_field_offset_WORD word24 | ||
3417 | |||
3418 | #define cfg_frag_field_size_SHIFT 16 | ||
3419 | #define cfg_frag_field_size_MASK 0x0000ffff | ||
3420 | #define cfg_frag_field_size_WORD word24 | ||
3421 | |||
3422 | uint32_t word25; | ||
3423 | #define cfg_sgl_field_offset_SHIFT 0 | ||
3424 | #define cfg_sgl_field_offset_MASK 0x0000ffff | ||
3425 | #define cfg_sgl_field_offset_WORD word25 | ||
3426 | |||
3427 | #define cfg_sgl_field_size_SHIFT 16 | ||
3428 | #define cfg_sgl_field_size_MASK 0x0000ffff | ||
3429 | #define cfg_sgl_field_size_WORD word25 | ||
3430 | |||
3431 | uint32_t word26; /* Chain SGE initial value LOW */ | ||
3432 | uint32_t word27; /* Chain SGE initial value HIGH */ | ||
3433 | #define LPFC_NODELAY_MAX_IO 32 | ||
3396 | }; | 3434 | }; |
3397 | 3435 | ||
3398 | #define LPFC_SET_UE_RECOVERY 0x10 | 3436 | #define LPFC_SET_UE_RECOVERY 0x10 |
diff --git a/drivers/scsi/lpfc/lpfc_ids.h b/drivers/scsi/lpfc/lpfc_ids.h index 07ee34017d88..d48414e295a0 100644 --- a/drivers/scsi/lpfc/lpfc_ids.h +++ b/drivers/scsi/lpfc/lpfc_ids.h | |||
@@ -2,7 +2,7 @@ | |||
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * | 4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * | 6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | 7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * | 8 | * www.broadcom.com * |
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 52cae87da0d2..f3cae733ae2d 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
@@ -10387,6 +10387,11 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) | |||
10387 | while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl || | 10387 | while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl || |
10388 | !nvmet_xri_cmpl) { | 10388 | !nvmet_xri_cmpl) { |
10389 | if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { | 10389 | if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { |
10390 | if (!nvmet_xri_cmpl) | ||
10391 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
10392 | "6424 NVMET XRI exchange busy " | ||
10393 | "wait time: %d seconds.\n", | ||
10394 | wait_time/1000); | ||
10390 | if (!nvme_xri_cmpl) | 10395 | if (!nvme_xri_cmpl) |
10391 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 10396 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
10392 | "6100 NVME XRI exchange busy " | 10397 | "6100 NVME XRI exchange busy " |
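The hunk above adds an NVMET-specific message at the same escalation point that already reports stuck NVME, ELS and FCP exchanges, so once the wait crosses the timeout the log names every class of XRI that is still busy. As a rough, generic illustration of the pattern (poll a set of completion flags, lengthen the sleep after a threshold, and name the laggards once the threshold is crossed), here is a small standalone sketch; the timing constants and resource names are made up and are not the driver's values.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define WAIT_FAST_MS  10	/* short poll interval, made-up value */
#define WAIT_SLOW_MS  100	/* longer interval once we escalate */
#define WAIT_TMO_MS   100	/* escalation threshold, made-up value */

int main(void)
{
	/* Stand-ins for the per-class "all exchanges drained" flags. */
	bool fcp_done = false, els_done = false, nvmet_done = false;
	unsigned int waited_ms = 0, interval_ms = WAIT_FAST_MS;

	while (!fcp_done || !els_done || !nvmet_done) {
		if (waited_ms > WAIT_TMO_MS) {
			/* Name every class that is still outstanding. */
			if (!nvmet_done)
				fprintf(stderr, "NVMET busy after %u ms\n", waited_ms);
			if (!fcp_done)
				fprintf(stderr, "FCP busy after %u ms\n", waited_ms);
			if (!els_done)
				fprintf(stderr, "ELS busy after %u ms\n", waited_ms);
			interval_ms = WAIT_SLOW_MS;	/* back off */
		}
		usleep(interval_ms * 1000);
		waited_ms += interval_ms;

		/* In the driver these would be re-read from the abort lists;
		 * here we simply let the loop terminate for the demo. */
		fcp_done = els_done = nvmet_done = (waited_ms >= 300);
	}
	printf("all exchange classes drained after %u ms\n", waited_ms);
	return 0;
}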
@@ -10639,6 +10644,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
10639 | sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters); | 10644 | sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters); |
10640 | sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); | 10645 | sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); |
10641 | sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); | 10646 | sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); |
10647 | sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters); | ||
10642 | sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, | 10648 | sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, |
10643 | mbx_sli4_parameters); | 10649 | mbx_sli4_parameters); |
10644 | sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); | 10650 | sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); |
@@ -10668,18 +10674,10 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
10668 | phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; | 10674 | phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP; |
10669 | } | 10675 | } |
10670 | 10676 | ||
10671 | /* Only embed PBDE for if_type 6 */ | 10677 | /* Only embed PBDE for if_type 6, PBDE support requires xib be set */ |
10672 | if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == | 10678 | if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != |
10673 | LPFC_SLI_INTF_IF_TYPE_6) { | 10679 | LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters))) |
10674 | phba->fcp_embed_pbde = 1; | 10680 | phba->cfg_enable_pbde = 0; |
10675 | phba->nvme_embed_pbde = 1; | ||
10676 | } | ||
10677 | |||
10678 | /* PBDE support requires xib be set */ | ||
10679 | if (!bf_get(cfg_xib, mbx_sli4_parameters)) { | ||
10680 | phba->fcp_embed_pbde = 0; | ||
10681 | phba->nvme_embed_pbde = 0; | ||
10682 | } | ||
10683 | 10681 | ||
10684 | /* | 10682 | /* |
10685 | * To support Suppress Response feature we must satisfy 3 conditions. | 10683 | * To support Suppress Response feature we must satisfy 3 conditions. |
@@ -10713,10 +10711,10 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) | |||
10713 | phba->fcp_embed_io = 0; | 10711 | phba->fcp_embed_io = 0; |
10714 | 10712 | ||
10715 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, | 10713 | lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME, |
10716 | "6422 XIB %d: FCP %d %d NVME %d %d %d %d\n", | 10714 | "6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n", |
10717 | bf_get(cfg_xib, mbx_sli4_parameters), | 10715 | bf_get(cfg_xib, mbx_sli4_parameters), |
10718 | phba->fcp_embed_pbde, phba->fcp_embed_io, | 10716 | phba->cfg_enable_pbde, |
10719 | phba->nvme_support, phba->nvme_embed_pbde, | 10717 | phba->fcp_embed_io, phba->nvme_support, |
10720 | phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); | 10718 | phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp); |
10721 | 10719 | ||
10722 | if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == | 10720 | if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == |
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index 3b654ad08d1f..ea10f03437f5 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h | |||
@@ -1,8 +1,8 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * | 4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | * Copyright (C) 2004-2009 Emulex. All rights reserved. * | 6 | * Copyright (C) 2004-2009 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | 7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * | 8 | * www.broadcom.com * |
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 47c02da11f01..deb094fdbb79 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * | 4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * | 6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | 7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * | 8 | * www.broadcom.com * |
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 0758edb9dfe2..9c22a2c93462 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * | 4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | * Copyright (C) 2004-2014 Emulex. All rights reserved. * | 6 | * Copyright (C) 2004-2014 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | 7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * | 8 | * www.broadcom.com * |
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h index b93e78f671fb..95d60ab5ebf9 100644 --- a/drivers/scsi/lpfc/lpfc_nl.h +++ b/drivers/scsi/lpfc/lpfc_nl.h | |||
@@ -1,8 +1,8 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * | 4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | * Copyright (C) 2010 Emulex. All rights reserved. * | 6 | * Copyright (C) 2010 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | 7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * | 8 | * www.broadcom.com * |
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 1a803975bcbc..bd9bce9d9974 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * | 4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * | 6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | 7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * | 8 | * www.broadcom.com * |
@@ -1062,6 +1062,9 @@ lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
1062 | { | 1062 | { |
1063 | struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; | 1063 | struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; |
1064 | 1064 | ||
1065 | /* Retrieve RPI from LOGO IOCB. RPI is used for CMD_ABORT_XRI_CN */ | ||
1066 | if (vport->phba->sli_rev == LPFC_SLI_REV3) | ||
1067 | ndlp->nlp_rpi = cmdiocb->iocb.ulpIoTag; | ||
1065 | /* software abort outstanding PLOGI */ | 1068 | /* software abort outstanding PLOGI */ |
1066 | lpfc_els_abort(vport->phba, ndlp); | 1069 | lpfc_els_abort(vport->phba, ndlp); |
1067 | 1070 | ||
@@ -1982,12 +1985,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, | |||
1982 | if (bf_get_be32(prli_disc, nvpr)) | 1985 | if (bf_get_be32(prli_disc, nvpr)) |
1983 | ndlp->nlp_type |= NLP_NVME_DISCOVERY; | 1986 | ndlp->nlp_type |= NLP_NVME_DISCOVERY; |
1984 | 1987 | ||
1985 | /* This node is an NVME target. Adjust the command | ||
1986 | * queue depth on this node to not exceed the available | ||
1987 | * xris. | ||
1988 | */ | ||
1989 | ndlp->cmd_qdepth = phba->sli4_hba.nvme_xri_max; | ||
1990 | |||
1991 | /* | 1988 | /* |
1992 | * If prli_fba is set, the Target supports FirstBurst. | 1989 | * If prli_fba is set, the Target supports FirstBurst. |
1993 | * If prli_fb_sz is 0, the FirstBurst size is unlimited, | 1990 | * If prli_fb_sz is 0, the FirstBurst size is unlimited, |
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 76a5a99605aa..028462e5994d 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c | |||
@@ -1135,9 +1135,6 @@ out_err: | |||
1135 | else | 1135 | else |
1136 | lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY; | 1136 | lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY; |
1137 | 1137 | ||
1138 | if (ndlp && NLP_CHK_NODE_ACT(ndlp)) | ||
1139 | atomic_dec(&ndlp->cmd_pending); | ||
1140 | |||
1141 | /* Update stats and complete the IO. There is | 1138 | /* Update stats and complete the IO. There is |
1142 | * no need for dma unprep because the nvme_transport | 1139 | * no need for dma unprep because the nvme_transport |
1143 | * owns the dma address. | 1140 | * owns the dma address. |
@@ -1279,6 +1276,8 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, | |||
1279 | /* Word 9 */ | 1276 | /* Word 9 */ |
1280 | bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag); | 1277 | bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag); |
1281 | 1278 | ||
1279 | /* Words 13 14 15 are for PBDE support */ | ||
1280 | |||
1282 | pwqeq->vport = vport; | 1281 | pwqeq->vport = vport; |
1283 | return 0; | 1282 | return 0; |
1284 | } | 1283 | } |
@@ -1378,7 +1377,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport, | |||
1378 | data_sg = sg_next(data_sg); | 1377 | data_sg = sg_next(data_sg); |
1379 | sgl++; | 1378 | sgl++; |
1380 | } | 1379 | } |
1381 | if (phba->nvme_embed_pbde) { | 1380 | if (phba->cfg_enable_pbde) { |
1382 | /* Use PBDE support for first SGL only, offset == 0 */ | 1381 | /* Use PBDE support for first SGL only, offset == 0 */ |
1383 | /* Words 13-15 */ | 1382 | /* Words 13-15 */ |
1384 | bde = (struct ulp_bde64 *) | 1383 | bde = (struct ulp_bde64 *) |
@@ -1394,10 +1393,8 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport, | |||
1394 | memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3)); | 1393 | memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3)); |
1395 | bf_set(wqe_pbde, &wqe->generic.wqe_com, 0); | 1394 | bf_set(wqe_pbde, &wqe->generic.wqe_com, 0); |
1396 | } | 1395 | } |
1397 | } else { | ||
1398 | bf_set(wqe_pbde, &wqe->generic.wqe_com, 0); | ||
1399 | memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3)); | ||
1400 | 1396 | ||
1397 | } else { | ||
1401 | /* For this clause to be valid, the payload_length | 1398 | /* For this clause to be valid, the payload_length |
1402 | * and sg_cnt must zero. | 1399 | * and sg_cnt must zero. |
1403 | */ | 1400 | */ |
@@ -1546,17 +1543,19 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, | |||
1546 | /* The node is shared with FCP IO, make sure the IO pending count does | 1543 | /* The node is shared with FCP IO, make sure the IO pending count does |
1547 | * not exceed the programmed depth. | 1544 | * not exceed the programmed depth. |
1548 | */ | 1545 | */ |
1549 | if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) && | 1546 | if (lpfc_ndlp_check_qdepth(phba, ndlp)) { |
1550 | !expedite) { | 1547 | if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) && |
1551 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, | 1548 | !expedite) { |
1552 | "6174 Fail IO, ndlp qdepth exceeded: " | 1549 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, |
1553 | "idx %d DID %x pend %d qdepth %d\n", | 1550 | "6174 Fail IO, ndlp qdepth exceeded: " |
1554 | lpfc_queue_info->index, ndlp->nlp_DID, | 1551 | "idx %d DID %x pend %d qdepth %d\n", |
1555 | atomic_read(&ndlp->cmd_pending), | 1552 | lpfc_queue_info->index, ndlp->nlp_DID, |
1556 | ndlp->cmd_qdepth); | 1553 | atomic_read(&ndlp->cmd_pending), |
1557 | atomic_inc(&lport->xmt_fcp_qdepth); | 1554 | ndlp->cmd_qdepth); |
1558 | ret = -EBUSY; | 1555 | atomic_inc(&lport->xmt_fcp_qdepth); |
1559 | goto out_fail; | 1556 | ret = -EBUSY; |
1557 | goto out_fail; | ||
1558 | } | ||
1560 | } | 1559 | } |
1561 | 1560 | ||
1562 | lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite); | 1561 | lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite); |
@@ -1614,8 +1613,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, | |||
1614 | goto out_free_nvme_buf; | 1613 | goto out_free_nvme_buf; |
1615 | } | 1614 | } |
1616 | 1615 | ||
1617 | atomic_inc(&ndlp->cmd_pending); | ||
1618 | |||
1619 | lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n", | 1616 | lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n", |
1620 | lpfc_ncmd->cur_iocbq.sli4_xritag, | 1617 | lpfc_ncmd->cur_iocbq.sli4_xritag, |
1621 | lpfc_queue_info->index, ndlp->nlp_DID); | 1618 | lpfc_queue_info->index, ndlp->nlp_DID); |
@@ -1623,7 +1620,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, | |||
1623 | ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq); | 1620 | ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq); |
1624 | if (ret) { | 1621 | if (ret) { |
1625 | atomic_inc(&lport->xmt_fcp_wqerr); | 1622 | atomic_inc(&lport->xmt_fcp_wqerr); |
1626 | atomic_dec(&ndlp->cmd_pending); | ||
1627 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, | 1623 | lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, |
1628 | "6113 Fail IO, Could not issue WQE err %x " | 1624 | "6113 Fail IO, Could not issue WQE err %x " |
1629 | "sid: x%x did: x%x oxid: x%x\n", | 1625 | "sid: x%x did: x%x oxid: x%x\n", |
@@ -2378,6 +2374,11 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, | |||
2378 | lpfc_ncmd = lpfc_nvme_buf(phba); | 2374 | lpfc_ncmd = lpfc_nvme_buf(phba); |
2379 | } | 2375 | } |
2380 | spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag); | 2376 | spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag); |
2377 | |||
2378 | if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_ncmd) { | ||
2379 | atomic_inc(&ndlp->cmd_pending); | ||
2380 | lpfc_ncmd->flags |= LPFC_BUMP_QDEPTH; | ||
2381 | } | ||
2381 | return lpfc_ncmd; | 2382 | return lpfc_ncmd; |
2382 | } | 2383 | } |
2383 | 2384 | ||
@@ -2396,7 +2397,13 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd) | |||
2396 | { | 2397 | { |
2397 | unsigned long iflag = 0; | 2398 | unsigned long iflag = 0; |
2398 | 2399 | ||
2400 | if ((lpfc_ncmd->flags & LPFC_BUMP_QDEPTH) && lpfc_ncmd->ndlp) | ||
2401 | atomic_dec(&lpfc_ncmd->ndlp->cmd_pending); | ||
2402 | |||
2399 | lpfc_ncmd->nonsg_phys = 0; | 2403 | lpfc_ncmd->nonsg_phys = 0; |
2404 | lpfc_ncmd->ndlp = NULL; | ||
2405 | lpfc_ncmd->flags &= ~LPFC_BUMP_QDEPTH; | ||
2406 | |||
2400 | if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) { | 2407 | if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) { |
2401 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, | 2408 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, |
2402 | "6310 XB release deferred for " | 2409 | "6310 XB release deferred for " |
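The two hunks above move the ndlp->cmd_pending accounting out of the submit path and into buffer get/put: the counter is bumped only when lpfc_ndlp_check_qdepth() says throttling applies, the buffer is tagged with LPFC_BUMP_QDEPTH, and the release path decrements only when that flag is set, so increments and decrements stay paired no matter which error path frees the buffer. A minimal, self-contained sketch of that pairing is below; the names are invented and the throttling test is a placeholder, so treat it as an illustration of the flagged inc/dec idea rather than driver code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define BUF_COUNTED 0x1		/* analogue of LPFC_BUMP_QDEPTH */

struct node {
	atomic_int pending;	/* analogue of ndlp->cmd_pending */
	int qdepth;
};

struct io_buf {
	unsigned int flags;
	struct node *node;
};

/* Analogue of lpfc_ndlp_check_qdepth(): only throttle when needed. */
static bool throttling_active(const struct node *n)
{
	return n->qdepth > 0;
}

static struct io_buf *buf_get(struct node *n)
{
	struct io_buf *b = calloc(1, sizeof(*b));

	if (b && throttling_active(n)) {
		atomic_fetch_add(&n->pending, 1);
		b->flags |= BUF_COUNTED;	/* remember that we counted it */
		b->node = n;
	}
	return b;
}

static void buf_put(struct io_buf *b)
{
	/* Decrement only if this buffer actually took a count. */
	if ((b->flags & BUF_COUNTED) && b->node)
		atomic_fetch_sub(&b->node->pending, 1);
	b->flags &= ~BUF_COUNTED;
	b->node = NULL;
	free(b);
}

int main(void)
{
	struct node n = { .qdepth = 64 };
	struct io_buf *b = buf_get(&n);

	if (!b)
		return 1;
	printf("pending after get: %d\n", atomic_load(&n.pending));
	buf_put(b);
	printf("pending after put: %d\n", atomic_load(&n.pending));
	return 0;
}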
@@ -2687,7 +2694,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
2687 | struct lpfc_nvme_rport *oldrport; | 2694 | struct lpfc_nvme_rport *oldrport; |
2688 | struct nvme_fc_remote_port *remote_port; | 2695 | struct nvme_fc_remote_port *remote_port; |
2689 | struct nvme_fc_port_info rpinfo; | 2696 | struct nvme_fc_port_info rpinfo; |
2690 | struct lpfc_nodelist *prev_ndlp; | 2697 | struct lpfc_nodelist *prev_ndlp = NULL; |
2691 | 2698 | ||
2692 | lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC, | 2699 | lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC, |
2693 | "6006 Register NVME PORT. DID x%06x nlptype x%x\n", | 2700 | "6006 Register NVME PORT. DID x%06x nlptype x%x\n", |
@@ -2736,23 +2743,29 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
2736 | spin_unlock_irq(&vport->phba->hbalock); | 2743 | spin_unlock_irq(&vport->phba->hbalock); |
2737 | rport = remote_port->private; | 2744 | rport = remote_port->private; |
2738 | if (oldrport) { | 2745 | if (oldrport) { |
2746 | /* New remoteport record does not guarantee valid | ||
2747 | * host private memory area. | ||
2748 | */ | ||
2749 | prev_ndlp = oldrport->ndlp; | ||
2739 | if (oldrport == remote_port->private) { | 2750 | if (oldrport == remote_port->private) { |
2740 | /* Same remoteport. Just reuse. */ | 2751 | /* Same remoteport - ndlp should match. |
2752 | * Just reuse. | ||
2753 | */ | ||
2741 | lpfc_printf_vlog(ndlp->vport, KERN_INFO, | 2754 | lpfc_printf_vlog(ndlp->vport, KERN_INFO, |
2742 | LOG_NVME_DISC, | 2755 | LOG_NVME_DISC, |
2743 | "6014 Rebinding lport to " | 2756 | "6014 Rebinding lport to " |
2744 | "remoteport %p wwpn 0x%llx, " | 2757 | "remoteport %p wwpn 0x%llx, " |
2745 | "Data: x%x x%x %p x%x x%06x\n", | 2758 | "Data: x%x x%x %p %p x%x x%06x\n", |
2746 | remote_port, | 2759 | remote_port, |
2747 | remote_port->port_name, | 2760 | remote_port->port_name, |
2748 | remote_port->port_id, | 2761 | remote_port->port_id, |
2749 | remote_port->port_role, | 2762 | remote_port->port_role, |
2763 | prev_ndlp, | ||
2750 | ndlp, | 2764 | ndlp, |
2751 | ndlp->nlp_type, | 2765 | ndlp->nlp_type, |
2752 | ndlp->nlp_DID); | 2766 | ndlp->nlp_DID); |
2753 | return 0; | 2767 | return 0; |
2754 | } | 2768 | } |
2755 | prev_ndlp = rport->ndlp; | ||
2756 | 2769 | ||
2757 | /* Sever the ndlp<->rport association | 2770 | /* Sever the ndlp<->rport association |
2758 | * before dropping the ndlp ref from | 2771 | * before dropping the ndlp ref from |
@@ -2786,13 +2799,13 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
2786 | lpfc_printf_vlog(vport, KERN_INFO, | 2799 | lpfc_printf_vlog(vport, KERN_INFO, |
2787 | LOG_NVME_DISC | LOG_NODE, | 2800 | LOG_NVME_DISC | LOG_NODE, |
2788 | "6022 Binding new rport to " | 2801 | "6022 Binding new rport to " |
2789 | "lport %p Remoteport %p WWNN 0x%llx, " | 2802 | "lport %p Remoteport %p rport %p WWNN 0x%llx, " |
2790 | "Rport WWPN 0x%llx DID " | 2803 | "Rport WWPN 0x%llx DID " |
2791 | "x%06x Role x%x, ndlp %p\n", | 2804 | "x%06x Role x%x, ndlp %p prev_ndlp %p\n", |
2792 | lport, remote_port, | 2805 | lport, remote_port, rport, |
2793 | rpinfo.node_name, rpinfo.port_name, | 2806 | rpinfo.node_name, rpinfo.port_name, |
2794 | rpinfo.port_id, rpinfo.port_role, | 2807 | rpinfo.port_id, rpinfo.port_role, |
2795 | ndlp); | 2808 | ndlp, prev_ndlp); |
2796 | } else { | 2809 | } else { |
2797 | lpfc_printf_vlog(vport, KERN_ERR, | 2810 | lpfc_printf_vlog(vport, KERN_ERR, |
2798 | LOG_NVME_DISC | LOG_NODE, | 2811 | LOG_NVME_DISC | LOG_NODE, |
@@ -2970,7 +2983,7 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba) | |||
2970 | struct lpfc_sli_ring *pring; | 2983 | struct lpfc_sli_ring *pring; |
2971 | u32 i, wait_cnt = 0; | 2984 | u32 i, wait_cnt = 0; |
2972 | 2985 | ||
2973 | if (phba->sli_rev < LPFC_SLI_REV4) | 2986 | if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.nvme_wq) |
2974 | return; | 2987 | return; |
2975 | 2988 | ||
2976 | /* Cycle through all NVME rings and make sure all outstanding | 2989 | /* Cycle through all NVME rings and make sure all outstanding |
@@ -2979,6 +2992,9 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba) | |||
2979 | for (i = 0; i < phba->cfg_nvme_io_channel; i++) { | 2992 | for (i = 0; i < phba->cfg_nvme_io_channel; i++) { |
2980 | pring = phba->sli4_hba.nvme_wq[i]->pring; | 2993 | pring = phba->sli4_hba.nvme_wq[i]->pring; |
2981 | 2994 | ||
2995 | if (!pring) | ||
2996 | continue; | ||
2997 | |||
2982 | /* Retrieve everything on the txcmplq */ | 2998 | /* Retrieve everything on the txcmplq */ |
2983 | while (!list_empty(&pring->txcmplq)) { | 2999 | while (!list_empty(&pring->txcmplq)) { |
2984 | msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); | 3000 | msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); |
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h index 04bd463dd043..cfd4719be25c 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.h +++ b/drivers/scsi/lpfc/lpfc_nvme.h | |||
@@ -86,6 +86,7 @@ struct lpfc_nvme_buf { | |||
86 | 86 | ||
87 | uint16_t flags; /* TBD convert exch_busy to flags */ | 87 | uint16_t flags; /* TBD convert exch_busy to flags */ |
88 | #define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */ | 88 | #define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */ |
89 | #define LPFC_BUMP_QDEPTH 0x2 /* bumped queue depth counter */ | ||
89 | uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */ | 90 | uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */ |
90 | uint16_t status; /* From IOCB Word 7- ulpStatus */ | 91 | uint16_t status; /* From IOCB Word 7- ulpStatus */ |
91 | uint16_t cpu; | 92 | uint16_t cpu; |
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 7271c9d885dd..b766afe10d3d 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * | 4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * | 6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | 7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * | 8 | * www.broadcom.com * |
@@ -402,6 +402,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) | |||
402 | 402 | ||
403 | /* Process FCP command */ | 403 | /* Process FCP command */ |
404 | if (rc == 0) { | 404 | if (rc == 0) { |
405 | ctxp->rqb_buffer = NULL; | ||
405 | atomic_inc(&tgtp->rcv_fcp_cmd_out); | 406 | atomic_inc(&tgtp->rcv_fcp_cmd_out); |
406 | nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); | 407 | nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); |
407 | return; | 408 | return; |
@@ -1116,8 +1117,17 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport, | |||
1116 | lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n", | 1117 | lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n", |
1117 | ctxp->oxid, ctxp->size, smp_processor_id()); | 1118 | ctxp->oxid, ctxp->size, smp_processor_id()); |
1118 | 1119 | ||
1120 | if (!nvmebuf) { | ||
1121 | lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, | ||
1122 | "6425 Defer rcv: no buffer xri x%x: " | ||
1123 | "flg %x ste %x\n", | ||
1124 | ctxp->oxid, ctxp->flag, ctxp->state); | ||
1125 | return; | ||
1126 | } | ||
1127 | |||
1119 | tgtp = phba->targetport->private; | 1128 | tgtp = phba->targetport->private; |
1120 | atomic_inc(&tgtp->rcv_fcp_cmd_defer); | 1129 | if (tgtp) |
1130 | atomic_inc(&tgtp->rcv_fcp_cmd_defer); | ||
1121 | 1131 | ||
1122 | /* Free the nvmebuf since a new buffer already replaced it */ | 1132 | /* Free the nvmebuf since a new buffer already replaced it */ |
1123 | nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); | 1133 | nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf); |
@@ -1732,9 +1742,12 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
1732 | uint32_t *payload; | 1742 | uint32_t *payload; |
1733 | uint32_t size, oxid, sid, rc; | 1743 | uint32_t size, oxid, sid, rc; |
1734 | 1744 | ||
1735 | if (!nvmebuf || !phba->targetport) { | 1745 | fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); |
1746 | oxid = be16_to_cpu(fc_hdr->fh_ox_id); | ||
1747 | |||
1748 | if (!phba->targetport) { | ||
1736 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, | 1749 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, |
1737 | "6154 LS Drop IO\n"); | 1750 | "6154 LS Drop IO x%x\n", oxid); |
1738 | oxid = 0; | 1751 | oxid = 0; |
1739 | size = 0; | 1752 | size = 0; |
1740 | sid = 0; | 1753 | sid = 0; |
@@ -1744,9 +1757,7 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
1744 | 1757 | ||
1745 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; | 1758 | tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; |
1746 | payload = (uint32_t *)(nvmebuf->dbuf.virt); | 1759 | payload = (uint32_t *)(nvmebuf->dbuf.virt); |
1747 | fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); | ||
1748 | size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl); | 1760 | size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl); |
1749 | oxid = be16_to_cpu(fc_hdr->fh_ox_id); | ||
1750 | sid = sli4_sid_from_fc_hdr(fc_hdr); | 1761 | sid = sli4_sid_from_fc_hdr(fc_hdr); |
1751 | 1762 | ||
1752 | ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC); | 1763 | ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC); |
@@ -1759,8 +1770,7 @@ dropit: | |||
1759 | lpfc_nvmeio_data(phba, "NVMET LS DROP: " | 1770 | lpfc_nvmeio_data(phba, "NVMET LS DROP: " |
1760 | "xri x%x sz %d from %06x\n", | 1771 | "xri x%x sz %d from %06x\n", |
1761 | oxid, size, sid); | 1772 | oxid, size, sid); |
1762 | if (nvmebuf) | 1773 | lpfc_in_buf_free(phba, &nvmebuf->dbuf); |
1763 | lpfc_in_buf_free(phba, &nvmebuf->dbuf); | ||
1764 | return; | 1774 | return; |
1765 | } | 1775 | } |
1766 | ctxp->phba = phba; | 1776 | ctxp->phba = phba; |
@@ -1803,8 +1813,7 @@ dropit: | |||
1803 | ctxp->oxid, rc); | 1813 | ctxp->oxid, rc); |
1804 | 1814 | ||
1805 | /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */ | 1815 | /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */ |
1806 | if (nvmebuf) | 1816 | lpfc_in_buf_free(phba, &nvmebuf->dbuf); |
1807 | lpfc_in_buf_free(phba, &nvmebuf->dbuf); | ||
1808 | 1817 | ||
1809 | atomic_inc(&tgtp->xmt_ls_abort); | 1818 | atomic_inc(&tgtp->xmt_ls_abort); |
1810 | lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid); | 1819 | lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid); |
@@ -2492,7 +2501,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, | |||
2492 | bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0); | 2501 | bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0); |
2493 | 2502 | ||
2494 | /* Word 11 - set pbde later */ | 2503 | /* Word 11 - set pbde later */ |
2495 | if (phba->nvme_embed_pbde) { | 2504 | if (phba->cfg_enable_pbde) { |
2496 | do_pbde = 1; | 2505 | do_pbde = 1; |
2497 | } else { | 2506 | } else { |
2498 | bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0); | 2507 | bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0); |
@@ -2607,16 +2616,19 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, | |||
2607 | bf_set(lpfc_sli4_sge_last, sgl, 1); | 2616 | bf_set(lpfc_sli4_sge_last, sgl, 1); |
2608 | sgl->word2 = cpu_to_le32(sgl->word2); | 2617 | sgl->word2 = cpu_to_le32(sgl->word2); |
2609 | sgl->sge_len = cpu_to_le32(cnt); | 2618 | sgl->sge_len = cpu_to_le32(cnt); |
2610 | if (do_pbde && i == 0) { | 2619 | if (i == 0) { |
2611 | bde = (struct ulp_bde64 *)&wqe->words[13]; | 2620 | bde = (struct ulp_bde64 *)&wqe->words[13]; |
2612 | memset(bde, 0, sizeof(struct ulp_bde64)); | 2621 | if (do_pbde) { |
2613 | /* Words 13-15 (PBDE)*/ | 2622 | /* Words 13-15 (PBDE) */ |
2614 | bde->addrLow = sgl->addr_lo; | 2623 | bde->addrLow = sgl->addr_lo; |
2615 | bde->addrHigh = sgl->addr_hi; | 2624 | bde->addrHigh = sgl->addr_hi; |
2616 | bde->tus.f.bdeSize = | 2625 | bde->tus.f.bdeSize = |
2617 | le32_to_cpu(sgl->sge_len); | 2626 | le32_to_cpu(sgl->sge_len); |
2618 | bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; | 2627 | bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; |
2619 | bde->tus.w = cpu_to_le32(bde->tus.w); | 2628 | bde->tus.w = cpu_to_le32(bde->tus.w); |
2629 | } else { | ||
2630 | memset(bde, 0, sizeof(struct ulp_bde64)); | ||
2631 | } | ||
2620 | } | 2632 | } |
2621 | sgl++; | 2633 | sgl++; |
2622 | ctxp->offset += cnt; | 2634 | ctxp->offset += cnt; |
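In the hunk above, words 13-15 of the work-queue entry either carry a "performance BDE" describing the first data SGE (when PBDE is enabled) or are cleared so the hardware ignores them; the rework makes the zeroing happen whenever the PBDE is not written, instead of only when do_pbde happened to be set. Below is a compact, standalone rendering of that either/or, with simplified structures standing in for the SLI-4 SGE, the 64-bit BDE and WQE words 13-15 (assumed layouts for illustration, not the real ones).

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Simplified stand-ins for the SLI-4 scatter/gather entry and the
 * 64-bit buffer descriptor that can live in WQE words 13-15. */
struct sge {
	uint32_t addr_lo;
	uint32_t addr_hi;
	uint32_t len;
};

struct bde64 {
	uint32_t addr_lo;
	uint32_t addr_hi;
	uint32_t size_and_flags;
};

struct wqe {
	uint32_t words[16];
};

static void set_first_bde(struct wqe *wqe, const struct sge *first,
			  int pbde_enabled)
{
	struct bde64 *bde = (struct bde64 *)&wqe->words[13];

	if (pbde_enabled && first) {
		/* Mirror the first SGE into words 13-15. */
		bde->addr_lo = first->addr_lo;
		bde->addr_hi = first->addr_hi;
		bde->size_and_flags = first->len;	/* flag bits omitted */
	} else {
		/* Hardware must see zeros when no PBDE is supplied. */
		memset(bde, 0, sizeof(*bde));
	}
}

int main(void)
{
	struct wqe wqe = { { 0 } };
	struct sge first = { .addr_lo = 0x1000, .addr_hi = 0, .len = 4096 };

	set_first_bde(&wqe, &first, 1);
	printf("word13=0x%x word15=0x%x\n", wqe.words[13], wqe.words[15]);

	set_first_bde(&wqe, &first, 0);
	printf("after clear, word13=0x%x\n", wqe.words[13]);
	return 0;
}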
@@ -3105,11 +3117,17 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba, | |||
3105 | } | 3117 | } |
3106 | 3118 | ||
3107 | aerr: | 3119 | aerr: |
3108 | ctxp->flag &= ~LPFC_NVMET_ABORT_OP; | 3120 | spin_lock_irqsave(&ctxp->ctxlock, flags); |
3121 | if (ctxp->flag & LPFC_NVMET_CTX_RLS) | ||
3122 | list_del(&ctxp->list); | ||
3123 | ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS); | ||
3124 | spin_unlock_irqrestore(&ctxp->ctxlock, flags); | ||
3125 | |||
3109 | atomic_inc(&tgtp->xmt_abort_rsp_error); | 3126 | atomic_inc(&tgtp->xmt_abort_rsp_error); |
3110 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, | 3127 | lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS, |
3111 | "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n", | 3128 | "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n", |
3112 | ctxp->oxid, rc); | 3129 | ctxp->oxid, rc); |
3130 | lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf); | ||
3113 | return 1; | 3131 | return 1; |
3114 | } | 3132 | } |
3115 | 3133 | ||
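The aerr: path above now takes ctxp->ctxlock before clearing LPFC_NVMET_ABORT_OP and LPFC_NVMET_CTX_RLS (unlinking the context if it was queued for release) and then reposts the context buffer, so a failed ABTS no longer leaks the receive context. As a generic sketch of that "undo the bookkeeping under the lock, then recycle the resource" shape, with invented names rather than the driver's structures:

#include <pthread.h>
#include <stdio.h>

#define CTX_ABORT_OP 0x1
#define CTX_RLS      0x2

struct ctx {
	pthread_mutex_t lock;
	unsigned int flags;
	int on_release_list;	/* stand-in for list membership */
};

static int free_pool;		/* stand-in for the context-buffer pool */

/* Error path: clear the abort/release flags atomically, drop the
 * context from the release list if queued, then hand the buffer back. */
static void abort_failed(struct ctx *c)
{
	pthread_mutex_lock(&c->lock);
	if (c->flags & CTX_RLS)
		c->on_release_list = 0;		/* list_del() analogue */
	c->flags &= ~(CTX_ABORT_OP | CTX_RLS);
	pthread_mutex_unlock(&c->lock);

	free_pool++;				/* ctxbuf repost analogue */
}

int main(void)
{
	struct ctx c = { .lock = PTHREAD_MUTEX_INITIALIZER,
			 .flags = CTX_ABORT_OP | CTX_RLS,
			 .on_release_list = 1 };

	abort_failed(&c);
	printf("flags=0x%x on_list=%d free_pool=%d\n",
	       c.flags, c.on_release_list, free_pool);
	return 0;
}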
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h index 81f520abfd64..1aaff63f1f41 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.h +++ b/drivers/scsi/lpfc/lpfc_nvmet.h | |||
@@ -2,7 +2,7 @@ | |||
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * | 4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * | 6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | 7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * | 8 | * www.broadcom.com * |
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index a94fb9f8bb44..5c7858e735c9 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
@@ -995,6 +995,11 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | |||
995 | spin_unlock(&phba->scsi_buf_list_put_lock); | 995 | spin_unlock(&phba->scsi_buf_list_put_lock); |
996 | } | 996 | } |
997 | spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag); | 997 | spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag); |
998 | |||
999 | if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) { | ||
1000 | atomic_inc(&ndlp->cmd_pending); | ||
1001 | lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH; | ||
1002 | } | ||
998 | return lpfc_cmd; | 1003 | return lpfc_cmd; |
999 | } | 1004 | } |
1000 | /** | 1005 | /** |
@@ -1044,6 +1049,11 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | |||
1044 | spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag); | 1049 | spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag); |
1045 | if (!found) | 1050 | if (!found) |
1046 | return NULL; | 1051 | return NULL; |
1052 | |||
1053 | if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) { | ||
1054 | atomic_inc(&ndlp->cmd_pending); | ||
1055 | lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH; | ||
1056 | } | ||
1047 | return lpfc_cmd; | 1057 | return lpfc_cmd; |
1048 | } | 1058 | } |
1049 | /** | 1059 | /** |
@@ -1134,7 +1144,10 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) | |||
1134 | static void | 1144 | static void |
1135 | lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) | 1145 | lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) |
1136 | { | 1146 | { |
1147 | if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp) | ||
1148 | atomic_dec(&psb->ndlp->cmd_pending); | ||
1137 | 1149 | ||
1150 | psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH; | ||
1138 | phba->lpfc_release_scsi_buf(phba, psb); | 1151 | phba->lpfc_release_scsi_buf(phba, psb); |
1139 | } | 1152 | } |
1140 | 1153 | ||
@@ -3017,8 +3030,8 @@ out: | |||
3017 | if (err_type == BGS_GUARD_ERR_MASK) { | 3030 | if (err_type == BGS_GUARD_ERR_MASK) { |
3018 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, | 3031 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, |
3019 | 0x10, 0x1); | 3032 | 0x10, 0x1); |
3020 | cmd->result = DRIVER_SENSE << 24 | 3033 | cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | |
3021 | | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); | 3034 | SAM_STAT_CHECK_CONDITION; |
3022 | phba->bg_guard_err_cnt++; | 3035 | phba->bg_guard_err_cnt++; |
3023 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, | 3036 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, |
3024 | "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n", | 3037 | "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n", |
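The ScsiResult() macro being removed throughout this file simply packed a host byte and a SCSI status byte into cmd->result, so the replacement spells the same value out with shifts: the driver byte sits in bits 31:24, the host byte in bits 23:16 and the SCSI status in bits 7:0. A tiny standalone check of that packing is shown below using the conventional midlayer constant values of this era; it only demonstrates the encoding and is not driver code.

#include <stdio.h>

/* Conventional SCSI midlayer values (as of the era of this change). */
#define DID_ABORT                0x05	/* host byte */
#define DID_ERROR                0x07
#define DRIVER_SENSE             0x08	/* driver byte */
#define SAM_STAT_CHECK_CONDITION 0x02	/* SCSI status byte */

int main(void)
{
	/* Equivalent of DRIVER_SENSE << 24 | DID_ABORT << 16 |
	 * SAM_STAT_CHECK_CONDITION as used in the hunks above. */
	unsigned int result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
			      SAM_STAT_CHECK_CONDITION;

	printf("result      = 0x%08x\n", result);	/* 0x08050002 */
	printf("driver byte = 0x%02x\n", result >> 24);
	printf("host byte   = 0x%02x\n", (result >> 16) & 0xff);
	printf("status byte = 0x%02x\n", result & 0xff);
	return 0;
}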
@@ -3028,8 +3041,8 @@ out: | |||
3028 | } else if (err_type == BGS_REFTAG_ERR_MASK) { | 3041 | } else if (err_type == BGS_REFTAG_ERR_MASK) { |
3029 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, | 3042 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, |
3030 | 0x10, 0x3); | 3043 | 0x10, 0x3); |
3031 | cmd->result = DRIVER_SENSE << 24 | 3044 | cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | |
3032 | | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); | 3045 | SAM_STAT_CHECK_CONDITION; |
3033 | 3046 | ||
3034 | phba->bg_reftag_err_cnt++; | 3047 | phba->bg_reftag_err_cnt++; |
3035 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, | 3048 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, |
@@ -3040,8 +3053,8 @@ out: | |||
3040 | } else if (err_type == BGS_APPTAG_ERR_MASK) { | 3053 | } else if (err_type == BGS_APPTAG_ERR_MASK) { |
3041 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, | 3054 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, |
3042 | 0x10, 0x2); | 3055 | 0x10, 0x2); |
3043 | cmd->result = DRIVER_SENSE << 24 | 3056 | cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | |
3044 | | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); | 3057 | SAM_STAT_CHECK_CONDITION; |
3045 | 3058 | ||
3046 | phba->bg_apptag_err_cnt++; | 3059 | phba->bg_apptag_err_cnt++; |
3047 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, | 3060 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, |
@@ -3096,7 +3109,7 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | |||
3096 | spin_unlock(&_dump_buf_lock); | 3109 | spin_unlock(&_dump_buf_lock); |
3097 | 3110 | ||
3098 | if (lpfc_bgs_get_invalid_prof(bgstat)) { | 3111 | if (lpfc_bgs_get_invalid_prof(bgstat)) { |
3099 | cmd->result = ScsiResult(DID_ERROR, 0); | 3112 | cmd->result = DID_ERROR << 16; |
3100 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, | 3113 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, |
3101 | "9072 BLKGRD: Invalid BG Profile in cmd" | 3114 | "9072 BLKGRD: Invalid BG Profile in cmd" |
3102 | " 0x%x lba 0x%llx blk cnt 0x%x " | 3115 | " 0x%x lba 0x%llx blk cnt 0x%x " |
@@ -3108,7 +3121,7 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | |||
3108 | } | 3121 | } |
3109 | 3122 | ||
3110 | if (lpfc_bgs_get_uninit_dif_block(bgstat)) { | 3123 | if (lpfc_bgs_get_uninit_dif_block(bgstat)) { |
3111 | cmd->result = ScsiResult(DID_ERROR, 0); | 3124 | cmd->result = DID_ERROR << 16; |
3112 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, | 3125 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, |
3113 | "9073 BLKGRD: Invalid BG PDIF Block in cmd" | 3126 | "9073 BLKGRD: Invalid BG PDIF Block in cmd" |
3114 | " 0x%x lba 0x%llx blk cnt 0x%x " | 3127 | " 0x%x lba 0x%llx blk cnt 0x%x " |
@@ -3124,8 +3137,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | |||
3124 | 3137 | ||
3125 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, | 3138 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, |
3126 | 0x10, 0x1); | 3139 | 0x10, 0x1); |
3127 | cmd->result = DRIVER_SENSE << 24 | 3140 | cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | |
3128 | | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); | 3141 | SAM_STAT_CHECK_CONDITION; |
3129 | phba->bg_guard_err_cnt++; | 3142 | phba->bg_guard_err_cnt++; |
3130 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, | 3143 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, |
3131 | "9055 BLKGRD: Guard Tag error in cmd" | 3144 | "9055 BLKGRD: Guard Tag error in cmd" |
@@ -3140,8 +3153,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | |||
3140 | 3153 | ||
3141 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, | 3154 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, |
3142 | 0x10, 0x3); | 3155 | 0x10, 0x3); |
3143 | cmd->result = DRIVER_SENSE << 24 | 3156 | cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | |
3144 | | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); | 3157 | SAM_STAT_CHECK_CONDITION; |
3145 | 3158 | ||
3146 | phba->bg_reftag_err_cnt++; | 3159 | phba->bg_reftag_err_cnt++; |
3147 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, | 3160 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, |
@@ -3157,8 +3170,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | |||
3157 | 3170 | ||
3158 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, | 3171 | scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, |
3159 | 0x10, 0x2); | 3172 | 0x10, 0x2); |
3160 | cmd->result = DRIVER_SENSE << 24 | 3173 | cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 | |
3161 | | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); | 3174 | SAM_STAT_CHECK_CONDITION; |
3162 | 3175 | ||
3163 | phba->bg_apptag_err_cnt++; | 3176 | phba->bg_apptag_err_cnt++; |
3164 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, | 3177 | lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, |
@@ -3311,12 +3324,13 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) | |||
3311 | } | 3324 | } |
3312 | /* | 3325 | /* |
3313 | * Setup the first Payload BDE. For FCoE we just key off | 3326 | * Setup the first Payload BDE. For FCoE we just key off |
3314 | * Performance Hints, for FC we utilize fcp_embed_pbde. | 3327 | * Performance Hints, for FC we use lpfc_enable_pbde. |
3328 | * We populate words 13-15 of IOCB/WQE. | ||
3315 | */ | 3329 | */ |
3316 | if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || | 3330 | if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || |
3317 | phba->fcp_embed_pbde) { | 3331 | phba->cfg_enable_pbde) { |
3318 | bde = (struct ulp_bde64 *) | 3332 | bde = (struct ulp_bde64 *) |
3319 | &(iocb_cmd->unsli3.sli3Words[5]); | 3333 | &(iocb_cmd->unsli3.sli3Words[5]); |
3320 | bde->addrLow = first_data_sgl->addr_lo; | 3334 | bde->addrLow = first_data_sgl->addr_lo; |
3321 | bde->addrHigh = first_data_sgl->addr_hi; | 3335 | bde->addrHigh = first_data_sgl->addr_hi; |
3322 | bde->tus.f.bdeSize = | 3336 | bde->tus.f.bdeSize = |
@@ -3330,6 +3344,13 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) | |||
3330 | sgl->word2 = le32_to_cpu(sgl->word2); | 3344 | sgl->word2 = le32_to_cpu(sgl->word2); |
3331 | bf_set(lpfc_sli4_sge_last, sgl, 1); | 3345 | bf_set(lpfc_sli4_sge_last, sgl, 1); |
3332 | sgl->word2 = cpu_to_le32(sgl->word2); | 3346 | sgl->word2 = cpu_to_le32(sgl->word2); |
3347 | |||
3348 | if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) || | ||
3349 | phba->cfg_enable_pbde) { | ||
3350 | bde = (struct ulp_bde64 *) | ||
3351 | &(iocb_cmd->unsli3.sli3Words[5]); | ||
3352 | memset(bde, 0, (sizeof(uint32_t) * 3)); | ||
3353 | } | ||
3333 | } | 3354 | } |
3334 | 3355 | ||
3335 | /* | 3356 | /* |
@@ -3866,7 +3887,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
3866 | } | 3887 | } |
3867 | 3888 | ||
3868 | out: | 3889 | out: |
3869 | cmnd->result = ScsiResult(host_status, scsi_status); | 3890 | cmnd->result = host_status << 16 | scsi_status; |
3870 | lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb); | 3891 | lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb); |
3871 | } | 3892 | } |
3872 | 3893 | ||
@@ -4019,7 +4040,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
4019 | break; | 4040 | break; |
4020 | case IOSTAT_NPORT_BSY: | 4041 | case IOSTAT_NPORT_BSY: |
4021 | case IOSTAT_FABRIC_BSY: | 4042 | case IOSTAT_FABRIC_BSY: |
4022 | cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0); | 4043 | cmd->result = DID_TRANSPORT_DISRUPTED << 16; |
4023 | fast_path_evt = lpfc_alloc_fast_evt(phba); | 4044 | fast_path_evt = lpfc_alloc_fast_evt(phba); |
4024 | if (!fast_path_evt) | 4045 | if (!fast_path_evt) |
4025 | break; | 4046 | break; |
@@ -4053,14 +4074,14 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
4053 | lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || | 4074 | lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR || |
4054 | lpfc_cmd->result == | 4075 | lpfc_cmd->result == |
4055 | IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { | 4076 | IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) { |
4056 | cmd->result = ScsiResult(DID_NO_CONNECT, 0); | 4077 | cmd->result = DID_NO_CONNECT << 16; |
4057 | break; | 4078 | break; |
4058 | } | 4079 | } |
4059 | if (lpfc_cmd->result == IOERR_INVALID_RPI || | 4080 | if (lpfc_cmd->result == IOERR_INVALID_RPI || |
4060 | lpfc_cmd->result == IOERR_NO_RESOURCES || | 4081 | lpfc_cmd->result == IOERR_NO_RESOURCES || |
4061 | lpfc_cmd->result == IOERR_ABORT_REQUESTED || | 4082 | lpfc_cmd->result == IOERR_ABORT_REQUESTED || |
4062 | lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { | 4083 | lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) { |
4063 | cmd->result = ScsiResult(DID_REQUEUE, 0); | 4084 | cmd->result = DID_REQUEUE << 16; |
4064 | break; | 4085 | break; |
4065 | } | 4086 | } |
4066 | if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || | 4087 | if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED || |
@@ -4094,16 +4115,16 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
4094 | } | 4115 | } |
4095 | /* else: fall through */ | 4116 | /* else: fall through */ |
4096 | default: | 4117 | default: |
4097 | cmd->result = ScsiResult(DID_ERROR, 0); | 4118 | cmd->result = DID_ERROR << 16; |
4098 | break; | 4119 | break; |
4099 | } | 4120 | } |
4100 | 4121 | ||
4101 | if (!pnode || !NLP_CHK_NODE_ACT(pnode) | 4122 | if (!pnode || !NLP_CHK_NODE_ACT(pnode) |
4102 | || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) | 4123 | || (pnode->nlp_state != NLP_STE_MAPPED_NODE)) |
4103 | cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, | 4124 | cmd->result = DID_TRANSPORT_DISRUPTED << 16 | |
4104 | SAM_STAT_BUSY); | 4125 | SAM_STAT_BUSY; |
4105 | } else | 4126 | } else |
4106 | cmd->result = ScsiResult(DID_OK, 0); | 4127 | cmd->result = DID_OK << 16; |
4107 | 4128 | ||
4108 | if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { | 4129 | if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) { |
4109 | uint32_t *lp = (uint32_t *)cmd->sense_buffer; | 4130 | uint32_t *lp = (uint32_t *)cmd->sense_buffer; |
@@ -4122,7 +4143,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
4122 | msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { | 4143 | msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { |
4123 | spin_lock_irqsave(shost->host_lock, flags); | 4144 | spin_lock_irqsave(shost->host_lock, flags); |
4124 | if (pnode && NLP_CHK_NODE_ACT(pnode)) { | 4145 | if (pnode && NLP_CHK_NODE_ACT(pnode)) { |
4125 | atomic_dec(&pnode->cmd_pending); | ||
4126 | if (pnode->cmd_qdepth > | 4146 | if (pnode->cmd_qdepth > |
4127 | atomic_read(&pnode->cmd_pending) && | 4147 | atomic_read(&pnode->cmd_pending) && |
4128 | (atomic_read(&pnode->cmd_pending) > | 4148 | (atomic_read(&pnode->cmd_pending) > |
@@ -4135,8 +4155,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, | |||
4135 | pnode->last_change_time = jiffies; | 4155 | pnode->last_change_time = jiffies; |
4136 | } | 4156 | } |
4137 | spin_unlock_irqrestore(shost->host_lock, flags); | 4157 | spin_unlock_irqrestore(shost->host_lock, flags); |
4138 | } else if (pnode && NLP_CHK_NODE_ACT(pnode)) { | ||
4139 | atomic_dec(&pnode->cmd_pending); | ||
4140 | } | 4158 | } |
4141 | lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); | 4159 | lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); |
4142 | 4160 | ||
@@ -4530,6 +4548,11 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) | |||
4530 | int err; | 4548 | int err; |
4531 | 4549 | ||
4532 | rdata = lpfc_rport_data_from_scsi_device(cmnd->device); | 4550 | rdata = lpfc_rport_data_from_scsi_device(cmnd->device); |
4551 | |||
4552 | /* sanity check on references */ | ||
4553 | if (unlikely(!rdata) || unlikely(!rport)) | ||
4554 | goto out_fail_command; | ||
4555 | |||
4533 | err = fc_remote_port_chkready(rport); | 4556 | err = fc_remote_port_chkready(rport); |
4534 | if (err) { | 4557 | if (err) { |
4535 | cmnd->result = err; | 4558 | cmnd->result = err; |
@@ -4555,33 +4578,36 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) | |||
4555 | */ | 4578 | */ |
4556 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) | 4579 | if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) |
4557 | goto out_tgt_busy; | 4580 | goto out_tgt_busy; |
4558 | if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) { | 4581 | if (lpfc_ndlp_check_qdepth(phba, ndlp)) { |
4559 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, | 4582 | if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) { |
4560 | "3377 Target Queue Full, scsi Id:%d Qdepth:%d" | 4583 | lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR, |
4561 | " Pending command:%d" | 4584 | "3377 Target Queue Full, scsi Id:%d " |
4562 | " WWNN:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, " | 4585 | "Qdepth:%d Pending command:%d" |
4563 | " WWPN:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", | 4586 | " WWNN:%02x:%02x:%02x:%02x:" |
4564 | ndlp->nlp_sid, ndlp->cmd_qdepth, | 4587 | "%02x:%02x:%02x:%02x, " |
4565 | atomic_read(&ndlp->cmd_pending), | 4588 | " WWPN:%02x:%02x:%02x:%02x:" |
4566 | ndlp->nlp_nodename.u.wwn[0], | 4589 | "%02x:%02x:%02x:%02x", |
4567 | ndlp->nlp_nodename.u.wwn[1], | 4590 | ndlp->nlp_sid, ndlp->cmd_qdepth, |
4568 | ndlp->nlp_nodename.u.wwn[2], | 4591 | atomic_read(&ndlp->cmd_pending), |
4569 | ndlp->nlp_nodename.u.wwn[3], | 4592 | ndlp->nlp_nodename.u.wwn[0], |
4570 | ndlp->nlp_nodename.u.wwn[4], | 4593 | ndlp->nlp_nodename.u.wwn[1], |
4571 | ndlp->nlp_nodename.u.wwn[5], | 4594 | ndlp->nlp_nodename.u.wwn[2], |
4572 | ndlp->nlp_nodename.u.wwn[6], | 4595 | ndlp->nlp_nodename.u.wwn[3], |
4573 | ndlp->nlp_nodename.u.wwn[7], | 4596 | ndlp->nlp_nodename.u.wwn[4], |
4574 | ndlp->nlp_portname.u.wwn[0], | 4597 | ndlp->nlp_nodename.u.wwn[5], |
4575 | ndlp->nlp_portname.u.wwn[1], | 4598 | ndlp->nlp_nodename.u.wwn[6], |
4576 | ndlp->nlp_portname.u.wwn[2], | 4599 | ndlp->nlp_nodename.u.wwn[7], |
4577 | ndlp->nlp_portname.u.wwn[3], | 4600 | ndlp->nlp_portname.u.wwn[0], |
4578 | ndlp->nlp_portname.u.wwn[4], | 4601 | ndlp->nlp_portname.u.wwn[1], |
4579 | ndlp->nlp_portname.u.wwn[5], | 4602 | ndlp->nlp_portname.u.wwn[2], |
4580 | ndlp->nlp_portname.u.wwn[6], | 4603 | ndlp->nlp_portname.u.wwn[3], |
4581 | ndlp->nlp_portname.u.wwn[7]); | 4604 | ndlp->nlp_portname.u.wwn[4], |
4582 | goto out_tgt_busy; | 4605 | ndlp->nlp_portname.u.wwn[5], |
4606 | ndlp->nlp_portname.u.wwn[6], | ||
4607 | ndlp->nlp_portname.u.wwn[7]); | ||
4608 | goto out_tgt_busy; | ||
4609 | } | ||
4583 | } | 4610 | } |
4584 | atomic_inc(&ndlp->cmd_pending); | ||
4585 | 4611 | ||
4586 | lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp); | 4612 | lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp); |
4587 | if (lpfc_cmd == NULL) { | 4613 | if (lpfc_cmd == NULL) { |
@@ -4599,6 +4625,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) | |||
4599 | */ | 4625 | */ |
4600 | lpfc_cmd->pCmd = cmnd; | 4626 | lpfc_cmd->pCmd = cmnd; |
4601 | lpfc_cmd->rdata = rdata; | 4627 | lpfc_cmd->rdata = rdata; |
4628 | lpfc_cmd->ndlp = ndlp; | ||
4602 | lpfc_cmd->timeout = 0; | 4629 | lpfc_cmd->timeout = 0; |
4603 | lpfc_cmd->start_time = jiffies; | 4630 | lpfc_cmd->start_time = jiffies; |
4604 | cmnd->host_scribble = (unsigned char *)lpfc_cmd; | 4631 | cmnd->host_scribble = (unsigned char *)lpfc_cmd; |
@@ -4681,7 +4708,6 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) | |||
4681 | lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); | 4708 | lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd); |
4682 | lpfc_release_scsi_buf(phba, lpfc_cmd); | 4709 | lpfc_release_scsi_buf(phba, lpfc_cmd); |
4683 | out_host_busy: | 4710 | out_host_busy: |
4684 | atomic_dec(&ndlp->cmd_pending); | ||
4685 | return SCSI_MLQUEUE_HOST_BUSY; | 4711 | return SCSI_MLQUEUE_HOST_BUSY; |
4686 | 4712 | ||
4687 | out_tgt_busy: | 4713 | out_tgt_busy: |
@@ -4714,7 +4740,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
4714 | struct lpfc_scsi_buf *lpfc_cmd; | 4740 | struct lpfc_scsi_buf *lpfc_cmd; |
4715 | IOCB_t *cmd, *icmd; | 4741 | IOCB_t *cmd, *icmd; |
4716 | int ret = SUCCESS, status = 0; | 4742 | int ret = SUCCESS, status = 0; |
4717 | struct lpfc_sli_ring *pring_s4; | 4743 | struct lpfc_sli_ring *pring_s4 = NULL; |
4718 | int ret_val; | 4744 | int ret_val; |
4719 | unsigned long flags; | 4745 | unsigned long flags; |
4720 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); | 4746 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); |
@@ -4744,8 +4770,25 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
4744 | } | 4770 | } |
4745 | 4771 | ||
4746 | iocb = &lpfc_cmd->cur_iocbq; | 4772 | iocb = &lpfc_cmd->cur_iocbq; |
4773 | if (phba->sli_rev == LPFC_SLI_REV4) { | ||
4774 | if (!(phba->cfg_fof) || | ||
4775 | (!(iocb->iocb_flag & LPFC_IO_FOF))) { | ||
4776 | pring_s4 = | ||
4777 | phba->sli4_hba.fcp_wq[iocb->hba_wqidx]->pring; | ||
4778 | } else { | ||
4779 | iocb->hba_wqidx = 0; | ||
4780 | pring_s4 = phba->sli4_hba.oas_wq->pring; | ||
4781 | } | ||
4782 | if (!pring_s4) { | ||
4783 | ret = FAILED; | ||
4784 | goto out_unlock; | ||
4785 | } | ||
4786 | spin_lock(&pring_s4->ring_lock); | ||
4787 | } | ||
4747 | /* the command is in process of being cancelled */ | 4788 | /* the command is in process of being cancelled */ |
4748 | if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { | 4789 | if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { |
4790 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
4791 | spin_unlock(&pring_s4->ring_lock); | ||
4749 | spin_unlock_irqrestore(&phba->hbalock, flags); | 4792 | spin_unlock_irqrestore(&phba->hbalock, flags); |
4750 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, | 4793 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
4751 | "3169 SCSI Layer abort requested I/O has been " | 4794 | "3169 SCSI Layer abort requested I/O has been " |
@@ -4759,6 +4802,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
4759 | * see the completion before the eh fired. Just return SUCCESS. | 4802 | * see the completion before the eh fired. Just return SUCCESS. |
4760 | */ | 4803 | */ |
4761 | if (lpfc_cmd->pCmd != cmnd) { | 4804 | if (lpfc_cmd->pCmd != cmnd) { |
4805 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
4806 | spin_unlock(&pring_s4->ring_lock); | ||
4762 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, | 4807 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
4763 | "3170 SCSI Layer abort requested I/O has been " | 4808 | "3170 SCSI Layer abort requested I/O has been " |
4764 | "completed by LLD.\n"); | 4809 | "completed by LLD.\n"); |
@@ -4771,6 +4816,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
4771 | if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) { | 4816 | if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) { |
4772 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, | 4817 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, |
4773 | "3389 SCSI Layer I/O Abort Request is pending\n"); | 4818 | "3389 SCSI Layer I/O Abort Request is pending\n"); |
4819 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
4820 | spin_unlock(&pring_s4->ring_lock); | ||
4774 | spin_unlock_irqrestore(&phba->hbalock, flags); | 4821 | spin_unlock_irqrestore(&phba->hbalock, flags); |
4775 | goto wait_for_cmpl; | 4822 | goto wait_for_cmpl; |
4776 | } | 4823 | } |
@@ -4778,6 +4825,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
4778 | abtsiocb = __lpfc_sli_get_iocbq(phba); | 4825 | abtsiocb = __lpfc_sli_get_iocbq(phba); |
4779 | if (abtsiocb == NULL) { | 4826 | if (abtsiocb == NULL) { |
4780 | ret = FAILED; | 4827 | ret = FAILED; |
4828 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
4829 | spin_unlock(&pring_s4->ring_lock); | ||
4781 | goto out_unlock; | 4830 | goto out_unlock; |
4782 | } | 4831 | } |
4783 | 4832 | ||
@@ -4815,14 +4864,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
4815 | 4864 | ||
4816 | abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; | 4865 | abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl; |
4817 | abtsiocb->vport = vport; | 4866 | abtsiocb->vport = vport; |
4867 | lpfc_cmd->waitq = &waitq; | ||
4818 | if (phba->sli_rev == LPFC_SLI_REV4) { | 4868 | if (phba->sli_rev == LPFC_SLI_REV4) { |
4819 | pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocb); | ||
4820 | if (pring_s4 == NULL) { | ||
4821 | ret = FAILED; | ||
4822 | goto out_unlock; | ||
4823 | } | ||
4824 | /* Note: both hbalock and ring_lock must be set here */ | 4869 | /* Note: both hbalock and ring_lock must be set here */ |
4825 | spin_lock(&pring_s4->ring_lock); | ||
4826 | ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, | 4870 | ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno, |
4827 | abtsiocb, 0); | 4871 | abtsiocb, 0); |
4828 | spin_unlock(&pring_s4->ring_lock); | 4872 | spin_unlock(&pring_s4->ring_lock); |
@@ -4835,6 +4879,17 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
4835 | 4879 | ||
4836 | 4880 | ||
4837 | if (ret_val == IOCB_ERROR) { | 4881 | if (ret_val == IOCB_ERROR) { |
4882 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
4883 | spin_lock_irqsave(&pring_s4->ring_lock, flags); | ||
4884 | else | ||
4885 | spin_lock_irqsave(&phba->hbalock, flags); | ||
4886 | /* Indicate the IO is not being aborted by the driver. */ | ||
4887 | iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; | ||
4888 | lpfc_cmd->waitq = NULL; | ||
4889 | if (phba->sli_rev == LPFC_SLI_REV4) | ||
4890 | spin_unlock_irqrestore(&pring_s4->ring_lock, flags); | ||
4891 | else | ||
4892 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
4838 | lpfc_sli_release_iocbq(phba, abtsiocb); | 4893 | lpfc_sli_release_iocbq(phba, abtsiocb); |
4839 | ret = FAILED; | 4894 | ret = FAILED; |
4840 | goto out; | 4895 | goto out; |
@@ -4845,7 +4900,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
4845 | &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); | 4900 | &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ); |
4846 | 4901 | ||
4847 | wait_for_cmpl: | 4902 | wait_for_cmpl: |
4848 | lpfc_cmd->waitq = &waitq; | ||
4849 | /* Wait for abort to complete */ | 4903 | /* Wait for abort to complete */ |
4850 | wait_event_timeout(waitq, | 4904 | wait_event_timeout(waitq, |
4851 | (lpfc_cmd->pCmd != cmnd), | 4905 | (lpfc_cmd->pCmd != cmnd), |
@@ -5006,6 +5060,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd, | |||
5006 | lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo; | 5060 | lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo; |
5007 | lpfc_cmd->rdata = rdata; | 5061 | lpfc_cmd->rdata = rdata; |
5008 | lpfc_cmd->pCmd = cmnd; | 5062 | lpfc_cmd->pCmd = cmnd; |
5063 | lpfc_cmd->ndlp = pnode; | ||
5009 | 5064 | ||
5010 | status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, | 5065 | status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, |
5011 | task_mgmt_cmd); | 5066 | task_mgmt_cmd); |
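
The lpfc_scsi.c hunk above moves the "Target Queue Full" check behind lpfc_ndlp_check_qdepth() and drops the unconditional atomic_inc()/atomic_dec() of ndlp->cmd_pending from the queuecommand and host-busy paths (the new LPFC_SBUF_BUMP_QDEPTH flag added in lpfc_scsi.h suggests the accounting now follows the scsi-buffer get/put instead); the abort-handler half mainly takes the SLI4 ring_lock up front so the txcmplq checks and the abort issue run under one lock. The queue-depth gate itself is just a compare of an atomic in-flight counter against a per-target depth. A minimal userspace sketch of that pattern follows; the names and busy return code are illustrative, not driver API.

/* Illustrative per-target queue-depth gate using C11 atomics.
 * As in the driver, check and increment are separate steps, so this is
 * a soft limit: a racing caller can occasionally slip past the check. */
#include <stdatomic.h>
#include <stdio.h>

#define TGT_BUSY 1

struct target {
        atomic_int cmd_pending;   /* commands currently outstanding */
        int        cmd_qdepth;    /* advertised queue depth */
};

static int queue_cmd(struct target *t)
{
        if (atomic_load(&t->cmd_pending) >= t->cmd_qdepth)
                return TGT_BUSY;                  /* caller retries later */
        atomic_fetch_add(&t->cmd_pending, 1);
        return 0;                                 /* command accepted */
}

static void complete_cmd(struct target *t)
{
        atomic_fetch_sub(&t->cmd_pending, 1);
}

int main(void)
{
        struct target t = { .cmd_pending = 0, .cmd_qdepth = 2 };
        int i;

        for (i = 0; i < 3; i++)
                printf("cmd %d -> %s\n", i,
                       queue_cmd(&t) ? "target busy" : "queued");
        complete_cmd(&t);
        printf("after completion -> %s\n",
               queue_cmd(&t) ? "target busy" : "queued");
        return 0;
}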
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h index c38e4da71f5f..cc99859774ff 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.h +++ b/drivers/scsi/lpfc/lpfc_scsi.h | |||
@@ -134,11 +134,13 @@ struct lpfc_scsi_buf { | |||
134 | struct list_head list; | 134 | struct list_head list; |
135 | struct scsi_cmnd *pCmd; | 135 | struct scsi_cmnd *pCmd; |
136 | struct lpfc_rport_data *rdata; | 136 | struct lpfc_rport_data *rdata; |
137 | struct lpfc_nodelist *ndlp; | ||
137 | 138 | ||
138 | uint32_t timeout; | 139 | uint32_t timeout; |
139 | 140 | ||
140 | uint16_t flags; /* TBD convert exch_busy to flags */ | 141 | uint16_t flags; /* TBD convert exch_busy to flags */ |
141 | #define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */ | 142 | #define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */ |
143 | #define LPFC_SBUF_BUMP_QDEPTH 0x8 /* bumped queue depth counter */ | ||
142 | uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */ | 144 | uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */ |
143 | uint16_t status; /* From IOCB Word 7- ulpStatus */ | 145 | uint16_t status; /* From IOCB Word 7- ulpStatus */ |
144 | uint32_t result; /* From IOCB Word 4. */ | 146 | uint32_t result; /* From IOCB Word 4. */ |
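
lpfc_scsi.h gains a back-pointer to the node (ndlp) and a new bit, LPFC_SBUF_BUMP_QDEPTH, in the 16-bit flags word. A flag like this is typically set when a buffer has bumped the pending-command counter and tested at completion so only buffers that raised the counter drop it again; the set/test/clear idiom, shown with a hypothetical buffer type, is simply:

/* Minimal sketch of the flag-bit idiom; the struct is a stand-in,
 * only the two flag values are taken from the hunk above. */
#include <stdint.h>
#include <stdio.h>

#define LPFC_SBUF_XBUSY        0x1
#define LPFC_SBUF_BUMP_QDEPTH  0x8

struct buf { uint16_t flags; };

int main(void)
{
        struct buf b = { 0 };

        b.flags |= LPFC_SBUF_BUMP_QDEPTH;              /* set on submit */
        if (b.flags & LPFC_SBUF_BUMP_QDEPTH)           /* test on completion */
                b.flags &= ~LPFC_SBUF_BUMP_QDEPTH;     /* clear again */
        printf("flags=0x%x\n", b.flags);
        return 0;
}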
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 6f3c00a233ec..9830bdb6e072 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -145,6 +145,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe) | |||
145 | uint32_t idx; | 145 | uint32_t idx; |
146 | uint32_t i = 0; | 146 | uint32_t i = 0; |
147 | uint8_t *tmp; | 147 | uint8_t *tmp; |
148 | u32 if_type; | ||
148 | 149 | ||
149 | /* sanity check on queue memory */ | 150 | /* sanity check on queue memory */ |
150 | if (unlikely(!q)) | 151 | if (unlikely(!q)) |
@@ -199,8 +200,14 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe) | |||
199 | q->queue_id); | 200 | q->queue_id); |
200 | } else { | 201 | } else { |
201 | bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1); | 202 | bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1); |
202 | bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index); | ||
203 | bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id); | 203 | bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id); |
204 | |||
205 | /* Leave bits <23:16> clear for if_type 6 dpp */ | ||
206 | if_type = bf_get(lpfc_sli_intf_if_type, | ||
207 | &q->phba->sli4_hba.sli_intf); | ||
208 | if (if_type != LPFC_SLI_INTF_IF_TYPE_6) | ||
209 | bf_set(lpfc_wq_db_list_fm_index, &doorbell, | ||
210 | host_index); | ||
204 | } | 211 | } |
205 | } else if (q->db_format == LPFC_DB_RING_FORMAT) { | 212 | } else if (q->db_format == LPFC_DB_RING_FORMAT) { |
206 | bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1); | 213 | bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1); |
@@ -4591,7 +4598,7 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) | |||
4591 | spin_unlock_irq(&phba->hbalock); | 4598 | spin_unlock_irq(&phba->hbalock); |
4592 | 4599 | ||
4593 | memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); | 4600 | memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); |
4594 | psli->stats_start = get_seconds(); | 4601 | psli->stats_start = ktime_get_seconds(); |
4595 | 4602 | ||
4596 | /* Give the INITFF and Post time to settle. */ | 4603 | /* Give the INITFF and Post time to settle. */ |
4597 | mdelay(100); | 4604 | mdelay(100); |
@@ -4638,7 +4645,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) | |||
4638 | spin_unlock_irq(&phba->hbalock); | 4645 | spin_unlock_irq(&phba->hbalock); |
4639 | 4646 | ||
4640 | memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); | 4647 | memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets)); |
4641 | psli->stats_start = get_seconds(); | 4648 | psli->stats_start = ktime_get_seconds(); |
4642 | 4649 | ||
4643 | /* Reset HBA AER if it was enabled, note hba_flag was reset above */ | 4650 | /* Reset HBA AER if it was enabled, note hba_flag was reset above */ |
4644 | if (hba_aer_enabled) | 4651 | if (hba_aer_enabled) |
@@ -9110,8 +9117,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
9110 | } | 9117 | } |
9111 | /* Note, word 10 is already initialized to 0 */ | 9118 | /* Note, word 10 is already initialized to 0 */ |
9112 | 9119 | ||
9113 | /* Don't set PBDE for Perf hints, just fcp_embed_pbde */ | 9120 | /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */ |
9114 | if (phba->fcp_embed_pbde) | 9121 | if (phba->cfg_enable_pbde) |
9115 | bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1); | 9122 | bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1); |
9116 | else | 9123 | else |
9117 | bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0); | 9124 | bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0); |
@@ -9174,8 +9181,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, | |||
9174 | } | 9181 | } |
9175 | /* Note, word 10 is already initialized to 0 */ | 9182 | /* Note, word 10 is already initialized to 0 */ |
9176 | 9183 | ||
9177 | /* Don't set PBDE for Perf hints, just fcp_embed_pbde */ | 9184 | /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */ |
9178 | if (phba->fcp_embed_pbde) | 9185 | if (phba->cfg_enable_pbde) |
9179 | bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1); | 9186 | bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1); |
9180 | else | 9187 | else |
9181 | bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0); | 9188 | bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0); |
@@ -10696,6 +10703,12 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
10696 | 10703 | ||
10697 | spin_lock_irq(&phba->hbalock); | 10704 | spin_lock_irq(&phba->hbalock); |
10698 | if (phba->sli_rev < LPFC_SLI_REV4) { | 10705 | if (phba->sli_rev < LPFC_SLI_REV4) { |
10706 | if (irsp->ulpCommand == CMD_ABORT_XRI_CX && | ||
10707 | irsp->ulpStatus == IOSTAT_LOCAL_REJECT && | ||
10708 | irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) { | ||
10709 | spin_unlock_irq(&phba->hbalock); | ||
10710 | goto release_iocb; | ||
10711 | } | ||
10699 | if (abort_iotag != 0 && | 10712 | if (abort_iotag != 0 && |
10700 | abort_iotag <= phba->sli.last_iotag) | 10713 | abort_iotag <= phba->sli.last_iotag) |
10701 | abort_iocb = | 10714 | abort_iocb = |
@@ -10717,6 +10730,7 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, | |||
10717 | 10730 | ||
10718 | spin_unlock_irq(&phba->hbalock); | 10731 | spin_unlock_irq(&phba->hbalock); |
10719 | } | 10732 | } |
10733 | release_iocb: | ||
10720 | lpfc_sli_release_iocbq(phba, cmdiocb); | 10734 | lpfc_sli_release_iocbq(phba, cmdiocb); |
10721 | return; | 10735 | return; |
10722 | } | 10736 | } |
@@ -10773,6 +10787,7 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
10773 | IOCB_t *iabt = NULL; | 10787 | IOCB_t *iabt = NULL; |
10774 | int retval; | 10788 | int retval; |
10775 | unsigned long iflags; | 10789 | unsigned long iflags; |
10790 | struct lpfc_nodelist *ndlp; | ||
10776 | 10791 | ||
10777 | lockdep_assert_held(&phba->hbalock); | 10792 | lockdep_assert_held(&phba->hbalock); |
10778 | 10793 | ||
@@ -10803,9 +10818,13 @@ lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | |||
10803 | if (phba->sli_rev == LPFC_SLI_REV4) { | 10818 | if (phba->sli_rev == LPFC_SLI_REV4) { |
10804 | iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; | 10819 | iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag; |
10805 | iabt->un.acxri.abortContextTag = cmdiocb->iotag; | 10820 | iabt->un.acxri.abortContextTag = cmdiocb->iotag; |
10806 | } | 10821 | } else { |
10807 | else | ||
10808 | iabt->un.acxri.abortIoTag = icmd->ulpIoTag; | 10822 | iabt->un.acxri.abortIoTag = icmd->ulpIoTag; |
10823 | if (pring->ringno == LPFC_ELS_RING) { | ||
10824 | ndlp = (struct lpfc_nodelist *)(cmdiocb->context1); | ||
10825 | iabt->un.acxri.abortContextTag = ndlp->nlp_rpi; | ||
10826 | } | ||
10827 | } | ||
10809 | iabt->ulpLe = 1; | 10828 | iabt->ulpLe = 1; |
10810 | iabt->ulpClass = icmd->ulpClass; | 10829 | iabt->ulpClass = icmd->ulpClass; |
10811 | 10830 | ||
@@ -11084,10 +11103,11 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, | |||
11084 | struct lpfc_scsi_buf *lpfc_cmd; | 11103 | struct lpfc_scsi_buf *lpfc_cmd; |
11085 | int rc = 1; | 11104 | int rc = 1; |
11086 | 11105 | ||
11087 | if (!(iocbq->iocb_flag & LPFC_IO_FCP)) | 11106 | if (iocbq->vport != vport) |
11088 | return rc; | 11107 | return rc; |
11089 | 11108 | ||
11090 | if (iocbq->vport != vport) | 11109 | if (!(iocbq->iocb_flag & LPFC_IO_FCP) || |
11110 | !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) | ||
11091 | return rc; | 11111 | return rc; |
11092 | 11112 | ||
11093 | lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); | 11113 | lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq); |
@@ -11097,13 +11117,13 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport, | |||
11097 | 11117 | ||
11098 | switch (ctx_cmd) { | 11118 | switch (ctx_cmd) { |
11099 | case LPFC_CTX_LUN: | 11119 | case LPFC_CTX_LUN: |
11100 | if ((lpfc_cmd->rdata->pnode) && | 11120 | if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && |
11101 | (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && | 11121 | (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) && |
11102 | (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) | 11122 | (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id)) |
11103 | rc = 0; | 11123 | rc = 0; |
11104 | break; | 11124 | break; |
11105 | case LPFC_CTX_TGT: | 11125 | case LPFC_CTX_TGT: |
11106 | if ((lpfc_cmd->rdata->pnode) && | 11126 | if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) && |
11107 | (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) | 11127 | (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id)) |
11108 | rc = 0; | 11128 | rc = 0; |
11109 | break; | 11129 | break; |
@@ -11218,6 +11238,10 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, | |||
11218 | int errcnt = 0, ret_val = 0; | 11238 | int errcnt = 0, ret_val = 0; |
11219 | int i; | 11239 | int i; |
11220 | 11240 | ||
11241 | /* all I/Os are in process of being flushed */ | ||
11242 | if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) | ||
11243 | return errcnt; | ||
11244 | |||
11221 | for (i = 1; i <= phba->sli.last_iotag; i++) { | 11245 | for (i = 1; i <= phba->sli.last_iotag; i++) { |
11222 | iocbq = phba->sli.iocbq_lookup[i]; | 11246 | iocbq = phba->sli.iocbq_lookup[i]; |
11223 | 11247 | ||
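
Several independent fixes land in lpfc_sli.c: the list-format WQ doorbell no longer writes the host index on SLI-4 if_type 6 so bits 23:16 stay clear for DPP, stats_start moves to ktime_get_seconds(), the PBDE knob becomes cfg_enable_pbde, the ELS abort completion bails out early on an already-aborted local reject, and lpfc_sli_validate_fcp_iocb() checks the vport first and guards rdata against NULL. The doorbell piece reduces to conditionally composing one 32-bit word; the sketch below uses made-up shift positions purely to show the shape of the logic, not the real SLI-4 register layout.

/* Illustrative WQ doorbell composition: queue id and posted count are
 * always written, the host index (bits 23:16) is skipped for if_type 6
 * so the DPP hardware owns those bits.  Offsets are assumptions. */
#include <stdint.h>
#include <stdio.h>

#define IF_TYPE_6            6
#define DB_NUM_POSTED_SHIFT  24
#define DB_INDEX_SHIFT       16        /* bits 23:16 */
#define DB_QID_MASK          0xffff

static uint32_t build_wq_doorbell(int if_type, uint32_t host_index,
                                  uint32_t queue_id)
{
        uint32_t db = 0;

        db |= 1u << DB_NUM_POSTED_SHIFT;          /* one WQE posted */
        db |= queue_id & DB_QID_MASK;
        if (if_type != IF_TYPE_6)                 /* leave 23:16 clear for DPP */
                db |= (host_index & 0xff) << DB_INDEX_SHIFT;
        return db;
}

int main(void)
{
        printf("if_type 2: 0x%08x\n", build_wq_doorbell(2, 5, 0x1a));
        printf("if_type 6: 0x%08x\n", build_wq_doorbell(IF_TYPE_6, 5, 0x1a));
        return 0;
}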
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 431754195505..34b7ab69b9b4 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h | |||
@@ -1,8 +1,8 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * | 4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * | 6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | 7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * | 8 | * www.broadcom.com * |
@@ -339,7 +339,7 @@ struct lpfc_sli { | |||
339 | struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */ | 339 | struct lpfc_iocbq ** iocbq_lookup; /* array to lookup IOCB by IOTAG */ |
340 | size_t iocbq_lookup_len; /* current lengs of the array */ | 340 | size_t iocbq_lookup_len; /* current lengs of the array */ |
341 | uint16_t last_iotag; /* last allocated IOTAG */ | 341 | uint16_t last_iotag; /* last allocated IOTAG */ |
342 | unsigned long stats_start; /* in seconds */ | 342 | time64_t stats_start; /* in seconds */ |
343 | struct lpfc_lnk_stat lnk_stat_offsets; | 343 | struct lpfc_lnk_stat lnk_stat_offsets; |
344 | }; | 344 | }; |
345 | 345 | ||
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index cf64aca82bd0..399c0015c546 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h | |||
@@ -2,7 +2,7 @@ | |||
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * | 4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | * Copyright (C) 2009-2016 Emulex. All rights reserved. * | 6 | * Copyright (C) 2009-2016 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | 7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * | 8 | * www.broadcom.com * |
@@ -490,6 +490,7 @@ struct lpfc_pc_sli4_params { | |||
490 | uint8_t eqav; | 490 | uint8_t eqav; |
491 | uint8_t cqav; | 491 | uint8_t cqav; |
492 | uint8_t wqsize; | 492 | uint8_t wqsize; |
493 | uint8_t bv1s; | ||
493 | #define LPFC_WQ_SZ64_SUPPORT 1 | 494 | #define LPFC_WQ_SZ64_SUPPORT 1 |
494 | #define LPFC_WQ_SZ128_SUPPORT 2 | 495 | #define LPFC_WQ_SZ128_SUPPORT 2 |
495 | uint8_t wqpcnt; | 496 | uint8_t wqpcnt; |
@@ -774,7 +775,9 @@ struct lpfc_rdp_context { | |||
774 | struct lpfc_lcb_context { | 775 | struct lpfc_lcb_context { |
775 | uint8_t sub_command; | 776 | uint8_t sub_command; |
776 | uint8_t type; | 777 | uint8_t type; |
778 | uint8_t capability; | ||
777 | uint8_t frequency; | 779 | uint8_t frequency; |
780 | uint16_t duration; | ||
778 | uint16_t ox_id; | 781 | uint16_t ox_id; |
779 | uint16_t rx_id; | 782 | uint16_t rx_id; |
780 | struct lpfc_nodelist *ndlp; | 783 | struct lpfc_nodelist *ndlp; |
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 18c23afcf46b..501249509af4 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h | |||
@@ -2,7 +2,7 @@ | |||
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * | 4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * | 6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | 7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * | 8 | * www.broadcom.com * |
@@ -20,7 +20,7 @@ | |||
20 | * included with this package. * | 20 | * included with this package. * |
21 | *******************************************************************/ | 21 | *******************************************************************/ |
22 | 22 | ||
23 | #define LPFC_DRIVER_VERSION "12.0.0.4" | 23 | #define LPFC_DRIVER_VERSION "12.0.0.6" |
24 | #define LPFC_DRIVER_NAME "lpfc" | 24 | #define LPFC_DRIVER_NAME "lpfc" |
25 | 25 | ||
26 | /* Used for SLI 2/3 */ | 26 | /* Used for SLI 2/3 */ |
@@ -33,5 +33,5 @@ | |||
33 | #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ | 33 | #define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \ |
34 | LPFC_DRIVER_VERSION | 34 | LPFC_DRIVER_VERSION |
35 | #define LPFC_COPYRIGHT "Copyright (C) 2017-2018 Broadcom. All Rights " \ | 35 | #define LPFC_COPYRIGHT "Copyright (C) 2017-2018 Broadcom. All Rights " \ |
36 | "Reserved. The term \"Broadcom\" refers to Broadcom Limited " \ | 36 | "Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \ |
37 | "and/or its subsidiaries." | 37 | "and/or its subsidiaries." |
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 81bc12dedf41..1ff0f7de9105 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c | |||
@@ -1,8 +1,8 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * | 4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * | 6 | * Copyright (C) 2004-2016 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | 7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * | 8 | * www.broadcom.com * |
diff --git a/drivers/scsi/lpfc/lpfc_vport.h b/drivers/scsi/lpfc/lpfc_vport.h index 62295971f66c..f4b8528dd2e7 100644 --- a/drivers/scsi/lpfc/lpfc_vport.h +++ b/drivers/scsi/lpfc/lpfc_vport.h | |||
@@ -1,8 +1,8 @@ | |||
1 | /******************************************************************* | 1 | /******************************************************************* |
2 | * This file is part of the Emulex Linux Device Driver for * | 2 | * This file is part of the Emulex Linux Device Driver for * |
3 | * Fibre Channel Host Bus Adapters. * | 3 | * Fibre Channel Host Bus Adapters. * |
4 | * Copyright (C) 2017 Broadcom. All Rights Reserved. The term * | 4 | * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term * |
5 | * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. * | 5 | * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * |
6 | * Copyright (C) 2004-2006 Emulex. All rights reserved. * | 6 | * Copyright (C) 2004-2006 Emulex. All rights reserved. * |
7 | * EMULEX and SLI are trademarks of Emulex. * | 7 | * EMULEX and SLI are trademarks of Emulex. * |
8 | * www.broadcom.com * | 8 | * www.broadcom.com * |
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 8e8cf1145d7f..8c7154143a4e 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c | |||
@@ -371,7 +371,7 @@ mega_runpendq(adapter_t *adapter) | |||
371 | * The command queuing entry point for the mid-layer. | 371 | * The command queuing entry point for the mid-layer. |
372 | */ | 372 | */ |
373 | static int | 373 | static int |
374 | megaraid_queue_lck(Scsi_Cmnd *scmd, void (*done)(Scsi_Cmnd *)) | 374 | megaraid_queue_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) |
375 | { | 375 | { |
376 | adapter_t *adapter; | 376 | adapter_t *adapter; |
377 | scb_t *scb; | 377 | scb_t *scb; |
@@ -425,7 +425,7 @@ static DEF_SCSI_QCMD(megaraid_queue) | |||
425 | * commands. | 425 | * commands. |
426 | */ | 426 | */ |
427 | static inline scb_t * | 427 | static inline scb_t * |
428 | mega_allocate_scb(adapter_t *adapter, Scsi_Cmnd *cmd) | 428 | mega_allocate_scb(adapter_t *adapter, struct scsi_cmnd *cmd) |
429 | { | 429 | { |
430 | struct list_head *head = &adapter->free_list; | 430 | struct list_head *head = &adapter->free_list; |
431 | scb_t *scb; | 431 | scb_t *scb; |
@@ -457,7 +457,7 @@ mega_allocate_scb(adapter_t *adapter, Scsi_Cmnd *cmd) | |||
457 | * and the channel number. | 457 | * and the channel number. |
458 | */ | 458 | */ |
459 | static inline int | 459 | static inline int |
460 | mega_get_ldrv_num(adapter_t *adapter, Scsi_Cmnd *cmd, int channel) | 460 | mega_get_ldrv_num(adapter_t *adapter, struct scsi_cmnd *cmd, int channel) |
461 | { | 461 | { |
462 | int tgt; | 462 | int tgt; |
463 | int ldrv_num; | 463 | int ldrv_num; |
@@ -520,7 +520,7 @@ mega_get_ldrv_num(adapter_t *adapter, Scsi_Cmnd *cmd, int channel) | |||
520 | * boot settings. | 520 | * boot settings. |
521 | */ | 521 | */ |
522 | static scb_t * | 522 | static scb_t * |
523 | mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy) | 523 | mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy) |
524 | { | 524 | { |
525 | mega_ext_passthru *epthru; | 525 | mega_ext_passthru *epthru; |
526 | mega_passthru *pthru; | 526 | mega_passthru *pthru; |
@@ -951,8 +951,8 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy) | |||
951 | * prepare a command for the scsi physical devices. | 951 | * prepare a command for the scsi physical devices. |
952 | */ | 952 | */ |
953 | static mega_passthru * | 953 | static mega_passthru * |
954 | mega_prepare_passthru(adapter_t *adapter, scb_t *scb, Scsi_Cmnd *cmd, | 954 | mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd, |
955 | int channel, int target) | 955 | int channel, int target) |
956 | { | 956 | { |
957 | mega_passthru *pthru; | 957 | mega_passthru *pthru; |
958 | 958 | ||
@@ -1015,8 +1015,9 @@ mega_prepare_passthru(adapter_t *adapter, scb_t *scb, Scsi_Cmnd *cmd, | |||
1015 | * commands for devices which can take extended CDBs (>10 bytes) | 1015 | * commands for devices which can take extended CDBs (>10 bytes) |
1016 | */ | 1016 | */ |
1017 | static mega_ext_passthru * | 1017 | static mega_ext_passthru * |
1018 | mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb, Scsi_Cmnd *cmd, | 1018 | mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb, |
1019 | int channel, int target) | 1019 | struct scsi_cmnd *cmd, |
1020 | int channel, int target) | ||
1020 | { | 1021 | { |
1021 | mega_ext_passthru *epthru; | 1022 | mega_ext_passthru *epthru; |
1022 | 1023 | ||
@@ -1417,7 +1418,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status) | |||
1417 | { | 1418 | { |
1418 | mega_ext_passthru *epthru = NULL; | 1419 | mega_ext_passthru *epthru = NULL; |
1419 | struct scatterlist *sgl; | 1420 | struct scatterlist *sgl; |
1420 | Scsi_Cmnd *cmd = NULL; | 1421 | struct scsi_cmnd *cmd = NULL; |
1421 | mega_passthru *pthru = NULL; | 1422 | mega_passthru *pthru = NULL; |
1422 | mbox_t *mbox = NULL; | 1423 | mbox_t *mbox = NULL; |
1423 | u8 c; | 1424 | u8 c; |
@@ -1652,14 +1653,14 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status) | |||
1652 | static void | 1653 | static void |
1653 | mega_rundoneq (adapter_t *adapter) | 1654 | mega_rundoneq (adapter_t *adapter) |
1654 | { | 1655 | { |
1655 | Scsi_Cmnd *cmd; | 1656 | struct scsi_cmnd *cmd; |
1656 | struct list_head *pos; | 1657 | struct list_head *pos; |
1657 | 1658 | ||
1658 | list_for_each(pos, &adapter->completed_list) { | 1659 | list_for_each(pos, &adapter->completed_list) { |
1659 | 1660 | ||
1660 | struct scsi_pointer* spos = (struct scsi_pointer *)pos; | 1661 | struct scsi_pointer* spos = (struct scsi_pointer *)pos; |
1661 | 1662 | ||
1662 | cmd = list_entry(spos, Scsi_Cmnd, SCp); | 1663 | cmd = list_entry(spos, struct scsi_cmnd, SCp); |
1663 | cmd->scsi_done(cmd); | 1664 | cmd->scsi_done(cmd); |
1664 | } | 1665 | } |
1665 | 1666 | ||
@@ -1722,7 +1723,7 @@ static int | |||
1722 | mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len) | 1723 | mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len) |
1723 | { | 1724 | { |
1724 | struct scatterlist *sg; | 1725 | struct scatterlist *sg; |
1725 | Scsi_Cmnd *cmd; | 1726 | struct scsi_cmnd *cmd; |
1726 | int sgcnt; | 1727 | int sgcnt; |
1727 | int idx; | 1728 | int idx; |
1728 | 1729 | ||
@@ -1869,7 +1870,7 @@ megaraid_info(struct Scsi_Host *host) | |||
1869 | * aborted. All the commands issued to the F/W must complete. | 1870 | * aborted. All the commands issued to the F/W must complete. |
1870 | */ | 1871 | */ |
1871 | static int | 1872 | static int |
1872 | megaraid_abort(Scsi_Cmnd *cmd) | 1873 | megaraid_abort(struct scsi_cmnd *cmd) |
1873 | { | 1874 | { |
1874 | adapter_t *adapter; | 1875 | adapter_t *adapter; |
1875 | int rval; | 1876 | int rval; |
@@ -1933,7 +1934,7 @@ megaraid_reset(struct scsi_cmnd *cmd) | |||
1933 | * issued to the controller, abort/reset it. Otherwise return failure | 1934 | * issued to the controller, abort/reset it. Otherwise return failure |
1934 | */ | 1935 | */ |
1935 | static int | 1936 | static int |
1936 | megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor) | 1937 | megaraid_abort_and_reset(adapter_t *adapter, struct scsi_cmnd *cmd, int aor) |
1937 | { | 1938 | { |
1938 | struct list_head *pos, *next; | 1939 | struct list_head *pos, *next; |
1939 | scb_t *scb; | 1940 | scb_t *scb; |
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h index 18e85d9267ff..cce23a086fbe 100644 --- a/drivers/scsi/megaraid.h +++ b/drivers/scsi/megaraid.h | |||
@@ -191,7 +191,7 @@ typedef struct { | |||
191 | u32 dma_type; | 191 | u32 dma_type; |
192 | u32 dma_direction; | 192 | u32 dma_direction; |
193 | 193 | ||
194 | Scsi_Cmnd *cmd; | 194 | struct scsi_cmnd *cmd; |
195 | dma_addr_t dma_h_bulkdata; | 195 | dma_addr_t dma_h_bulkdata; |
196 | dma_addr_t dma_h_sgdata; | 196 | dma_addr_t dma_h_sgdata; |
197 | 197 | ||
@@ -942,7 +942,7 @@ static int issue_scb(adapter_t *, scb_t *); | |||
942 | static int mega_setup_mailbox(adapter_t *); | 942 | static int mega_setup_mailbox(adapter_t *); |
943 | 943 | ||
944 | static int megaraid_queue (struct Scsi_Host *, struct scsi_cmnd *); | 944 | static int megaraid_queue (struct Scsi_Host *, struct scsi_cmnd *); |
945 | static scb_t * mega_build_cmd(adapter_t *, Scsi_Cmnd *, int *); | 945 | static scb_t * mega_build_cmd(adapter_t *, struct scsi_cmnd *, int *); |
946 | static void __mega_runpendq(adapter_t *); | 946 | static void __mega_runpendq(adapter_t *); |
947 | static int issue_scb_block(adapter_t *, u_char *); | 947 | static int issue_scb_block(adapter_t *, u_char *); |
948 | 948 | ||
@@ -951,9 +951,9 @@ static irqreturn_t megaraid_isr_iomapped(int, void *); | |||
951 | 951 | ||
952 | static void mega_free_scb(adapter_t *, scb_t *); | 952 | static void mega_free_scb(adapter_t *, scb_t *); |
953 | 953 | ||
954 | static int megaraid_abort(Scsi_Cmnd *); | 954 | static int megaraid_abort(struct scsi_cmnd *); |
955 | static int megaraid_reset(Scsi_Cmnd *); | 955 | static int megaraid_reset(struct scsi_cmnd *); |
956 | static int megaraid_abort_and_reset(adapter_t *, Scsi_Cmnd *, int); | 956 | static int megaraid_abort_and_reset(adapter_t *, struct scsi_cmnd *, int); |
957 | static int megaraid_biosparam(struct scsi_device *, struct block_device *, | 957 | static int megaraid_biosparam(struct scsi_device *, struct block_device *, |
958 | sector_t, int []); | 958 | sector_t, int []); |
959 | 959 | ||
@@ -983,9 +983,9 @@ static int mega_internal_dev_inquiry(adapter_t *, u8, u8, dma_addr_t); | |||
983 | 983 | ||
984 | static int mega_support_ext_cdb(adapter_t *); | 984 | static int mega_support_ext_cdb(adapter_t *); |
985 | static mega_passthru* mega_prepare_passthru(adapter_t *, scb_t *, | 985 | static mega_passthru* mega_prepare_passthru(adapter_t *, scb_t *, |
986 | Scsi_Cmnd *, int, int); | 986 | struct scsi_cmnd *, int, int); |
987 | static mega_ext_passthru* mega_prepare_extpassthru(adapter_t *, | 987 | static mega_ext_passthru* mega_prepare_extpassthru(adapter_t *, |
988 | scb_t *, Scsi_Cmnd *, int, int); | 988 | scb_t *, struct scsi_cmnd *, int, int); |
989 | static void mega_enum_raid_scsi(adapter_t *); | 989 | static void mega_enum_raid_scsi(adapter_t *); |
990 | static void mega_get_boot_drv(adapter_t *); | 990 | static void mega_get_boot_drv(adapter_t *); |
991 | static int mega_support_random_del(adapter_t *); | 991 | static int mega_support_random_del(adapter_t *); |
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 75dc25f78336..67d356d84717 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h | |||
@@ -35,8 +35,8 @@ | |||
35 | /* | 35 | /* |
36 | * MegaRAID SAS Driver meta data | 36 | * MegaRAID SAS Driver meta data |
37 | */ | 37 | */ |
38 | #define MEGASAS_VERSION "07.705.02.00-rc1" | 38 | #define MEGASAS_VERSION "07.706.03.00-rc1" |
39 | #define MEGASAS_RELDATE "April 4, 2018" | 39 | #define MEGASAS_RELDATE "May 21, 2018" |
40 | 40 | ||
41 | /* | 41 | /* |
42 | * Device IDs | 42 | * Device IDs |
@@ -709,7 +709,8 @@ struct MR_TARGET_PROPERTIES { | |||
709 | u32 max_io_size_kb; | 709 | u32 max_io_size_kb; |
710 | u32 device_qdepth; | 710 | u32 device_qdepth; |
711 | u32 sector_size; | 711 | u32 sector_size; |
712 | u8 reserved[500]; | 712 | u8 reset_tmo; |
713 | u8 reserved[499]; | ||
713 | } __packed; | 714 | } __packed; |
714 | 715 | ||
715 | /* | 716 | /* |
@@ -1400,6 +1401,19 @@ struct megasas_ctrl_info { | |||
1400 | #endif | 1401 | #endif |
1401 | } adapter_operations4; | 1402 | } adapter_operations4; |
1402 | u8 pad[0x800 - 0x7FE]; /* 0x7FE pad to 2K for expansion */ | 1403 | u8 pad[0x800 - 0x7FE]; /* 0x7FE pad to 2K for expansion */ |
1404 | |||
1405 | u32 size; | ||
1406 | u32 pad1; | ||
1407 | |||
1408 | u8 reserved6[64]; | ||
1409 | |||
1410 | u32 rsvdForAdptOp[64]; | ||
1411 | |||
1412 | u8 reserved7[3]; | ||
1413 | |||
1414 | u8 TaskAbortTO; /* Timeout value in seconds used by Abort Task TM */ | ||
1415 | u8 MaxResetTO; /* Max Supported Reset timeout in seconds. */ | ||
1416 | u8 reserved8[3]; | ||
1403 | } __packed; | 1417 | } __packed; |
1404 | 1418 | ||
1405 | /* | 1419 | /* |
@@ -1472,6 +1486,7 @@ enum FW_BOOT_CONTEXT { | |||
1472 | #define MEGASAS_DEFAULT_CMD_TIMEOUT 90 | 1486 | #define MEGASAS_DEFAULT_CMD_TIMEOUT 90 |
1473 | #define MEGASAS_THROTTLE_QUEUE_DEPTH 16 | 1487 | #define MEGASAS_THROTTLE_QUEUE_DEPTH 16 |
1474 | #define MEGASAS_BLOCKED_CMD_TIMEOUT 60 | 1488 | #define MEGASAS_BLOCKED_CMD_TIMEOUT 60 |
1489 | #define MEGASAS_DEFAULT_TM_TIMEOUT 50 | ||
1475 | /* | 1490 | /* |
1476 | * FW reports the maximum of number of commands that it can accept (maximum | 1491 | * FW reports the maximum of number of commands that it can accept (maximum |
1477 | * commands that can be outstanding) at any time. The driver must report a | 1492 | * commands that can be outstanding) at any time. The driver must report a |
@@ -1915,7 +1930,9 @@ struct MR_PRIV_DEVICE { | |||
1915 | bool is_tm_capable; | 1930 | bool is_tm_capable; |
1916 | bool tm_busy; | 1931 | bool tm_busy; |
1917 | atomic_t r1_ldio_hint; | 1932 | atomic_t r1_ldio_hint; |
1918 | u8 interface_type; | 1933 | u8 interface_type; |
1934 | u8 task_abort_tmo; | ||
1935 | u8 target_reset_tmo; | ||
1919 | }; | 1936 | }; |
1920 | struct megasas_cmd; | 1937 | struct megasas_cmd; |
1921 | 1938 | ||
@@ -2291,6 +2308,8 @@ struct megasas_instance { | |||
2291 | u8 adapter_type; | 2308 | u8 adapter_type; |
2292 | bool consistent_mask_64bit; | 2309 | bool consistent_mask_64bit; |
2293 | bool support_nvme_passthru; | 2310 | bool support_nvme_passthru; |
2311 | u8 task_abort_tmo; | ||
2312 | u8 max_reset_tmo; | ||
2294 | }; | 2313 | }; |
2295 | struct MR_LD_VF_MAP { | 2314 | struct MR_LD_VF_MAP { |
2296 | u32 size; | 2315 | u32 size; |
@@ -2512,7 +2531,11 @@ int megasas_get_ctrl_info(struct megasas_instance *instance); | |||
2512 | /* PD sequence */ | 2531 | /* PD sequence */ |
2513 | int | 2532 | int |
2514 | megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend); | 2533 | megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend); |
2515 | void megasas_set_dynamic_target_properties(struct scsi_device *sdev); | 2534 | void megasas_set_dynamic_target_properties(struct scsi_device *sdev, |
2535 | bool is_target_prop); | ||
2536 | int megasas_get_target_prop(struct megasas_instance *instance, | ||
2537 | struct scsi_device *sdev); | ||
2538 | |||
2516 | int megasas_set_crash_dump_params(struct megasas_instance *instance, | 2539 | int megasas_set_crash_dump_params(struct megasas_instance *instance, |
2517 | u8 crash_buf_state); | 2540 | u8 crash_buf_state); |
2518 | void megasas_free_host_crash_buffer(struct megasas_instance *instance); | 2541 | void megasas_free_host_crash_buffer(struct megasas_instance *instance); |
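
megaraid_sas.h carves the new per-target reset_tmo byte out of the existing reserved area (500 -> 499 bytes) and appends TaskAbortTO/MaxResetTO fields to the controller-info structure, so the firmware-visible layouts keep their sizes. The sketch below reproduces that reserved-array trick using only the MR_TARGET_PROPERTIES fields visible in the hunk; it is a reduced stand-in, not the full structure, and the size check simply proves the layout did not grow.

/* Reduced stand-in showing why the reserved array shrinks by exactly
 * the size of the new field: the packed struct size must not change. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct target_properties_old {
        uint32_t max_io_size_kb;
        uint32_t device_qdepth;
        uint32_t sector_size;
        uint8_t  reserved[500];
} __attribute__((packed));

struct target_properties_new {
        uint32_t max_io_size_kb;
        uint32_t device_qdepth;
        uint32_t sector_size;
        uint8_t  reset_tmo;          /* new field */
        uint8_t  reserved[499];      /* shrunk by one byte */
} __attribute__((packed));

int main(void)
{
        /* Layout must look identical from the firmware's point of view. */
        static_assert(sizeof(struct target_properties_old) ==
                      sizeof(struct target_properties_new),
                      "size mismatch");
        printf("%zu bytes\n", sizeof(struct target_properties_new));
        return 0;
}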
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 71d97573a667..9aa9590c5373 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c | |||
@@ -120,8 +120,7 @@ static int megasas_register_aen(struct megasas_instance *instance, | |||
120 | u32 seq_num, u32 class_locale_word); | 120 | u32 seq_num, u32 class_locale_word); |
121 | static void megasas_get_pd_info(struct megasas_instance *instance, | 121 | static void megasas_get_pd_info(struct megasas_instance *instance, |
122 | struct scsi_device *sdev); | 122 | struct scsi_device *sdev); |
123 | static int megasas_get_target_prop(struct megasas_instance *instance, | 123 | |
124 | struct scsi_device *sdev); | ||
125 | /* | 124 | /* |
126 | * PCI ID table for all supported controllers | 125 | * PCI ID table for all supported controllers |
127 | */ | 126 | */ |
@@ -1794,7 +1793,8 @@ static struct megasas_instance *megasas_lookup_instance(u16 host_no) | |||
1794 | * | 1793 | * |
1795 | * Returns void | 1794 | * Returns void |
1796 | */ | 1795 | */ |
1797 | void megasas_set_dynamic_target_properties(struct scsi_device *sdev) | 1796 | void megasas_set_dynamic_target_properties(struct scsi_device *sdev, |
1797 | bool is_target_prop) | ||
1798 | { | 1798 | { |
1799 | u16 pd_index = 0, ld; | 1799 | u16 pd_index = 0, ld; |
1800 | u32 device_id; | 1800 | u32 device_id; |
@@ -1834,6 +1834,22 @@ void megasas_set_dynamic_target_properties(struct scsi_device *sdev) | |||
1834 | mr_device_priv_data->is_tm_capable = | 1834 | mr_device_priv_data->is_tm_capable = |
1835 | pd_sync->seq[pd_index].capability.tmCapable; | 1835 | pd_sync->seq[pd_index].capability.tmCapable; |
1836 | } | 1836 | } |
1837 | |||
1838 | if (is_target_prop && instance->tgt_prop->reset_tmo) { | ||
1839 | /* | ||
1840 | * If FW provides a target reset timeout value, driver will use | ||
1841 | * it. If not set, fallback to default values. | ||
1842 | */ | ||
1843 | mr_device_priv_data->target_reset_tmo = | ||
1844 | min_t(u8, instance->max_reset_tmo, | ||
1845 | instance->tgt_prop->reset_tmo); | ||
1846 | mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo; | ||
1847 | } else { | ||
1848 | mr_device_priv_data->target_reset_tmo = | ||
1849 | MEGASAS_DEFAULT_TM_TIMEOUT; | ||
1850 | mr_device_priv_data->task_abort_tmo = | ||
1851 | MEGASAS_DEFAULT_TM_TIMEOUT; | ||
1852 | } | ||
1837 | } | 1853 | } |
1838 | 1854 | ||
1839 | /* | 1855 | /* |
@@ -1967,10 +1983,10 @@ static int megasas_slave_configure(struct scsi_device *sdev) | |||
1967 | is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false; | 1983 | is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false; |
1968 | megasas_set_static_target_properties(sdev, is_target_prop); | 1984 | megasas_set_static_target_properties(sdev, is_target_prop); |
1969 | 1985 | ||
1970 | mutex_unlock(&instance->reset_mutex); | ||
1971 | |||
1972 | /* This sdev property may change post OCR */ | 1986 | /* This sdev property may change post OCR */ |
1973 | megasas_set_dynamic_target_properties(sdev); | 1987 | megasas_set_dynamic_target_properties(sdev, is_target_prop); |
1988 | |||
1989 | mutex_unlock(&instance->reset_mutex); | ||
1974 | 1990 | ||
1975 | return 0; | 1991 | return 0; |
1976 | } | 1992 | } |
@@ -2818,7 +2834,7 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd) | |||
2818 | "SCSI command pointer: (%p)\t SCSI host state: %d\t" | 2834 | "SCSI command pointer: (%p)\t SCSI host state: %d\t" |
2819 | " SCSI host busy: %d\t FW outstanding: %d\n", | 2835 | " SCSI host busy: %d\t FW outstanding: %d\n", |
2820 | scmd, scmd->device->host->shost_state, | 2836 | scmd, scmd->device->host->shost_state, |
2821 | atomic_read((atomic_t *)&scmd->device->host->host_busy), | 2837 | scsi_host_busy(scmd->device->host), |
2822 | atomic_read(&instance->fw_outstanding)); | 2838 | atomic_read(&instance->fw_outstanding)); |
2823 | 2839 | ||
2824 | /* | 2840 | /* |
@@ -4720,6 +4736,8 @@ megasas_get_ctrl_info(struct megasas_instance *instance) | |||
4720 | ci->adapter_operations4.support_pd_map_target_id; | 4736 | ci->adapter_operations4.support_pd_map_target_id; |
4721 | instance->support_nvme_passthru = | 4737 | instance->support_nvme_passthru = |
4722 | ci->adapter_operations4.support_nvme_passthru; | 4738 | ci->adapter_operations4.support_nvme_passthru; |
4739 | instance->task_abort_tmo = ci->TaskAbortTO; | ||
4740 | instance->max_reset_tmo = ci->MaxResetTO; | ||
4723 | 4741 | ||
4724 | /*Check whether controller is iMR or MR */ | 4742 | /*Check whether controller is iMR or MR */ |
4725 | instance->is_imr = (ci->memory_size ? 0 : 1); | 4743 | instance->is_imr = (ci->memory_size ? 0 : 1); |
@@ -4738,6 +4756,10 @@ megasas_get_ctrl_info(struct megasas_instance *instance) | |||
4738 | instance->secure_jbod_support ? "Yes" : "No"); | 4756 | instance->secure_jbod_support ? "Yes" : "No"); |
4739 | dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n", | 4757 | dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n", |
4740 | instance->support_nvme_passthru ? "Yes" : "No"); | 4758 | instance->support_nvme_passthru ? "Yes" : "No"); |
4759 | dev_info(&instance->pdev->dev, | ||
4760 | "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n", | ||
4761 | instance->task_abort_tmo, instance->max_reset_tmo); | ||
4762 | |||
4741 | break; | 4763 | break; |
4742 | 4764 | ||
4743 | case DCMD_TIMEOUT: | 4765 | case DCMD_TIMEOUT: |
@@ -4755,14 +4777,15 @@ megasas_get_ctrl_info(struct megasas_instance *instance) | |||
4755 | __func__, __LINE__); | 4777 | __func__, __LINE__); |
4756 | break; | 4778 | break; |
4757 | } | 4779 | } |
4780 | break; | ||
4758 | case DCMD_FAILED: | 4781 | case DCMD_FAILED: |
4759 | megaraid_sas_kill_hba(instance); | 4782 | megaraid_sas_kill_hba(instance); |
4760 | break; | 4783 | break; |
4761 | 4784 | ||
4762 | } | 4785 | } |
4763 | 4786 | ||
4764 | megasas_return_cmd(instance, cmd); | 4787 | if (ret != DCMD_TIMEOUT) |
4765 | 4788 | megasas_return_cmd(instance, cmd); | |
4766 | 4789 | ||
4767 | return ret; | 4790 | return ret; |
4768 | } | 4791 | } |
@@ -5831,7 +5854,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num, | |||
5831 | * | 5854 | * |
5832 | * Returns 0 on success non-zero on failure. | 5855 | * Returns 0 on success non-zero on failure. |
5833 | */ | 5856 | */ |
5834 | static int | 5857 | int |
5835 | megasas_get_target_prop(struct megasas_instance *instance, | 5858 | megasas_get_target_prop(struct megasas_instance *instance, |
5836 | struct scsi_device *sdev) | 5859 | struct scsi_device *sdev) |
5837 | { | 5860 | { |
@@ -6789,6 +6812,9 @@ megasas_resume(struct pci_dev *pdev) | |||
6789 | goto fail_init_mfi; | 6812 | goto fail_init_mfi; |
6790 | } | 6813 | } |
6791 | 6814 | ||
6815 | if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) | ||
6816 | goto fail_init_mfi; | ||
6817 | |||
6792 | tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, | 6818 | tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet, |
6793 | (unsigned long)instance); | 6819 | (unsigned long)instance); |
6794 | 6820 | ||
@@ -6842,12 +6868,12 @@ megasas_wait_for_adapter_operational(struct megasas_instance *instance) | |||
6842 | { | 6868 | { |
6843 | int wait_time = MEGASAS_RESET_WAIT_TIME * 2; | 6869 | int wait_time = MEGASAS_RESET_WAIT_TIME * 2; |
6844 | int i; | 6870 | int i; |
6845 | 6871 | u8 adp_state; | |
6846 | if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) | ||
6847 | return 1; | ||
6848 | 6872 | ||
6849 | for (i = 0; i < wait_time; i++) { | 6873 | for (i = 0; i < wait_time; i++) { |
6850 | if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) | 6874 | adp_state = atomic_read(&instance->adprecovery); |
6875 | if ((adp_state == MEGASAS_HBA_OPERATIONAL) || | ||
6876 | (adp_state == MEGASAS_HW_CRITICAL_ERROR)) | ||
6851 | break; | 6877 | break; |
6852 | 6878 | ||
6853 | if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) | 6879 | if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) |
@@ -6856,9 +6882,10 @@ megasas_wait_for_adapter_operational(struct megasas_instance *instance) | |||
6856 | msleep(1000); | 6882 | msleep(1000); |
6857 | } | 6883 | } |
6858 | 6884 | ||
6859 | if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) { | 6885 | if (adp_state != MEGASAS_HBA_OPERATIONAL) { |
6860 | dev_info(&instance->pdev->dev, "%s timed out while waiting for HBA to recover.\n", | 6886 | dev_info(&instance->pdev->dev, |
6861 | __func__); | 6887 | "%s HBA failed to become operational, adp_state %d\n", |
6888 | __func__, adp_state); | ||
6862 | return 1; | 6889 | return 1; |
6863 | } | 6890 | } |
6864 | 6891 | ||
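
The base-driver changes read TaskAbortTO/MaxResetTO out of the controller info, and megasas_set_dynamic_target_properties() now clamps a firmware-provided per-target reset timeout against max_reset_tmo, falling back to MEGASAS_DEFAULT_TM_TIMEOUT (50 s) when the firmware reports nothing. That decision collapses to a few lines; the helper name below is invented for illustration, only the constant and the clamp come from the hunk.

/* Pick the target-reset timeout: firmware value clamped to the
 * controller maximum, default when the firmware gives nothing. */
#include <stdint.h>
#include <stdio.h>

#define MEGASAS_DEFAULT_TM_TIMEOUT 50

static uint8_t pick_reset_tmo(uint8_t fw_reset_tmo, uint8_t max_reset_tmo)
{
        if (!fw_reset_tmo)                      /* firmware gave nothing */
                return MEGASAS_DEFAULT_TM_TIMEOUT;
        return fw_reset_tmo < max_reset_tmo ?   /* min_t(u8, ...) equivalent */
               fw_reset_tmo : max_reset_tmo;
}

int main(void)
{
        printf("%u\n", pick_reset_tmo(0, 30));    /* -> 50 (default) */
        printf("%u\n", pick_reset_tmo(20, 30));   /* -> 20 */
        printf("%u\n", pick_reset_tmo(40, 30));   /* -> 30 (clamped) */
        return 0;
}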
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 94c23ad51179..c7f95bace353 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c | |||
@@ -4108,7 +4108,8 @@ megasas_tm_response_code(struct megasas_instance *instance, | |||
4108 | */ | 4108 | */ |
4109 | static int | 4109 | static int |
4110 | megasas_issue_tm(struct megasas_instance *instance, u16 device_handle, | 4110 | megasas_issue_tm(struct megasas_instance *instance, u16 device_handle, |
4111 | uint channel, uint id, u16 smid_task, u8 type) | 4111 | uint channel, uint id, u16 smid_task, u8 type, |
4112 | struct MR_PRIV_DEVICE *mr_device_priv_data) | ||
4112 | { | 4113 | { |
4113 | struct MR_TASK_MANAGE_REQUEST *mr_request; | 4114 | struct MR_TASK_MANAGE_REQUEST *mr_request; |
4114 | struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_request; | 4115 | struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_request; |
@@ -4119,6 +4120,7 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle, | |||
4119 | struct fusion_context *fusion = NULL; | 4120 | struct fusion_context *fusion = NULL; |
4120 | struct megasas_cmd_fusion *scsi_lookup; | 4121 | struct megasas_cmd_fusion *scsi_lookup; |
4121 | int rc; | 4122 | int rc; |
4123 | int timeout = MEGASAS_DEFAULT_TM_TIMEOUT; | ||
4122 | struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply; | 4124 | struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply; |
4123 | 4125 | ||
4124 | fusion = instance->ctrl_context; | 4126 | fusion = instance->ctrl_context; |
@@ -4170,7 +4172,16 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle, | |||
4170 | init_completion(&cmd_fusion->done); | 4172 | init_completion(&cmd_fusion->done); |
4171 | megasas_fire_cmd_fusion(instance, req_desc); | 4173 | megasas_fire_cmd_fusion(instance, req_desc); |
4172 | 4174 | ||
4173 | timeleft = wait_for_completion_timeout(&cmd_fusion->done, 50 * HZ); | 4175 | switch (type) { |
4176 | case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK: | ||
4177 | timeout = mr_device_priv_data->task_abort_tmo; | ||
4178 | break; | ||
4179 | case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: | ||
4180 | timeout = mr_device_priv_data->target_reset_tmo; | ||
4181 | break; | ||
4182 | } | ||
4183 | |||
4184 | timeleft = wait_for_completion_timeout(&cmd_fusion->done, timeout * HZ); | ||
4174 | 4185 | ||
4175 | if (!timeleft) { | 4186 | if (!timeleft) { |
4176 | dev_err(&instance->pdev->dev, | 4187 | dev_err(&instance->pdev->dev, |
@@ -4363,7 +4374,8 @@ int megasas_task_abort_fusion(struct scsi_cmnd *scmd) | |||
4363 | mr_device_priv_data->tm_busy = 1; | 4374 | mr_device_priv_data->tm_busy = 1; |
4364 | ret = megasas_issue_tm(instance, devhandle, | 4375 | ret = megasas_issue_tm(instance, devhandle, |
4365 | scmd->device->channel, scmd->device->id, smid, | 4376 | scmd->device->channel, scmd->device->id, smid, |
4366 | MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK); | 4377 | MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, |
4378 | mr_device_priv_data); | ||
4367 | mr_device_priv_data->tm_busy = 0; | 4379 | mr_device_priv_data->tm_busy = 0; |
4368 | 4380 | ||
4369 | mutex_unlock(&instance->reset_mutex); | 4381 | mutex_unlock(&instance->reset_mutex); |
@@ -4435,7 +4447,8 @@ int megasas_reset_target_fusion(struct scsi_cmnd *scmd) | |||
4435 | mr_device_priv_data->tm_busy = 1; | 4447 | mr_device_priv_data->tm_busy = 1; |
4436 | ret = megasas_issue_tm(instance, devhandle, | 4448 | ret = megasas_issue_tm(instance, devhandle, |
4437 | scmd->device->channel, scmd->device->id, 0, | 4449 | scmd->device->channel, scmd->device->id, 0, |
4438 | MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET); | 4450 | MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, |
4451 | mr_device_priv_data); | ||
4439 | mr_device_priv_data->tm_busy = 0; | 4452 | mr_device_priv_data->tm_busy = 0; |
4440 | mutex_unlock(&instance->reset_mutex); | 4453 | mutex_unlock(&instance->reset_mutex); |
4441 | out: | 4454 | out: |
@@ -4490,6 +4503,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) | |||
4490 | u32 io_timeout_in_crash_mode = 0; | 4503 | u32 io_timeout_in_crash_mode = 0; |
4491 | struct scsi_cmnd *scmd_local = NULL; | 4504 | struct scsi_cmnd *scmd_local = NULL; |
4492 | struct scsi_device *sdev; | 4505 | struct scsi_device *sdev; |
4506 | int ret_target_prop = DCMD_FAILED; | ||
4507 | bool is_target_prop = false; | ||
4493 | 4508 | ||
4494 | instance = (struct megasas_instance *)shost->hostdata; | 4509 | instance = (struct megasas_instance *)shost->hostdata; |
4495 | fusion = instance->ctrl_context; | 4510 | fusion = instance->ctrl_context; |
@@ -4661,9 +4676,6 @@ transition_to_ready: | |||
4661 | 4676 | ||
4662 | megasas_setup_jbod_map(instance); | 4677 | megasas_setup_jbod_map(instance); |
4663 | 4678 | ||
4664 | shost_for_each_device(sdev, shost) | ||
4665 | megasas_set_dynamic_target_properties(sdev); | ||
4666 | |||
4667 | /* reset stream detection array */ | 4679 | /* reset stream detection array */ |
4668 | if (instance->adapter_type == VENTURA_SERIES) { | 4680 | if (instance->adapter_type == VENTURA_SERIES) { |
4669 | for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) { | 4681 | for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) { |
@@ -4677,6 +4689,16 @@ transition_to_ready: | |||
4677 | clear_bit(MEGASAS_FUSION_IN_RESET, | 4689 | clear_bit(MEGASAS_FUSION_IN_RESET, |
4678 | &instance->reset_flags); | 4690 | &instance->reset_flags); |
4679 | instance->instancet->enable_intr(instance); | 4691 | instance->instancet->enable_intr(instance); |
4692 | |||
4693 | shost_for_each_device(sdev, shost) { | ||
4694 | if ((instance->tgt_prop) && | ||
4695 | (instance->nvme_page_size)) | ||
4696 | ret_target_prop = megasas_get_target_prop(instance, sdev); | ||
4697 | |||
4698 | is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false; | ||
4699 | megasas_set_dynamic_target_properties(sdev, is_target_prop); | ||
4700 | } | ||
4701 | |||
4680 | atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); | 4702 | atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); |
4681 | 4703 | ||
4682 | dev_info(&instance->pdev->dev, "Interrupts are enabled and" | 4704 | dev_info(&instance->pdev->dev, "Interrupts are enabled and" |
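
On the fusion side, megasas_issue_tm() now receives the device's private data and derives its completion wait from the task-management type instead of the old fixed 50 * HZ. The selection is a plain switch with the default as a safety net; the enum values and struct below are stand-ins, not the MPI2 definitions.

/* Issuer-side view: timeout chosen per TM type, default otherwise. */
#include <stdio.h>

enum tm_type { TM_ABORT_TASK, TM_TARGET_RESET };

struct priv { int task_abort_tmo; int target_reset_tmo; };

static int tm_timeout_secs(enum tm_type type, const struct priv *p)
{
        int timeout = 50;                 /* MEGASAS_DEFAULT_TM_TIMEOUT */

        switch (type) {
        case TM_ABORT_TASK:
                timeout = p->task_abort_tmo;
                break;
        case TM_TARGET_RESET:
                timeout = p->target_reset_tmo;
                break;
        }
        return timeout;
}

int main(void)
{
        struct priv p = { .task_abort_tmo = 20, .target_reset_tmo = 30 };

        printf("%d %d\n", tm_timeout_secs(TM_ABORT_TASK, &p),
               tm_timeout_secs(TM_TARGET_RESET, &p));
        return 0;
}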
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c index 1753e42826dd..82e01dbe90af 100644 --- a/drivers/scsi/mesh.c +++ b/drivers/scsi/mesh.c | |||
@@ -594,9 +594,9 @@ static void mesh_done(struct mesh_state *ms, int start_next) | |||
594 | ms->current_req = NULL; | 594 | ms->current_req = NULL; |
595 | tp->current_req = NULL; | 595 | tp->current_req = NULL; |
596 | if (cmd) { | 596 | if (cmd) { |
597 | cmd->result = (ms->stat << 16) + cmd->SCp.Status; | 597 | cmd->result = (ms->stat << 16) | cmd->SCp.Status; |
598 | if (ms->stat == DID_OK) | 598 | if (ms->stat == DID_OK) |
599 | cmd->result += (cmd->SCp.Message << 8); | 599 | cmd->result |= cmd->SCp.Message << 8; |
600 | if (DEBUG_TARGET(cmd)) { | 600 | if (DEBUG_TARGET(cmd)) { |
601 | printk(KERN_DEBUG "mesh_done: result = %x, data_ptr=%d, buflen=%d\n", | 601 | printk(KERN_DEBUG "mesh_done: result = %x, data_ptr=%d, buflen=%d\n", |
602 | cmd->result, ms->data_ptr, scsi_bufflen(cmd)); | 602 | cmd->result, ms->data_ptr, scsi_bufflen(cmd)); |
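
The mesh change swaps '+' for '|' when assembling the SCSI result word from its host, message and status bytes. OR is the right operator because setting the same field twice stays idempotent, whereas addition can carry into the neighbouring byte; a tiny demonstration:

/* Composing a byte-field result word: OR vs addition. */
#include <stdio.h>

int main(void)
{
        unsigned int host_ok = 0x00;          /* DID_OK in the host byte */
        unsigned int status  = 0x02;          /* CHECK CONDITION */
        unsigned int msg     = 0x00;

        unsigned int with_or  = (host_ok << 16) | (msg << 8) | status;
        unsigned int with_add = (host_ok << 16) + (msg << 8) + status;

        /* Apply the status a second time, as a buggy path might: */
        with_or  |= status;                   /* still 0x02, idempotent */
        with_add += status;                   /* 0x04, corrupts the field */

        printf("or=0x%x add=0x%x\n", with_or, with_add);
        return 0;
}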
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index e44c91edf92d..59d7844ee022 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c | |||
@@ -102,8 +102,39 @@ static int | |||
102 | _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc); | 102 | _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc); |
103 | 103 | ||
104 | /** | 104 | /** |
105 | * mpt3sas_base_check_cmd_timeout - Function | ||
106 | * to check timeout and command termination due | ||
107 | * to Host reset. | ||
108 | * | ||
109 | * @ioc: per adapter object. | ||
110 | * @status: Status of issued command. | ||
111 | * @mpi_request:mf request pointer. | ||
112 | * @sz: size of buffer. | ||
113 | * | ||
114 | * @Returns - 1/0 Reset to be done or Not | ||
115 | */ | ||
116 | u8 | ||
117 | mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc, | ||
118 | u8 status, void *mpi_request, int sz) | ||
119 | { | ||
120 | u8 issue_reset = 0; | ||
121 | |||
122 | if (!(status & MPT3_CMD_RESET)) | ||
123 | issue_reset = 1; | ||
124 | |||
125 | pr_err(MPT3SAS_FMT "Command %s\n", ioc->name, | ||
126 | ((issue_reset == 0) ? "terminated due to Host Reset" : "Timeout")); | ||
127 | _debug_dump_mf(mpi_request, sz); | ||
128 | |||
129 | return issue_reset; | ||
130 | } | ||
131 | |||
132 | /** | ||
105 | * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug. | 133 | * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug. |
134 | * @val: ? | ||
135 | * @kp: ? | ||
106 | * | 136 | * |
137 | * Return: ? | ||
107 | */ | 138 | */ |
108 | static int | 139 | static int |
109 | _scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp) | 140 | _scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp) |
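mpt3sas_base_check_cmd_timeout(), added in the hunk above, centralises the "real timeout versus command killed by an in-flight host reset" decision that several command paths used to open-code. The mpt3sas_base_sas_iounit_control() hunk further down shows the resulting caller shape; a trimmed sketch of it:

    /* Caller pattern taken from the SAS IO unit control hunk below;
     * other request types converted by this patch follow the same shape.
     */
    if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
            issue_reset = mpt3sas_base_check_cmd_timeout(ioc,
                ioc->base_cmds.status, mpi_request,
                sizeof(Mpi2SasIoUnitControlRequest_t)/4);
            goto issue_host_reset;
    }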
@@ -132,8 +163,6 @@ module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug, | |||
132 | * @ioc: per adapter object | 163 | * @ioc: per adapter object |
133 | * @reply: reply message frame(lower 32bit addr) | 164 | * @reply: reply message frame(lower 32bit addr) |
134 | * @index: System request message index. | 165 | * @index: System request message index. |
135 | * | ||
136 | * @Returns - Nothing | ||
137 | */ | 166 | */ |
138 | static void | 167 | static void |
139 | _base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply, | 168 | _base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply, |
@@ -156,7 +185,7 @@ _base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply, | |||
156 | * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames | 185 | * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames |
157 | * to system/BAR0 region. | 186 | * to system/BAR0 region. |
158 | * | 187 | * |
159 | * @dst_iomem: Pointer to the destinaltion location in BAR0 space. | 188 | * @dst_iomem: Pointer to the destination location in BAR0 space. |
160 | * @src: Pointer to the Source data. | 189 | * @src: Pointer to the Source data. |
161 | * @size: Size of data to be copied. | 190 | * @size: Size of data to be copied. |
162 | */ | 191 | */ |
@@ -197,7 +226,7 @@ _base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size) | |||
197 | * @smid: system request message index | 226 | * @smid: system request message index |
198 | * @sge_chain_count: Scatter gather chain count. | 227 | * @sge_chain_count: Scatter gather chain count. |
199 | * | 228 | * |
200 | * @Return: chain address. | 229 | * Return: the chain address. |
201 | */ | 230 | */ |
202 | static inline void __iomem* | 231 | static inline void __iomem* |
203 | _base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid, | 232 | _base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid, |
@@ -223,7 +252,7 @@ _base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid, | |||
223 | * @smid: system request message index | 252 | * @smid: system request message index |
224 | * @sge_chain_count: Scatter gather chain count. | 253 | * @sge_chain_count: Scatter gather chain count. |
225 | * | 254 | * |
226 | * @Return - Physical chain address. | 255 | * Return: Physical chain address. |
227 | */ | 256 | */ |
228 | static inline phys_addr_t | 257 | static inline phys_addr_t |
229 | _base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid, | 258 | _base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid, |
@@ -248,7 +277,7 @@ _base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid, | |||
248 | * @ioc: per adapter object | 277 | * @ioc: per adapter object |
249 | * @smid: system request message index | 278 | * @smid: system request message index |
250 | * | 279 | * |
251 | * @Returns - Pointer to buffer location in BAR0. | 280 | * Return: Pointer to buffer location in BAR0. |
252 | */ | 281 | */ |
253 | 282 | ||
254 | static void __iomem * | 283 | static void __iomem * |
@@ -270,7 +299,7 @@ _base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid) | |||
270 | * @ioc: per adapter object | 299 | * @ioc: per adapter object |
271 | * @smid: system request message index | 300 | * @smid: system request message index |
272 | * | 301 | * |
273 | * @Returns - Pointer to buffer location in BAR0. | 302 | * Return: Pointer to buffer location in BAR0. |
274 | */ | 303 | */ |
275 | static phys_addr_t | 304 | static phys_addr_t |
276 | _base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid) | 305 | _base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid) |
@@ -291,7 +320,7 @@ _base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid) | |||
291 | * @ioc: per adapter object | 320 | * @ioc: per adapter object |
292 | * @chain_buffer_dma: Chain buffer dma address. | 321 | * @chain_buffer_dma: Chain buffer dma address. |
293 | * | 322 | * |
294 | * @Returns - Pointer to chain buffer. Or Null on Failure. | 323 | * Return: Pointer to chain buffer. Or Null on Failure. |
295 | */ | 324 | */ |
296 | static void * | 325 | static void * |
297 | _base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc, | 326 | _base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc, |
@@ -322,8 +351,6 @@ _base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc, | |||
322 | * @ioc: per adapter object. | 351 | * @ioc: per adapter object. |
323 | * @mpi_request: mf request pointer. | 352 | * @mpi_request: mf request pointer. |
324 | * @smid: system request message index. | 353 | * @smid: system request message index. |
325 | * | ||
326 | * @Returns: Nothing. | ||
327 | */ | 354 | */ |
328 | static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc, | 355 | static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc, |
329 | void *mpi_request, u16 smid) | 356 | void *mpi_request, u16 smid) |
@@ -496,8 +523,9 @@ eob_clone_chain: | |||
496 | * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc | 523 | * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc |
497 | * @arg: input argument, used to derive ioc | 524 | * @arg: input argument, used to derive ioc |
498 | * | 525 | * |
499 | * Return 0 if controller is removed from pci subsystem. | 526 | * Return: |
500 | * Return -1 for other case. | 527 | * 0 if controller is removed from pci subsystem. |
528 | * -1 for other case. | ||
501 | */ | 529 | */ |
502 | static int mpt3sas_remove_dead_ioc_func(void *arg) | 530 | static int mpt3sas_remove_dead_ioc_func(void *arg) |
503 | { | 531 | { |
@@ -517,9 +545,8 @@ static int mpt3sas_remove_dead_ioc_func(void *arg) | |||
517 | /** | 545 | /** |
518 | * _base_fault_reset_work - workq handling ioc fault conditions | 546 | * _base_fault_reset_work - workq handling ioc fault conditions |
519 | * @work: input argument, used to derive ioc | 547 | * @work: input argument, used to derive ioc |
520 | * Context: sleep. | ||
521 | * | 548 | * |
522 | * Return nothing. | 549 | * Context: sleep. |
523 | */ | 550 | */ |
524 | static void | 551 | static void |
525 | _base_fault_reset_work(struct work_struct *work) | 552 | _base_fault_reset_work(struct work_struct *work) |
@@ -610,9 +637,8 @@ _base_fault_reset_work(struct work_struct *work) | |||
610 | /** | 637 | /** |
611 | * mpt3sas_base_start_watchdog - start the fault_reset_work_q | 638 | * mpt3sas_base_start_watchdog - start the fault_reset_work_q |
612 | * @ioc: per adapter object | 639 | * @ioc: per adapter object |
613 | * Context: sleep. | ||
614 | * | 640 | * |
615 | * Return nothing. | 641 | * Context: sleep. |
616 | */ | 642 | */ |
617 | void | 643 | void |
618 | mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc) | 644 | mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc) |
@@ -633,7 +659,7 @@ mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc) | |||
633 | if (!ioc->fault_reset_work_q) { | 659 | if (!ioc->fault_reset_work_q) { |
634 | pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n", | 660 | pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n", |
635 | ioc->name, __func__, __LINE__); | 661 | ioc->name, __func__, __LINE__); |
636 | return; | 662 | return; |
637 | } | 663 | } |
638 | spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); | 664 | spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); |
639 | if (ioc->fault_reset_work_q) | 665 | if (ioc->fault_reset_work_q) |
@@ -646,9 +672,8 @@ mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc) | |||
646 | /** | 672 | /** |
647 | * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q | 673 | * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q |
648 | * @ioc: per adapter object | 674 | * @ioc: per adapter object |
649 | * Context: sleep. | ||
650 | * | 675 | * |
651 | * Return nothing. | 676 | * Context: sleep. |
652 | */ | 677 | */ |
653 | void | 678 | void |
654 | mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc) | 679 | mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc) |
@@ -671,8 +696,6 @@ mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc) | |||
671 | * mpt3sas_base_fault_info - verbose translation of firmware FAULT code | 696 | * mpt3sas_base_fault_info - verbose translation of firmware FAULT code |
672 | * @ioc: per adapter object | 697 | * @ioc: per adapter object |
673 | * @fault_code: fault code | 698 | * @fault_code: fault code |
674 | * | ||
675 | * Return nothing. | ||
676 | */ | 699 | */ |
677 | void | 700 | void |
678 | mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code) | 701 | mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code) |
@@ -721,8 +744,6 @@ mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc) | |||
721 | * @ioc: per adapter object | 744 | * @ioc: per adapter object |
722 | * @mpi_reply: reply mf payload returned from firmware | 745 | * @mpi_reply: reply mf payload returned from firmware |
723 | * @request_hdr: request mf | 746 | * @request_hdr: request mf |
724 | * | ||
725 | * Return nothing. | ||
726 | */ | 747 | */ |
727 | static void | 748 | static void |
728 | _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply, | 749 | _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply, |
@@ -945,8 +966,6 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply, | |||
945 | * _base_display_event_data - verbose translation of firmware asyn events | 966 | * _base_display_event_data - verbose translation of firmware asyn events |
946 | * @ioc: per adapter object | 967 | * @ioc: per adapter object |
947 | * @mpi_reply: reply mf payload returned from firmware | 968 | * @mpi_reply: reply mf payload returned from firmware |
948 | * | ||
949 | * Return nothing. | ||
950 | */ | 969 | */ |
951 | static void | 970 | static void |
952 | _base_display_event_data(struct MPT3SAS_ADAPTER *ioc, | 971 | _base_display_event_data(struct MPT3SAS_ADAPTER *ioc, |
@@ -1065,8 +1084,6 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc, | |||
1065 | * _base_sas_log_info - verbose translation of firmware log info | 1084 | * _base_sas_log_info - verbose translation of firmware log info |
1066 | * @ioc: per adapter object | 1085 | * @ioc: per adapter object |
1067 | * @log_info: log info | 1086 | * @log_info: log info |
1068 | * | ||
1069 | * Return nothing. | ||
1070 | */ | 1087 | */ |
1071 | static void | 1088 | static void |
1072 | _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info) | 1089 | _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info) |
@@ -1124,8 +1141,6 @@ _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info) | |||
1124 | * @smid: system request message index | 1141 | * @smid: system request message index |
1125 | * @msix_index: MSIX table index supplied by the OS | 1142 | * @msix_index: MSIX table index supplied by the OS |
1126 | * @reply: reply message frame(lower 32bit addr) | 1143 | * @reply: reply message frame(lower 32bit addr) |
1127 | * | ||
1128 | * Return nothing. | ||
1129 | */ | 1144 | */ |
1130 | static void | 1145 | static void |
1131 | _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | 1146 | _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, |
@@ -1167,8 +1182,9 @@ _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | |||
1167 | * @msix_index: MSIX table index supplied by the OS | 1182 | * @msix_index: MSIX table index supplied by the OS |
1168 | * @reply: reply message frame(lower 32bit addr) | 1183 | * @reply: reply message frame(lower 32bit addr) |
1169 | * | 1184 | * |
1170 | * Return 1 meaning mf should be freed from _base_interrupt | 1185 | * Return: |
1171 | * 0 means the mf is freed from this function. | 1186 | * 1 meaning mf should be freed from _base_interrupt |
1187 | * 0 means the mf is freed from this function. | ||
1172 | */ | 1188 | */ |
1173 | u8 | 1189 | u8 |
1174 | mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | 1190 | mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, |
@@ -1200,8 +1216,9 @@ mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | |||
1200 | * @msix_index: MSIX table index supplied by the OS | 1216 | * @msix_index: MSIX table index supplied by the OS |
1201 | * @reply: reply message frame(lower 32bit addr) | 1217 | * @reply: reply message frame(lower 32bit addr) |
1202 | * | 1218 | * |
1203 | * Return 1 meaning mf should be freed from _base_interrupt | 1219 | * Return: |
1204 | * 0 means the mf is freed from this function. | 1220 | * 1 meaning mf should be freed from _base_interrupt |
1221 | * 0 means the mf is freed from this function. | ||
1205 | */ | 1222 | */ |
1206 | static u8 | 1223 | static u8 |
1207 | _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply) | 1224 | _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply) |
@@ -1279,7 +1296,7 @@ _get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid) | |||
1279 | * @ioc: per adapter object | 1296 | * @ioc: per adapter object |
1280 | * @smid: system request message index | 1297 | * @smid: system request message index |
1281 | * | 1298 | * |
1282 | * Return callback index. | 1299 | * Return: callback index. |
1283 | */ | 1300 | */ |
1284 | static u8 | 1301 | static u8 |
1285 | _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid) | 1302 | _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid) |
@@ -1312,8 +1329,6 @@ _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid) | |||
1312 | * @ioc: per adapter object | 1329 | * @ioc: per adapter object |
1313 | * | 1330 | * |
1314 | * Disabling ResetIRQ, Reply and Doorbell Interrupts | 1331 | * Disabling ResetIRQ, Reply and Doorbell Interrupts |
1315 | * | ||
1316 | * Return nothing. | ||
1317 | */ | 1332 | */ |
1318 | static void | 1333 | static void |
1319 | _base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc) | 1334 | _base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc) |
@@ -1332,8 +1347,6 @@ _base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc) | |||
1332 | * @ioc: per adapter object | 1347 | * @ioc: per adapter object |
1333 | * | 1348 | * |
1334 | * Enabling only Reply Interrupts | 1349 | * Enabling only Reply Interrupts |
1335 | * | ||
1336 | * Return nothing. | ||
1337 | */ | 1350 | */ |
1338 | static void | 1351 | static void |
1339 | _base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc) | 1352 | _base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc) |
@@ -1358,9 +1371,8 @@ union reply_descriptor { | |||
1358 | * _base_interrupt - MPT adapter (IOC) specific interrupt handler. | 1371 | * _base_interrupt - MPT adapter (IOC) specific interrupt handler. |
1359 | * @irq: irq number (not used) | 1372 | * @irq: irq number (not used) |
1360 | * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure | 1373 | * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure |
1361 | * @r: pt_regs pointer (not used) | ||
1362 | * | 1374 | * |
1363 | * Return IRQ_HANDLE if processed, else IRQ_NONE. | 1375 | * Return: IRQ_HANDLED if processed, else IRQ_NONE. |
1364 | */ | 1376 | */ |
1365 | static irqreturn_t | 1377 | static irqreturn_t |
1366 | _base_interrupt(int irq, void *bus_id) | 1378 | _base_interrupt(int irq, void *bus_id) |
@@ -1535,6 +1547,7 @@ _base_interrupt(int irq, void *bus_id) | |||
1535 | * _base_is_controller_msix_enabled - is controller support muli-reply queues | 1547 | * _base_is_controller_msix_enabled - is controller support muli-reply queues |
1536 | * @ioc: per adapter object | 1548 | * @ioc: per adapter object |
1537 | * | 1549 | * |
1550 | * Return: Whether or not MSI/X is enabled. | ||
1538 | */ | 1551 | */ |
1539 | static inline int | 1552 | static inline int |
1540 | _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc) | 1553 | _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc) |
@@ -1549,8 +1562,6 @@ _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc) | |||
1549 | * Context: non ISR conext | 1562 | * Context: non ISR conext |
1550 | * | 1563 | * |
1551 | * Called when a Task Management request has completed. | 1564 | * Called when a Task Management request has completed. |
1552 | * | ||
1553 | * Return nothing. | ||
1554 | */ | 1565 | */ |
1555 | void | 1566 | void |
1556 | mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc) | 1567 | mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc) |
@@ -1577,8 +1588,6 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc) | |||
1577 | /** | 1588 | /** |
1578 | * mpt3sas_base_release_callback_handler - clear interrupt callback handler | 1589 | * mpt3sas_base_release_callback_handler - clear interrupt callback handler |
1579 | * @cb_idx: callback index | 1590 | * @cb_idx: callback index |
1580 | * | ||
1581 | * Return nothing. | ||
1582 | */ | 1591 | */ |
1583 | void | 1592 | void |
1584 | mpt3sas_base_release_callback_handler(u8 cb_idx) | 1593 | mpt3sas_base_release_callback_handler(u8 cb_idx) |
@@ -1590,7 +1599,7 @@ mpt3sas_base_release_callback_handler(u8 cb_idx) | |||
1590 | * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler | 1599 | * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler |
1591 | * @cb_func: callback function | 1600 | * @cb_func: callback function |
1592 | * | 1601 | * |
1593 | * Returns cb_func. | 1602 | * Return: Index of @cb_func. |
1594 | */ | 1603 | */ |
1595 | u8 | 1604 | u8 |
1596 | mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func) | 1605 | mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func) |
@@ -1607,8 +1616,6 @@ mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func) | |||
1607 | 1616 | ||
1608 | /** | 1617 | /** |
1609 | * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler | 1618 | * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler |
1610 | * | ||
1611 | * Return nothing. | ||
1612 | */ | 1619 | */ |
1613 | void | 1620 | void |
1614 | mpt3sas_base_initialize_callback_handler(void) | 1621 | mpt3sas_base_initialize_callback_handler(void) |
@@ -1628,8 +1635,6 @@ mpt3sas_base_initialize_callback_handler(void) | |||
1628 | * Create a zero length scatter gather entry to insure the IOCs hardware has | 1635 | * Create a zero length scatter gather entry to insure the IOCs hardware has |
1629 | * something to use if the target device goes brain dead and tries | 1636 | * something to use if the target device goes brain dead and tries |
1630 | * to send data even when none is asked for. | 1637 | * to send data even when none is asked for. |
1631 | * | ||
1632 | * Return nothing. | ||
1633 | */ | 1638 | */ |
1634 | static void | 1639 | static void |
1635 | _base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr) | 1640 | _base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr) |
@@ -1646,8 +1651,6 @@ _base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr) | |||
1646 | * @paddr: virtual address for SGE | 1651 | * @paddr: virtual address for SGE |
1647 | * @flags_length: SGE flags and data transfer length | 1652 | * @flags_length: SGE flags and data transfer length |
1648 | * @dma_addr: Physical address | 1653 | * @dma_addr: Physical address |
1649 | * | ||
1650 | * Return nothing. | ||
1651 | */ | 1654 | */ |
1652 | static void | 1655 | static void |
1653 | _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr) | 1656 | _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr) |
@@ -1666,8 +1669,6 @@ _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr) | |||
1666 | * @paddr: virtual address for SGE | 1669 | * @paddr: virtual address for SGE |
1667 | * @flags_length: SGE flags and data transfer length | 1670 | * @flags_length: SGE flags and data transfer length |
1668 | * @dma_addr: Physical address | 1671 | * @dma_addr: Physical address |
1669 | * | ||
1670 | * Return nothing. | ||
1671 | */ | 1672 | */ |
1672 | static void | 1673 | static void |
1673 | _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr) | 1674 | _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr) |
@@ -1685,7 +1686,7 @@ _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr) | |||
1685 | * @ioc: per adapter object | 1686 | * @ioc: per adapter object |
1686 | * @scmd: SCSI commands of the IO request | 1687 | * @scmd: SCSI commands of the IO request |
1687 | * | 1688 | * |
1688 | * Returns chain tracker from chain_lookup table using key as | 1689 | * Return: chain tracker from chain_lookup table using key as |
1689 | * smid and smid's chain_offset. | 1690 | * smid and smid's chain_offset. |
1690 | */ | 1691 | */ |
1691 | static struct chain_tracker * | 1692 | static struct chain_tracker * |
@@ -1715,8 +1716,6 @@ _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, | |||
1715 | * @data_out_sz: data xfer size for WRITES | 1716 | * @data_out_sz: data xfer size for WRITES |
1716 | * @data_in_dma: physical address for READS | 1717 | * @data_in_dma: physical address for READS |
1717 | * @data_in_sz: data xfer size for READS | 1718 | * @data_in_sz: data xfer size for READS |
1718 | * | ||
1719 | * Return nothing. | ||
1720 | */ | 1719 | */ |
1721 | static void | 1720 | static void |
1722 | _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge, | 1721 | _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge, |
@@ -1777,7 +1776,7 @@ _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge, | |||
1777 | * describes the first data memory segment, and PRP2 contains a pointer to a PRP | 1776 | * describes the first data memory segment, and PRP2 contains a pointer to a PRP |
1778 | * list located elsewhere in memory to describe the remaining data memory | 1777 | * list located elsewhere in memory to describe the remaining data memory |
1779 | * segments. The PRP list will be contiguous. | 1778 | * segments. The PRP list will be contiguous. |
1780 | 1779 | * | |
1781 | * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP | 1780 | * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP |
1782 | * consists of a list of PRP entries to describe a number of noncontigous | 1781 | * consists of a list of PRP entries to describe a number of noncontigous |
1783 | * physical memory segments as a single memory buffer, just as a SGL does. Note | 1782 | * physical memory segments as a single memory buffer, just as a SGL does. Note |
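The kernel-doc above spells out the PRP rules that _base_build_nvme_prp() implements: PRP1 always addresses the first data page, and PRP2 is either the second data page or, for longer transfers, a pointer to a contiguous PRP list. A hedged sketch of that decision follows; page_size, offset, length, dma[] and prp_list_dma are placeholder names for illustration, only the nvme_cmd->prp1/prp2 fields come from this patch:

    /* Illustrative only; the real routine walks the scatterlist and does
     * the page-boundary bookkeeping that is elided here.
     */
    nvme_cmd->prp1 = cpu_to_le64(dma[0]);               /* first data page  */
    if (length > 2 * page_size - offset)
            nvme_cmd->prp2 = cpu_to_le64(prp_list_dma); /* PRP list pointer */
    else if (length > page_size - offset)
            nvme_cmd->prp2 = cpu_to_le64(dma[1]);       /* second data page */
    /* else: the transfer fits in one page and PRP2 stays unused */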
@@ -1820,8 +1819,6 @@ _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge, | |||
1820 | * @data_out_sz: data xfer size for WRITES | 1819 | * @data_out_sz: data xfer size for WRITES |
1821 | * @data_in_dma: physical address for READS | 1820 | * @data_in_dma: physical address for READS |
1822 | * @data_in_sz: data xfer size for READS | 1821 | * @data_in_sz: data xfer size for READS |
1823 | * | ||
1824 | * Returns nothing. | ||
1825 | */ | 1822 | */ |
1826 | static void | 1823 | static void |
1827 | _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid, | 1824 | _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid, |
@@ -1836,6 +1833,8 @@ _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid, | |||
1836 | u32 offset, entry_len; | 1833 | u32 offset, entry_len; |
1837 | u32 page_mask_result, page_mask; | 1834 | u32 page_mask_result, page_mask; |
1838 | size_t length; | 1835 | size_t length; |
1836 | struct mpt3sas_nvme_cmd *nvme_cmd = | ||
1837 | (void *)nvme_encap_request->NVMe_Command; | ||
1839 | 1838 | ||
1840 | /* | 1839 | /* |
1841 | * Not all commands require a data transfer. If no data, just return | 1840 | * Not all commands require a data transfer. If no data, just return |
@@ -1843,15 +1842,8 @@ _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid, | |||
1843 | */ | 1842 | */ |
1844 | if (!data_in_sz && !data_out_sz) | 1843 | if (!data_in_sz && !data_out_sz) |
1845 | return; | 1844 | return; |
1846 | /* | 1845 | prp1_entry = &nvme_cmd->prp1; |
1847 | * Set pointers to PRP1 and PRP2, which are in the NVMe command. | 1846 | prp2_entry = &nvme_cmd->prp2; |
1848 | * PRP1 is located at a 24 byte offset from the start of the NVMe | ||
1849 | * command. Then set the current PRP entry pointer to PRP1. | ||
1850 | */ | ||
1851 | prp1_entry = (__le64 *)(nvme_encap_request->NVMe_Command + | ||
1852 | NVME_CMD_PRP1_OFFSET); | ||
1853 | prp2_entry = (__le64 *)(nvme_encap_request->NVMe_Command + | ||
1854 | NVME_CMD_PRP2_OFFSET); | ||
1855 | prp_entry = prp1_entry; | 1847 | prp_entry = prp1_entry; |
1856 | /* | 1848 | /* |
1857 | * For the PRP entries, use the specially allocated buffer of | 1849 | * For the PRP entries, use the specially allocated buffer of |
@@ -1992,7 +1984,7 @@ _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid, | |||
1992 | * @smid: msg Index | 1984 | * @smid: msg Index |
1993 | * @sge_count: scatter gather element count. | 1985 | * @sge_count: scatter gather element count. |
1994 | * | 1986 | * |
1995 | * Returns: true: PRPs are built | 1987 | * Return: true: PRPs are built |
1996 | * false: IEEE SGLs needs to be built | 1988 | * false: IEEE SGLs needs to be built |
1997 | */ | 1989 | */ |
1998 | static void | 1990 | static void |
@@ -2127,11 +2119,9 @@ base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc, | |||
2127 | struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count) | 2119 | struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count) |
2128 | { | 2120 | { |
2129 | u32 data_length = 0; | 2121 | u32 data_length = 0; |
2130 | struct scatterlist *sg_scmd; | ||
2131 | bool build_prp = true; | 2122 | bool build_prp = true; |
2132 | 2123 | ||
2133 | data_length = scsi_bufflen(scmd); | 2124 | data_length = scsi_bufflen(scmd); |
2134 | sg_scmd = scsi_sglist(scmd); | ||
2135 | 2125 | ||
2136 | /* If Datalenth is <= 16K and number of SGE’s entries are <= 2 | 2126 | /* If Datalenth is <= 16K and number of SGE’s entries are <= 2 |
2137 | * we built IEEE SGL | 2127 | * we built IEEE SGL |
@@ -2155,18 +2145,16 @@ base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc, | |||
2155 | * @scmd: scsi command | 2145 | * @scmd: scsi command |
2156 | * @pcie_device: points to the PCIe device's info | 2146 | * @pcie_device: points to the PCIe device's info |
2157 | * | 2147 | * |
2158 | * Returns 0 if native SGL was built, 1 if no SGL was built | 2148 | * Return: 0 if native SGL was built, 1 if no SGL was built |
2159 | */ | 2149 | */ |
2160 | static int | 2150 | static int |
2161 | _base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc, | 2151 | _base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc, |
2162 | Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd, | 2152 | Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd, |
2163 | struct _pcie_device *pcie_device) | 2153 | struct _pcie_device *pcie_device) |
2164 | { | 2154 | { |
2165 | struct scatterlist *sg_scmd; | ||
2166 | int sges_left; | 2155 | int sges_left; |
2167 | 2156 | ||
2168 | /* Get the SG list pointer and info. */ | 2157 | /* Get the SG list pointer and info. */ |
2169 | sg_scmd = scsi_sglist(scmd); | ||
2170 | sges_left = scsi_dma_map(scmd); | 2158 | sges_left = scsi_dma_map(scmd); |
2171 | if (sges_left < 0) { | 2159 | if (sges_left < 0) { |
2172 | sdev_printk(KERN_ERR, scmd->device, | 2160 | sdev_printk(KERN_ERR, scmd->device, |
@@ -2201,8 +2189,6 @@ out: | |||
2201 | * @chain_offset: number of 128 byte elements from start of segment | 2189 | * @chain_offset: number of 128 byte elements from start of segment |
2202 | * @length: data transfer length | 2190 | * @length: data transfer length |
2203 | * @dma_addr: Physical address | 2191 | * @dma_addr: Physical address |
2204 | * | ||
2205 | * Return nothing. | ||
2206 | */ | 2192 | */ |
2207 | static void | 2193 | static void |
2208 | _base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length, | 2194 | _base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length, |
@@ -2224,8 +2210,6 @@ _base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length, | |||
2224 | * Create a zero length scatter gather entry to insure the IOCs hardware has | 2210 | * Create a zero length scatter gather entry to insure the IOCs hardware has |
2225 | * something to use if the target device goes brain dead and tries | 2211 | * something to use if the target device goes brain dead and tries |
2226 | * to send data even when none is asked for. | 2212 | * to send data even when none is asked for. |
2227 | * | ||
2228 | * Return nothing. | ||
2229 | */ | 2213 | */ |
2230 | static void | 2214 | static void |
2231 | _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr) | 2215 | _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr) |
@@ -2249,7 +2233,7 @@ _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr) | |||
2249 | * The main routine that builds scatter gather table from a given | 2233 | * The main routine that builds scatter gather table from a given |
2250 | * scsi request sent via the .queuecommand main handler. | 2234 | * scsi request sent via the .queuecommand main handler. |
2251 | * | 2235 | * |
2252 | * Returns 0 success, anything else error | 2236 | * Return: 0 success, anything else error |
2253 | */ | 2237 | */ |
2254 | static int | 2238 | static int |
2255 | _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc, | 2239 | _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc, |
@@ -2394,7 +2378,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc, | |||
2394 | * The main routine that builds scatter gather table from a given | 2378 | * The main routine that builds scatter gather table from a given |
2395 | * scsi request sent via the .queuecommand main handler. | 2379 | * scsi request sent via the .queuecommand main handler. |
2396 | * | 2380 | * |
2397 | * Returns 0 success, anything else error | 2381 | * Return: 0 success, anything else error |
2398 | */ | 2382 | */ |
2399 | static int | 2383 | static int |
2400 | _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, | 2384 | _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, |
@@ -2525,8 +2509,6 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, | |||
2525 | * @data_out_sz: data xfer size for WRITES | 2509 | * @data_out_sz: data xfer size for WRITES |
2526 | * @data_in_dma: physical address for READS | 2510 | * @data_in_dma: physical address for READS |
2527 | * @data_in_sz: data xfer size for READS | 2511 | * @data_in_sz: data xfer size for READS |
2528 | * | ||
2529 | * Return nothing. | ||
2530 | */ | 2512 | */ |
2531 | static void | 2513 | static void |
2532 | _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge, | 2514 | _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge, |
@@ -2576,7 +2558,7 @@ _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge, | |||
2576 | * @ioc: per adapter object | 2558 | * @ioc: per adapter object |
2577 | * @pdev: PCI device struct | 2559 | * @pdev: PCI device struct |
2578 | * | 2560 | * |
2579 | * Returns 0 for success, non-zero for failure. | 2561 | * Return: 0 for success, non-zero for failure. |
2580 | */ | 2562 | */ |
2581 | static int | 2563 | static int |
2582 | _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) | 2564 | _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) |
@@ -2924,10 +2906,9 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc) | |||
2924 | _base_free_irq(ioc); | 2906 | _base_free_irq(ioc); |
2925 | _base_disable_msix(ioc); | 2907 | _base_disable_msix(ioc); |
2926 | 2908 | ||
2927 | if (ioc->combined_reply_queue) { | 2909 | kfree(ioc->replyPostRegisterIndex); |
2928 | kfree(ioc->replyPostRegisterIndex); | 2910 | ioc->replyPostRegisterIndex = NULL; |
2929 | ioc->replyPostRegisterIndex = NULL; | 2911 | |
2930 | } | ||
2931 | 2912 | ||
2932 | if (ioc->chip_phys) { | 2913 | if (ioc->chip_phys) { |
2933 | iounmap(ioc->chip); | 2914 | iounmap(ioc->chip); |
@@ -2945,7 +2926,7 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc) | |||
2945 | * mpt3sas_base_map_resources - map in controller resources (io/irq/memap) | 2926 | * mpt3sas_base_map_resources - map in controller resources (io/irq/memap) |
2946 | * @ioc: per adapter object | 2927 | * @ioc: per adapter object |
2947 | * | 2928 | * |
2948 | * Returns 0 for success, non-zero for failure. | 2929 | * Return: 0 for success, non-zero for failure. |
2949 | */ | 2930 | */ |
2950 | int | 2931 | int |
2951 | mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) | 2932 | mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) |
@@ -3034,7 +3015,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) | |||
3034 | /* Use the Combined reply queue feature only for SAS3 C0 & higher | 3015 | /* Use the Combined reply queue feature only for SAS3 C0 & higher |
3035 | * revision HBAs and also only when reply queue count is greater than 8 | 3016 | * revision HBAs and also only when reply queue count is greater than 8 |
3036 | */ | 3017 | */ |
3037 | if (ioc->combined_reply_queue && ioc->reply_queue_count > 8) { | 3018 | if (ioc->combined_reply_queue) { |
3038 | /* Determine the Supplemental Reply Post Host Index Registers | 3019 | /* Determine the Supplemental Reply Post Host Index Registers |
3039 | * Addresse. Supplemental Reply Post Host Index Registers | 3020 | * Addresse. Supplemental Reply Post Host Index Registers |
3040 | * starts at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and | 3021 | * starts at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and |
@@ -3058,8 +3039,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) | |||
3058 | MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET + | 3039 | MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET + |
3059 | (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET)); | 3040 | (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET)); |
3060 | } | 3041 | } |
3061 | } else | 3042 | } |
3062 | ioc->combined_reply_queue = 0; | ||
3063 | 3043 | ||
3064 | if (ioc->is_warpdrive) { | 3044 | if (ioc->is_warpdrive) { |
3065 | ioc->reply_post_host_index[0] = (resource_size_t __iomem *) | 3045 | ioc->reply_post_host_index[0] = (resource_size_t __iomem *) |
@@ -3097,7 +3077,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) | |||
3097 | * @ioc: per adapter object | 3077 | * @ioc: per adapter object |
3098 | * @smid: system request message index(smid zero is invalid) | 3078 | * @smid: system request message index(smid zero is invalid) |
3099 | * | 3079 | * |
3100 | * Returns virt pointer to message frame. | 3080 | * Return: virt pointer to message frame. |
3101 | */ | 3081 | */ |
3102 | void * | 3082 | void * |
3103 | mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid) | 3083 | mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid) |
@@ -3110,7 +3090,7 @@ mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid) | |||
3110 | * @ioc: per adapter object | 3090 | * @ioc: per adapter object |
3111 | * @smid: system request message index | 3091 | * @smid: system request message index |
3112 | * | 3092 | * |
3113 | * Returns virt pointer to sense buffer. | 3093 | * Return: virt pointer to sense buffer. |
3114 | */ | 3094 | */ |
3115 | void * | 3095 | void * |
3116 | mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid) | 3096 | mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid) |
@@ -3123,7 +3103,7 @@ mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid) | |||
3123 | * @ioc: per adapter object | 3103 | * @ioc: per adapter object |
3124 | * @smid: system request message index | 3104 | * @smid: system request message index |
3125 | * | 3105 | * |
3126 | * Returns phys pointer to the low 32bit address of the sense buffer. | 3106 | * Return: phys pointer to the low 32bit address of the sense buffer. |
3127 | */ | 3107 | */ |
3128 | __le32 | 3108 | __le32 |
3129 | mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid) | 3109 | mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid) |
@@ -3137,7 +3117,7 @@ mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid) | |||
3137 | * @ioc: per adapter object | 3117 | * @ioc: per adapter object |
3138 | * @smid: system request message index | 3118 | * @smid: system request message index |
3139 | * | 3119 | * |
3140 | * Returns virt pointer to a PCIe SGL. | 3120 | * Return: virt pointer to a PCIe SGL. |
3141 | */ | 3121 | */ |
3142 | void * | 3122 | void * |
3143 | mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid) | 3123 | mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid) |
@@ -3150,7 +3130,7 @@ mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid) | |||
3150 | * @ioc: per adapter object | 3130 | * @ioc: per adapter object |
3151 | * @smid: system request message index | 3131 | * @smid: system request message index |
3152 | * | 3132 | * |
3153 | * Returns phys pointer to the address of the PCIe buffer. | 3133 | * Return: phys pointer to the address of the PCIe buffer. |
3154 | */ | 3134 | */ |
3155 | dma_addr_t | 3135 | dma_addr_t |
3156 | mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid) | 3136 | mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid) |
@@ -3184,7 +3164,7 @@ _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc) | |||
3184 | * @ioc: per adapter object | 3164 | * @ioc: per adapter object |
3185 | * @cb_idx: callback index | 3165 | * @cb_idx: callback index |
3186 | * | 3166 | * |
3187 | * Returns smid (zero is invalid) | 3167 | * Return: smid (zero is invalid) |
3188 | */ | 3168 | */ |
3189 | u16 | 3169 | u16 |
3190 | mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) | 3170 | mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) |
@@ -3216,7 +3196,7 @@ mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) | |||
3216 | * @cb_idx: callback index | 3196 | * @cb_idx: callback index |
3217 | * @scmd: pointer to scsi command object | 3197 | * @scmd: pointer to scsi command object |
3218 | * | 3198 | * |
3219 | * Returns smid (zero is invalid) | 3199 | * Return: smid (zero is invalid) |
3220 | */ | 3200 | */ |
3221 | u16 | 3201 | u16 |
3222 | mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx, | 3202 | mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx, |
@@ -3239,7 +3219,7 @@ mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx, | |||
3239 | * @ioc: per adapter object | 3219 | * @ioc: per adapter object |
3240 | * @cb_idx: callback index | 3220 | * @cb_idx: callback index |
3241 | * | 3221 | * |
3242 | * Returns smid (zero is invalid) | 3222 | * Return: smid (zero is invalid) |
3243 | */ | 3223 | */ |
3244 | u16 | 3224 | u16 |
3245 | mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) | 3225 | mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) |
@@ -3270,7 +3250,7 @@ _base_recovery_check(struct MPT3SAS_ADAPTER *ioc) | |||
3270 | * See _wait_for_commands_to_complete() call with regards to this code. | 3250 | * See _wait_for_commands_to_complete() call with regards to this code. |
3271 | */ | 3251 | */ |
3272 | if (ioc->shost_recovery && ioc->pending_io_count) { | 3252 | if (ioc->shost_recovery && ioc->pending_io_count) { |
3273 | ioc->pending_io_count = atomic_read(&ioc->shost->host_busy); | 3253 | ioc->pending_io_count = scsi_host_busy(ioc->shost); |
3274 | if (ioc->pending_io_count == 0) | 3254 | if (ioc->pending_io_count == 0) |
3275 | wake_up(&ioc->reset_wq); | 3255 | wake_up(&ioc->reset_wq); |
3276 | } | 3256 | } |
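The hunk above replaces the direct atomic_read() of shost->host_busy with the scsi_host_busy() accessor, which asks the SCSI midlayer how many commands are actually in flight on the host. A hedged sketch of the drain-wait this count feeds on the reset path (the wait helper itself is not shown in this hunk, so treat the shape as an assumption):

    /* Assumed shape of the reset-path drain; reset_wq and pending_io_count
     * are the driver fields used in the hunk above.
     */
    ioc->pending_io_count = scsi_host_busy(ioc->shost);
    if (ioc->pending_io_count)
            wait_event_timeout(ioc->reset_wq,
                               ioc->pending_io_count == 0, 10 * HZ);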
@@ -3284,14 +3264,13 @@ void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc, | |||
3284 | st->cb_idx = 0xFF; | 3264 | st->cb_idx = 0xFF; |
3285 | st->direct_io = 0; | 3265 | st->direct_io = 0; |
3286 | atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0); | 3266 | atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0); |
3267 | st->smid = 0; | ||
3287 | } | 3268 | } |
3288 | 3269 | ||
3289 | /** | 3270 | /** |
3290 | * mpt3sas_base_free_smid - put smid back on free_list | 3271 | * mpt3sas_base_free_smid - put smid back on free_list |
3291 | * @ioc: per adapter object | 3272 | * @ioc: per adapter object |
3292 | * @smid: system request message index | 3273 | * @smid: system request message index |
3293 | * | ||
3294 | * Return nothing. | ||
3295 | */ | 3274 | */ |
3296 | void | 3275 | void |
3297 | mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid) | 3276 | mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid) |
@@ -3353,7 +3332,6 @@ _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr, | |||
3353 | 3332 | ||
3354 | /** | 3333 | /** |
3355 | * _base_writeq - 64 bit write to MMIO | 3334 | * _base_writeq - 64 bit write to MMIO |
3356 | * @ioc: per adapter object | ||
3357 | * @b: data payload | 3335 | * @b: data payload |
3358 | * @addr: address in MMIO space | 3336 | * @addr: address in MMIO space |
3359 | * @writeq_lock: spin lock | 3337 | * @writeq_lock: spin lock |
@@ -3382,8 +3360,6 @@ _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock) | |||
3382 | * @ioc: per adapter object | 3360 | * @ioc: per adapter object |
3383 | * @smid: system request message index | 3361 | * @smid: system request message index |
3384 | * @handle: device handle | 3362 | * @handle: device handle |
3385 | * | ||
3386 | * Return nothing. | ||
3387 | */ | 3363 | */ |
3388 | static void | 3364 | static void |
3389 | _base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle) | 3365 | _base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle) |
@@ -3412,8 +3388,6 @@ _base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle) | |||
3412 | * @ioc: per adapter object | 3388 | * @ioc: per adapter object |
3413 | * @smid: system request message index | 3389 | * @smid: system request message index |
3414 | * @handle: device handle | 3390 | * @handle: device handle |
3415 | * | ||
3416 | * Return nothing. | ||
3417 | */ | 3391 | */ |
3418 | static void | 3392 | static void |
3419 | _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle) | 3393 | _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle) |
@@ -3436,8 +3410,6 @@ _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle) | |||
3436 | * @ioc: per adapter object | 3410 | * @ioc: per adapter object |
3437 | * @smid: system request message index | 3411 | * @smid: system request message index |
3438 | * @handle: device handle | 3412 | * @handle: device handle |
3439 | * | ||
3440 | * Return nothing. | ||
3441 | */ | 3413 | */ |
3442 | void | 3414 | void |
3443 | mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid, | 3415 | mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid, |
@@ -3461,7 +3433,6 @@ mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid, | |||
3461 | * @ioc: per adapter object | 3433 | * @ioc: per adapter object |
3462 | * @smid: system request message index | 3434 | * @smid: system request message index |
3463 | * @msix_task: msix_task will be same as msix of IO incase of task abort else 0. | 3435 | * @msix_task: msix_task will be same as msix of IO incase of task abort else 0. |
3464 | * Return nothing. | ||
3465 | */ | 3436 | */ |
3466 | void | 3437 | void |
3467 | mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid, | 3438 | mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid, |
@@ -3472,11 +3443,8 @@ mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid, | |||
3472 | u64 *request; | 3443 | u64 *request; |
3473 | 3444 | ||
3474 | if (ioc->is_mcpu_endpoint) { | 3445 | if (ioc->is_mcpu_endpoint) { |
3475 | MPI2RequestHeader_t *request_hdr; | ||
3476 | |||
3477 | __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid); | 3446 | __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid); |
3478 | 3447 | ||
3479 | request_hdr = (MPI2RequestHeader_t *)mfp; | ||
3480 | /* TBD 256 is offset within sys register. */ | 3448 | /* TBD 256 is offset within sys register. */ |
3481 | mpi_req_iomem = (void __force *)ioc->chip | 3449 | mpi_req_iomem = (void __force *)ioc->chip |
3482 | + MPI_FRAME_START_OFFSET | 3450 | + MPI_FRAME_START_OFFSET |
@@ -3507,8 +3475,6 @@ mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid, | |||
3507 | * firmware | 3475 | * firmware |
3508 | * @ioc: per adapter object | 3476 | * @ioc: per adapter object |
3509 | * @smid: system request message index | 3477 | * @smid: system request message index |
3510 | * | ||
3511 | * Return nothing. | ||
3512 | */ | 3478 | */ |
3513 | void | 3479 | void |
3514 | mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid) | 3480 | mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid) |
@@ -3530,8 +3496,6 @@ mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid) | |||
3530 | * mpt3sas_base_put_smid_default - Default, primarily used for config pages | 3496 | * mpt3sas_base_put_smid_default - Default, primarily used for config pages |
3531 | * @ioc: per adapter object | 3497 | * @ioc: per adapter object |
3532 | * @smid: system request message index | 3498 | * @smid: system request message index |
3533 | * | ||
3534 | * Return nothing. | ||
3535 | */ | 3499 | */ |
3536 | void | 3500 | void |
3537 | mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid) | 3501 | mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid) |
@@ -3539,13 +3503,10 @@ mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid) | |||
3539 | Mpi2RequestDescriptorUnion_t descriptor; | 3503 | Mpi2RequestDescriptorUnion_t descriptor; |
3540 | void *mpi_req_iomem; | 3504 | void *mpi_req_iomem; |
3541 | u64 *request; | 3505 | u64 *request; |
3542 | MPI2RequestHeader_t *request_hdr; | ||
3543 | 3506 | ||
3544 | if (ioc->is_mcpu_endpoint) { | 3507 | if (ioc->is_mcpu_endpoint) { |
3545 | __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid); | 3508 | __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid); |
3546 | 3509 | ||
3547 | request_hdr = (MPI2RequestHeader_t *)mfp; | ||
3548 | |||
3549 | _clone_sg_entries(ioc, (void *) mfp, smid); | 3510 | _clone_sg_entries(ioc, (void *) mfp, smid); |
3550 | /* TBD 256 is offset within sys register */ | 3511 | /* TBD 256 is offset within sys register */ |
3551 | mpi_req_iomem = (void __force *)ioc->chip + | 3512 | mpi_req_iomem = (void __force *)ioc->chip + |
@@ -3571,8 +3532,6 @@ mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid) | |||
3571 | /** | 3532 | /** |
3572 | * _base_display_OEMs_branding - Display branding string | 3533 | * _base_display_OEMs_branding - Display branding string |
3573 | * @ioc: per adapter object | 3534 | * @ioc: per adapter object |
3574 | * | ||
3575 | * Return nothing. | ||
3576 | */ | 3535 | */ |
3577 | static void | 3536 | static void |
3578 | _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc) | 3537 | _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc) |
@@ -3833,7 +3792,7 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc) | |||
3833 | * version from FW Image Header. | 3792 | * version from FW Image Header. |
3834 | * @ioc: per adapter object | 3793 | * @ioc: per adapter object |
3835 | * | 3794 | * |
3836 | * Returns 0 for success, non-zero for failure. | 3795 | * Return: 0 for success, non-zero for failure. |
3837 | */ | 3796 | */ |
3838 | static int | 3797 | static int |
3839 | _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc) | 3798 | _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc) |
@@ -3930,8 +3889,6 @@ out: | |||
3930 | /** | 3889 | /** |
3931 | * _base_display_ioc_capabilities - Disply IOC's capabilities. | 3890 | * _base_display_ioc_capabilities - Disply IOC's capabilities. |
3932 | * @ioc: per adapter object | 3891 | * @ioc: per adapter object |
3933 | * | ||
3934 | * Return nothing. | ||
3935 | */ | 3892 | */ |
3936 | static void | 3893 | static void |
3937 | _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc) | 3894 | _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc) |
@@ -4047,8 +4004,6 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc) | |||
4047 | * @device_missing_delay: amount of time till device is reported missing | 4004 | * @device_missing_delay: amount of time till device is reported missing |
4048 | * @io_missing_delay: interval IO is returned when there is a missing device | 4005 | * @io_missing_delay: interval IO is returned when there is a missing device |
4049 | * | 4006 | * |
4050 | * Return nothing. | ||
4051 | * | ||
4052 | * Passed on the command line, this function will modify the device missing | 4007 | * Passed on the command line, this function will modify the device missing |
4053 | * delay, as well as the io missing delay. This should be called at driver | 4008 | * delay, as well as the io missing delay. This should be called at driver |
4054 | * load time. | 4009 | * load time. |
@@ -4131,11 +4086,10 @@ mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc, | |||
4131 | out: | 4086 | out: |
4132 | kfree(sas_iounit_pg1); | 4087 | kfree(sas_iounit_pg1); |
4133 | } | 4088 | } |
4089 | |||
4134 | /** | 4090 | /** |
4135 | * _base_static_config_pages - static start of day config pages | 4091 | * _base_static_config_pages - static start of day config pages |
4136 | * @ioc: per adapter object | 4092 | * @ioc: per adapter object |
4137 | * | ||
4138 | * Return nothing. | ||
4139 | */ | 4093 | */ |
4140 | static void | 4094 | static void |
4141 | _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc) | 4095 | _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc) |
@@ -4207,8 +4161,6 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc) | |||
4207 | * @ioc: per adapter object | 4161 | * @ioc: per adapter object |
4208 | * | 4162 | * |
4209 | * Free memory allocated during encloure add. | 4163 | * Free memory allocated during encloure add. |
4210 | * | ||
4211 | * Return nothing. | ||
4212 | */ | 4164 | */ |
4213 | void | 4165 | void |
4214 | mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc) | 4166 | mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc) |
@@ -4228,8 +4180,6 @@ mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc) | |||
4228 | * @ioc: per adapter object | 4180 | * @ioc: per adapter object |
4229 | * | 4181 | * |
4230 | * Free memory allocated from _base_allocate_memory_pools. | 4182 | * Free memory allocated from _base_allocate_memory_pools. |
4231 | * | ||
4232 | * Return nothing. | ||
4233 | */ | 4183 | */ |
4234 | static void | 4184 | static void |
4235 | _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) | 4185 | _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) |
@@ -4350,9 +4300,8 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) | |||
4350 | * @reply_pool_start_address: Base address of a reply queue set | 4300 | * @reply_pool_start_address: Base address of a reply queue set |
4351 | * @pool_sz: Size of single Reply Descriptor Post Queues pool size | 4301 | * @pool_sz: Size of single Reply Descriptor Post Queues pool size |
4352 | * | 4302 | * |
4353 | * Returns 1 if reply queues in a set have a same upper 32bits | 4303 | * Return: 1 if reply queues in a set have a same upper 32bits in their base |
4354 | * in their base memory address, | 4304 | * memory address, else 0. |
4355 | * else 0 | ||
4356 | */ | 4305 | */ |
4357 | 4306 | ||
4358 | static int | 4307 | static int |
@@ -4373,7 +4322,7 @@ is_MSB_are_same(long reply_pool_start_address, u32 pool_sz) | |||
4373 | * _base_allocate_memory_pools - allocate start of day memory pools | 4322 | * _base_allocate_memory_pools - allocate start of day memory pools |
4374 | * @ioc: per adapter object | 4323 | * @ioc: per adapter object |
4375 | * | 4324 | * |
4376 | * Returns 0 success, anything else error | 4325 | * Return: 0 success, anything else error. |
4377 | */ | 4326 | */ |
4378 | static int | 4327 | static int |
4379 | _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) | 4328 | _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) |
@@ -4975,7 +4924,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) | |||
4975 | * @ioc: Pointer to MPT_ADAPTER structure | 4924 | * @ioc: Pointer to MPT_ADAPTER structure |
4976 | * @cooked: Request raw or cooked IOC state | 4925 | * @cooked: Request raw or cooked IOC state |
4977 | * | 4926 | * |
4978 | * Returns all IOC Doorbell register bits if cooked==0, else just the | 4927 | * Return: all IOC Doorbell register bits if cooked==0, else just the |
4979 | * Doorbell bits in MPI_IOC_STATE_MASK. | 4928 | * Doorbell bits in MPI_IOC_STATE_MASK. |
4980 | */ | 4929 | */ |
4981 | u32 | 4930 | u32 |
@@ -4990,10 +4939,11 @@ mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked) | |||
4990 | 4939 | ||
4991 | /** | 4940 | /** |
4992 | * _base_wait_on_iocstate - waiting on a particular ioc state | 4941 | * _base_wait_on_iocstate - waiting on a particular ioc state |
4942 | * @ioc: ? | ||
4993 | * @ioc_state: controller state { READY, OPERATIONAL, or RESET } | 4943 | * @ioc_state: controller state { READY, OPERATIONAL, or RESET } |
4994 | * @timeout: timeout in second | 4944 | * @timeout: timeout in second |
4995 | * | 4945 | * |
4996 | * Returns 0 for success, non-zero for failure. | 4946 | * Return: 0 for success, non-zero for failure. |
4997 | */ | 4947 | */ |
4998 | static int | 4948 | static int |
4999 | _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout) | 4949 | _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout) |
@@ -5021,9 +4971,8 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout) | |||
5021 | * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by | 4971 | * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by |
5022 | * a write to the doorbell) | 4972 | * a write to the doorbell) |
5023 | * @ioc: per adapter object | 4973 | * @ioc: per adapter object |
5024 | * @timeout: timeout in second | ||
5025 | * | 4974 | * |
5026 | * Returns 0 for success, non-zero for failure. | 4975 | * Return: 0 for success, non-zero for failure. |
5027 | * | 4976 | * |
5028 | * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell. | 4977 | * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell. |
5029 | */ | 4978 | */ |
@@ -5090,7 +5039,7 @@ _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout) | |||
5090 | * @ioc: per adapter object | 5039 | * @ioc: per adapter object |
5091 | * @timeout: timeout in second | 5040 | * @timeout: timeout in second |
5092 | * | 5041 | * |
5093 | * Returns 0 for success, non-zero for failure. | 5042 | * Return: 0 for success, non-zero for failure. |
5094 | * | 5043 | * |
5095 | * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to | 5044 | * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to |
5096 | * doorbell. | 5045 | * doorbell. |
@@ -5137,8 +5086,7 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout) | |||
5137 | * @ioc: per adapter object | 5086 | * @ioc: per adapter object |
5138 | * @timeout: timeout in second | 5087 | * @timeout: timeout in second |
5139 | * | 5088 | * |
5140 | * Returns 0 for success, non-zero for failure. | 5089 | * Return: 0 for success, non-zero for failure. |
5141 | * | ||
5142 | */ | 5090 | */ |
5143 | static int | 5091 | static int |
5144 | _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout) | 5092 | _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout) |
@@ -5173,7 +5121,7 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout) | |||
5173 | * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET | 5121 | * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET |
5174 | * @timeout: timeout in second | 5122 | * @timeout: timeout in second |
5175 | * | 5123 | * |
5176 | * Returns 0 for success, non-zero for failure. | 5124 | * Return: 0 for success, non-zero for failure. |
5177 | */ | 5125 | */ |
5178 | static int | 5126 | static int |
5179 | _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout) | 5127 | _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout) |
@@ -5222,7 +5170,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout) | |||
5222 | * @reply: pointer to reply payload | 5170 | * @reply: pointer to reply payload |
5223 | * @timeout: timeout in second | 5171 | * @timeout: timeout in second |
5224 | * | 5172 | * |
5225 | * Returns 0 for success, non-zero for failure. | 5173 | * Return: 0 for success, non-zero for failure. |
5226 | */ | 5174 | */ |
5227 | static int | 5175 | static int |
5228 | _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, | 5176 | _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, |
@@ -5346,7 +5294,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, | |||
5346 | * identifying information about the device, in addition allows the host to | 5294 | * identifying information about the device, in addition allows the host to |
5347 | * remove IOC resources associated with the device. | 5295 | * remove IOC resources associated with the device. |
5348 | * | 5296 | * |
5349 | * Returns 0 for success, non-zero for failure. | 5297 | * Return: 0 for success, non-zero for failure. |
5350 | */ | 5298 | */ |
5351 | int | 5299 | int |
5352 | mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, | 5300 | mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, |
@@ -5355,7 +5303,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, | |||
5355 | { | 5303 | { |
5356 | u16 smid; | 5304 | u16 smid; |
5357 | u32 ioc_state; | 5305 | u32 ioc_state; |
5358 | bool issue_reset = false; | 5306 | u8 issue_reset = 0; |
5359 | int rc; | 5307 | int rc; |
5360 | void *request; | 5308 | void *request; |
5361 | u16 wait_state_count; | 5309 | u16 wait_state_count; |
@@ -5414,12 +5362,10 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, | |||
5414 | ioc->ioc_link_reset_in_progress) | 5362 | ioc->ioc_link_reset_in_progress) |
5415 | ioc->ioc_link_reset_in_progress = 0; | 5363 | ioc->ioc_link_reset_in_progress = 0; |
5416 | if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { | 5364 | if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { |
5417 | pr_err(MPT3SAS_FMT "%s: timeout\n", | 5365 | issue_reset = |
5418 | ioc->name, __func__); | 5366 | mpt3sas_base_check_cmd_timeout(ioc, |
5419 | _debug_dump_mf(mpi_request, | 5367 | ioc->base_cmds.status, mpi_request, |
5420 | sizeof(Mpi2SasIoUnitControlRequest_t)/4); | 5368 | sizeof(Mpi2SasIoUnitControlRequest_t)/4); |
5421 | if (!(ioc->base_cmds.status & MPT3_CMD_RESET)) | ||
5422 | issue_reset = true; | ||
5423 | goto issue_host_reset; | 5369 | goto issue_host_reset; |
5424 | } | 5370 | } |
5425 | if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) | 5371 | if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) |
@@ -5449,7 +5395,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, | |||
5449 | * The SCSI Enclosure Processor request message causes the IOC to | 5395 | * The SCSI Enclosure Processor request message causes the IOC to |
5450 | * communicate with SES devices to control LED status signals. | 5396 | * communicate with SES devices to control LED status signals. |
5451 | * | 5397 | * |
5452 | * Returns 0 for success, non-zero for failure. | 5398 | * Return: 0 for success, non-zero for failure. |
5453 | */ | 5399 | */ |
5454 | int | 5400 | int |
5455 | mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, | 5401 | mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, |
@@ -5457,7 +5403,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, | |||
5457 | { | 5403 | { |
5458 | u16 smid; | 5404 | u16 smid; |
5459 | u32 ioc_state; | 5405 | u32 ioc_state; |
5460 | bool issue_reset = false; | 5406 | u8 issue_reset = 0; |
5461 | int rc; | 5407 | int rc; |
5462 | void *request; | 5408 | void *request; |
5463 | u16 wait_state_count; | 5409 | u16 wait_state_count; |
@@ -5510,12 +5456,10 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, | |||
5510 | wait_for_completion_timeout(&ioc->base_cmds.done, | 5456 | wait_for_completion_timeout(&ioc->base_cmds.done, |
5511 | msecs_to_jiffies(10000)); | 5457 | msecs_to_jiffies(10000)); |
5512 | if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { | 5458 | if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { |
5513 | pr_err(MPT3SAS_FMT "%s: timeout\n", | 5459 | issue_reset = |
5514 | ioc->name, __func__); | 5460 | mpt3sas_base_check_cmd_timeout(ioc, |
5515 | _debug_dump_mf(mpi_request, | 5461 | ioc->base_cmds.status, mpi_request, |
5516 | sizeof(Mpi2SepRequest_t)/4); | 5462 | sizeof(Mpi2SepRequest_t)/4); |
5517 | if (!(ioc->base_cmds.status & MPT3_CMD_RESET)) | ||
5518 | issue_reset = false; | ||
5519 | goto issue_host_reset; | 5463 | goto issue_host_reset; |
5520 | } | 5464 | } |
5521 | if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) | 5465 | if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) |
@@ -5539,8 +5483,9 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, | |||
5539 | /** | 5483 | /** |
5540 | * _base_get_port_facts - obtain port facts reply and save in ioc | 5484 | * _base_get_port_facts - obtain port facts reply and save in ioc |
5541 | * @ioc: per adapter object | 5485 | * @ioc: per adapter object |
5486 | * @port: ? | ||
5542 | * | 5487 | * |
5543 | * Returns 0 for success, non-zero for failure. | 5488 | * Return: 0 for success, non-zero for failure. |
5544 | */ | 5489 | */ |
5545 | static int | 5490 | static int |
5546 | _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port) | 5491 | _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port) |
@@ -5583,7 +5528,7 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port) | |||
5583 | * @ioc: per adapter object | 5528 | * @ioc: per adapter object |
5584 | * @timeout: | 5529 | * @timeout: |
5585 | * | 5530 | * |
5586 | * Returns 0 for success, non-zero for failure. | 5531 | * Return: 0 for success, non-zero for failure. |
5587 | */ | 5532 | */ |
5588 | static int | 5533 | static int |
5589 | _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout) | 5534 | _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout) |
@@ -5637,7 +5582,7 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout) | |||
5637 | * _base_get_ioc_facts - obtain ioc facts reply and save in ioc | 5582 | * _base_get_ioc_facts - obtain ioc facts reply and save in ioc |
5638 | * @ioc: per adapter object | 5583 | * @ioc: per adapter object |
5639 | * | 5584 | * |
5640 | * Returns 0 for success, non-zero for failure. | 5585 | * Return: 0 for success, non-zero for failure. |
5641 | */ | 5586 | */ |
5642 | static int | 5587 | static int |
5643 | _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc) | 5588 | _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc) |
@@ -5681,6 +5626,9 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc) | |||
5681 | facts->WhoInit = mpi_reply.WhoInit; | 5626 | facts->WhoInit = mpi_reply.WhoInit; |
5682 | facts->NumberOfPorts = mpi_reply.NumberOfPorts; | 5627 | facts->NumberOfPorts = mpi_reply.NumberOfPorts; |
5683 | facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors; | 5628 | facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors; |
5629 | if (ioc->msix_enable && (facts->MaxMSIxVectors <= | ||
5630 | MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc))) | ||
5631 | ioc->combined_reply_queue = 0; | ||
5684 | facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit); | 5632 | facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit); |
5685 | facts->MaxReplyDescriptorPostQueueDepth = | 5633 | facts->MaxReplyDescriptorPostQueueDepth = |
5686 | le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth); | 5634 | le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth); |
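The lines added to _base_get_ioc_facts() disable the combined reply queue scheme when the firmware exposes no more MSI-X vectors than a single supplemental reply post host index group can cover; MAX_COMBINED_MSIX_VECTORS() is introduced in the mpt3sas_base.h hunk below (16 vectors for SAS3.5 controllers, 8 otherwise). Condensed, the gate reads:

	#define MAX_COMBINED_MSIX_VECTORS(gen35) ((gen35 == 1) ? 16 : 8)

	/* in _base_get_ioc_facts(): with this few vectors, fall back to the
	 * single reply post host index register */
	if (ioc->msix_enable && (facts->MaxMSIxVectors <=
	    MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc)))
		ioc->combined_reply_queue = 0;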
@@ -5736,7 +5684,7 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc) | |||
5736 | * _base_send_ioc_init - send ioc_init to firmware | 5684 | * _base_send_ioc_init - send ioc_init to firmware |
5737 | * @ioc: per adapter object | 5685 | * @ioc: per adapter object |
5738 | * | 5686 | * |
5739 | * Returns 0 for success, non-zero for failure. | 5687 | * Return: 0 for success, non-zero for failure. |
5740 | */ | 5688 | */ |
5741 | static int | 5689 | static int |
5742 | _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc) | 5690 | _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc) |
@@ -5837,8 +5785,8 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc) | |||
5837 | * @msix_index: MSIX table index supplied by the OS | 5785 | * @msix_index: MSIX table index supplied by the OS |
5838 | * @reply: reply message frame(lower 32bit addr) | 5786 | * @reply: reply message frame(lower 32bit addr) |
5839 | * | 5787 | * |
5840 | * Return 1 meaning mf should be freed from _base_interrupt | 5788 | * Return: 1 meaning mf should be freed from _base_interrupt |
5841 | * 0 means the mf is freed from this function. | 5789 | * 0 means the mf is freed from this function. |
5842 | */ | 5790 | */ |
5843 | u8 | 5791 | u8 |
5844 | mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | 5792 | mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, |
@@ -5883,7 +5831,7 @@ mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | |||
5883 | * _base_send_port_enable - send port_enable(discovery stuff) to firmware | 5831 | * _base_send_port_enable - send port_enable(discovery stuff) to firmware |
5884 | * @ioc: per adapter object | 5832 | * @ioc: per adapter object |
5885 | * | 5833 | * |
5886 | * Returns 0 for success, non-zero for failure. | 5834 | * Return: 0 for success, non-zero for failure. |
5887 | */ | 5835 | */ |
5888 | static int | 5836 | static int |
5889 | _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc) | 5837 | _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc) |
@@ -5950,7 +5898,7 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc) | |||
5950 | * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply) | 5898 | * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply) |
5951 | * @ioc: per adapter object | 5899 | * @ioc: per adapter object |
5952 | * | 5900 | * |
5953 | * Returns 0 for success, non-zero for failure. | 5901 | * Return: 0 for success, non-zero for failure. |
5954 | */ | 5902 | */ |
5955 | int | 5903 | int |
5956 | mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc) | 5904 | mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc) |
@@ -5990,7 +5938,7 @@ mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc) | |||
5990 | * Decide whether to wait on discovery to complete. Used to either | 5938 | * Decide whether to wait on discovery to complete. Used to either |
5991 | * locate boot device, or report volumes ahead of physical devices. | 5939 | * locate boot device, or report volumes ahead of physical devices. |
5992 | * | 5940 | * |
5993 | * Returns 1 for wait, 0 for don't wait | 5941 | * Return: 1 for wait, 0 for don't wait. |
5994 | */ | 5942 | */ |
5995 | static int | 5943 | static int |
5996 | _base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc) | 5944 | _base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc) |
@@ -6062,7 +6010,7 @@ _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event) | |||
6062 | * _base_event_notification - send event notification | 6010 | * _base_event_notification - send event notification |
6063 | * @ioc: per adapter object | 6011 | * @ioc: per adapter object |
6064 | * | 6012 | * |
6065 | * Returns 0 for success, non-zero for failure. | 6013 | * Return: 0 for success, non-zero for failure. |
6066 | */ | 6014 | */ |
6067 | static int | 6015 | static int |
6068 | _base_event_notification(struct MPT3SAS_ADAPTER *ioc) | 6016 | _base_event_notification(struct MPT3SAS_ADAPTER *ioc) |
@@ -6119,7 +6067,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc) | |||
6119 | /** | 6067 | /** |
6120 | * mpt3sas_base_validate_event_type - validating event types | 6068 | * mpt3sas_base_validate_event_type - validating event types |
6121 | * @ioc: per adapter object | 6069 | * @ioc: per adapter object |
6122 | * @event: firmware event | 6070 | * @event_type: firmware event |
6123 | * | 6071 | * |
6124 | * This will turn on firmware event notification when application | 6072 | * This will turn on firmware event notification when application |
6125 | * ask for that event. We don't mask events that are already enabled. | 6073 | * ask for that event. We don't mask events that are already enabled. |
@@ -6157,7 +6105,7 @@ mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type) | |||
6157 | * _base_diag_reset - the "big hammer" start of day reset | 6105 | * _base_diag_reset - the "big hammer" start of day reset |
6158 | * @ioc: per adapter object | 6106 | * @ioc: per adapter object |
6159 | * | 6107 | * |
6160 | * Returns 0 for success, non-zero for failure. | 6108 | * Return: 0 for success, non-zero for failure. |
6161 | */ | 6109 | */ |
6162 | static int | 6110 | static int |
6163 | _base_diag_reset(struct MPT3SAS_ADAPTER *ioc) | 6111 | _base_diag_reset(struct MPT3SAS_ADAPTER *ioc) |
@@ -6271,7 +6219,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc) | |||
6271 | * @ioc: per adapter object | 6219 | * @ioc: per adapter object |
6272 | * @type: FORCE_BIG_HAMMER or SOFT_RESET | 6220 | * @type: FORCE_BIG_HAMMER or SOFT_RESET |
6273 | * | 6221 | * |
6274 | * Returns 0 for success, non-zero for failure. | 6222 | * Return: 0 for success, non-zero for failure. |
6275 | */ | 6223 | */ |
6276 | static int | 6224 | static int |
6277 | _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type) | 6225 | _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type) |
@@ -6340,7 +6288,7 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type) | |||
6340 | * _base_make_ioc_operational - put controller in OPERATIONAL state | 6288 | * _base_make_ioc_operational - put controller in OPERATIONAL state |
6341 | * @ioc: per adapter object | 6289 | * @ioc: per adapter object |
6342 | * | 6290 | * |
6343 | * Returns 0 for success, non-zero for failure. | 6291 | * Return: 0 for success, non-zero for failure. |
6344 | */ | 6292 | */ |
6345 | static int | 6293 | static int |
6346 | _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc) | 6294 | _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc) |
@@ -6513,8 +6461,6 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc) | |||
6513 | /** | 6461 | /** |
6514 | * mpt3sas_base_free_resources - free resources controller resources | 6462 | * mpt3sas_base_free_resources - free resources controller resources |
6515 | * @ioc: per adapter object | 6463 | * @ioc: per adapter object |
6516 | * | ||
6517 | * Return nothing. | ||
6518 | */ | 6464 | */ |
6519 | void | 6465 | void |
6520 | mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc) | 6466 | mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc) |
@@ -6540,7 +6486,7 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc) | |||
6540 | * mpt3sas_base_attach - attach controller instance | 6486 | * mpt3sas_base_attach - attach controller instance |
6541 | * @ioc: per adapter object | 6487 | * @ioc: per adapter object |
6542 | * | 6488 | * |
6543 | * Returns 0 for success, non-zero for failure. | 6489 | * Return: 0 for success, non-zero for failure. |
6544 | */ | 6490 | */ |
6545 | int | 6491 | int |
6546 | mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) | 6492 | mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) |
@@ -6797,8 +6743,6 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) | |||
6797 | /** | 6743 | /** |
6798 | * mpt3sas_base_detach - remove controller instance | 6744 | * mpt3sas_base_detach - remove controller instance |
6799 | * @ioc: per adapter object | 6745 | * @ioc: per adapter object |
6800 | * | ||
6801 | * Return nothing. | ||
6802 | */ | 6746 | */ |
6803 | void | 6747 | void |
6804 | mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc) | 6748 | mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc) |
@@ -6830,65 +6774,69 @@ mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc) | |||
6830 | } | 6774 | } |
6831 | 6775 | ||
6832 | /** | 6776 | /** |
6833 | * _base_reset_handler - reset callback handler (for base) | 6777 | * _base_pre_reset_handler - pre reset handler |
6834 | * @ioc: per adapter object | 6778 | * @ioc: per adapter object |
6835 | * @reset_phase: phase | ||
6836 | * | ||
6837 | * The handler for doing any required cleanup or initialization. | ||
6838 | * | ||
6839 | * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET, | ||
6840 | * MPT3_IOC_DONE_RESET | ||
6841 | * | ||
6842 | * Return nothing. | ||
6843 | */ | 6779 | */ |
6844 | static void | 6780 | static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc) |
6845 | _base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase) | ||
6846 | { | 6781 | { |
6847 | mpt3sas_scsih_reset_handler(ioc, reset_phase); | 6782 | mpt3sas_scsih_pre_reset_handler(ioc); |
6848 | mpt3sas_ctl_reset_handler(ioc, reset_phase); | 6783 | mpt3sas_ctl_pre_reset_handler(ioc); |
6849 | switch (reset_phase) { | 6784 | dtmprintk(ioc, pr_info(MPT3SAS_FMT |
6850 | case MPT3_IOC_PRE_RESET: | 6785 | "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__)); |
6851 | dtmprintk(ioc, pr_info(MPT3SAS_FMT | 6786 | } |
6852 | "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__)); | 6787 | |
6853 | break; | 6788 | /** |
6854 | case MPT3_IOC_AFTER_RESET: | 6789 | * _base_after_reset_handler - after reset handler |
6855 | dtmprintk(ioc, pr_info(MPT3SAS_FMT | 6790 | * @ioc: per adapter object |
6856 | "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__)); | 6791 | */ |
6857 | if (ioc->transport_cmds.status & MPT3_CMD_PENDING) { | 6792 | static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc) |
6858 | ioc->transport_cmds.status |= MPT3_CMD_RESET; | 6793 | { |
6859 | mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid); | 6794 | mpt3sas_scsih_after_reset_handler(ioc); |
6860 | complete(&ioc->transport_cmds.done); | 6795 | mpt3sas_ctl_after_reset_handler(ioc); |
6861 | } | 6796 | dtmprintk(ioc, pr_info(MPT3SAS_FMT |
6862 | if (ioc->base_cmds.status & MPT3_CMD_PENDING) { | 6797 | "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__)); |
6863 | ioc->base_cmds.status |= MPT3_CMD_RESET; | 6798 | if (ioc->transport_cmds.status & MPT3_CMD_PENDING) { |
6864 | mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid); | 6799 | ioc->transport_cmds.status |= MPT3_CMD_RESET; |
6865 | complete(&ioc->base_cmds.done); | 6800 | mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid); |
6866 | } | 6801 | complete(&ioc->transport_cmds.done); |
6867 | if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { | 6802 | } |
6868 | ioc->port_enable_failed = 1; | 6803 | if (ioc->base_cmds.status & MPT3_CMD_PENDING) { |
6869 | ioc->port_enable_cmds.status |= MPT3_CMD_RESET; | 6804 | ioc->base_cmds.status |= MPT3_CMD_RESET; |
6870 | mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid); | 6805 | mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid); |
6871 | if (ioc->is_driver_loading) { | 6806 | complete(&ioc->base_cmds.done); |
6872 | ioc->start_scan_failed = | 6807 | } |
6873 | MPI2_IOCSTATUS_INTERNAL_ERROR; | 6808 | if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { |
6874 | ioc->start_scan = 0; | 6809 | ioc->port_enable_failed = 1; |
6875 | ioc->port_enable_cmds.status = | 6810 | ioc->port_enable_cmds.status |= MPT3_CMD_RESET; |
6876 | MPT3_CMD_NOT_USED; | 6811 | mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid); |
6877 | } else | 6812 | if (ioc->is_driver_loading) { |
6878 | complete(&ioc->port_enable_cmds.done); | 6813 | ioc->start_scan_failed = |
6879 | } | 6814 | MPI2_IOCSTATUS_INTERNAL_ERROR; |
6880 | if (ioc->config_cmds.status & MPT3_CMD_PENDING) { | 6815 | ioc->start_scan = 0; |
6881 | ioc->config_cmds.status |= MPT3_CMD_RESET; | 6816 | ioc->port_enable_cmds.status = |
6882 | mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid); | 6817 | MPT3_CMD_NOT_USED; |
6883 | ioc->config_cmds.smid = USHRT_MAX; | 6818 | } else { |
6884 | complete(&ioc->config_cmds.done); | 6819 | complete(&ioc->port_enable_cmds.done); |
6885 | } | 6820 | } |
6886 | break; | ||
6887 | case MPT3_IOC_DONE_RESET: | ||
6888 | dtmprintk(ioc, pr_info(MPT3SAS_FMT | ||
6889 | "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__)); | ||
6890 | break; | ||
6891 | } | 6821 | } |
6822 | if (ioc->config_cmds.status & MPT3_CMD_PENDING) { | ||
6823 | ioc->config_cmds.status |= MPT3_CMD_RESET; | ||
6824 | mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid); | ||
6825 | ioc->config_cmds.smid = USHRT_MAX; | ||
6826 | complete(&ioc->config_cmds.done); | ||
6827 | } | ||
6828 | } | ||
6829 | |||
6830 | /** | ||
6831 | * _base_reset_done_handler - reset done handler | ||
6832 | * @ioc: per adapter object | ||
6833 | */ | ||
6834 | static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc) | ||
6835 | { | ||
6836 | mpt3sas_scsih_reset_done_handler(ioc); | ||
6837 | mpt3sas_ctl_reset_done_handler(ioc); | ||
6838 | dtmprintk(ioc, pr_info(MPT3SAS_FMT | ||
6839 | "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__)); | ||
6892 | } | 6840 | } |
6893 | 6841 | ||
6894 | /** | 6842 | /** |
@@ -6910,7 +6858,7 @@ mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc) | |||
6910 | return; | 6858 | return; |
6911 | 6859 | ||
6912 | /* pending command count */ | 6860 | /* pending command count */ |
6913 | ioc->pending_io_count = atomic_read(&ioc->shost->host_busy); | 6861 | ioc->pending_io_count = scsi_host_busy(ioc->shost); |
6914 | 6862 | ||
6915 | if (!ioc->pending_io_count) | 6863 | if (!ioc->pending_io_count) |
6916 | return; | 6864 | return; |
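Reading shost->host_busy directly ties the driver to a midlayer implementation detail that was being phased out; scsi_host_busy() asks the SCSI core for the number of outstanding commands instead. The call site ends up as (sketch, surrounding context abridged):

	#include <scsi/scsi_host.h>

	/* pending command count: previously atomic_read(&ioc->shost->host_busy) */
	ioc->pending_io_count = scsi_host_busy(ioc->shost);
	if (!ioc->pending_io_count)
		return;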
@@ -6924,7 +6872,7 @@ mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc) | |||
6924 | * @ioc: Pointer to MPT_ADAPTER structure | 6872 | * @ioc: Pointer to MPT_ADAPTER structure |
6925 | * @type: FORCE_BIG_HAMMER or SOFT_RESET | 6873 | * @type: FORCE_BIG_HAMMER or SOFT_RESET |
6926 | * | 6874 | * |
6927 | * Returns 0 for success, non-zero for failure. | 6875 | * Return: 0 for success, non-zero for failure. |
6928 | */ | 6876 | */ |
6929 | int | 6877 | int |
6930 | mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, | 6878 | mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, |
@@ -6949,14 +6897,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, | |||
6949 | mpt3sas_halt_firmware(ioc); | 6897 | mpt3sas_halt_firmware(ioc); |
6950 | 6898 | ||
6951 | /* wait for an active reset in progress to complete */ | 6899 | /* wait for an active reset in progress to complete */ |
6952 | if (!mutex_trylock(&ioc->reset_in_progress_mutex)) { | 6900 | mutex_lock(&ioc->reset_in_progress_mutex); |
6953 | do { | ||
6954 | ssleep(1); | ||
6955 | } while (ioc->shost_recovery == 1); | ||
6956 | dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name, | ||
6957 | __func__)); | ||
6958 | return ioc->ioc_reset_in_progress_status; | ||
6959 | } | ||
6960 | 6901 | ||
6961 | spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); | 6902 | spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); |
6962 | ioc->shost_recovery = 1; | 6903 | ioc->shost_recovery = 1; |
@@ -6971,13 +6912,13 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, | |||
6971 | if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) | 6912 | if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) |
6972 | is_fault = 1; | 6913 | is_fault = 1; |
6973 | } | 6914 | } |
6974 | _base_reset_handler(ioc, MPT3_IOC_PRE_RESET); | 6915 | _base_pre_reset_handler(ioc); |
6975 | mpt3sas_wait_for_commands_to_complete(ioc); | 6916 | mpt3sas_wait_for_commands_to_complete(ioc); |
6976 | _base_mask_interrupts(ioc); | 6917 | _base_mask_interrupts(ioc); |
6977 | r = _base_make_ioc_ready(ioc, type); | 6918 | r = _base_make_ioc_ready(ioc, type); |
6978 | if (r) | 6919 | if (r) |
6979 | goto out; | 6920 | goto out; |
6980 | _base_reset_handler(ioc, MPT3_IOC_AFTER_RESET); | 6921 | _base_after_reset_handler(ioc); |
6981 | 6922 | ||
6982 | /* If this hard reset is called while port enable is active, then | 6923 | /* If this hard reset is called while port enable is active, then |
6983 | * there is no reason to call make_ioc_operational | 6924 | * there is no reason to call make_ioc_operational |
@@ -6998,14 +6939,13 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, | |||
6998 | 6939 | ||
6999 | r = _base_make_ioc_operational(ioc); | 6940 | r = _base_make_ioc_operational(ioc); |
7000 | if (!r) | 6941 | if (!r) |
7001 | _base_reset_handler(ioc, MPT3_IOC_DONE_RESET); | 6942 | _base_reset_done_handler(ioc); |
7002 | 6943 | ||
7003 | out: | 6944 | out: |
7004 | dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n", | 6945 | dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n", |
7005 | ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED"))); | 6946 | ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED"))); |
7006 | 6947 | ||
7007 | spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); | 6948 | spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); |
7008 | ioc->ioc_reset_in_progress_status = r; | ||
7009 | ioc->shost_recovery = 0; | 6949 | ioc->shost_recovery = 0; |
7010 | spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); | 6950 | spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); |
7011 | ioc->ioc_reset_count++; | 6951 | ioc->ioc_reset_count++; |
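Taken together, the hard-reset hunks above replace the trylock-and-poll wait with a plain mutex_lock() (a competing reset now simply blocks until the owner finishes, so the ioc_reset_in_progress_status field can go away) and split the single phase-switch callback into three dedicated handlers. A condensed sketch of the resulting flow in mpt3sas_base_hard_reset_handler(), with error paths, spinlock protection of shost_recovery, and the port-enable special case abridged:

	mutex_lock(&ioc->reset_in_progress_mutex);

	ioc->shost_recovery = 1;
	_base_pre_reset_handler(ioc);		/* scsih + ctl pre-reset cleanup */
	mpt3sas_wait_for_commands_to_complete(ioc);
	_base_mask_interrupts(ioc);

	r = _base_make_ioc_ready(ioc, type);
	if (!r) {
		_base_after_reset_handler(ioc);	/* fail/flush pending internal cmds */
		r = _base_make_ioc_operational(ioc);
		if (!r)
			_base_reset_done_handler(ioc);	/* links re-initialized */
	}

	ioc->shost_recovery = 0;
	mutex_unlock(&ioc->reset_in_progress_mutex);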
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index f02974c0be4a..96dc15e90bd8 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h | |||
@@ -74,8 +74,8 @@ | |||
74 | #define MPT3SAS_DRIVER_NAME "mpt3sas" | 74 | #define MPT3SAS_DRIVER_NAME "mpt3sas" |
75 | #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" | 75 | #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" |
76 | #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" | 76 | #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" |
77 | #define MPT3SAS_DRIVER_VERSION "25.100.00.00" | 77 | #define MPT3SAS_DRIVER_VERSION "26.100.00.00" |
78 | #define MPT3SAS_MAJOR_VERSION 25 | 78 | #define MPT3SAS_MAJOR_VERSION 26 |
79 | #define MPT3SAS_MINOR_VERSION 100 | 79 | #define MPT3SAS_MINOR_VERSION 100 |
80 | #define MPT3SAS_BUILD_VERSION 0 | 80 | #define MPT3SAS_BUILD_VERSION 0 |
81 | #define MPT3SAS_RELEASE_VERSION 00 | 81 | #define MPT3SAS_RELEASE_VERSION 00 |
@@ -143,21 +143,17 @@ | |||
143 | * NVMe defines | 143 | * NVMe defines |
144 | */ | 144 | */ |
145 | #define NVME_PRP_SIZE 8 /* PRP size */ | 145 | #define NVME_PRP_SIZE 8 /* PRP size */ |
146 | #define NVME_CMD_PRP1_OFFSET 24 /* PRP1 offset in NVMe cmd */ | ||
147 | #define NVME_CMD_PRP2_OFFSET 32 /* PRP2 offset in NVMe cmd */ | ||
148 | #define NVME_ERROR_RESPONSE_SIZE 16 /* Max NVME Error Response */ | 146 | #define NVME_ERROR_RESPONSE_SIZE 16 /* Max NVME Error Response */ |
149 | #define NVME_TASK_ABORT_MIN_TIMEOUT 6 | 147 | #define NVME_TASK_ABORT_MIN_TIMEOUT 6 |
150 | #define NVME_TASK_ABORT_MAX_TIMEOUT 60 | 148 | #define NVME_TASK_ABORT_MAX_TIMEOUT 60 |
151 | #define NVME_TASK_MNGT_CUSTOM_MASK (0x0010) | 149 | #define NVME_TASK_MNGT_CUSTOM_MASK (0x0010) |
152 | #define NVME_PRP_PAGE_SIZE 4096 /* Page size */ | 150 | #define NVME_PRP_PAGE_SIZE 4096 /* Page size */ |
153 | 151 | ||
154 | 152 | struct mpt3sas_nvme_cmd { | |
155 | /* | 153 | u8 rsvd[24]; |
156 | * reset phases | 154 | __le64 prp1; |
157 | */ | 155 | __le64 prp2; |
158 | #define MPT3_IOC_PRE_RESET 1 /* prior to host reset */ | 156 | }; |
159 | #define MPT3_IOC_AFTER_RESET 2 /* just after host reset */ | ||
160 | #define MPT3_IOC_DONE_RESET 3 /* links re-initialized */ | ||
161 | 157 | ||
162 | /* | 158 | /* |
163 | * logging format | 159 | * logging format |
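The PRP1/PRP2 byte offsets into the raw NVMe command (24 and 32, matching the 24-byte reserved prefix above) are replaced by a typed overlay, struct mpt3sas_nvme_cmd, so callers can assign the PRP entries through named, endian-annotated fields instead of pointer arithmetic. The PRP-building code itself is not shown in these hunks; a hypothetical use of the struct, with illustrative variable names, would look like:

	/* nvme_cmd_buf points at the raw NVMe command embedded in the
	 * MPI NVMe Encapsulated request (buffer name illustrative) */
	struct mpt3sas_nvme_cmd *nvme_cmd =
		(struct mpt3sas_nvme_cmd *)nvme_cmd_buf;

	nvme_cmd->prp1 = cpu_to_le64(prp1_dma);	/* first PRP entry */
	nvme_cmd->prp2 = cpu_to_le64(prp2_dma);	/* second PRP entry or PRP list */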
@@ -323,6 +319,7 @@ | |||
323 | * There are twelve Supplemental Reply Post Host Index Registers | 319 | * There are twelve Supplemental Reply Post Host Index Registers |
324 | * and each register is at offset 0x10 bytes from the previous one. | 320 | * and each register is at offset 0x10 bytes from the previous one. |
325 | */ | 321 | */ |
322 | #define MAX_COMBINED_MSIX_VECTORS(gen35) ((gen35 == 1) ? 16 : 8) | ||
326 | #define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3 12 | 323 | #define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3 12 |
327 | #define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35 16 | 324 | #define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35 16 |
328 | #define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10) | 325 | #define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10) |
@@ -1162,7 +1159,6 @@ struct MPT3SAS_ADAPTER { | |||
1162 | struct mutex reset_in_progress_mutex; | 1159 | struct mutex reset_in_progress_mutex; |
1163 | spinlock_t ioc_reset_in_progress_lock; | 1160 | spinlock_t ioc_reset_in_progress_lock; |
1164 | u8 ioc_link_reset_in_progress; | 1161 | u8 ioc_link_reset_in_progress; |
1165 | u8 ioc_reset_in_progress_status; | ||
1166 | 1162 | ||
1167 | u8 ignore_loginfos; | 1163 | u8 ignore_loginfos; |
1168 | u8 remove_host; | 1164 | u8 remove_host; |
@@ -1482,13 +1478,17 @@ int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc); | |||
1482 | void | 1478 | void |
1483 | mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc); | 1479 | mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc); |
1484 | 1480 | ||
1481 | u8 mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc, | ||
1482 | u8 status, void *mpi_request, int sz); | ||
1485 | 1483 | ||
1486 | /* scsih shared API */ | 1484 | /* scsih shared API */ |
1487 | struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, | 1485 | struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, |
1488 | u16 smid); | 1486 | u16 smid); |
1489 | u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, | 1487 | u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, |
1490 | u32 reply); | 1488 | u32 reply); |
1491 | void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase); | 1489 | void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc); |
1490 | void mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc); | ||
1491 | void mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc); | ||
1492 | 1492 | ||
1493 | int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun, | 1493 | int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun, |
1494 | u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method); | 1494 | u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method); |
@@ -1615,7 +1615,9 @@ void mpt3sas_ctl_init(ushort hbas_to_enumerate); | |||
1615 | void mpt3sas_ctl_exit(ushort hbas_to_enumerate); | 1615 | void mpt3sas_ctl_exit(ushort hbas_to_enumerate); |
1616 | u8 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | 1616 | u8 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, |
1617 | u32 reply); | 1617 | u32 reply); |
1618 | void mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase); | 1618 | void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc); |
1619 | void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc); | ||
1620 | void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc); | ||
1619 | u8 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, | 1621 | u8 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, |
1620 | u8 msix_index, u32 reply); | 1622 | u8 msix_index, u32 reply); |
1621 | void mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc, | 1623 | void mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc, |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c index e87c76a832f6..d29a2dcc7d0e 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_config.c +++ b/drivers/scsi/mpt3sas/mpt3sas_config.c | |||
@@ -198,7 +198,7 @@ _config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid, | |||
198 | * | 198 | * |
199 | * A wrapper for obtaining dma-able memory for config page request. | 199 | * A wrapper for obtaining dma-able memory for config page request. |
200 | * | 200 | * |
201 | * Returns 0 for success, non-zero for failure. | 201 | * Return: 0 for success, non-zero for failure. |
202 | */ | 202 | */ |
203 | static int | 203 | static int |
204 | _config_alloc_config_dma_memory(struct MPT3SAS_ADAPTER *ioc, | 204 | _config_alloc_config_dma_memory(struct MPT3SAS_ADAPTER *ioc, |
@@ -230,7 +230,7 @@ _config_alloc_config_dma_memory(struct MPT3SAS_ADAPTER *ioc, | |||
230 | * | 230 | * |
231 | * A wrapper to free dma-able memory when using _config_alloc_config_dma_memory. | 231 | * A wrapper to free dma-able memory when using _config_alloc_config_dma_memory. |
232 | * | 232 | * |
233 | * Returns 0 for success, non-zero for failure. | 233 | * Return: 0 for success, non-zero for failure. |
234 | */ | 234 | */ |
235 | static void | 235 | static void |
236 | _config_free_config_dma_memory(struct MPT3SAS_ADAPTER *ioc, | 236 | _config_free_config_dma_memory(struct MPT3SAS_ADAPTER *ioc, |
@@ -251,8 +251,8 @@ _config_free_config_dma_memory(struct MPT3SAS_ADAPTER *ioc, | |||
251 | * | 251 | * |
252 | * The callback handler when using _config_request. | 252 | * The callback handler when using _config_request. |
253 | * | 253 | * |
254 | * Return 1 meaning mf should be freed from _base_interrupt | 254 | * Return: 1 meaning mf should be freed from _base_interrupt |
255 | * 0 means the mf is freed from this function. | 255 | * 0 means the mf is freed from this function. |
256 | */ | 256 | */ |
257 | u8 | 257 | u8 |
258 | mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | 258 | mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, |
@@ -295,7 +295,7 @@ mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | |||
295 | * | 295 | * |
296 | * The callback index is set inside `ioc->config_cb_idx. | 296 | * The callback index is set inside `ioc->config_cb_idx. |
297 | * | 297 | * |
298 | * Returns 0 for success, non-zero for failure. | 298 | * Return: 0 for success, non-zero for failure. |
299 | */ | 299 | */ |
300 | static int | 300 | static int |
301 | _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t | 301 | _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t |
@@ -406,10 +406,9 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t | |||
406 | mpt3sas_base_put_smid_default(ioc, smid); | 406 | mpt3sas_base_put_smid_default(ioc, smid); |
407 | wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ); | 407 | wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ); |
408 | if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) { | 408 | if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) { |
409 | pr_err(MPT3SAS_FMT "%s: timeout\n", | 409 | mpt3sas_base_check_cmd_timeout(ioc, |
410 | ioc->name, __func__); | 410 | ioc->config_cmds.status, mpi_request, |
411 | _debug_dump_mf(mpi_request, | 411 | sizeof(Mpi2ConfigRequest_t)/4); |
412 | sizeof(Mpi2ConfigRequest_t)/4); | ||
413 | retry_count++; | 412 | retry_count++; |
414 | if (ioc->config_cmds.smid == smid) | 413 | if (ioc->config_cmds.smid == smid) |
415 | mpt3sas_base_free_smid(ioc, smid); | 414 | mpt3sas_base_free_smid(ioc, smid); |
@@ -519,7 +518,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t | |||
519 | * @config_page: contents of the config page | 518 | * @config_page: contents of the config page |
520 | * Context: sleep. | 519 | * Context: sleep. |
521 | * | 520 | * |
522 | * Returns 0 for success, non-zero for failure. | 521 | * Return: 0 for success, non-zero for failure. |
523 | */ | 522 | */ |
524 | int | 523 | int |
525 | mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc, | 524 | mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc, |
@@ -556,7 +555,7 @@ mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc, | |||
556 | * @sz: size of buffer passed in config_page | 555 | * @sz: size of buffer passed in config_page |
557 | * Context: sleep. | 556 | * Context: sleep. |
558 | * | 557 | * |
559 | * Returns 0 for success, non-zero for failure. | 558 | * Return: 0 for success, non-zero for failure. |
560 | */ | 559 | */ |
561 | int | 560 | int |
562 | mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc, | 561 | mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc, |
@@ -593,7 +592,7 @@ mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc, | |||
593 | * @config_page: contents of the config page | 592 | * @config_page: contents of the config page |
594 | * Context: sleep. | 593 | * Context: sleep. |
595 | * | 594 | * |
596 | * Returns 0 for success, non-zero for failure. | 595 | * Return: 0 for success, non-zero for failure. |
597 | */ | 596 | */ |
598 | int | 597 | int |
599 | mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc, | 598 | mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc, |
@@ -630,7 +629,7 @@ mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc, | |||
630 | * @config_page: contents of the config page | 629 | * @config_page: contents of the config page |
631 | * Context: sleep. | 630 | * Context: sleep. |
632 | * | 631 | * |
633 | * Returns 0 for success, non-zero for failure. | 632 | * Return: 0 for success, non-zero for failure. |
634 | */ | 633 | */ |
635 | int | 634 | int |
636 | mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, | 635 | mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, |
@@ -667,7 +666,7 @@ mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, | |||
667 | * @config_page: contents of the config page | 666 | * @config_page: contents of the config page |
668 | * Context: sleep. | 667 | * Context: sleep. |
669 | * | 668 | * |
670 | * Returns 0 for success, non-zero for failure. | 669 | * Return: 0 for success, non-zero for failure. |
671 | */ | 670 | */ |
672 | int | 671 | int |
673 | mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, | 672 | mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, |
@@ -708,7 +707,7 @@ mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, | |||
708 | * @config_page: contents of the config page | 707 | * @config_page: contents of the config page |
709 | * Context: sleep. | 708 | * Context: sleep. |
710 | * | 709 | * |
711 | * Returns 0 for success, non-zero for failure. | 710 | * Return: 0 for success, non-zero for failure. |
712 | */ | 711 | */ |
713 | int | 712 | int |
714 | mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc, | 713 | mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc, |
@@ -744,7 +743,7 @@ mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc, | |||
744 | * @config_page: contents of the config page | 743 | * @config_page: contents of the config page |
745 | * Context: sleep. | 744 | * Context: sleep. |
746 | * | 745 | * |
747 | * Returns 0 for success, non-zero for failure. | 746 | * Return: 0 for success, non-zero for failure. |
748 | */ | 747 | */ |
749 | int | 748 | int |
750 | mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t | 749 | mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t |
@@ -780,7 +779,7 @@ mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
780 | * @config_page: contents of the config page | 779 | * @config_page: contents of the config page |
781 | * Context: sleep. | 780 | * Context: sleep. |
782 | * | 781 | * |
783 | * Returns 0 for success, non-zero for failure. | 782 | * Return: 0 for success, non-zero for failure. |
784 | */ | 783 | */ |
785 | int | 784 | int |
786 | mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, | 785 | mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, |
@@ -816,7 +815,7 @@ mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, | |||
816 | * @config_page: contents of the config page | 815 | * @config_page: contents of the config page |
817 | * Context: sleep. | 816 | * Context: sleep. |
818 | * | 817 | * |
819 | * Returns 0 for success, non-zero for failure. | 818 | * Return: 0 for success, non-zero for failure. |
820 | */ | 819 | */ |
821 | int | 820 | int |
822 | mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, | 821 | mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, |
@@ -852,7 +851,7 @@ mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, | |||
852 | * @config_page: contents of the config page | 851 | * @config_page: contents of the config page |
853 | * Context: sleep. | 852 | * Context: sleep. |
854 | * | 853 | * |
855 | * Returns 0 for success, non-zero for failure. | 854 | * Return: 0 for success, non-zero for failure. |
856 | */ | 855 | */ |
857 | int | 856 | int |
858 | mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, | 857 | mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, |
@@ -889,7 +888,7 @@ mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, | |||
889 | * @sz: size of buffer passed in config_page | 888 | * @sz: size of buffer passed in config_page |
890 | * Context: sleep. | 889 | * Context: sleep. |
891 | * | 890 | * |
892 | * Returns 0 for success, non-zero for failure. | 891 | * Return: 0 for success, non-zero for failure. |
893 | */ | 892 | */ |
894 | int | 893 | int |
895 | mpt3sas_config_get_iounit_pg3(struct MPT3SAS_ADAPTER *ioc, | 894 | mpt3sas_config_get_iounit_pg3(struct MPT3SAS_ADAPTER *ioc, |
@@ -924,7 +923,7 @@ mpt3sas_config_get_iounit_pg3(struct MPT3SAS_ADAPTER *ioc, | |||
924 | * @config_page: contents of the config page | 923 | * @config_page: contents of the config page |
925 | * Context: sleep. | 924 | * Context: sleep. |
926 | * | 925 | * |
927 | * Returns 0 for success, non-zero for failure. | 926 | * Return: 0 for success, non-zero for failure. |
928 | */ | 927 | */ |
929 | int | 928 | int |
930 | mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc, | 929 | mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc, |
@@ -960,7 +959,7 @@ mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc, | |||
960 | * @config_page: contents of the config page | 959 | * @config_page: contents of the config page |
961 | * Context: sleep. | 960 | * Context: sleep. |
962 | * | 961 | * |
963 | * Returns 0 for success, non-zero for failure. | 962 | * Return: 0 for success, non-zero for failure. |
964 | */ | 963 | */ |
965 | int | 964 | int |
966 | mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc, | 965 | mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc, |
@@ -998,7 +997,7 @@ mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc, | |||
998 | * @handle: device handle | 997 | * @handle: device handle |
999 | * Context: sleep. | 998 | * Context: sleep. |
1000 | * | 999 | * |
1001 | * Returns 0 for success, non-zero for failure. | 1000 | * Return: 0 for success, non-zero for failure. |
1002 | */ | 1001 | */ |
1003 | int | 1002 | int |
1004 | mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc, | 1003 | mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc, |
@@ -1039,7 +1038,7 @@ mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc, | |||
1039 | * @handle: device handle | 1038 | * @handle: device handle |
1040 | * Context: sleep. | 1039 | * Context: sleep. |
1041 | * | 1040 | * |
1042 | * Returns 0 for success, non-zero for failure. | 1041 | * Return: 0 for success, non-zero for failure. |
1043 | */ | 1042 | */ |
1044 | int | 1043 | int |
1045 | mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc, | 1044 | mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc, |
@@ -1080,7 +1079,7 @@ mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc, | |||
1080 | * @handle: device handle | 1079 | * @handle: device handle |
1081 | * Context: sleep. | 1080 | * Context: sleep. |
1082 | * | 1081 | * |
1083 | * Returns 0 for success, non-zero for failure. | 1082 | * Return: 0 for success, non-zero for failure. |
1084 | */ | 1083 | */ |
1085 | int | 1084 | int |
1086 | mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc, | 1085 | mpt3sas_config_get_pcie_device_pg0(struct MPT3SAS_ADAPTER *ioc, |
@@ -1121,7 +1120,7 @@ out: | |||
1121 | * @handle: device handle | 1120 | * @handle: device handle |
1122 | * Context: sleep. | 1121 | * Context: sleep. |
1123 | * | 1122 | * |
1124 | * Returns 0 for success, non-zero for failure. | 1123 | * Return: 0 for success, non-zero for failure. |
1125 | */ | 1124 | */ |
1126 | int | 1125 | int |
1127 | mpt3sas_config_get_pcie_device_pg2(struct MPT3SAS_ADAPTER *ioc, | 1126 | mpt3sas_config_get_pcie_device_pg2(struct MPT3SAS_ADAPTER *ioc, |
@@ -1159,7 +1158,7 @@ out: | |||
1159 | * @num_phys: pointer returned with the number of phys | 1158 | * @num_phys: pointer returned with the number of phys |
1160 | * Context: sleep. | 1159 | * Context: sleep. |
1161 | * | 1160 | * |
1162 | * Returns 0 for success, non-zero for failure. | 1161 | * Return: 0 for success, non-zero for failure. |
1163 | */ | 1162 | */ |
1164 | int | 1163 | int |
1165 | mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc, u8 *num_phys) | 1164 | mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc, u8 *num_phys) |
@@ -1209,7 +1208,7 @@ mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc, u8 *num_phys) | |||
1209 | * Calling function should call config_get_number_hba_phys prior to | 1208 | * Calling function should call config_get_number_hba_phys prior to |
1210 | * this function, so enough memory is allocated for config_page. | 1209 | * this function, so enough memory is allocated for config_page. |
1211 | * | 1210 | * |
1212 | * Returns 0 for success, non-zero for failure. | 1211 | * Return: 0 for success, non-zero for failure. |
1213 | */ | 1212 | */ |
1214 | int | 1213 | int |
1215 | mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, | 1214 | mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, |
@@ -1250,7 +1249,7 @@ mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, | |||
1250 | * Calling function should call config_get_number_hba_phys prior to | 1249 | * Calling function should call config_get_number_hba_phys prior to |
1251 | * this function, so enough memory is allocated for config_page. | 1250 | * this function, so enough memory is allocated for config_page. |
1252 | * | 1251 | * |
1253 | * Returns 0 for success, non-zero for failure. | 1252 | * Return: 0 for success, non-zero for failure. |
1254 | */ | 1253 | */ |
1255 | int | 1254 | int |
1256 | mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, | 1255 | mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, |
@@ -1291,7 +1290,7 @@ mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, | |||
1291 | * Calling function should call config_get_number_hba_phys prior to | 1290 | * Calling function should call config_get_number_hba_phys prior to |
1292 | * this function, so enough memory is allocated for config_page. | 1291 | * this function, so enough memory is allocated for config_page. |
1293 | * | 1292 | * |
1294 | * Returns 0 for success, non-zero for failure. | 1293 | * Return: 0 for success, non-zero for failure. |
1295 | */ | 1294 | */ |
1296 | int | 1295 | int |
1297 | mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, | 1296 | mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, |
@@ -1333,7 +1332,7 @@ mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, | |||
1333 | * @handle: expander handle | 1332 | * @handle: expander handle |
1334 | * Context: sleep. | 1333 | * Context: sleep. |
1335 | * | 1334 | * |
1336 | * Returns 0 for success, non-zero for failure. | 1335 | * Return: 0 for success, non-zero for failure. |
1337 | */ | 1336 | */ |
1338 | int | 1337 | int |
1339 | mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t | 1338 | mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t |
@@ -1373,7 +1372,7 @@ mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1373 | * @handle: expander handle | 1372 | * @handle: expander handle |
1374 | * Context: sleep. | 1373 | * Context: sleep. |
1375 | * | 1374 | * |
1376 | * Returns 0 for success, non-zero for failure. | 1375 | * Return: 0 for success, non-zero for failure. |
1377 | */ | 1376 | */ |
1378 | int | 1377 | int |
1379 | mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t | 1378 | mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t |
@@ -1416,7 +1415,7 @@ mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1416 | * @handle: expander handle | 1415 | * @handle: expander handle |
1417 | * Context: sleep. | 1416 | * Context: sleep. |
1418 | * | 1417 | * |
1419 | * Returns 0 for success, non-zero for failure. | 1418 | * Return: 0 for success, non-zero for failure. |
1420 | */ | 1419 | */ |
1421 | int | 1420 | int |
1422 | mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t | 1421 | mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t |
@@ -1455,7 +1454,7 @@ mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1455 | * @phy_number: phy number | 1454 | * @phy_number: phy number |
1456 | * Context: sleep. | 1455 | * Context: sleep. |
1457 | * | 1456 | * |
1458 | * Returns 0 for success, non-zero for failure. | 1457 | * Return: 0 for success, non-zero for failure. |
1459 | */ | 1458 | */ |
1460 | int | 1459 | int |
1461 | mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t | 1460 | mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t |
@@ -1495,7 +1494,7 @@ mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1495 | * @phy_number: phy number | 1494 | * @phy_number: phy number |
1496 | * Context: sleep. | 1495 | * Context: sleep. |
1497 | * | 1496 | * |
1498 | * Returns 0 for success, non-zero for failure. | 1497 | * Return: 0 for success, non-zero for failure. |
1499 | */ | 1498 | */ |
1500 | int | 1499 | int |
1501 | mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t | 1500 | mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t |
@@ -1536,7 +1535,7 @@ mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1536 | * @handle: volume handle | 1535 | * @handle: volume handle |
1537 | * Context: sleep. | 1536 | * Context: sleep. |
1538 | * | 1537 | * |
1539 | * Returns 0 for success, non-zero for failure. | 1538 | * Return: 0 for success, non-zero for failure. |
1540 | */ | 1539 | */ |
1541 | int | 1540 | int |
1542 | mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc, | 1541 | mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc, |
@@ -1574,7 +1573,7 @@ mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc, | |||
1574 | * @num_pds: returns pds count | 1573 | * @num_pds: returns pds count |
1575 | * Context: sleep. | 1574 | * Context: sleep. |
1576 | * | 1575 | * |
1577 | * Returns 0 for success, non-zero for failure. | 1576 | * Return: 0 for success, non-zero for failure. |
1578 | */ | 1577 | */ |
1579 | int | 1578 | int |
1580 | mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle, | 1579 | mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle, |
@@ -1626,7 +1625,7 @@ mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle, | |||
1626 | * @sz: size of buffer passed in config_page | 1625 | * @sz: size of buffer passed in config_page |
1627 | * Context: sleep. | 1626 | * Context: sleep. |
1628 | * | 1627 | * |
1629 | * Returns 0 for success, non-zero for failure. | 1628 | * Return: 0 for success, non-zero for failure. |
1630 | */ | 1629 | */ |
1631 | int | 1630 | int |
1632 | mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc, | 1631 | mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc, |
@@ -1665,7 +1664,7 @@ mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc, | |||
1665 | * @form_specific: specific to the form | 1664 | * @form_specific: specific to the form |
1666 | * Context: sleep. | 1665 | * Context: sleep. |
1667 | * | 1666 | * |
1668 | * Returns 0 for success, non-zero for failure. | 1667 | * Return: 0 for success, non-zero for failure. |
1669 | */ | 1668 | */ |
1670 | int | 1669 | int |
1671 | mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t | 1670 | mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t |
@@ -1704,7 +1703,7 @@ mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t | |||
1704 | * @volume_handle: volume handle | 1703 | * @volume_handle: volume handle |
1705 | * Context: sleep. | 1704 | * Context: sleep. |
1706 | * | 1705 | * |
1707 | * Returns 0 for success, non-zero for failure. | 1706 | * Return: 0 for success, non-zero for failure. |
1708 | */ | 1707 | */ |
1709 | int | 1708 | int |
1710 | mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle, | 1709 | mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle, |
@@ -1794,7 +1793,7 @@ mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle, | |||
1794 | * @wwid: volume wwid | 1793 | * @wwid: volume wwid |
1795 | * Context: sleep. | 1794 | * Context: sleep. |
1796 | * | 1795 | * |
1797 | * Returns 0 for success, non-zero for failure. | 1796 | * Return: 0 for success, non-zero for failure. |
1798 | */ | 1797 | */ |
1799 | int | 1798 | int |
1800 | mpt3sas_config_get_volume_wwid(struct MPT3SAS_ADAPTER *ioc, u16 volume_handle, | 1799 | mpt3sas_config_get_volume_wwid(struct MPT3SAS_ADAPTER *ioc, u16 volume_handle, |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index 3269ef43f07e..5e8c059ce2c9 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c | |||
@@ -253,8 +253,8 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid, | |||
253 | * | 253 | * |
254 | * The callback handler when using ioc->ctl_cb_idx. | 254 | * The callback handler when using ioc->ctl_cb_idx. |
255 | * | 255 | * |
256 | * Return 1 meaning mf should be freed from _base_interrupt | 256 | * Return: 1 meaning mf should be freed from _base_interrupt |
257 | * 0 means the mf is freed from this function. | 257 | * 0 means the mf is freed from this function. |
258 | */ | 258 | */ |
259 | u8 | 259 | u8 |
260 | mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | 260 | mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, |
@@ -317,7 +317,7 @@ mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | |||
317 | * The bitmask in ioc->event_type[] indicates which events should be | 317 | * The bitmask in ioc->event_type[] indicates which events should be |
318 | * be saved in the driver event_log. This bitmask is set by application. | 318 | * be saved in the driver event_log. This bitmask is set by application. |
319 | * | 319 | * |
320 | * Returns 1 when event should be captured, or zero means no match. | 320 | * Return: 1 when event should be captured, or zero means no match. |
321 | */ | 321 | */ |
322 | static int | 322 | static int |
323 | _ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event) | 323 | _ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event) |
@@ -339,8 +339,6 @@ _ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event) | |||
339 | * mpt3sas_ctl_add_to_event_log - add event | 339 | * mpt3sas_ctl_add_to_event_log - add event |
340 | * @ioc: per adapter object | 340 | * @ioc: per adapter object |
341 | * @mpi_reply: reply message frame | 341 | * @mpi_reply: reply message frame |
342 | * | ||
343 | * Return nothing. | ||
344 | */ | 342 | */ |
345 | void | 343 | void |
346 | mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc, | 344 | mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc, |
@@ -395,8 +393,8 @@ mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc, | |||
395 | * This function merely adds a new work task into ioc->firmware_event_thread. | 393 | * This function merely adds a new work task into ioc->firmware_event_thread. |
396 | * The tasks are worked from _firmware_event_work in user context. | 394 | * The tasks are worked from _firmware_event_work in user context. |
397 | * | 395 | * |
398 | * Return 1 meaning mf should be freed from _base_interrupt | 396 | * Return: 1 meaning mf should be freed from _base_interrupt |
399 | * 0 means the mf is freed from this function. | 397 | * 0 means the mf is freed from this function. |
400 | */ | 398 | */ |
401 | u8 | 399 | u8 |
402 | mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, | 400 | mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, |
@@ -412,12 +410,12 @@ mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, | |||
412 | 410 | ||
413 | /** | 411 | /** |
414 | * _ctl_verify_adapter - validates ioc_number passed from application | 412 | * _ctl_verify_adapter - validates ioc_number passed from application |
415 | * @ioc: per adapter object | 413 | * @ioc_number: ? |
416 | * @iocpp: The ioc pointer is returned in this. | 414 | * @iocpp: The ioc pointer is returned in this. |
417 | * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device & | 415 | * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device & |
418 | * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device. | 416 | * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device. |
419 | * | 417 | * |
420 | * Return (-1) means error, else ioc_number. | 418 | * Return: (-1) means error, else ioc_number. |
421 | */ | 419 | */ |
422 | static int | 420 | static int |
423 | _ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp, | 421 | _ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp, |
@@ -460,65 +458,74 @@ out: | |||
460 | /** | 458 | /** |
461 | * mpt3sas_ctl_reset_handler - reset callback handler (for ctl) | 459 | * mpt3sas_ctl_reset_handler - reset callback handler (for ctl) |
462 | * @ioc: per adapter object | 460 | * @ioc: per adapter object |
463 | * @reset_phase: phase | ||
464 | * | 461 | * |
465 | * The handler for doing any required cleanup or initialization. | 462 | * The handler for doing any required cleanup or initialization. |
466 | * | ||
467 | * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET, | ||
468 | * MPT3_IOC_DONE_RESET | ||
469 | */ | 463 | */ |
470 | void | 464 | void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc) |
471 | mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase) | ||
472 | { | 465 | { |
473 | int i; | 466 | int i; |
474 | u8 issue_reset; | 467 | u8 issue_reset; |
475 | 468 | ||
476 | switch (reset_phase) { | 469 | dtmprintk(ioc, pr_info(MPT3SAS_FMT |
477 | case MPT3_IOC_PRE_RESET: | ||
478 | dtmprintk(ioc, pr_info(MPT3SAS_FMT | ||
479 | "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__)); | 470 | "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__)); |
480 | for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { | 471 | for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { |
481 | if (!(ioc->diag_buffer_status[i] & | 472 | if (!(ioc->diag_buffer_status[i] & |
482 | MPT3_DIAG_BUFFER_IS_REGISTERED)) | 473 | MPT3_DIAG_BUFFER_IS_REGISTERED)) |
483 | continue; | 474 | continue; |
484 | if ((ioc->diag_buffer_status[i] & | 475 | if ((ioc->diag_buffer_status[i] & |
485 | MPT3_DIAG_BUFFER_IS_RELEASED)) | 476 | MPT3_DIAG_BUFFER_IS_RELEASED)) |
486 | continue; | 477 | continue; |
487 | mpt3sas_send_diag_release(ioc, i, &issue_reset); | 478 | mpt3sas_send_diag_release(ioc, i, &issue_reset); |
488 | } | 479 | } |
489 | break; | 480 | } |
490 | case MPT3_IOC_AFTER_RESET: | 481 | |
491 | dtmprintk(ioc, pr_info(MPT3SAS_FMT | 482 | /** |
483 | * mpt3sas_ctl_reset_handler - reset callback handler (for ctl) | ||
484 | * @ioc: per adapter object | ||
485 | * | ||
486 | * The handler for doing any required cleanup or initialization. | ||
487 | */ | ||
488 | void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc) | ||
489 | { | ||
490 | dtmprintk(ioc, pr_info(MPT3SAS_FMT | ||
492 | "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__)); | 491 | "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__)); |
493 | if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) { | 492 | if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) { |
494 | ioc->ctl_cmds.status |= MPT3_CMD_RESET; | 493 | ioc->ctl_cmds.status |= MPT3_CMD_RESET; |
495 | mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid); | 494 | mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid); |
496 | complete(&ioc->ctl_cmds.done); | 495 | complete(&ioc->ctl_cmds.done); |
497 | } | 496 | } |
498 | break; | 497 | } |
499 | case MPT3_IOC_DONE_RESET: | 498 | |
500 | dtmprintk(ioc, pr_info(MPT3SAS_FMT | 499 | /** |
500 | * mpt3sas_ctl_reset_handler - reset callback handler (for ctl) | ||
501 | * @ioc: per adapter object | ||
502 | * | ||
503 | * The handler for doing any required cleanup or initialization. | ||
504 | */ | ||
505 | void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc) | ||
506 | { | ||
507 | int i; | ||
508 | |||
509 | dtmprintk(ioc, pr_info(MPT3SAS_FMT | ||
501 | "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__)); | 510 | "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__)); |
502 | 511 | ||
503 | for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { | 512 | for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { |
504 | if (!(ioc->diag_buffer_status[i] & | 513 | if (!(ioc->diag_buffer_status[i] & |
505 | MPT3_DIAG_BUFFER_IS_REGISTERED)) | 514 | MPT3_DIAG_BUFFER_IS_REGISTERED)) |
506 | continue; | 515 | continue; |
507 | if ((ioc->diag_buffer_status[i] & | 516 | if ((ioc->diag_buffer_status[i] & |
508 | MPT3_DIAG_BUFFER_IS_RELEASED)) | 517 | MPT3_DIAG_BUFFER_IS_RELEASED)) |
509 | continue; | 518 | continue; |
510 | ioc->diag_buffer_status[i] |= | 519 | ioc->diag_buffer_status[i] |= |
511 | MPT3_DIAG_BUFFER_IS_DIAG_RESET; | 520 | MPT3_DIAG_BUFFER_IS_DIAG_RESET; |
512 | } | ||
513 | break; | ||
514 | } | 521 | } |
515 | } | 522 | } |
516 | 523 | ||
517 | /** | 524 | /** |
518 | * _ctl_fasync - | 525 | * _ctl_fasync - |
519 | * @fd - | 526 | * @fd: ? |
520 | * @filep - | 527 | * @filep: ? |
521 | * @mode - | 528 | * @mode: ? |
522 | * | 529 | * |
523 | * Called when application request fasyn callback handler. | 530 | * Called when application request fasyn callback handler. |
524 | */ | 531 | */ |
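
The hunk above removes the single mpt3sas_ctl_reset_handler(), which switched on a reset_phase argument, and exposes three dedicated hooks instead. The expected call order during a host reset is roughly the following; this is only a sketch, the real call sites live in mpt3sas_base.c and are not part of this hunk:

    /* Rough sketch of the expected reset sequencing; names of the three
     * hooks come from the hunk above, the wrapper itself is illustrative. */
    static void _reset_phase_sketch(struct MPT3SAS_ADAPTER *ioc)
    {
            mpt3sas_ctl_pre_reset_handler(ioc);    /* release registered diag buffers */
            /* ... the adapter is hard-reset here ... */
            mpt3sas_ctl_after_reset_handler(ioc);  /* terminate the pending ctl command */
            /* ... firmware is reinitialized ... */
            mpt3sas_ctl_reset_done_handler(ioc);   /* flag diag buffers as diag-reset */
    }
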
@@ -530,8 +537,8 @@ _ctl_fasync(int fd, struct file *filep, int mode) | |||
530 | 537 | ||
531 | /** | 538 | /** |
532 | * _ctl_poll - | 539 | * _ctl_poll - |
533 | * @file - | 540 | * @filep: ? |
534 | * @wait - | 541 | * @wait: ? |
535 | * | 542 | * |
536 | */ | 543 | */ |
537 | static __poll_t | 544 | static __poll_t |
@@ -556,10 +563,10 @@ _ctl_poll(struct file *filep, poll_table *wait) | |||
556 | /** | 563 | /** |
557 | * _ctl_set_task_mid - assign an active smid to tm request | 564 | * _ctl_set_task_mid - assign an active smid to tm request |
558 | * @ioc: per adapter object | 565 | * @ioc: per adapter object |
559 | * @karg - (struct mpt3_ioctl_command) | 566 | * @karg: (struct mpt3_ioctl_command) |
560 | * @tm_request - pointer to mf from user space | 567 | * @tm_request: pointer to mf from user space |
561 | * | 568 | * |
562 | * Returns 0 when an smid if found, else fail. | 569 | * Return: 0 when an smid if found, else fail. |
563 | * during failure, the reply frame is filled. | 570 | * during failure, the reply frame is filled. |
564 | */ | 571 | */ |
565 | static int | 572 | static int |
@@ -634,8 +641,8 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg, | |||
634 | /** | 641 | /** |
635 | * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode | 642 | * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode |
636 | * @ioc: per adapter object | 643 | * @ioc: per adapter object |
637 | * @karg - (struct mpt3_ioctl_command) | 644 | * @karg: (struct mpt3_ioctl_command) |
638 | * @mf - pointer to mf in user space | 645 | * @mf: pointer to mf in user space |
639 | */ | 646 | */ |
640 | static long | 647 | static long |
641 | _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, | 648 | _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, |
@@ -970,6 +977,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, | |||
970 | } | 977 | } |
971 | /* drop to default case for posting the request */ | 978 | /* drop to default case for posting the request */ |
972 | } | 979 | } |
980 | /* fall through */ | ||
973 | default: | 981 | default: |
974 | ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, | 982 | ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, |
975 | data_in_dma, data_in_sz); | 983 | data_in_dma, data_in_sz); |
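
The "/* fall through */" comment added before the default label documents that the missing break at the end of the preceding case is intentional, which keeps GCC's -Wimplicit-fallthrough and static checkers quiet. The annotation pattern in isolation, with purely illustrative names:

    /* Schematic example only: classify(1) returns 11, classify(0) returns 10. */
    static int classify(int code)
    {
            int rc = 0;

            switch (code) {
            case 1:
                    rc = 1;
                    /* fall through */
            default:
                    rc += 10;
                    break;
            }
            return rc;
    }
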
@@ -995,11 +1003,10 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, | |||
995 | ioc->ignore_loginfos = 0; | 1003 | ioc->ignore_loginfos = 0; |
996 | } | 1004 | } |
997 | if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { | 1005 | if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { |
998 | pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, | 1006 | issue_reset = |
999 | __func__); | 1007 | mpt3sas_base_check_cmd_timeout(ioc, |
1000 | _debug_dump_mf(mpi_request, karg.data_sge_offset); | 1008 | ioc->ctl_cmds.status, mpi_request, |
1001 | if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET)) | 1009 | karg.data_sge_offset); |
1002 | issue_reset = 1; | ||
1003 | goto issue_host_reset; | 1010 | goto issue_host_reset; |
1004 | } | 1011 | } |
1005 | 1012 | ||
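
Several hunks in this file replace the same open-coded timeout handling (print a timeout message, dump the message frame, request a host reset unless the command was already terminated by a reset) with a single call to mpt3sas_base_check_cmd_timeout(). Judging only from the code it replaces, the consolidated helper is expected to behave roughly like this sketch; the real implementation lives in mpt3sas_base.c and may differ in detail:

    u8
    mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
            u8 status, void *mpi_request, int sz)
    {
            u8 issue_reset = 0;

            pr_err(MPT3SAS_FMT "command timeout\n", ioc->name);
            _debug_dump_mf(mpi_request, sz);
            if (!(status & MPT3_CMD_RESET))
                    issue_reset = 1;        /* caller should escalate to host reset */
            return issue_reset;
    }
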
@@ -1114,7 +1121,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, | |||
1114 | /** | 1121 | /** |
1115 | * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode | 1122 | * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode |
1116 | * @ioc: per adapter object | 1123 | * @ioc: per adapter object |
1117 | * @arg - user space buffer containing ioctl content | 1124 | * @arg: user space buffer containing ioctl content |
1118 | */ | 1125 | */ |
1119 | static long | 1126 | static long |
1120 | _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg) | 1127 | _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg) |
@@ -1168,7 +1175,7 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg) | |||
1168 | /** | 1175 | /** |
1169 | * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode | 1176 | * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode |
1170 | * @ioc: per adapter object | 1177 | * @ioc: per adapter object |
1171 | * @arg - user space buffer containing ioctl content | 1178 | * @arg: user space buffer containing ioctl content |
1172 | */ | 1179 | */ |
1173 | static long | 1180 | static long |
1174 | _ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg) | 1181 | _ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg) |
@@ -1199,7 +1206,7 @@ _ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg) | |||
1199 | /** | 1206 | /** |
1200 | * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode | 1207 | * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode |
1201 | * @ioc: per adapter object | 1208 | * @ioc: per adapter object |
1202 | * @arg - user space buffer containing ioctl content | 1209 | * @arg: user space buffer containing ioctl content |
1203 | */ | 1210 | */ |
1204 | static long | 1211 | static long |
1205 | _ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg) | 1212 | _ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg) |
@@ -1237,7 +1244,7 @@ _ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg) | |||
1237 | /** | 1244 | /** |
1238 | * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode | 1245 | * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode |
1239 | * @ioc: per adapter object | 1246 | * @ioc: per adapter object |
1240 | * @arg - user space buffer containing ioctl content | 1247 | * @arg: user space buffer containing ioctl content |
1241 | */ | 1248 | */ |
1242 | static long | 1249 | static long |
1243 | _ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg) | 1250 | _ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg) |
@@ -1281,7 +1288,7 @@ _ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg) | |||
1281 | /** | 1288 | /** |
1282 | * _ctl_do_reset - main handler for MPT3HARDRESET opcode | 1289 | * _ctl_do_reset - main handler for MPT3HARDRESET opcode |
1283 | * @ioc: per adapter object | 1290 | * @ioc: per adapter object |
1284 | * @arg - user space buffer containing ioctl content | 1291 | * @arg: user space buffer containing ioctl content |
1285 | */ | 1292 | */ |
1286 | static long | 1293 | static long |
1287 | _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg) | 1294 | _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg) |
@@ -1419,7 +1426,7 @@ _ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc, | |||
1419 | /** | 1426 | /** |
1420 | * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode | 1427 | * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode |
1421 | * @ioc: per adapter object | 1428 | * @ioc: per adapter object |
1422 | * @arg - user space buffer containing ioctl content | 1429 | * @arg: user space buffer containing ioctl content |
1423 | */ | 1430 | */ |
1424 | static long | 1431 | static long |
1425 | _ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg) | 1432 | _ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg) |
@@ -1621,12 +1628,10 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc, | |||
1621 | MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); | 1628 | MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); |
1622 | 1629 | ||
1623 | if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { | 1630 | if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { |
1624 | pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, | 1631 | issue_reset = |
1625 | __func__); | 1632 | mpt3sas_base_check_cmd_timeout(ioc, |
1626 | _debug_dump_mf(mpi_request, | 1633 | ioc->ctl_cmds.status, mpi_request, |
1627 | sizeof(Mpi2DiagBufferPostRequest_t)/4); | 1634 | sizeof(Mpi2DiagBufferPostRequest_t)/4); |
1628 | if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET)) | ||
1629 | issue_reset = 1; | ||
1630 | goto issue_host_reset; | 1635 | goto issue_host_reset; |
1631 | } | 1636 | } |
1632 | 1637 | ||
@@ -1719,7 +1724,7 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register) | |||
1719 | /** | 1724 | /** |
1720 | * _ctl_diag_register - application register with driver | 1725 | * _ctl_diag_register - application register with driver |
1721 | * @ioc: per adapter object | 1726 | * @ioc: per adapter object |
1722 | * @arg - user space buffer containing ioctl content | 1727 | * @arg: user space buffer containing ioctl content |
1723 | * | 1728 | * |
1724 | * This will allow the driver to setup any required buffers that will be | 1729 | * This will allow the driver to setup any required buffers that will be |
1725 | * needed by firmware to communicate with the driver. | 1730 | * needed by firmware to communicate with the driver. |
@@ -1743,7 +1748,7 @@ _ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg) | |||
1743 | /** | 1748 | /** |
1744 | * _ctl_diag_unregister - application unregister with driver | 1749 | * _ctl_diag_unregister - application unregister with driver |
1745 | * @ioc: per adapter object | 1750 | * @ioc: per adapter object |
1746 | * @arg - user space buffer containing ioctl content | 1751 | * @arg: user space buffer containing ioctl content |
1747 | * | 1752 | * |
1748 | * This will allow the driver to cleanup any memory allocated for diag | 1753 | * This will allow the driver to cleanup any memory allocated for diag |
1749 | * messages and to free up any resources. | 1754 | * messages and to free up any resources. |
@@ -1816,7 +1821,7 @@ _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg) | |||
1816 | /** | 1821 | /** |
1817 | * _ctl_diag_query - query relevant info associated with diag buffers | 1822 | * _ctl_diag_query - query relevant info associated with diag buffers |
1818 | * @ioc: per adapter object | 1823 | * @ioc: per adapter object |
1819 | * @arg - user space buffer containing ioctl content | 1824 | * @arg: user space buffer containing ioctl content |
1820 | * | 1825 | * |
1821 | * The application will send only buffer_type and unique_id. Driver will | 1826 | * The application will send only buffer_type and unique_id. Driver will |
1822 | * inspect unique_id first, if valid, fill in all the info. If unique_id is | 1827 | * inspect unique_id first, if valid, fill in all the info. If unique_id is |
@@ -1903,8 +1908,8 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg) | |||
1903 | /** | 1908 | /** |
1904 | * mpt3sas_send_diag_release - Diag Release Message | 1909 | * mpt3sas_send_diag_release - Diag Release Message |
1905 | * @ioc: per adapter object | 1910 | * @ioc: per adapter object |
1906 | * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED | 1911 | * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED |
1907 | * @issue_reset - specifies whether host reset is required. | 1912 | * @issue_reset: specifies whether host reset is required. |
1908 | * | 1913 | * |
1909 | */ | 1914 | */ |
1910 | int | 1915 | int |
@@ -1968,12 +1973,9 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, | |||
1968 | MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); | 1973 | MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); |
1969 | 1974 | ||
1970 | if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { | 1975 | if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { |
1971 | pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, | 1976 | *issue_reset = mpt3sas_base_check_cmd_timeout(ioc, |
1972 | __func__); | 1977 | ioc->ctl_cmds.status, mpi_request, |
1973 | _debug_dump_mf(mpi_request, | 1978 | sizeof(Mpi2DiagReleaseRequest_t)/4); |
1974 | sizeof(Mpi2DiagReleaseRequest_t)/4); | ||
1975 | if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET)) | ||
1976 | *issue_reset = 1; | ||
1977 | rc = -EFAULT; | 1979 | rc = -EFAULT; |
1978 | goto out; | 1980 | goto out; |
1979 | } | 1981 | } |
@@ -2009,7 +2011,8 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, | |||
2009 | 2011 | ||
2010 | /** | 2012 | /** |
2011 | * _ctl_diag_release - request to send Diag Release Message to firmware | 2013 | * _ctl_diag_release - request to send Diag Release Message to firmware |
2012 | * @arg - user space buffer containing ioctl content | 2014 | * @ioc: ? |
2015 | * @arg: user space buffer containing ioctl content | ||
2013 | * | 2016 | * |
2014 | * This allows ownership of the specified buffer to returned to the driver, | 2017 | * This allows ownership of the specified buffer to returned to the driver, |
2015 | * allowing an application to read the buffer without fear that firmware is | 2018 | * allowing an application to read the buffer without fear that firmware is |
@@ -2098,7 +2101,7 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg) | |||
2098 | /** | 2101 | /** |
2099 | * _ctl_diag_read_buffer - request for copy of the diag buffer | 2102 | * _ctl_diag_read_buffer - request for copy of the diag buffer |
2100 | * @ioc: per adapter object | 2103 | * @ioc: per adapter object |
2101 | * @arg - user space buffer containing ioctl content | 2104 | * @arg: user space buffer containing ioctl content |
2102 | */ | 2105 | */ |
2103 | static long | 2106 | static long |
2104 | _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg) | 2107 | _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg) |
@@ -2235,12 +2238,10 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg) | |||
2235 | MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); | 2238 | MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); |
2236 | 2239 | ||
2237 | if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { | 2240 | if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { |
2238 | pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, | 2241 | issue_reset = |
2239 | __func__); | 2242 | mpt3sas_base_check_cmd_timeout(ioc, |
2240 | _debug_dump_mf(mpi_request, | 2243 | ioc->ctl_cmds.status, mpi_request, |
2241 | sizeof(Mpi2DiagBufferPostRequest_t)/4); | 2244 | sizeof(Mpi2DiagBufferPostRequest_t)/4); |
2242 | if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET)) | ||
2243 | issue_reset = 1; | ||
2244 | goto issue_host_reset; | 2245 | goto issue_host_reset; |
2245 | } | 2246 | } |
2246 | 2247 | ||
@@ -2284,8 +2285,8 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg) | |||
2284 | /** | 2285 | /** |
2285 | * _ctl_compat_mpt_command - convert 32bit pointers to 64bit. | 2286 | * _ctl_compat_mpt_command - convert 32bit pointers to 64bit. |
2286 | * @ioc: per adapter object | 2287 | * @ioc: per adapter object |
2287 | * @cmd - ioctl opcode | 2288 | * @cmd: ioctl opcode |
2288 | * @arg - (struct mpt3_ioctl_command32) | 2289 | * @arg: (struct mpt3_ioctl_command32) |
2289 | * | 2290 | * |
2290 | * MPT3COMMAND32 - Handle 32bit applications running on 64bit os. | 2291 | * MPT3COMMAND32 - Handle 32bit applications running on 64bit os. |
2291 | */ | 2292 | */ |
@@ -2328,10 +2329,10 @@ _ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd, | |||
2328 | 2329 | ||
2329 | /** | 2330 | /** |
2330 | * _ctl_ioctl_main - main ioctl entry point | 2331 | * _ctl_ioctl_main - main ioctl entry point |
2331 | * @file - (struct file) | 2332 | * @file: (struct file) |
2332 | * @cmd - ioctl opcode | 2333 | * @cmd: ioctl opcode |
2333 | * @arg - user space data buffer | 2334 | * @arg: user space data buffer |
2334 | * @compat - handles 32 bit applications in 64bit os | 2335 | * @compat: handles 32 bit applications in 64bit os |
2335 | * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device & | 2336 | * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device & |
2336 | * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device. | 2337 | * MPI25_VERSION | MPI26_VERSION for mpt3ctl ioctl device. |
2337 | */ | 2338 | */ |
@@ -2462,9 +2463,9 @@ out_unlock_pciaccess: | |||
2462 | 2463 | ||
2463 | /** | 2464 | /** |
2464 | * _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked) | 2465 | * _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked) |
2465 | * @file - (struct file) | 2466 | * @file: (struct file) |
2466 | * @cmd - ioctl opcode | 2467 | * @cmd: ioctl opcode |
2467 | * @arg - | 2468 | * @arg: ? |
2468 | */ | 2469 | */ |
2469 | static long | 2470 | static long |
2470 | _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | 2471 | _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
@@ -2482,9 +2483,9 @@ _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
2482 | 2483 | ||
2483 | /** | 2484 | /** |
2484 | * _ctl_mpt2_ioctl - mpt2ctl main ioctl entry point (unlocked) | 2485 | * _ctl_mpt2_ioctl - mpt2ctl main ioctl entry point (unlocked) |
2485 | * @file - (struct file) | 2486 | * @file: (struct file) |
2486 | * @cmd - ioctl opcode | 2487 | * @cmd: ioctl opcode |
2487 | * @arg - | 2488 | * @arg: ? |
2488 | */ | 2489 | */ |
2489 | static long | 2490 | static long |
2490 | _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | 2491 | _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
@@ -2500,9 +2501,9 @@ _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
2500 | #ifdef CONFIG_COMPAT | 2501 | #ifdef CONFIG_COMPAT |
2501 | /** | 2502 | /** |
2502 | *_ ctl_ioctl_compat - main ioctl entry point (compat) | 2503 | *_ ctl_ioctl_compat - main ioctl entry point (compat) |
2503 | * @file - | 2504 | * @file: ? |
2504 | * @cmd - | 2505 | * @cmd: ? |
2505 | * @arg - | 2506 | * @arg: ? |
2506 | * | 2507 | * |
2507 | * This routine handles 32 bit applications in 64bit os. | 2508 | * This routine handles 32 bit applications in 64bit os. |
2508 | */ | 2509 | */ |
@@ -2518,9 +2519,9 @@ _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) | |||
2518 | 2519 | ||
2519 | /** | 2520 | /** |
2520 | *_ ctl_mpt2_ioctl_compat - main ioctl entry point (compat) | 2521 | *_ ctl_mpt2_ioctl_compat - main ioctl entry point (compat) |
2521 | * @file - | 2522 | * @file: ? |
2522 | * @cmd - | 2523 | * @cmd: ? |
2523 | * @arg - | 2524 | * @arg: ? |
2524 | * | 2525 | * |
2525 | * This routine handles 32 bit applications in 64bit os. | 2526 | * This routine handles 32 bit applications in 64bit os. |
2526 | */ | 2527 | */ |
@@ -2537,8 +2538,9 @@ _ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) | |||
2537 | /* scsi host attributes */ | 2538 | /* scsi host attributes */ |
2538 | /** | 2539 | /** |
2539 | * _ctl_version_fw_show - firmware version | 2540 | * _ctl_version_fw_show - firmware version |
2540 | * @cdev - pointer to embedded class device | 2541 | * @cdev: pointer to embedded class device |
2541 | * @buf - the buffer returned | 2542 | * @attr: ? |
2543 | * @buf: the buffer returned | ||
2542 | * | 2544 | * |
2543 | * A sysfs 'read-only' shost attribute. | 2545 | * A sysfs 'read-only' shost attribute. |
2544 | */ | 2546 | */ |
@@ -2559,8 +2561,9 @@ static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL); | |||
2559 | 2561 | ||
2560 | /** | 2562 | /** |
2561 | * _ctl_version_bios_show - bios version | 2563 | * _ctl_version_bios_show - bios version |
2562 | * @cdev - pointer to embedded class device | 2564 | * @cdev: pointer to embedded class device |
2563 | * @buf - the buffer returned | 2565 | * @attr: ? |
2566 | * @buf: the buffer returned | ||
2564 | * | 2567 | * |
2565 | * A sysfs 'read-only' shost attribute. | 2568 | * A sysfs 'read-only' shost attribute. |
2566 | */ | 2569 | */ |
@@ -2583,8 +2586,9 @@ static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL); | |||
2583 | 2586 | ||
2584 | /** | 2587 | /** |
2585 | * _ctl_version_mpi_show - MPI (message passing interface) version | 2588 | * _ctl_version_mpi_show - MPI (message passing interface) version |
2586 | * @cdev - pointer to embedded class device | 2589 | * @cdev: pointer to embedded class device |
2587 | * @buf - the buffer returned | 2590 | * @attr: ? |
2591 | * @buf: the buffer returned | ||
2588 | * | 2592 | * |
2589 | * A sysfs 'read-only' shost attribute. | 2593 | * A sysfs 'read-only' shost attribute. |
2590 | */ | 2594 | */ |
@@ -2602,8 +2606,9 @@ static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL); | |||
2602 | 2606 | ||
2603 | /** | 2607 | /** |
2604 | * _ctl_version_product_show - product name | 2608 | * _ctl_version_product_show - product name |
2605 | * @cdev - pointer to embedded class device | 2609 | * @cdev: pointer to embedded class device |
2606 | * @buf - the buffer returned | 2610 | * @attr: ? |
2611 | * @buf: the buffer returned | ||
2607 | * | 2612 | * |
2608 | * A sysfs 'read-only' shost attribute. | 2613 | * A sysfs 'read-only' shost attribute. |
2609 | */ | 2614 | */ |
@@ -2620,8 +2625,9 @@ static DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, NULL); | |||
2620 | 2625 | ||
2621 | /** | 2626 | /** |
2622 | * _ctl_version_nvdata_persistent_show - ndvata persistent version | 2627 | * _ctl_version_nvdata_persistent_show - ndvata persistent version |
2623 | * @cdev - pointer to embedded class device | 2628 | * @cdev: pointer to embedded class device |
2624 | * @buf - the buffer returned | 2629 | * @attr: ? |
2630 | * @buf: the buffer returned | ||
2625 | * | 2631 | * |
2626 | * A sysfs 'read-only' shost attribute. | 2632 | * A sysfs 'read-only' shost attribute. |
2627 | */ | 2633 | */ |
@@ -2640,8 +2646,9 @@ static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO, | |||
2640 | 2646 | ||
2641 | /** | 2647 | /** |
2642 | * _ctl_version_nvdata_default_show - nvdata default version | 2648 | * _ctl_version_nvdata_default_show - nvdata default version |
2643 | * @cdev - pointer to embedded class device | 2649 | * @cdev: pointer to embedded class device |
2644 | * @buf - the buffer returned | 2650 | * @attr: ? |
2651 | * @buf: the buffer returned | ||
2645 | * | 2652 | * |
2646 | * A sysfs 'read-only' shost attribute. | 2653 | * A sysfs 'read-only' shost attribute. |
2647 | */ | 2654 | */ |
@@ -2660,8 +2667,9 @@ static DEVICE_ATTR(version_nvdata_default, S_IRUGO, | |||
2660 | 2667 | ||
2661 | /** | 2668 | /** |
2662 | * _ctl_board_name_show - board name | 2669 | * _ctl_board_name_show - board name |
2663 | * @cdev - pointer to embedded class device | 2670 | * @cdev: pointer to embedded class device |
2664 | * @buf - the buffer returned | 2671 | * @attr: ? |
2672 | * @buf: the buffer returned | ||
2665 | * | 2673 | * |
2666 | * A sysfs 'read-only' shost attribute. | 2674 | * A sysfs 'read-only' shost attribute. |
2667 | */ | 2675 | */ |
@@ -2678,8 +2686,9 @@ static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL); | |||
2678 | 2686 | ||
2679 | /** | 2687 | /** |
2680 | * _ctl_board_assembly_show - board assembly name | 2688 | * _ctl_board_assembly_show - board assembly name |
2681 | * @cdev - pointer to embedded class device | 2689 | * @cdev: pointer to embedded class device |
2682 | * @buf - the buffer returned | 2690 | * @attr: ? |
2691 | * @buf: the buffer returned | ||
2683 | * | 2692 | * |
2684 | * A sysfs 'read-only' shost attribute. | 2693 | * A sysfs 'read-only' shost attribute. |
2685 | */ | 2694 | */ |
@@ -2696,8 +2705,9 @@ static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL); | |||
2696 | 2705 | ||
2697 | /** | 2706 | /** |
2698 | * _ctl_board_tracer_show - board tracer number | 2707 | * _ctl_board_tracer_show - board tracer number |
2699 | * @cdev - pointer to embedded class device | 2708 | * @cdev: pointer to embedded class device |
2700 | * @buf - the buffer returned | 2709 | * @attr: ? |
2710 | * @buf: the buffer returned | ||
2701 | * | 2711 | * |
2702 | * A sysfs 'read-only' shost attribute. | 2712 | * A sysfs 'read-only' shost attribute. |
2703 | */ | 2713 | */ |
@@ -2714,8 +2724,9 @@ static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL); | |||
2714 | 2724 | ||
2715 | /** | 2725 | /** |
2716 | * _ctl_io_delay_show - io missing delay | 2726 | * _ctl_io_delay_show - io missing delay |
2717 | * @cdev - pointer to embedded class device | 2727 | * @cdev: pointer to embedded class device |
2718 | * @buf - the buffer returned | 2728 | * @attr: ? |
2729 | * @buf: the buffer returned | ||
2719 | * | 2730 | * |
2720 | * This is for firmware implemention for deboucing device | 2731 | * This is for firmware implemention for deboucing device |
2721 | * removal events. | 2732 | * removal events. |
@@ -2735,8 +2746,9 @@ static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL); | |||
2735 | 2746 | ||
2736 | /** | 2747 | /** |
2737 | * _ctl_device_delay_show - device missing delay | 2748 | * _ctl_device_delay_show - device missing delay |
2738 | * @cdev - pointer to embedded class device | 2749 | * @cdev: pointer to embedded class device |
2739 | * @buf - the buffer returned | 2750 | * @attr: ? |
2751 | * @buf: the buffer returned | ||
2740 | * | 2752 | * |
2741 | * This is for firmware implemention for deboucing device | 2753 | * This is for firmware implemention for deboucing device |
2742 | * removal events. | 2754 | * removal events. |
@@ -2756,8 +2768,9 @@ static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL); | |||
2756 | 2768 | ||
2757 | /** | 2769 | /** |
2758 | * _ctl_fw_queue_depth_show - global credits | 2770 | * _ctl_fw_queue_depth_show - global credits |
2759 | * @cdev - pointer to embedded class device | 2771 | * @cdev: pointer to embedded class device |
2760 | * @buf - the buffer returned | 2772 | * @attr: ? |
2773 | * @buf: the buffer returned | ||
2761 | * | 2774 | * |
2762 | * This is firmware queue depth limit | 2775 | * This is firmware queue depth limit |
2763 | * | 2776 | * |
@@ -2776,8 +2789,9 @@ static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL); | |||
2776 | 2789 | ||
2777 | /** | 2790 | /** |
2778 | * _ctl_sas_address_show - sas address | 2791 | * _ctl_sas_address_show - sas address |
2779 | * @cdev - pointer to embedded class device | 2792 | * @cdev: pointer to embedded class device |
2780 | * @buf - the buffer returned | 2793 | * @attr: ? |
2794 | * @buf: the buffer returned | ||
2781 | * | 2795 | * |
2782 | * This is the controller sas address | 2796 | * This is the controller sas address |
2783 | * | 2797 | * |
@@ -2799,8 +2813,9 @@ static DEVICE_ATTR(host_sas_address, S_IRUGO, | |||
2799 | 2813 | ||
2800 | /** | 2814 | /** |
2801 | * _ctl_logging_level_show - logging level | 2815 | * _ctl_logging_level_show - logging level |
2802 | * @cdev - pointer to embedded class device | 2816 | * @cdev: pointer to embedded class device |
2803 | * @buf - the buffer returned | 2817 | * @attr: ? |
2818 | * @buf: the buffer returned | ||
2804 | * | 2819 | * |
2805 | * A sysfs 'read/write' shost attribute. | 2820 | * A sysfs 'read/write' shost attribute. |
2806 | */ | 2821 | */ |
@@ -2834,8 +2849,9 @@ static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show, | |||
2834 | 2849 | ||
2835 | /** | 2850 | /** |
2836 | * _ctl_fwfault_debug_show - show/store fwfault_debug | 2851 | * _ctl_fwfault_debug_show - show/store fwfault_debug |
2837 | * @cdev - pointer to embedded class device | 2852 | * @cdev: pointer to embedded class device |
2838 | * @buf - the buffer returned | 2853 | * @attr: ? |
2854 | * @buf: the buffer returned | ||
2839 | * | 2855 | * |
2840 | * mpt3sas_fwfault_debug is command line option | 2856 | * mpt3sas_fwfault_debug is command line option |
2841 | * A sysfs 'read/write' shost attribute. | 2857 | * A sysfs 'read/write' shost attribute. |
@@ -2870,8 +2886,9 @@ static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR, | |||
2870 | 2886 | ||
2871 | /** | 2887 | /** |
2872 | * _ctl_ioc_reset_count_show - ioc reset count | 2888 | * _ctl_ioc_reset_count_show - ioc reset count |
2873 | * @cdev - pointer to embedded class device | 2889 | * @cdev: pointer to embedded class device |
2874 | * @buf - the buffer returned | 2890 | * @attr: ? |
2891 | * @buf: the buffer returned | ||
2875 | * | 2892 | * |
2876 | * This is firmware queue depth limit | 2893 | * This is firmware queue depth limit |
2877 | * | 2894 | * |
@@ -2890,8 +2907,9 @@ static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL); | |||
2890 | 2907 | ||
2891 | /** | 2908 | /** |
2892 | * _ctl_ioc_reply_queue_count_show - number of reply queues | 2909 | * _ctl_ioc_reply_queue_count_show - number of reply queues |
2893 | * @cdev - pointer to embedded class device | 2910 | * @cdev: pointer to embedded class device |
2894 | * @buf - the buffer returned | 2911 | * @attr: ? |
2912 | * @buf: the buffer returned | ||
2895 | * | 2913 | * |
2896 | * This is number of reply queues | 2914 | * This is number of reply queues |
2897 | * | 2915 | * |
@@ -2918,8 +2936,9 @@ static DEVICE_ATTR(reply_queue_count, S_IRUGO, _ctl_ioc_reply_queue_count_show, | |||
2918 | 2936 | ||
2919 | /** | 2937 | /** |
2920 | * _ctl_BRM_status_show - Backup Rail Monitor Status | 2938 | * _ctl_BRM_status_show - Backup Rail Monitor Status |
2921 | * @cdev - pointer to embedded class device | 2939 | * @cdev: pointer to embedded class device |
2922 | * @buf - the buffer returned | 2940 | * @attr: ? |
2941 | * @buf: the buffer returned | ||
2923 | * | 2942 | * |
2924 | * This is number of reply queues | 2943 | * This is number of reply queues |
2925 | * | 2944 | * |
@@ -3004,8 +3023,9 @@ struct DIAG_BUFFER_START { | |||
3004 | 3023 | ||
3005 | /** | 3024 | /** |
3006 | * _ctl_host_trace_buffer_size_show - host buffer size (trace only) | 3025 | * _ctl_host_trace_buffer_size_show - host buffer size (trace only) |
3007 | * @cdev - pointer to embedded class device | 3026 | * @cdev: pointer to embedded class device |
3008 | * @buf - the buffer returned | 3027 | * @attr: ? |
3028 | * @buf: the buffer returned | ||
3009 | * | 3029 | * |
3010 | * A sysfs 'read-only' shost attribute. | 3030 | * A sysfs 'read-only' shost attribute. |
3011 | */ | 3031 | */ |
@@ -3049,8 +3069,9 @@ static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO, | |||
3049 | 3069 | ||
3050 | /** | 3070 | /** |
3051 | * _ctl_host_trace_buffer_show - firmware ring buffer (trace only) | 3071 | * _ctl_host_trace_buffer_show - firmware ring buffer (trace only) |
3052 | * @cdev - pointer to embedded class device | 3072 | * @cdev: pointer to embedded class device |
3053 | * @buf - the buffer returned | 3073 | * @attr: ? |
3074 | * @buf: the buffer returned | ||
3054 | * | 3075 | * |
3055 | * A sysfs 'read/write' shost attribute. | 3076 | * A sysfs 'read/write' shost attribute. |
3056 | * | 3077 | * |
@@ -3114,8 +3135,9 @@ static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR, | |||
3114 | 3135 | ||
3115 | /** | 3136 | /** |
3116 | * _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only) | 3137 | * _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only) |
3117 | * @cdev - pointer to embedded class device | 3138 | * @cdev: pointer to embedded class device |
3118 | * @buf - the buffer returned | 3139 | * @attr: ? |
3140 | * @buf: the buffer returned | ||
3119 | * | 3141 | * |
3120 | * A sysfs 'read/write' shost attribute. | 3142 | * A sysfs 'read/write' shost attribute. |
3121 | * | 3143 | * |
@@ -3200,8 +3222,9 @@ static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR, | |||
3200 | 3222 | ||
3201 | /** | 3223 | /** |
3202 | * _ctl_diag_trigger_master_show - show the diag_trigger_master attribute | 3224 | * _ctl_diag_trigger_master_show - show the diag_trigger_master attribute |
3203 | * @cdev - pointer to embedded class device | 3225 | * @cdev: pointer to embedded class device |
3204 | * @buf - the buffer returned | 3226 | * @attr: ? |
3227 | * @buf: the buffer returned | ||
3205 | * | 3228 | * |
3206 | * A sysfs 'read/write' shost attribute. | 3229 | * A sysfs 'read/write' shost attribute. |
3207 | */ | 3230 | */ |
@@ -3224,8 +3247,10 @@ _ctl_diag_trigger_master_show(struct device *cdev, | |||
3224 | 3247 | ||
3225 | /** | 3248 | /** |
3226 | * _ctl_diag_trigger_master_store - store the diag_trigger_master attribute | 3249 | * _ctl_diag_trigger_master_store - store the diag_trigger_master attribute |
3227 | * @cdev - pointer to embedded class device | 3250 | * @cdev: pointer to embedded class device |
3228 | * @buf - the buffer returned | 3251 | * @attr: ? |
3252 | * @buf: the buffer returned | ||
3253 | * @count: ? | ||
3229 | * | 3254 | * |
3230 | * A sysfs 'read/write' shost attribute. | 3255 | * A sysfs 'read/write' shost attribute. |
3231 | */ | 3256 | */ |
@@ -3255,8 +3280,9 @@ static DEVICE_ATTR(diag_trigger_master, S_IRUGO | S_IWUSR, | |||
3255 | 3280 | ||
3256 | /** | 3281 | /** |
3257 | * _ctl_diag_trigger_event_show - show the diag_trigger_event attribute | 3282 | * _ctl_diag_trigger_event_show - show the diag_trigger_event attribute |
3258 | * @cdev - pointer to embedded class device | 3283 | * @cdev: pointer to embedded class device |
3259 | * @buf - the buffer returned | 3284 | * @attr: ? |
3285 | * @buf: the buffer returned | ||
3260 | * | 3286 | * |
3261 | * A sysfs 'read/write' shost attribute. | 3287 | * A sysfs 'read/write' shost attribute. |
3262 | */ | 3288 | */ |
@@ -3278,8 +3304,10 @@ _ctl_diag_trigger_event_show(struct device *cdev, | |||
3278 | 3304 | ||
3279 | /** | 3305 | /** |
3280 | * _ctl_diag_trigger_event_store - store the diag_trigger_event attribute | 3306 | * _ctl_diag_trigger_event_store - store the diag_trigger_event attribute |
3281 | * @cdev - pointer to embedded class device | 3307 | * @cdev: pointer to embedded class device |
3282 | * @buf - the buffer returned | 3308 | * @attr: ? |
3309 | * @buf: the buffer returned | ||
3310 | * @count: ? | ||
3283 | * | 3311 | * |
3284 | * A sysfs 'read/write' shost attribute. | 3312 | * A sysfs 'read/write' shost attribute. |
3285 | */ | 3313 | */ |
@@ -3309,8 +3337,9 @@ static DEVICE_ATTR(diag_trigger_event, S_IRUGO | S_IWUSR, | |||
3309 | 3337 | ||
3310 | /** | 3338 | /** |
3311 | * _ctl_diag_trigger_scsi_show - show the diag_trigger_scsi attribute | 3339 | * _ctl_diag_trigger_scsi_show - show the diag_trigger_scsi attribute |
3312 | * @cdev - pointer to embedded class device | 3340 | * @cdev: pointer to embedded class device |
3313 | * @buf - the buffer returned | 3341 | * @attr: ? |
3342 | * @buf: the buffer returned | ||
3314 | * | 3343 | * |
3315 | * A sysfs 'read/write' shost attribute. | 3344 | * A sysfs 'read/write' shost attribute. |
3316 | */ | 3345 | */ |
@@ -3332,8 +3361,10 @@ _ctl_diag_trigger_scsi_show(struct device *cdev, | |||
3332 | 3361 | ||
3333 | /** | 3362 | /** |
3334 | * _ctl_diag_trigger_scsi_store - store the diag_trigger_scsi attribute | 3363 | * _ctl_diag_trigger_scsi_store - store the diag_trigger_scsi attribute |
3335 | * @cdev - pointer to embedded class device | 3364 | * @cdev: pointer to embedded class device |
3336 | * @buf - the buffer returned | 3365 | * @attr: ? |
3366 | * @buf: the buffer returned | ||
3367 | * @count: ? | ||
3337 | * | 3368 | * |
3338 | * A sysfs 'read/write' shost attribute. | 3369 | * A sysfs 'read/write' shost attribute. |
3339 | */ | 3370 | */ |
@@ -3362,8 +3393,9 @@ static DEVICE_ATTR(diag_trigger_scsi, S_IRUGO | S_IWUSR, | |||
3362 | 3393 | ||
3363 | /** | 3394 | /** |
3364 | * _ctl_diag_trigger_scsi_show - show the diag_trigger_mpi attribute | 3395 | * _ctl_diag_trigger_scsi_show - show the diag_trigger_mpi attribute |
3365 | * @cdev - pointer to embedded class device | 3396 | * @cdev: pointer to embedded class device |
3366 | * @buf - the buffer returned | 3397 | * @attr: ? |
3398 | * @buf: the buffer returned | ||
3367 | * | 3399 | * |
3368 | * A sysfs 'read/write' shost attribute. | 3400 | * A sysfs 'read/write' shost attribute. |
3369 | */ | 3401 | */ |
@@ -3385,8 +3417,10 @@ _ctl_diag_trigger_mpi_show(struct device *cdev, | |||
3385 | 3417 | ||
3386 | /** | 3418 | /** |
3387 | * _ctl_diag_trigger_mpi_store - store the diag_trigger_mpi attribute | 3419 | * _ctl_diag_trigger_mpi_store - store the diag_trigger_mpi attribute |
3388 | * @cdev - pointer to embedded class device | 3420 | * @cdev: pointer to embedded class device |
3389 | * @buf - the buffer returned | 3421 | * @attr: ? |
3422 | * @buf: the buffer returned | ||
3423 | * @count: ? | ||
3390 | * | 3424 | * |
3391 | * A sysfs 'read/write' shost attribute. | 3425 | * A sysfs 'read/write' shost attribute. |
3392 | */ | 3426 | */ |
@@ -3450,8 +3484,9 @@ struct device_attribute *mpt3sas_host_attrs[] = { | |||
3450 | 3484 | ||
3451 | /** | 3485 | /** |
3452 | * _ctl_device_sas_address_show - sas address | 3486 | * _ctl_device_sas_address_show - sas address |
3453 | * @cdev - pointer to embedded class device | 3487 | * @dev: pointer to embedded class device |
3454 | * @buf - the buffer returned | 3488 | * @attr: ? |
3489 | * @buf: the buffer returned | ||
3455 | * | 3490 | * |
3456 | * This is the sas address for the target | 3491 | * This is the sas address for the target |
3457 | * | 3492 | * |
@@ -3471,8 +3506,9 @@ static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL); | |||
3471 | 3506 | ||
3472 | /** | 3507 | /** |
3473 | * _ctl_device_handle_show - device handle | 3508 | * _ctl_device_handle_show - device handle |
3474 | * @cdev - pointer to embedded class device | 3509 | * @dev: pointer to embedded class device |
3475 | * @buf - the buffer returned | 3510 | * @attr: ? |
3511 | * @buf: the buffer returned | ||
3476 | * | 3512 | * |
3477 | * This is the firmware assigned device handle | 3513 | * This is the firmware assigned device handle |
3478 | * | 3514 | * |
@@ -3492,8 +3528,9 @@ static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL); | |||
3492 | 3528 | ||
3493 | /** | 3529 | /** |
3494 | * _ctl_device_ncq_io_prio_show - send prioritized io commands to device | 3530 | * _ctl_device_ncq_io_prio_show - send prioritized io commands to device |
3495 | * @dev - pointer to embedded device | 3531 | * @dev: pointer to embedded device |
3496 | * @buf - the buffer returned | 3532 | * @attr: ? |
3533 | * @buf: the buffer returned | ||
3497 | * | 3534 | * |
3498 | * A sysfs 'read/write' sdev attribute, only works with SATA | 3535 | * A sysfs 'read/write' sdev attribute, only works with SATA |
3499 | */ | 3536 | */ |
@@ -3573,7 +3610,7 @@ static struct miscdevice gen2_ctl_dev = { | |||
3573 | 3610 | ||
3574 | /** | 3611 | /** |
3575 | * mpt3sas_ctl_init - main entry point for ctl. | 3612 | * mpt3sas_ctl_init - main entry point for ctl. |
3576 | * | 3613 | * @hbas_to_enumerate: ? |
3577 | */ | 3614 | */ |
3578 | void | 3615 | void |
3579 | mpt3sas_ctl_init(ushort hbas_to_enumerate) | 3616 | mpt3sas_ctl_init(ushort hbas_to_enumerate) |
@@ -3601,7 +3638,7 @@ mpt3sas_ctl_init(ushort hbas_to_enumerate) | |||
3601 | 3638 | ||
3602 | /** | 3639 | /** |
3603 | * mpt3sas_ctl_exit - exit point for ctl | 3640 | * mpt3sas_ctl_exit - exit point for ctl |
3604 | * | 3641 | * @hbas_to_enumerate: ? |
3605 | */ | 3642 | */ |
3606 | void | 3643 | void |
3607 | mpt3sas_ctl_exit(ushort hbas_to_enumerate) | 3644 | mpt3sas_ctl_exit(ushort hbas_to_enumerate) |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index dd738ae5c75b..53133cfd420f 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c | |||
@@ -284,6 +284,8 @@ struct _scsi_io_transfer { | |||
284 | 284 | ||
285 | /** | 285 | /** |
286 | * _scsih_set_debug_level - global setting of ioc->logging_level. | 286 | * _scsih_set_debug_level - global setting of ioc->logging_level. |
287 | * @val: ? | ||
288 | * @kp: ? | ||
287 | * | 289 | * |
288 | * Note: The logging levels are defined in mpt3sas_debug.h. | 290 | * Note: The logging levels are defined in mpt3sas_debug.h. |
289 | */ | 291 | */ |
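
The new @val/@kp lines document the standard module_param_call() setter signature. A setter of this shape typically forwards to param_set_int() and then propagates the new value to every registered adapter; the sketch below is an assumption about the surrounding code rather than part of this hunk, and mpt3sas_ioc_list and logging_level are the driver symbols it presumably touches:

    /* Assumed shape of the logging_level setter; details may differ. */
    static int
    _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
    {
            struct MPT3SAS_ADAPTER *ioc;
            int ret = param_set_int(val, kp);       /* parse and store the integer */

            if (ret)
                    return ret;

            pr_info("setting logging_level(0x%08x)\n", logging_level);
            /* apply the new level to each adapter on the global list */
            list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
                    ioc->logging_level = logging_level;
            return 0;
    }
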
@@ -311,7 +313,7 @@ module_param_call(logging_level, _scsih_set_debug_level, param_get_int, | |||
311 | * @sas_address: sas address | 313 | * @sas_address: sas address |
312 | * @boot_device: boot device object from bios page 2 | 314 | * @boot_device: boot device object from bios page 2 |
313 | * | 315 | * |
314 | * Returns 1 when there's a match, 0 means no match. | 316 | * Return: 1 when there's a match, 0 means no match. |
315 | */ | 317 | */ |
316 | static inline int | 318 | static inline int |
317 | _scsih_srch_boot_sas_address(u64 sas_address, | 319 | _scsih_srch_boot_sas_address(u64 sas_address, |
@@ -325,7 +327,7 @@ _scsih_srch_boot_sas_address(u64 sas_address, | |||
325 | * @device_name: device name specified in INDENTIFY fram | 327 | * @device_name: device name specified in INDENTIFY fram |
326 | * @boot_device: boot device object from bios page 2 | 328 | * @boot_device: boot device object from bios page 2 |
327 | * | 329 | * |
328 | * Returns 1 when there's a match, 0 means no match. | 330 | * Return: 1 when there's a match, 0 means no match. |
329 | */ | 331 | */ |
330 | static inline int | 332 | static inline int |
331 | _scsih_srch_boot_device_name(u64 device_name, | 333 | _scsih_srch_boot_device_name(u64 device_name, |
@@ -340,7 +342,7 @@ _scsih_srch_boot_device_name(u64 device_name, | |||
340 | * @slot_number: slot number | 342 | * @slot_number: slot number |
341 | * @boot_device: boot device object from bios page 2 | 343 | * @boot_device: boot device object from bios page 2 |
342 | * | 344 | * |
343 | * Returns 1 when there's a match, 0 means no match. | 345 | * Return: 1 when there's a match, 0 means no match. |
344 | */ | 346 | */ |
345 | static inline int | 347 | static inline int |
346 | _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number, | 348 | _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number, |
@@ -356,11 +358,11 @@ _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number, | |||
356 | * @sas_address: sas address | 358 | * @sas_address: sas address |
357 | * @device_name: device name specified in INDENTIFY fram | 359 | * @device_name: device name specified in INDENTIFY fram |
358 | * @enclosure_logical_id: enclosure logical id | 360 | * @enclosure_logical_id: enclosure logical id |
359 | * @slot_number: slot number | 361 | * @slot: slot number |
360 | * @form: specifies boot device form | 362 | * @form: specifies boot device form |
361 | * @boot_device: boot device object from bios page 2 | 363 | * @boot_device: boot device object from bios page 2 |
362 | * | 364 | * |
363 | * Returns 1 when there's a match, 0 means no match. | 365 | * Return: 1 when there's a match, 0 means no match. |
364 | */ | 366 | */ |
365 | static int | 367 | static int |
366 | _scsih_is_boot_device(u64 sas_address, u64 device_name, | 368 | _scsih_is_boot_device(u64 sas_address, u64 device_name, |
@@ -398,10 +400,11 @@ _scsih_is_boot_device(u64 sas_address, u64 device_name, | |||
398 | 400 | ||
399 | /** | 401 | /** |
400 | * _scsih_get_sas_address - set the sas_address for given device handle | 402 | * _scsih_get_sas_address - set the sas_address for given device handle |
403 | * @ioc: ? | ||
401 | * @handle: device handle | 404 | * @handle: device handle |
402 | * @sas_address: sas address | 405 | * @sas_address: sas address |
403 | * | 406 | * |
404 | * Returns 0 success, non-zero when failure | 407 | * Return: 0 success, non-zero when failure |
405 | */ | 408 | */ |
406 | static int | 409 | static int |
407 | _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle, | 410 | _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle, |
@@ -710,8 +713,6 @@ mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) | |||
710 | * @sas_device: per sas device object | 713 | * @sas_device: per sas device object |
711 | * @sdev: scsi device struct | 714 | * @sdev: scsi device struct |
712 | * @starget: scsi target struct | 715 | * @starget: scsi target struct |
713 | * | ||
714 | * Returns nothing. | ||
715 | */ | 716 | */ |
716 | static void | 717 | static void |
717 | _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc, | 718 | _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc, |
@@ -806,8 +807,6 @@ _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc, | |||
806 | * _scsih_device_remove_by_handle - removing device object by handle | 807 | * _scsih_device_remove_by_handle - removing device object by handle |
807 | * @ioc: per adapter object | 808 | * @ioc: per adapter object |
808 | * @handle: device handle | 809 | * @handle: device handle |
809 | * | ||
810 | * Return nothing. | ||
811 | */ | 810 | */ |
812 | static void | 811 | static void |
813 | _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) | 812 | _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
@@ -835,8 +834,6 @@ _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) | |||
835 | * mpt3sas_device_remove_by_sas_address - removing device object by sas address | 834 | * mpt3sas_device_remove_by_sas_address - removing device object by sas address |
836 | * @ioc: per adapter object | 835 | * @ioc: per adapter object |
837 | * @sas_address: device sas_address | 836 | * @sas_address: device sas_address |
838 | * | ||
839 | * Return nothing. | ||
840 | */ | 837 | */ |
841 | void | 838 | void |
842 | mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc, | 839 | mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc, |
@@ -1109,8 +1106,6 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc, | |||
1109 | * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle | 1106 | * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle |
1110 | * @ioc: per adapter object | 1107 | * @ioc: per adapter object |
1111 | * @handle: device handle | 1108 | * @handle: device handle |
1112 | * | ||
1113 | * Return nothing. | ||
1114 | */ | 1109 | */ |
1115 | static void | 1110 | static void |
1116 | _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) | 1111 | _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
@@ -1273,7 +1268,7 @@ mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) | |||
1273 | /** | 1268 | /** |
1274 | * _scsih_raid_device_find_by_wwid - raid device search | 1269 | * _scsih_raid_device_find_by_wwid - raid device search |
1275 | * @ioc: per adapter object | 1270 | * @ioc: per adapter object |
1276 | * @handle: sas device handle (assigned by firmware) | 1271 | * @wwid: ? |
1277 | * Context: Calling function should acquire ioc->raid_device_lock | 1272 | * Context: Calling function should acquire ioc->raid_device_lock |
1278 | * | 1273 | * |
1279 | * This searches for raid_device based on wwid, then return raid_device | 1274 | * This searches for raid_device based on wwid, then return raid_device |
@@ -1418,8 +1413,6 @@ mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc, | |||
1418 | * Context: This function will acquire ioc->sas_node_lock. | 1413 | * Context: This function will acquire ioc->sas_node_lock. |
1419 | * | 1414 | * |
1420 | * Adding new object to the ioc->sas_expander_list. | 1415 | * Adding new object to the ioc->sas_expander_list. |
1421 | * | ||
1422 | * Return nothing. | ||
1423 | */ | 1416 | */ |
1424 | static void | 1417 | static void |
1425 | _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc, | 1418 | _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc, |
@@ -1437,7 +1430,7 @@ _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc, | |||
1437 | * @device_info: bitfield providing information about the device. | 1430 | * @device_info: bitfield providing information about the device. |
1438 | * Context: none | 1431 | * Context: none |
1439 | * | 1432 | * |
1440 | * Returns 1 if end device. | 1433 | * Return: 1 if end device. |
1441 | */ | 1434 | */ |
1442 | static int | 1435 | static int |
1443 | _scsih_is_end_device(u32 device_info) | 1436 | _scsih_is_end_device(u32 device_info) |
@@ -1456,7 +1449,7 @@ _scsih_is_end_device(u32 device_info) | |||
1456 | * @device_info: bitfield providing information about the device. | 1449 | * @device_info: bitfield providing information about the device. |
1457 | * Context: none | 1450 | * Context: none |
1458 | * | 1451 | * |
1459 | * Returns 1 if nvme device. | 1452 | * Return: 1 if nvme device. |
1460 | */ | 1453 | */ |
1461 | static int | 1454 | static int |
1462 | _scsih_is_nvme_device(u32 device_info) | 1455 | _scsih_is_nvme_device(u32 device_info) |
@@ -1473,7 +1466,7 @@ _scsih_is_nvme_device(u32 device_info) | |||
1473 | * @ioc: per adapter object | 1466 | * @ioc: per adapter object |
1474 | * @smid: system request message index | 1467 | * @smid: system request message index |
1475 | * | 1468 | * |
1476 | * Returns the smid stored scmd pointer. | 1469 | * Return: the smid stored scmd pointer. |
1477 | * Then will dereference the stored scmd pointer. | 1470 | * Then will dereference the stored scmd pointer. |
1478 | */ | 1471 | */ |
1479 | struct scsi_cmnd * | 1472 | struct scsi_cmnd * |
@@ -1489,7 +1482,7 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid) | |||
1489 | scmd = scsi_host_find_tag(ioc->shost, unique_tag); | 1482 | scmd = scsi_host_find_tag(ioc->shost, unique_tag); |
1490 | if (scmd) { | 1483 | if (scmd) { |
1491 | st = scsi_cmd_priv(scmd); | 1484 | st = scsi_cmd_priv(scmd); |
1492 | if (st->cb_idx == 0xFF) | 1485 | if (st->cb_idx == 0xFF || st->smid == 0) |
1493 | scmd = NULL; | 1486 | scmd = NULL; |
1494 | } | 1487 | } |
1495 | } | 1488 | } |
@@ -1501,7 +1494,7 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid) | |||
1501 | * @sdev: scsi device struct | 1494 | * @sdev: scsi device struct |
1502 | * @qdepth: requested queue depth | 1495 | * @qdepth: requested queue depth |
1503 | * | 1496 | * |
1504 | * Returns queue depth. | 1497 | * Return: queue depth. |
1505 | */ | 1498 | */ |
1506 | static int | 1499 | static int |
1507 | scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) | 1500 | scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) |
@@ -1549,7 +1542,7 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) | |||
1549 | * scsih_target_alloc - target add routine | 1542 | * scsih_target_alloc - target add routine |
1550 | * @starget: scsi target struct | 1543 | * @starget: scsi target struct |
1551 | * | 1544 | * |
1552 | * Returns 0 if ok. Any other return is assumed to be an error and | 1545 | * Return: 0 if ok. Any other return is assumed to be an error and |
1553 | * the device is ignored. | 1546 | * the device is ignored. |
1554 | */ | 1547 | */ |
1555 | static int | 1548 | static int |
@@ -1640,8 +1633,6 @@ scsih_target_alloc(struct scsi_target *starget) | |||
1640 | /** | 1633 | /** |
1641 | * scsih_target_destroy - target destroy routine | 1634 | * scsih_target_destroy - target destroy routine |
1642 | * @starget: scsi target struct | 1635 | * @starget: scsi target struct |
1643 | * | ||
1644 | * Returns nothing. | ||
1645 | */ | 1636 | */ |
1646 | static void | 1637 | static void |
1647 | scsih_target_destroy(struct scsi_target *starget) | 1638 | scsih_target_destroy(struct scsi_target *starget) |
@@ -1653,7 +1644,6 @@ scsih_target_destroy(struct scsi_target *starget) | |||
1653 | struct _raid_device *raid_device; | 1644 | struct _raid_device *raid_device; |
1654 | struct _pcie_device *pcie_device; | 1645 | struct _pcie_device *pcie_device; |
1655 | unsigned long flags; | 1646 | unsigned long flags; |
1656 | struct sas_rphy *rphy; | ||
1657 | 1647 | ||
1658 | sas_target_priv_data = starget->hostdata; | 1648 | sas_target_priv_data = starget->hostdata; |
1659 | if (!sas_target_priv_data) | 1649 | if (!sas_target_priv_data) |
@@ -1693,7 +1683,6 @@ scsih_target_destroy(struct scsi_target *starget) | |||
1693 | } | 1683 | } |
1694 | 1684 | ||
1695 | spin_lock_irqsave(&ioc->sas_device_lock, flags); | 1685 | spin_lock_irqsave(&ioc->sas_device_lock, flags); |
1696 | rphy = dev_to_rphy(starget->dev.parent); | ||
1697 | sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data); | 1686 | sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data); |
1698 | if (sas_device && (sas_device->starget == starget) && | 1687 | if (sas_device && (sas_device->starget == starget) && |
1699 | (sas_device->id == starget->id) && | 1688 | (sas_device->id == starget->id) && |
@@ -1720,7 +1709,7 @@ scsih_target_destroy(struct scsi_target *starget) | |||
1720 | * scsih_slave_alloc - device add routine | 1709 | * scsih_slave_alloc - device add routine |
1721 | * @sdev: scsi device struct | 1710 | * @sdev: scsi device struct |
1722 | * | 1711 | * |
1723 | * Returns 0 if ok. Any other return is assumed to be an error and | 1712 | * Return: 0 if ok. Any other return is assumed to be an error and |
1724 | * the device is ignored. | 1713 | * the device is ignored. |
1725 | */ | 1714 | */ |
1726 | static int | 1715 | static int |
@@ -1800,8 +1789,6 @@ scsih_slave_alloc(struct scsi_device *sdev) | |||
1800 | /** | 1789 | /** |
1801 | * scsih_slave_destroy - device destroy routine | 1790 | * scsih_slave_destroy - device destroy routine |
1802 | * @sdev: scsi device struct | 1791 | * @sdev: scsi device struct |
1803 | * | ||
1804 | * Returns nothing. | ||
1805 | */ | 1792 | */ |
1806 | static void | 1793 | static void |
1807 | scsih_slave_destroy(struct scsi_device *sdev) | 1794 | scsih_slave_destroy(struct scsi_device *sdev) |
@@ -1907,7 +1894,7 @@ _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc, | |||
1907 | 1894 | ||
1908 | /** | 1895 | /** |
1909 | * scsih_is_raid - return boolean indicating device is raid volume | 1896 | * scsih_is_raid - return boolean indicating device is raid volume |
1910 | * @dev the device struct object | 1897 | * @dev: the device struct object |
1911 | */ | 1898 | */ |
1912 | static int | 1899 | static int |
1913 | scsih_is_raid(struct device *dev) | 1900 | scsih_is_raid(struct device *dev) |
@@ -1930,7 +1917,7 @@ scsih_is_nvme(struct device *dev) | |||
1930 | 1917 | ||
1931 | /** | 1918 | /** |
1932 | * scsih_get_resync - get raid volume resync percent complete | 1919 | * scsih_get_resync - get raid volume resync percent complete |
1933 | * @dev the device struct object | 1920 | * @dev: the device struct object |
1934 | */ | 1921 | */ |
1935 | static void | 1922 | static void |
1936 | scsih_get_resync(struct device *dev) | 1923 | scsih_get_resync(struct device *dev) |
@@ -1991,7 +1978,7 @@ scsih_get_resync(struct device *dev) | |||
1991 | 1978 | ||
1992 | /** | 1979 | /** |
1993 | * scsih_get_state - get raid volume level | 1980 | * scsih_get_state - get raid volume level |
1994 | * @dev the device struct object | 1981 | * @dev: the device struct object |
1995 | */ | 1982 | */ |
1996 | static void | 1983 | static void |
1997 | scsih_get_state(struct device *dev) | 1984 | scsih_get_state(struct device *dev) |
@@ -2057,6 +2044,7 @@ scsih_get_state(struct device *dev) | |||
2057 | 2044 | ||
2058 | /** | 2045 | /** |
2059 | * _scsih_set_level - set raid level | 2046 | * _scsih_set_level - set raid level |
2047 | * @ioc: ? | ||
2060 | * @sdev: scsi device struct | 2048 | * @sdev: scsi device struct |
2061 | * @volume_type: volume type | 2049 | * @volume_type: volume type |
2062 | */ | 2050 | */ |
@@ -2098,9 +2086,9 @@ _scsih_set_level(struct MPT3SAS_ADAPTER *ioc, | |||
2098 | /** | 2086 | /** |
2099 | * _scsih_get_volume_capabilities - volume capabilities | 2087 | * _scsih_get_volume_capabilities - volume capabilities |
2100 | * @ioc: per adapter object | 2088 | * @ioc: per adapter object |
2101 | * @sas_device: the raid_device object | 2089 | * @raid_device: the raid_device object |
2102 | * | 2090 | * |
2103 | * Returns 0 for success, else 1 | 2091 | * Return: 0 for success, else 1 |
2104 | */ | 2092 | */ |
2105 | static int | 2093 | static int |
2106 | _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc, | 2094 | _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc, |
@@ -2192,7 +2180,7 @@ _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev) | |||
2192 | * scsih_slave_configure - device configure routine. | 2180 | * scsih_slave_configure - device configure routine. |
2193 | * @sdev: scsi device struct | 2181 | * @sdev: scsi device struct |
2194 | * | 2182 | * |
2195 | * Returns 0 if ok. Any other return is assumed to be an error and | 2183 | * Return: 0 if ok. Any other return is assumed to be an error and |
2196 | * the device is ignored. | 2184 | * the device is ignored. |
2197 | */ | 2185 | */ |
2198 | static int | 2186 | static int |
@@ -2256,7 +2244,7 @@ scsih_slave_configure(struct scsi_device *sdev) | |||
2256 | ds = "SSP"; | 2244 | ds = "SSP"; |
2257 | } else { | 2245 | } else { |
2258 | qdepth = MPT3SAS_SATA_QUEUE_DEPTH; | 2246 | qdepth = MPT3SAS_SATA_QUEUE_DEPTH; |
2259 | if (raid_device->device_info & | 2247 | if (raid_device->device_info & |
2260 | MPI2_SAS_DEVICE_INFO_SATA_DEVICE) | 2248 | MPI2_SAS_DEVICE_INFO_SATA_DEVICE) |
2261 | ds = "SATA"; | 2249 | ds = "SATA"; |
2262 | else | 2250 | else |
@@ -2365,13 +2353,14 @@ scsih_slave_configure(struct scsi_device *sdev) | |||
2365 | "connector name( %s)\n", ds, | 2353 | "connector name( %s)\n", ds, |
2366 | pcie_device->enclosure_level, | 2354 | pcie_device->enclosure_level, |
2367 | pcie_device->connector_name); | 2355 | pcie_device->connector_name); |
2368 | pcie_device_put(pcie_device); | ||
2369 | spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); | ||
2370 | scsih_change_queue_depth(sdev, qdepth); | ||
2371 | 2356 | ||
2372 | if (pcie_device->nvme_mdts) | 2357 | if (pcie_device->nvme_mdts) |
2373 | blk_queue_max_hw_sectors(sdev->request_queue, | 2358 | blk_queue_max_hw_sectors(sdev->request_queue, |
2374 | pcie_device->nvme_mdts/512); | 2359 | pcie_device->nvme_mdts/512); |
2360 | |||
2361 | pcie_device_put(pcie_device); | ||
2362 | spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); | ||
2363 | scsih_change_queue_depth(sdev, qdepth); | ||
2375 | /* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be | 2364 | /* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be |
2376 | ** merged and can eliminate holes created during merging | 2365 | ** merged and can eliminate holes created during merging |
2377 | ** operation. | 2366 | ** operation. |
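The reordering above is a lifetime fix in scsih_slave_configure(): pcie_device->nvme_mdts is now read, and blk_queue_max_hw_sectors() applied, before pcie_device_put() drops the reference and the lock is released, rather than after. The intended ordering, sketched with an assumed getter name and error handling elided:

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device = __mpt3sas_get_pdev_from_target(ioc, target);	/* assumed helper */
	if (pcie_device && pcie_device->nvme_mdts)
		blk_queue_max_hw_sectors(sdev->request_queue,
					 pcie_device->nvme_mdts / 512);
	if (pcie_device)
		pcie_device_put(pcie_device);	/* drop the ref only after the last use */
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	scsih_change_queue_depth(sdev, qdepth);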
@@ -2450,8 +2439,6 @@ scsih_slave_configure(struct scsi_device *sdev) | |||
2450 | * params[0] number of heads (max 255) | 2439 | * params[0] number of heads (max 255) |
2451 | * params[1] number of sectors (max 63) | 2440 | * params[1] number of sectors (max 63) |
2452 | * params[2] number of cylinders | 2441 | * params[2] number of cylinders |
2453 | * | ||
2454 | * Return nothing. | ||
2455 | */ | 2442 | */ |
2456 | static int | 2443 | static int |
2457 | scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, | 2444 | scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, |
@@ -2493,8 +2480,6 @@ scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, | |||
2493 | * _scsih_response_code - translation of device response code | 2480 | * _scsih_response_code - translation of device response code |
2494 | * @ioc: per adapter object | 2481 | * @ioc: per adapter object |
2495 | * @response_code: response code returned by the device | 2482 | * @response_code: response code returned by the device |
2496 | * | ||
2497 | * Return nothing. | ||
2498 | */ | 2483 | */ |
2499 | static void | 2484 | static void |
2500 | _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code) | 2485 | _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code) |
@@ -2544,8 +2529,8 @@ _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code) | |||
2544 | * | 2529 | * |
2545 | * The callback handler when using scsih_issue_tm. | 2530 | * The callback handler when using scsih_issue_tm. |
2546 | * | 2531 | * |
2547 | * Return 1 meaning mf should be freed from _base_interrupt | 2532 | * Return: 1 meaning mf should be freed from _base_interrupt |
2548 | * 0 means the mf is freed from this function. | 2533 | * 0 means the mf is freed from this function. |
2549 | */ | 2534 | */ |
2550 | static u8 | 2535 | static u8 |
2551 | _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) | 2536 | _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) |
@@ -2640,7 +2625,7 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) | |||
2640 | * The callback index is set inside `ioc->tm_cb_idx`. | 2625 | * The callback index is set inside `ioc->tm_cb_idx`. |
2641 | * The caller is responsible to check for outstanding commands. | 2626 | * The caller is responsible to check for outstanding commands. |
2642 | * | 2627 | * |
2643 | * Return SUCCESS or FAILED. | 2628 | * Return: SUCCESS or FAILED. |
2644 | */ | 2629 | */ |
2645 | int | 2630 | int |
2646 | mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun, | 2631 | mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun, |
@@ -2708,11 +2693,9 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun, | |||
2708 | mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task); | 2693 | mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task); |
2709 | wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ); | 2694 | wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ); |
2710 | if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) { | 2695 | if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) { |
2711 | pr_err(MPT3SAS_FMT "%s: timeout\n", | 2696 | if (mpt3sas_base_check_cmd_timeout(ioc, |
2712 | ioc->name, __func__); | 2697 | ioc->tm_cmds.status, mpi_request, |
2713 | _debug_dump_mf(mpi_request, | 2698 | sizeof(Mpi2SCSITaskManagementRequest_t)/4)) { |
2714 | sizeof(Mpi2SCSITaskManagementRequest_t)/4); | ||
2715 | if (!(ioc->tm_cmds.status & MPT3_CMD_RESET)) { | ||
2716 | rc = mpt3sas_base_hard_reset_handler(ioc, | 2699 | rc = mpt3sas_base_hard_reset_handler(ioc, |
2717 | FORCE_BIG_HAMMER); | 2700 | FORCE_BIG_HAMMER); |
2718 | rc = (!rc) ? SUCCESS : FAILED; | 2701 | rc = (!rc) ? SUCCESS : FAILED; |
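The open-coded timeout handling here (error print, _debug_dump_mf(), then a test of MPT3_CMD_RESET) is folded into mpt3sas_base_check_cmd_timeout(), whose nonzero return tells the caller a hard reset is still required. What the helper presumably does, reconstructed only from the removed lines; the real implementation lives in mpt3sas_base.c and may differ:

	u8 mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
			u8 status, void *mpi_request, int sz)
	{
		/* report the timed-out frame */
		pr_err(MPT3SAS_FMT "command timeout\n", ioc->name);
		_debug_dump_mf(mpi_request, sz);
		/* only ask for a hard reset if an IOC reset has not
		 * already terminated the command */
		return (status & MPT3_CMD_RESET) ? 0 : 1;
	}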
@@ -2846,7 +2829,7 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd) | |||
2846 | * scsih_abort - eh threads main abort routine | 2829 | * scsih_abort - eh threads main abort routine |
2847 | * @scmd: pointer to scsi command object | 2830 | * @scmd: pointer to scsi command object |
2848 | * | 2831 | * |
2849 | * Returns SUCCESS if command aborted else FAILED | 2832 | * Return: SUCCESS if command aborted else FAILED |
2850 | */ | 2833 | */ |
2851 | static int | 2834 | static int |
2852 | scsih_abort(struct scsi_cmnd *scmd) | 2835 | scsih_abort(struct scsi_cmnd *scmd) |
@@ -2914,7 +2897,7 @@ scsih_abort(struct scsi_cmnd *scmd) | |||
2914 | * scsih_dev_reset - eh threads main device reset routine | 2897 | * scsih_dev_reset - eh threads main device reset routine |
2915 | * @scmd: pointer to scsi command object | 2898 | * @scmd: pointer to scsi command object |
2916 | * | 2899 | * |
2917 | * Returns SUCCESS if command aborted else FAILED | 2900 | * Return: SUCCESS if command aborted else FAILED |
2918 | */ | 2901 | */ |
2919 | static int | 2902 | static int |
2920 | scsih_dev_reset(struct scsi_cmnd *scmd) | 2903 | scsih_dev_reset(struct scsi_cmnd *scmd) |
@@ -2992,7 +2975,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd) | |||
2992 | * scsih_target_reset - eh threads main target reset routine | 2975 | * scsih_target_reset - eh threads main target reset routine |
2993 | * @scmd: pointer to scsi command object | 2976 | * @scmd: pointer to scsi command object |
2994 | * | 2977 | * |
2995 | * Returns SUCCESS if command aborted else FAILED | 2978 | * Return: SUCCESS if command aborted else FAILED |
2996 | */ | 2979 | */ |
2997 | static int | 2980 | static int |
2998 | scsih_target_reset(struct scsi_cmnd *scmd) | 2981 | scsih_target_reset(struct scsi_cmnd *scmd) |
@@ -3069,7 +3052,7 @@ scsih_target_reset(struct scsi_cmnd *scmd) | |||
3069 | * scsih_host_reset - eh threads main host reset routine | 3052 | * scsih_host_reset - eh threads main host reset routine |
3070 | * @scmd: pointer to scsi command object | 3053 | * @scmd: pointer to scsi command object |
3071 | * | 3054 | * |
3072 | * Returns SUCCESS if command aborted else FAILED | 3055 | * Return: SUCCESS if command aborted else FAILED |
3073 | */ | 3056 | */ |
3074 | static int | 3057 | static int |
3075 | scsih_host_reset(struct scsi_cmnd *scmd) | 3058 | scsih_host_reset(struct scsi_cmnd *scmd) |
@@ -3105,8 +3088,6 @@ out: | |||
3105 | * | 3088 | * |
3106 | * This adds the firmware event object into link list, then queues it up to | 3089 | * This adds the firmware event object into link list, then queues it up to |
3107 | * be processed from user context. | 3090 | * be processed from user context. |
3108 | * | ||
3109 | * Return nothing. | ||
3110 | */ | 3091 | */ |
3111 | static void | 3092 | static void |
3112 | _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) | 3093 | _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) |
@@ -3133,8 +3114,6 @@ _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) | |||
3133 | * Context: This function will acquire ioc->fw_event_lock. | 3114 | * Context: This function will acquire ioc->fw_event_lock. |
3134 | * | 3115 | * |
3135 | * If the fw_event is on the fw_event_list, remove it and do a put. | 3116 | * If the fw_event is on the fw_event_list, remove it and do a put. |
3136 | * | ||
3137 | * Return nothing. | ||
3138 | */ | 3117 | */ |
3139 | static void | 3118 | static void |
3140 | _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work | 3119 | _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work |
@@ -3155,8 +3134,6 @@ _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work | |||
3155 | * mpt3sas_send_trigger_data_event - send event for processing trigger data | 3134 | * mpt3sas_send_trigger_data_event - send event for processing trigger data |
3156 | * @ioc: per adapter object | 3135 | * @ioc: per adapter object |
3157 | * @event_data: trigger event data | 3136 | * @event_data: trigger event data |
3158 | * | ||
3159 | * Return nothing. | ||
3160 | */ | 3137 | */ |
3161 | void | 3138 | void |
3162 | mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc, | 3139 | mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc, |
@@ -3181,8 +3158,6 @@ mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc, | |||
3181 | /** | 3158 | /** |
3182 | * _scsih_error_recovery_delete_devices - remove devices not responding | 3159 | * _scsih_error_recovery_delete_devices - remove devices not responding |
3183 | * @ioc: per adapter object | 3160 | * @ioc: per adapter object |
3184 | * | ||
3185 | * Return nothing. | ||
3186 | */ | 3161 | */ |
3187 | static void | 3162 | static void |
3188 | _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc) | 3163 | _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc) |
@@ -3203,8 +3178,6 @@ _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc) | |||
3203 | /** | 3178 | /** |
3204 | * mpt3sas_port_enable_complete - port enable completed (fake event) | 3179 | * mpt3sas_port_enable_complete - port enable completed (fake event) |
3205 | * @ioc: per adapter object | 3180 | * @ioc: per adapter object |
3206 | * | ||
3207 | * Return nothing. | ||
3208 | */ | 3181 | */ |
3209 | void | 3182 | void |
3210 | mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc) | 3183 | mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc) |
@@ -3242,8 +3215,6 @@ static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc) | |||
3242 | * | 3215 | * |
3243 | * Walk the firmware event queue, either killing timers, or waiting | 3216 | * Walk the firmware event queue, either killing timers, or waiting |
3244 | * for outstanding events to complete | 3217 | * for outstanding events to complete |
3245 | * | ||
3246 | * Return nothing. | ||
3247 | */ | 3218 | */ |
3248 | static void | 3219 | static void |
3249 | _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc) | 3220 | _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc) |
@@ -3369,7 +3340,7 @@ _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc) | |||
3369 | /** | 3340 | /** |
3370 | * _scsih_ublock_io_device - prepare device to be deleted | 3341 | * _scsih_ublock_io_device - prepare device to be deleted |
3371 | * @ioc: per adapter object | 3342 | * @ioc: per adapter object |
3372 | * @sas_addr: sas address | 3343 | * @sas_address: sas address |
3373 | * | 3344 | * |
3374 | * unblock then put device in offline state | 3345 | * unblock then put device in offline state |
3375 | */ | 3346 | */ |
@@ -3395,7 +3366,6 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address) | |||
3395 | /** | 3366 | /** |
3396 | * _scsih_block_io_all_device - set the device state to SDEV_BLOCK | 3367 | * _scsih_block_io_all_device - set the device state to SDEV_BLOCK |
3397 | * @ioc: per adapter object | 3368 | * @ioc: per adapter object |
3398 | * @handle: device handle | ||
3399 | * | 3369 | * |
3400 | * During device pull we need to appropriately set the sdev state. | 3370 | * During device pull we need to appropriately set the sdev state. |
3401 | */ | 3371 | */ |
@@ -3730,8 +3700,8 @@ out: | |||
3730 | * handshake protocol with controller firmware. | 3700 | * handshake protocol with controller firmware. |
3731 | * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE) | 3701 | * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE) |
3732 | * | 3702 | * |
3733 | * Return 1 meaning mf should be freed from _base_interrupt | 3703 | * Return: 1 meaning mf should be freed from _base_interrupt |
3734 | * 0 means the mf is freed from this function. | 3704 | * 0 means the mf is freed from this function. |
3735 | */ | 3705 | */ |
3736 | static u8 | 3706 | static u8 |
3737 | _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | 3707 | _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, |
@@ -3822,8 +3792,8 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | |||
3822 | * This code is part of the code to initiate the device removal | 3792 | * This code is part of the code to initiate the device removal |
3823 | * handshake protocol with controller firmware. | 3793 | * handshake protocol with controller firmware. |
3824 | * | 3794 | * |
3825 | * Return 1 meaning mf should be freed from _base_interrupt | 3795 | * Return: 1 meaning mf should be freed from _base_interrupt |
3826 | * 0 means the mf is freed from this function. | 3796 | * 0 means the mf is freed from this function. |
3827 | */ | 3797 | */ |
3828 | static u8 | 3798 | static u8 |
3829 | _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, | 3799 | _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, |
@@ -3909,8 +3879,8 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) | |||
3909 | * @reply: reply message frame(lower 32bit addr) | 3879 | * @reply: reply message frame(lower 32bit addr) |
3910 | * Context: interrupt time. | 3880 | * Context: interrupt time. |
3911 | * | 3881 | * |
3912 | * Return 1 meaning mf should be freed from _base_interrupt | 3882 | * Return: 1 meaning mf should be freed from _base_interrupt |
3913 | * 0 means the mf is freed from this function. | 3883 | * 0 means the mf is freed from this function. |
3914 | */ | 3884 | */ |
3915 | static u8 | 3885 | static u8 |
3916 | _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, | 3886 | _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, |
@@ -4004,19 +3974,19 @@ _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event, | |||
4004 | static void | 3974 | static void |
4005 | _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc, | 3975 | _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc, |
4006 | u16 smid, u16 handle) | 3976 | u16 smid, u16 handle) |
4007 | { | 3977 | { |
4008 | Mpi2SasIoUnitControlRequest_t *mpi_request; | 3978 | Mpi2SasIoUnitControlRequest_t *mpi_request; |
4009 | u32 ioc_state; | 3979 | u32 ioc_state; |
4010 | int i = smid - ioc->internal_smid; | 3980 | int i = smid - ioc->internal_smid; |
4011 | unsigned long flags; | 3981 | unsigned long flags; |
4012 | 3982 | ||
4013 | if (ioc->remove_host) { | 3983 | if (ioc->remove_host) { |
4014 | dewtprintk(ioc, pr_info(MPT3SAS_FMT | 3984 | dewtprintk(ioc, pr_info(MPT3SAS_FMT |
4015 | "%s: host has been removed\n", | 3985 | "%s: host has been removed\n", |
4016 | __func__, ioc->name)); | 3986 | __func__, ioc->name)); |
4017 | return; | 3987 | return; |
4018 | } else if (ioc->pci_error_recovery) { | 3988 | } else if (ioc->pci_error_recovery) { |
4019 | dewtprintk(ioc, pr_info(MPT3SAS_FMT | 3989 | dewtprintk(ioc, pr_info(MPT3SAS_FMT |
4020 | "%s: host in pci error recovery\n", | 3990 | "%s: host in pci error recovery\n", |
4021 | __func__, ioc->name)); | 3991 | __func__, ioc->name)); |
4022 | return; | 3992 | return; |
@@ -4059,8 +4029,8 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc, | |||
4059 | * This will check delayed internal messages list, and process the | 4029 | * This will check delayed internal messages list, and process the |
4060 | * next request. | 4030 | * next request. |
4061 | * | 4031 | * |
4062 | * Return 1 meaning mf should be freed from _base_interrupt | 4032 | * Return: 1 meaning mf should be freed from _base_interrupt |
4063 | * 0 means the mf is freed from this function. | 4033 | * 0 means the mf is freed from this function. |
4064 | */ | 4034 | */ |
4065 | u8 | 4035 | u8 |
4066 | mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid) | 4036 | mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid) |
@@ -4098,8 +4068,8 @@ mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid) | |||
4098 | * This will check delayed target reset list, and feed the | 4068 | * This will check delayed target reset list, and feed the |
4099 | * next request. | 4069 | * next request. |
4100 | * | 4070 | * |
4101 | * Return 1 meaning mf should be freed from _base_interrupt | 4071 | * Return: 1 meaning mf should be freed from _base_interrupt |
4102 | * 0 means the mf is freed from this function. | 4072 | * 0 means the mf is freed from this function. |
4103 | */ | 4073 | */ |
4104 | static u8 | 4074 | static u8 |
4105 | _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid) | 4075 | _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid) |
@@ -4139,8 +4109,6 @@ _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid) | |||
4139 | * This handles the case where driver receives multiple expander | 4109 | * This handles the case where driver receives multiple expander |
4140 | * add and delete events in a single shot. When there is a delete event | 4110 | * add and delete events in a single shot. When there is a delete event |
4141 | * the routine will void any pending add events waiting in the event queue. | 4111 | * the routine will void any pending add events waiting in the event queue. |
4142 | * | ||
4143 | * Return nothing. | ||
4144 | */ | 4112 | */ |
4145 | static void | 4113 | static void |
4146 | _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc, | 4114 | _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc, |
@@ -4222,8 +4190,6 @@ _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc, | |||
4222 | * or device add and delete events in a single shot. When there | 4190 | * or device add and delete events in a single shot. When there |
4223 | * is a delete event the routine will void any pending add | 4191 | * is a delete event the routine will void any pending add |
4224 | * events waiting in the event queue. | 4192 | * events waiting in the event queue. |
4225 | * | ||
4226 | * Return nothing. | ||
4227 | */ | 4193 | */ |
4228 | static void | 4194 | static void |
4229 | _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc, | 4195 | _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc, |
@@ -4348,8 +4314,6 @@ _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b) | |||
4348 | * volume has been deleted or removed. When the target reset is sent | 4314 | * volume has been deleted or removed. When the target reset is sent |
4349 | * to volume, the PD target resets need to be queued to start upon | 4315 | * to volume, the PD target resets need to be queued to start upon |
4350 | * completion of the volume target reset. | 4316 | * completion of the volume target reset. |
4351 | * | ||
4352 | * Return nothing. | ||
4353 | */ | 4317 | */ |
4354 | static void | 4318 | static void |
4355 | _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc, | 4319 | _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc, |
@@ -4433,8 +4397,6 @@ _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc, | |||
4433 | * This will handle the case when the cable connected to entire volume is | 4397 | * This will handle the case when the cable connected to entire volume is |
4434 | * pulled. We will take care of setting the deleted flag so normal IO will | 4398 | * pulled. We will take care of setting the deleted flag so normal IO will |
4435 | * not be sent. | 4399 | * not be sent. |
4436 | * | ||
4437 | * Return nothing. | ||
4438 | */ | 4400 | */ |
4439 | static void | 4401 | static void |
4440 | _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc, | 4402 | _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc, |
@@ -4456,8 +4418,6 @@ _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc, | |||
4456 | * @ioc: per adapter object | 4418 | * @ioc: per adapter object |
4457 | * @event_data: the temp threshold event data | 4419 | * @event_data: the temp threshold event data |
4458 | * Context: interrupt time. | 4420 | * Context: interrupt time. |
4459 | * | ||
4460 | * Return nothing. | ||
4461 | */ | 4421 | */ |
4462 | static void | 4422 | static void |
4463 | _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, | 4423 | _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, |
@@ -4496,8 +4456,6 @@ static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending) | |||
4496 | * | 4456 | * |
4497 | * The flushing out of all pending scmd commands following host reset, | 4457 | * The flushing out of all pending scmd commands following host reset, |
4498 | * where all IO is dropped to the floor. | 4458 | * where all IO is dropped to the floor. |
4499 | * | ||
4500 | * Return nothing. | ||
4501 | */ | 4459 | */ |
4502 | static void | 4460 | static void |
4503 | _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) | 4461 | _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) |
@@ -4533,8 +4491,6 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) | |||
4533 | * @mpi_request: pointer to the SCSI_IO request message frame | 4491 | * @mpi_request: pointer to the SCSI_IO request message frame |
4534 | * | 4492 | * |
4535 | * Supporting protection 1 and 3. | 4493 | * Supporting protection 1 and 3. |
4536 | * | ||
4537 | * Returns nothing | ||
4538 | */ | 4494 | */ |
4539 | static void | 4495 | static void |
4540 | _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, | 4496 | _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, |
@@ -4593,8 +4549,6 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, | |||
4593 | * _scsih_eedp_error_handling - return sense code for EEDP errors | 4549 | * _scsih_eedp_error_handling - return sense code for EEDP errors |
4594 | * @scmd: pointer to scsi command object | 4550 | * @scmd: pointer to scsi command object |
4595 | * @ioc_status: ioc status | 4551 | * @ioc_status: ioc status |
4596 | * | ||
4597 | * Returns nothing | ||
4598 | */ | 4552 | */ |
4599 | static void | 4553 | static void |
4600 | _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) | 4554 | _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) |
@@ -4623,12 +4577,12 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) | |||
4623 | 4577 | ||
4624 | /** | 4578 | /** |
4625 | * scsih_qcmd - main scsi request entry point | 4579 | * scsih_qcmd - main scsi request entry point |
4580 | * @shost: SCSI host pointer | ||
4626 | * @scmd: pointer to scsi command object | 4581 | * @scmd: pointer to scsi command object |
4627 | * @done: function pointer to be invoked on completion | ||
4628 | * | 4582 | * |
4629 | * The callback index is set inside `ioc->scsi_io_cb_idx`. | 4583 | * The callback index is set inside `ioc->scsi_io_cb_idx`. |
4630 | * | 4584 | * |
4631 | * Returns 0 on success. If there's a failure, return either: | 4585 | * Return: 0 on success. If there's a failure, return either: |
4632 | * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or | 4586 | * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or |
4633 | * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full | 4587 | * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full |
4634 | */ | 4588 | */ |
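The @shost/@done swap above brings the scsih_qcmd() kernel-doc in line with the two-argument queuecommand prototype the body already uses, as the next hunk header shows; roughly:

	static int scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd);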
@@ -4674,19 +4628,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) | |||
4674 | } | 4628 | } |
4675 | 4629 | ||
4676 | 4630 | ||
4677 | /* host recovery or link resets sent via IOCTLs */ | 4631 | if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) { |
4678 | if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) | 4632 | /* host recovery or link resets sent via IOCTLs */ |
4679 | return SCSI_MLQUEUE_HOST_BUSY; | 4633 | return SCSI_MLQUEUE_HOST_BUSY; |
4680 | 4634 | } else if (sas_target_priv_data->deleted) { | |
4681 | /* device has been deleted */ | 4635 | /* device has been deleted */ |
4682 | else if (sas_target_priv_data->deleted) { | ||
4683 | scmd->result = DID_NO_CONNECT << 16; | 4636 | scmd->result = DID_NO_CONNECT << 16; |
4684 | scmd->scsi_done(scmd); | 4637 | scmd->scsi_done(scmd); |
4685 | return 0; | 4638 | return 0; |
4686 | /* device busy with task management */ | ||
4687 | } else if (sas_target_priv_data->tm_busy || | 4639 | } else if (sas_target_priv_data->tm_busy || |
4688 | sas_device_priv_data->block) | 4640 | sas_device_priv_data->block) { |
4641 | /* device busy with task management */ | ||
4689 | return SCSI_MLQUEUE_DEVICE_BUSY; | 4642 | return SCSI_MLQUEUE_DEVICE_BUSY; |
4643 | } | ||
4690 | 4644 | ||
4691 | /* | 4645 | /* |
4692 | * Bug work around for firmware SATL handling. The loop | 4646 | * Bug work around for firmware SATL handling. The loop |
@@ -4791,8 +4745,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) | |||
4791 | * _scsih_normalize_sense - normalize descriptor and fixed format sense data | 4745 | * _scsih_normalize_sense - normalize descriptor and fixed format sense data |
4792 | * @sense_buffer: sense data returned by target | 4746 | * @sense_buffer: sense data returned by target |
4793 | * @data: normalized skey/asc/ascq | 4747 | * @data: normalized skey/asc/ascq |
4794 | * | ||
4795 | * Return nothing. | ||
4796 | */ | 4748 | */ |
4797 | static void | 4749 | static void |
4798 | _scsih_normalize_sense(char *sense_buffer, struct sense_info *data) | 4750 | _scsih_normalize_sense(char *sense_buffer, struct sense_info *data) |
@@ -4815,12 +4767,11 @@ _scsih_normalize_sense(char *sense_buffer, struct sense_info *data) | |||
4815 | * @ioc: per adapter object | 4767 | * @ioc: per adapter object |
4816 | * @scmd: pointer to scsi command object | 4768 | * @scmd: pointer to scsi command object |
4817 | * @mpi_reply: reply mf payload returned from firmware | 4769 | * @mpi_reply: reply mf payload returned from firmware |
4770 | * @smid: ? | ||
4818 | * | 4771 | * |
4819 | * scsi_status - SCSI Status code returned from target device | 4772 | * scsi_status - SCSI Status code returned from target device |
4820 | * scsi_state - state info associated with SCSI_IO determined by ioc | 4773 | * scsi_state - state info associated with SCSI_IO determined by ioc |
4821 | * ioc_status - ioc supplied status info | 4774 | * ioc_status - ioc supplied status info |
4822 | * | ||
4823 | * Return nothing. | ||
4824 | */ | 4775 | */ |
4825 | static void | 4776 | static void |
4826 | _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, | 4777 | _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, |
@@ -5044,8 +4995,6 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, | |||
5044 | * @ioc: per adapter object | 4995 | * @ioc: per adapter object |
5045 | * @handle: device handle | 4996 | * @handle: device handle |
5046 | * Context: process | 4997 | * Context: process |
5047 | * | ||
5048 | * Return nothing. | ||
5049 | */ | 4998 | */ |
5050 | static void | 4999 | static void |
5051 | _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) | 5000 | _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
@@ -5089,8 +5038,6 @@ out: | |||
5089 | * @ioc: per adapter object | 5038 | * @ioc: per adapter object |
5090 | * @sas_device: sas device whose PFA LED has to turned off | 5039 | * @sas_device: sas device whose PFA LED has to turned off |
5091 | * Context: process | 5040 | * Context: process |
5092 | * | ||
5093 | * Return nothing. | ||
5094 | */ | 5041 | */ |
5095 | static void | 5042 | static void |
5096 | _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc, | 5043 | _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc, |
@@ -5128,8 +5075,6 @@ _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc, | |||
5128 | * @ioc: per adapter object | 5075 | * @ioc: per adapter object |
5129 | * @handle: device handle | 5076 | * @handle: device handle |
5130 | * Context: interrupt. | 5077 | * Context: interrupt. |
5131 | * | ||
5132 | * Return nothing. | ||
5133 | */ | 5078 | */ |
5134 | static void | 5079 | static void |
5135 | _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) | 5080 | _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
@@ -5151,8 +5096,6 @@ _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) | |||
5151 | * @ioc: per adapter object | 5096 | * @ioc: per adapter object |
5152 | * @handle: device handle | 5097 | * @handle: device handle |
5153 | * Context: interrupt. | 5098 | * Context: interrupt. |
5154 | * | ||
5155 | * Return nothing. | ||
5156 | */ | 5099 | */ |
5157 | static void | 5100 | static void |
5158 | _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle) | 5101 | _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
@@ -5228,8 +5171,8 @@ out_unlock: | |||
5228 | * | 5171 | * |
5229 | * Callback handler when using _scsih_qcmd. | 5172 | * Callback handler when using _scsih_qcmd. |
5230 | * | 5173 | * |
5231 | * Return 1 meaning mf should be freed from _base_interrupt | 5174 | * Return: 1 meaning mf should be freed from _base_interrupt |
5232 | * 0 means the mf is freed from this function. | 5175 | * 0 means the mf is freed from this function. |
5233 | */ | 5176 | */ |
5234 | static u8 | 5177 | static u8 |
5235 | _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) | 5178 | _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) |
@@ -5416,6 +5359,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) | |||
5416 | 5359 | ||
5417 | case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: | 5360 | case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: |
5418 | scsi_set_resid(scmd, 0); | 5361 | scsi_set_resid(scmd, 0); |
5362 | /* fall through */ | ||
5419 | case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: | 5363 | case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: |
5420 | case MPI2_IOCSTATUS_SUCCESS: | 5364 | case MPI2_IOCSTATUS_SUCCESS: |
5421 | scmd->result = (DID_OK << 16) | scsi_status; | 5365 | scmd->result = (DID_OK << 16) | scsi_status; |
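The comment added above is one of several in this series (the SAS and PCIe topology handlers get the same treatment further down) marking deliberate switch fall-throughs so they survive -Wimplicit-fallthrough builds. The annotated pattern, consolidated from the hunk:

	switch (ioc_status) {
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		scsi_set_resid(scmd, 0);
		/* fall through */
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SUCCESS:
		/* an overrun is then completed exactly like a success */
		scmd->result = (DID_OK << 16) | scsi_status;
		break;
	}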
@@ -5468,8 +5412,6 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) | |||
5468 | * During port enable, fw will send topology events for every device. Its | 5412 | * During port enable, fw will send topology events for every device. Its |
5469 | * possible that the handles may change from the previous setting, so this | 5413 | * possible that the handles may change from the previous setting, so this |
5470 | * code keeping handles updating if changed. | 5414 | * code keeping handles updating if changed. |
5471 | * | ||
5472 | * Return nothing. | ||
5473 | */ | 5415 | */ |
5474 | static void | 5416 | static void |
5475 | _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc) | 5417 | _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc) |
@@ -5523,8 +5465,6 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc) | |||
5523 | * @ioc: per adapter object | 5465 | * @ioc: per adapter object |
5524 | * | 5466 | * |
5525 | * Creating host side data object, stored in ioc->sas_hba | 5467 | * Creating host side data object, stored in ioc->sas_hba |
5526 | * | ||
5527 | * Return nothing. | ||
5528 | */ | 5468 | */ |
5529 | static void | 5469 | static void |
5530 | _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) | 5470 | _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) |
@@ -5672,7 +5612,7 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) | |||
5672 | * | 5612 | * |
5673 | * Creating expander object, stored in ioc->sas_expander_list. | 5613 | * Creating expander object, stored in ioc->sas_expander_list. |
5674 | * | 5614 | * |
5675 | * Return 0 for success, else error. | 5615 | * Return: 0 for success, else error. |
5676 | */ | 5616 | */ |
5677 | static int | 5617 | static int |
5678 | _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) | 5618 | _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
@@ -5812,7 +5752,7 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) | |||
5812 | } | 5752 | } |
5813 | 5753 | ||
5814 | _scsih_expander_node_add(ioc, sas_expander); | 5754 | _scsih_expander_node_add(ioc, sas_expander); |
5815 | return 0; | 5755 | return 0; |
5816 | 5756 | ||
5817 | out_fail: | 5757 | out_fail: |
5818 | 5758 | ||
@@ -5827,8 +5767,6 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) | |||
5827 | * mpt3sas_expander_remove - removing expander object | 5767 | * mpt3sas_expander_remove - removing expander object |
5828 | * @ioc: per adapter object | 5768 | * @ioc: per adapter object |
5829 | * @sas_address: expander sas_address | 5769 | * @sas_address: expander sas_address |
5830 | * | ||
5831 | * Return nothing. | ||
5832 | */ | 5770 | */ |
5833 | void | 5771 | void |
5834 | mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address) | 5772 | mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address) |
@@ -5857,8 +5795,8 @@ mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address) | |||
5857 | * Callback handler when sending internal generated SCSI_IO. | 5795 | * Callback handler when sending internal generated SCSI_IO. |
5858 | * The callback index passed is `ioc->scsih_cb_idx` | 5796 | * The callback index passed is `ioc->scsih_cb_idx` |
5859 | * | 5797 | * |
5860 | * Return 1 meaning mf should be freed from _base_interrupt | 5798 | * Return: 1 meaning mf should be freed from _base_interrupt |
5861 | * 0 means the mf is freed from this function. | 5799 | * 0 means the mf is freed from this function. |
5862 | */ | 5800 | */ |
5863 | static u8 | 5801 | static u8 |
5864 | _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) | 5802 | _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) |
@@ -5892,9 +5830,9 @@ _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) | |||
5892 | * @ioc: per adapter object | 5830 | * @ioc: per adapter object |
5893 | * @sas_address: sas address | 5831 | * @sas_address: sas address |
5894 | * @handle: sas device handle | 5832 | * @handle: sas device handle |
5895 | * @access_flags: errors returned during discovery of the device | 5833 | * @access_status: errors returned during discovery of the device |
5896 | * | 5834 | * |
5897 | * Return 0 for success, else failure | 5835 | * Return: 0 for success, else failure |
5898 | */ | 5836 | */ |
5899 | static u8 | 5837 | static u8 |
5900 | _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, | 5838 | _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, |
@@ -5956,10 +5894,8 @@ _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, | |||
5956 | * @ioc: per adapter object | 5894 | * @ioc: per adapter object |
5957 | * @parent_sas_address: sas address of parent expander or sas host | 5895 | * @parent_sas_address: sas address of parent expander or sas host |
5958 | * @handle: attached device handle | 5896 | * @handle: attached device handle |
5959 | * @phy_numberv: phy number | 5897 | * @phy_number: phy number |
5960 | * @link_rate: new link rate | 5898 | * @link_rate: new link rate |
5961 | * | ||
5962 | * Returns nothing. | ||
5963 | */ | 5899 | */ |
5964 | static void | 5900 | static void |
5965 | _scsih_check_device(struct MPT3SAS_ADAPTER *ioc, | 5901 | _scsih_check_device(struct MPT3SAS_ADAPTER *ioc, |
@@ -6076,7 +6012,7 @@ out_unlock: | |||
6076 | * | 6012 | * |
6077 | * Creating end device object, stored in ioc->sas_device_list. | 6013 | * Creating end device object, stored in ioc->sas_device_list. |
6078 | * | 6014 | * |
6079 | * Returns 0 for success, non-zero for failure. | 6015 | * Return: 0 for success, non-zero for failure. |
6080 | */ | 6016 | */ |
6081 | static int | 6017 | static int |
6082 | _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num, | 6018 | _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num, |
@@ -6208,9 +6144,7 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num, | |||
6208 | /** | 6144 | /** |
6209 | * _scsih_remove_device - removing sas device object | 6145 | * _scsih_remove_device - removing sas device object |
6210 | * @ioc: per adapter object | 6146 | * @ioc: per adapter object |
6211 | * @sas_device_delete: the sas_device object | 6147 | * @sas_device: the sas_device object |
6212 | * | ||
6213 | * Return nothing. | ||
6214 | */ | 6148 | */ |
6215 | static void | 6149 | static void |
6216 | _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc, | 6150 | _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc, |
@@ -6446,6 +6380,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc, | |||
6446 | if (!test_bit(handle, ioc->pend_os_device_add)) | 6380 | if (!test_bit(handle, ioc->pend_os_device_add)) |
6447 | break; | 6381 | break; |
6448 | 6382 | ||
6383 | /* fall through */ | ||
6449 | 6384 | ||
6450 | case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: | 6385 | case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: |
6451 | 6386 | ||
@@ -6475,10 +6410,9 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc, | |||
6475 | 6410 | ||
6476 | /** | 6411 | /** |
6477 | * _scsih_sas_device_status_change_event_debug - debug for device event | 6412 | * _scsih_sas_device_status_change_event_debug - debug for device event |
6413 | * @ioc: ? | ||
6478 | * @event_data: event data payload | 6414 | * @event_data: event data payload |
6479 | * Context: user. | 6415 | * Context: user. |
6480 | * | ||
6481 | * Return nothing. | ||
6482 | */ | 6416 | */ |
6483 | static void | 6417 | static void |
6484 | _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, | 6418 | _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, |
@@ -6546,8 +6480,6 @@ _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, | |||
6546 | * @ioc: per adapter object | 6480 | * @ioc: per adapter object |
6547 | * @fw_event: The fw_event_work object | 6481 | * @fw_event: The fw_event_work object |
6548 | * Context: user. | 6482 | * Context: user. |
6549 | * | ||
6550 | * Return nothing. | ||
6551 | */ | 6483 | */ |
6552 | static void | 6484 | static void |
6553 | _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, | 6485 | _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, |
@@ -6608,9 +6540,9 @@ out: | |||
6608 | * @ioc: per adapter object | 6540 | * @ioc: per adapter object |
6609 | * @wwid: wwid | 6541 | * @wwid: wwid |
6610 | * @handle: sas device handle | 6542 | * @handle: sas device handle |
6611 | * @access_flags: errors returned during discovery of the device | 6543 | * @access_status: errors returned during discovery of the device |
6612 | * | 6544 | * |
6613 | * Return 0 for success, else failure | 6545 | * Return: 0 for success, else failure |
6614 | */ | 6546 | */ |
6615 | static u8 | 6547 | static u8 |
6616 | _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid, | 6548 | _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid, |
@@ -6695,8 +6627,6 @@ _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid, | |||
6695 | * from SML and free up associated memory | 6627 | * from SML and free up associated memory |
6696 | * @ioc: per adapter object | 6628 | * @ioc: per adapter object |
6697 | * @pcie_device: the pcie_device object | 6629 | * @pcie_device: the pcie_device object |
6698 | * | ||
6699 | * Return nothing. | ||
6700 | */ | 6630 | */ |
6701 | static void | 6631 | static void |
6702 | _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc, | 6632 | _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc, |
@@ -6770,8 +6700,6 @@ _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc, | |||
6770 | * _scsih_pcie_check_device - checking device responsiveness | 6700 | * _scsih_pcie_check_device - checking device responsiveness |
6771 | * @ioc: per adapter object | 6701 | * @ioc: per adapter object |
6772 | * @handle: attached device handle | 6702 | * @handle: attached device handle |
6773 | * | ||
6774 | * Returns nothing. | ||
6775 | */ | 6703 | */ |
6776 | static void | 6704 | static void |
6777 | _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) | 6705 | _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
@@ -6863,7 +6791,7 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) | |||
6863 | * | 6791 | * |
6864 | * Creating end device object, stored in ioc->pcie_device_list. | 6792 | * Creating end device object, stored in ioc->pcie_device_list. |
6865 | * | 6793 | * |
6866 | * Return 1 means queue the event later, 0 means complete the event | 6794 | * Return: 1 means queue the event later, 0 means complete the event |
6867 | */ | 6795 | */ |
6868 | static int | 6796 | static int |
6869 | _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) | 6797 | _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
@@ -6873,7 +6801,6 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) | |||
6873 | Mpi2ConfigReply_t mpi_reply; | 6801 | Mpi2ConfigReply_t mpi_reply; |
6874 | struct _pcie_device *pcie_device; | 6802 | struct _pcie_device *pcie_device; |
6875 | struct _enclosure_node *enclosure_dev; | 6803 | struct _enclosure_node *enclosure_dev; |
6876 | u32 pcie_device_type; | ||
6877 | u32 ioc_status; | 6804 | u32 ioc_status; |
6878 | u64 wwid; | 6805 | u64 wwid; |
6879 | 6806 | ||
@@ -6935,8 +6862,6 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) | |||
6935 | pcie_device->port_num = pcie_device_pg0.PortNum; | 6862 | pcie_device->port_num = pcie_device_pg0.PortNum; |
6936 | pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) & | 6863 | pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) & |
6937 | MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0; | 6864 | MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0; |
6938 | pcie_device_type = pcie_device->device_info & | ||
6939 | MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE; | ||
6940 | 6865 | ||
6941 | pcie_device->enclosure_handle = | 6866 | pcie_device->enclosure_handle = |
6942 | le16_to_cpu(pcie_device_pg0.EnclosureHandle); | 6867 | le16_to_cpu(pcie_device_pg0.EnclosureHandle); |
@@ -7165,6 +7090,7 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc, | |||
7165 | event_data->PortEntry[i].PortStatus &= 0xF0; | 7090 | event_data->PortEntry[i].PortStatus &= 0xF0; |
7166 | event_data->PortEntry[i].PortStatus |= | 7091 | event_data->PortEntry[i].PortStatus |= |
7167 | MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED; | 7092 | MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED; |
7093 | /* fall through */ | ||
7168 | case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED: | 7094 | case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED: |
7169 | if (ioc->shost_recovery) | 7095 | if (ioc->shost_recovery) |
7170 | break; | 7096 | break; |
@@ -7190,12 +7116,10 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc, | |||
7190 | } | 7116 | } |
7191 | 7117 | ||
7192 | /** | 7118 | /** |
7193 | * _scsih_pcie_device_status_change_event_debug - debug for | 7119 | * _scsih_pcie_device_status_change_event_debug - debug for device event |
7194 | * device event | 7120 | * @ioc: ? |
7195 | * @event_data: event data payload | 7121 | * @event_data: event data payload |
7196 | * Context: user. | 7122 | * Context: user. |
7197 | * | ||
7198 | * Return nothing. | ||
7199 | */ | 7123 | */ |
7200 | static void | 7124 | static void |
7201 | _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, | 7125 | _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, |
@@ -7262,8 +7186,6 @@ _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, | |||
7262 | * @ioc: per adapter object | 7186 | * @ioc: per adapter object |
7263 | * @fw_event: The fw_event_work object | 7187 | * @fw_event: The fw_event_work object |
7264 | * Context: user. | 7188 | * Context: user. |
7265 | * | ||
7266 | * Return nothing. | ||
7267 | */ | 7189 | */ |
7268 | static void | 7190 | static void |
7269 | _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, | 7191 | _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, |
@@ -7314,8 +7236,6 @@ out: | |||
7314 | * @ioc: per adapter object | 7236 | * @ioc: per adapter object |
7315 | * @event_data: event data payload | 7237 | * @event_data: event data payload |
7316 | * Context: user. | 7238 | * Context: user. |
7317 | * | ||
7318 | * Return nothing. | ||
7319 | */ | 7239 | */ |
7320 | static void | 7240 | static void |
7321 | _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, | 7241 | _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, |
@@ -7348,8 +7268,6 @@ _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, | |||
7348 | * @ioc: per adapter object | 7268 | * @ioc: per adapter object |
7349 | * @fw_event: The fw_event_work object | 7269 | * @fw_event: The fw_event_work object |
7350 | * Context: user. | 7270 | * Context: user. |
7351 | * | ||
7352 | * Return nothing. | ||
7353 | */ | 7271 | */ |
7354 | static void | 7272 | static void |
7355 | _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc, | 7273 | _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc, |
@@ -7416,8 +7334,6 @@ _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc, | |||
7416 | * @ioc: per adapter object | 7334 | * @ioc: per adapter object |
7417 | * @fw_event: The fw_event_work object | 7335 | * @fw_event: The fw_event_work object |
7418 | * Context: user. | 7336 | * Context: user. |
7419 | * | ||
7420 | * Return nothing. | ||
7421 | */ | 7337 | */ |
7422 | static void | 7338 | static void |
7423 | _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, | 7339 | _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, |
@@ -7483,6 +7399,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, | |||
7483 | if (sas_device_priv_data->sas_target->flags & | 7399 | if (sas_device_priv_data->sas_target->flags & |
7484 | MPT_TARGET_FLAGS_VOLUME) | 7400 | MPT_TARGET_FLAGS_VOLUME) |
7485 | continue; | 7401 | continue; |
7402 | /* skip PCIe devices */ | ||
7403 | if (sas_device_priv_data->sas_target->flags & | ||
7404 | MPT_TARGET_FLAGS_PCIE_DEVICE) | ||
7405 | continue; | ||
7486 | 7406 | ||
7487 | handle = sas_device_priv_data->sas_target->handle; | 7407 | handle = sas_device_priv_data->sas_target->handle; |
7488 | lun = sas_device_priv_data->lun; | 7408 | lun = sas_device_priv_data->lun; |
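The added check keeps NVMe targets out of the SAS broadcast-primitive handling in _scsih_sas_broadcast_primitive_event(), just as RAID volumes were already skipped. The filter inside the per-device loop, with flag and field names as in the diff:

	if (sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME)
		continue;	/* RAID volumes are handled by the IR code */
	if (sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE)
		continue;	/* NVMe devices: no SAS broadcast handling */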
@@ -7580,8 +7500,6 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, | |||
7580 | * @ioc: per adapter object | 7500 | * @ioc: per adapter object |
7581 | * @fw_event: The fw_event_work object | 7501 | * @fw_event: The fw_event_work object |
7582 | * Context: user. | 7502 | * Context: user. |
7583 | * | ||
7584 | * Return nothing. | ||
7585 | */ | 7503 | */ |
7586 | static void | 7504 | static void |
7587 | _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc, | 7505 | _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc, |
@@ -7617,8 +7535,6 @@ _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc, | |||
7617 | * @ioc: per adapter object | 7535 | * @ioc: per adapter object |
7618 | * @fw_event: The fw_event_work object | 7536 | * @fw_event: The fw_event_work object |
7619 | * Context: user. | 7537 | * Context: user. |
7620 | * | ||
7621 | * Return nothing. | ||
7622 | */ | 7538 | */ |
7623 | static void | 7539 | static void |
7624 | _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc, | 7540 | _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc, |
@@ -7654,8 +7570,6 @@ _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc, | |||
7654 | * @ioc: per adapter object | 7570 | * @ioc: per adapter object |
7655 | * @fw_event: The fw_event_work object | 7571 | * @fw_event: The fw_event_work object |
7656 | * Context: user. | 7572 | * Context: user. |
7657 | * | ||
7658 | * Return nothing. | ||
7659 | */ | 7573 | */ |
7660 | static void | 7574 | static void |
7661 | _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc, | 7575 | _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc, |
@@ -7684,7 +7598,7 @@ _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc, | |||
7684 | * @handle: device handle for physical disk | 7598 | * @handle: device handle for physical disk |
7685 | * @phys_disk_num: physical disk number | 7599 | * @phys_disk_num: physical disk number |
7686 | * | 7600 | * |
7687 | * Return 0 for success, else failure. | 7601 | * Return: 0 for success, else failure. |
7688 | */ | 7602 | */ |
7689 | static int | 7603 | static int |
7690 | _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num) | 7604 | _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num) |
@@ -7736,10 +7650,10 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num) | |||
7736 | wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); | 7650 | wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); |
7737 | 7651 | ||
7738 | if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { | 7652 | if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { |
7739 | pr_err(MPT3SAS_FMT "%s: timeout\n", | 7653 | issue_reset = |
7740 | ioc->name, __func__); | 7654 | mpt3sas_base_check_cmd_timeout(ioc, |
7741 | if (!(ioc->scsih_cmds.status & MPT3_CMD_RESET)) | 7655 | ioc->scsih_cmds.status, mpi_request, |
7742 | issue_reset = 1; | 7656 | sizeof(Mpi2RaidActionRequest_t)/4); |
7743 | rc = -EFAULT; | 7657 | rc = -EFAULT; |
7744 | goto out; | 7658 | goto out; |
7745 | } | 7659 | } |
@@ -7794,8 +7708,6 @@ _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach) | |||
7794 | * @ioc: per adapter object | 7708 | * @ioc: per adapter object |
7795 | * @element: IR config element data | 7709 | * @element: IR config element data |
7796 | * Context: user. | 7710 | * Context: user. |
7797 | * | ||
7798 | * Return nothing. | ||
7799 | */ | 7711 | */ |
7800 | static void | 7712 | static void |
7801 | _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc, | 7713 | _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc, |
@@ -7852,8 +7764,6 @@ _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc, | |||
7852 | * @ioc: per adapter object | 7764 | * @ioc: per adapter object |
7853 | * @handle: volume device handle | 7765 | * @handle: volume device handle |
7854 | * Context: user. | 7766 | * Context: user. |
7855 | * | ||
7856 | * Return nothing. | ||
7857 | */ | 7767 | */ |
7858 | static void | 7768 | static void |
7859 | _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle) | 7769 | _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle) |
@@ -7887,8 +7797,6 @@ _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle) | |||
7887 | * @ioc: per adapter object | 7797 | * @ioc: per adapter object |
7888 | * @element: IR config element data | 7798 | * @element: IR config element data |
7889 | * Context: user. | 7799 | * Context: user. |
7890 | * | ||
7891 | * Return nothing. | ||
7892 | */ | 7800 | */ |
7893 | static void | 7801 | static void |
7894 | _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc, | 7802 | _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc, |
@@ -7929,8 +7837,6 @@ _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc, | |||
7929 | * @ioc: per adapter object | 7837 | * @ioc: per adapter object |
7930 | * @element: IR config element data | 7838 | * @element: IR config element data |
7931 | * Context: user. | 7839 | * Context: user. |
7932 | * | ||
7933 | * Return nothing. | ||
7934 | */ | 7840 | */ |
7935 | static void | 7841 | static void |
7936 | _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc, | 7842 | _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc, |
@@ -7980,8 +7886,6 @@ _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc, | |||
7980 | * @ioc: per adapter object | 7886 | * @ioc: per adapter object |
7981 | * @element: IR config element data | 7887 | * @element: IR config element data |
7982 | * Context: user. | 7888 | * Context: user. |
7983 | * | ||
7984 | * Return nothing. | ||
7985 | */ | 7889 | */ |
7986 | static void | 7890 | static void |
7987 | _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc, | 7891 | _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc, |
@@ -7997,8 +7901,6 @@ _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc, | |||
7997 | * @ioc: per adapter object | 7901 | * @ioc: per adapter object |
7998 | * @element: IR config element data | 7902 | * @element: IR config element data |
7999 | * Context: user. | 7903 | * Context: user. |
8000 | * | ||
8001 | * Return nothing. | ||
8002 | */ | 7904 | */ |
8003 | static void | 7905 | static void |
8004 | _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc, | 7906 | _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc, |
@@ -8050,8 +7952,6 @@ _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc, | |||
8050 | * @ioc: per adapter object | 7952 | * @ioc: per adapter object |
8051 | * @event_data: event data payload | 7953 | * @event_data: event data payload |
8052 | * Context: user. | 7954 | * Context: user. |
8053 | * | ||
8054 | * Return nothing. | ||
8055 | */ | 7955 | */ |
8056 | static void | 7956 | static void |
8057 | _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc, | 7957 | _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc, |
@@ -8130,8 +8030,6 @@ _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc, | |||
8130 | * @ioc: per adapter object | 8030 | * @ioc: per adapter object |
8131 | * @fw_event: The fw_event_work object | 8031 | * @fw_event: The fw_event_work object |
8132 | * Context: user. | 8032 | * Context: user. |
8133 | * | ||
8134 | * Return nothing. | ||
8135 | */ | 8033 | */ |
8136 | static void | 8034 | static void |
8137 | _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc, | 8035 | _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc, |
@@ -8202,8 +8100,6 @@ _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc, | |||
8202 | * @ioc: per adapter object | 8100 | * @ioc: per adapter object |
8203 | * @fw_event: The fw_event_work object | 8101 | * @fw_event: The fw_event_work object |
8204 | * Context: user. | 8102 | * Context: user. |
8205 | * | ||
8206 | * Return nothing. | ||
8207 | */ | 8103 | */ |
8208 | static void | 8104 | static void |
8209 | _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc, | 8105 | _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc, |
@@ -8286,8 +8182,6 @@ _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc, | |||
8286 | * @ioc: per adapter object | 8182 | * @ioc: per adapter object |
8287 | * @fw_event: The fw_event_work object | 8183 | * @fw_event: The fw_event_work object |
8288 | * Context: user. | 8184 | * Context: user. |
8289 | * | ||
8290 | * Return nothing. | ||
8291 | */ | 8185 | */ |
8292 | static void | 8186 | static void |
8293 | _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc, | 8187 | _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc, |
@@ -8372,8 +8266,6 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc, | |||
8372 | * @ioc: per adapter object | 8266 | * @ioc: per adapter object |
8373 | * @event_data: event data payload | 8267 | * @event_data: event data payload |
8374 | * Context: user. | 8268 | * Context: user. |
8375 | * | ||
8376 | * Return nothing. | ||
8377 | */ | 8269 | */ |
8378 | static void | 8270 | static void |
8379 | _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc, | 8271 | _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc, |
@@ -8414,8 +8306,6 @@ _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc, | |||
8414 | * @ioc: per adapter object | 8306 | * @ioc: per adapter object |
8415 | * @fw_event: The fw_event_work object | 8307 | * @fw_event: The fw_event_work object |
8416 | * Context: user. | 8308 | * Context: user. |
8417 | * | ||
8418 | * Return nothing. | ||
8419 | */ | 8309 | */ |
8420 | static void | 8310 | static void |
8421 | _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc, | 8311 | _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc, |
@@ -8473,8 +8363,6 @@ _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc) | |||
8473 | * | 8363 | * |
8474 | * After host reset, find out whether devices are still responding. | 8364 | * After host reset, find out whether devices are still responding. |
8475 | * Used in _scsih_remove_unresponsive_sas_devices. | 8365 | * Used in _scsih_remove_unresponsive_sas_devices. |
8476 | * | ||
8477 | * Return nothing. | ||
8478 | */ | 8366 | */ |
8479 | static void | 8367 | static void |
8480 | _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc, | 8368 | _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc, |
@@ -8569,8 +8457,6 @@ Mpi2SasDevicePage0_t *sas_device_pg0) | |||
8569 | * _scsih_create_enclosure_list_after_reset - Free Existing list, | 8457 | * _scsih_create_enclosure_list_after_reset - Free Existing list, |
8570 | * And create enclosure list by scanning all Enclosure Page(0)s | 8458 | * And create enclosure list by scanning all Enclosure Page(0)s |
8571 | * @ioc: per adapter object | 8459 | * @ioc: per adapter object |
8572 | * | ||
8573 | * Return nothing. | ||
8574 | */ | 8460 | */ |
8575 | static void | 8461 | static void |
8576 | _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc) | 8462 | _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc) |
@@ -8617,8 +8503,6 @@ _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc) | |||
8617 | * | 8503 | * |
8618 | * After host reset, find out whether devices are still responding. | 8504 | * After host reset, find out whether devices are still responding. |
8619 | * If not remove. | 8505 | * If not remove. |
8620 | * | ||
8621 | * Return nothing. | ||
8622 | */ | 8506 | */ |
8623 | static void | 8507 | static void |
8624 | _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc) | 8508 | _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc) |
@@ -8661,8 +8545,6 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc) | |||
8661 | * | 8545 | * |
8662 | * After host reset, find out whether devices are still responding. | 8546 | * After host reset, find out whether devices are still responding. |
8663 | * Used in _scsih_remove_unresponding_devices. | 8547 | * Used in _scsih_remove_unresponding_devices. |
8664 | * | ||
8665 | * Return nothing. | ||
8666 | */ | 8548 | */ |
8667 | static void | 8549 | static void |
8668 | _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc, | 8550 | _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc, |
@@ -8736,8 +8618,6 @@ _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc, | |||
8736 | * | 8618 | * |
8737 | * After host reset, find out whether devices are still responding. | 8619 | * After host reset, find out whether devices are still responding. |
8738 | * If not remove. | 8620 | * If not remove. |
8739 | * | ||
8740 | * Return nothing. | ||
8741 | */ | 8621 | */ |
8742 | static void | 8622 | static void |
8743 | _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc) | 8623 | _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc) |
@@ -8785,8 +8665,6 @@ out: | |||
8785 | * | 8665 | * |
8786 | * After host reset, find out whether devices are still responding. | 8666 | * After host reset, find out whether devices are still responding. |
8787 | * Used in _scsih_remove_unresponsive_raid_devices. | 8667 | * Used in _scsih_remove_unresponsive_raid_devices. |
8788 | * | ||
8789 | * Return nothing. | ||
8790 | */ | 8668 | */ |
8791 | static void | 8669 | static void |
8792 | _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid, | 8670 | _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid, |
@@ -8842,8 +8720,6 @@ _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid, | |||
8842 | * | 8720 | * |
8843 | * After host reset, find out whether devices are still responding. | 8721 | * After host reset, find out whether devices are still responding. |
8844 | * If not remove. | 8722 | * If not remove. |
8845 | * | ||
8846 | * Return nothing. | ||
8847 | */ | 8723 | */ |
8848 | static void | 8724 | static void |
8849 | _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc) | 8725 | _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc) |
@@ -8914,8 +8790,6 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc) | |||
8914 | * | 8790 | * |
8915 | * After host reset, find out whether devices are still responding. | 8791 | * After host reset, find out whether devices are still responding. |
8916 | * Used in _scsih_remove_unresponsive_expanders. | 8792 | * Used in _scsih_remove_unresponsive_expanders. |
8917 | * | ||
8918 | * Return nothing. | ||
8919 | */ | 8793 | */ |
8920 | static void | 8794 | static void |
8921 | _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc, | 8795 | _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc, |
@@ -8968,8 +8842,6 @@ _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc, | |||
8968 | * | 8842 | * |
8969 | * After host reset, find out whether devices are still responding. | 8843 | * After host reset, find out whether devices are still responding. |
8970 | * If not remove. | 8844 | * If not remove. |
8971 | * | ||
8972 | * Return nothing. | ||
8973 | */ | 8845 | */ |
8974 | static void | 8846 | static void |
8975 | _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc) | 8847 | _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc) |
@@ -9009,8 +8881,6 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc) | |||
9009 | /** | 8881 | /** |
9010 | * _scsih_remove_unresponding_devices - removing unresponding devices | 8882 | * _scsih_remove_unresponding_devices - removing unresponding devices |
9011 | * @ioc: per adapter object | 8883 | * @ioc: per adapter object |
9012 | * | ||
9013 | * Return nothing. | ||
9014 | */ | 8884 | */ |
9015 | static void | 8885 | static void |
9016 | _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc) | 8886 | _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc) |
@@ -9136,8 +9006,6 @@ _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc, | |||
9136 | /** | 9006 | /** |
9137 | * _scsih_scan_for_devices_after_reset - scan for devices after host reset | 9007 | * _scsih_scan_for_devices_after_reset - scan for devices after host reset |
9138 | * @ioc: per adapter object | 9008 | * @ioc: per adapter object |
9139 | * | ||
9140 | * Return nothing. | ||
9141 | */ | 9009 | */ |
9142 | static void | 9010 | static void |
9143 | _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) | 9011 | _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) |
@@ -9421,60 +9289,68 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) | |||
9421 | ioc->name); | 9289 | ioc->name); |
9422 | pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name); | 9290 | pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name); |
9423 | } | 9291 | } |
9292 | |||
9424 | /** | 9293 | /** |
9425 | * mpt3sas_scsih_reset_handler - reset callback handler (for scsih) | 9294 | * mpt3sas_scsih_reset_handler - reset callback handler (for scsih) |
9426 | * @ioc: per adapter object | 9295 | * @ioc: per adapter object |
9427 | * @reset_phase: phase | ||
9428 | * | 9296 | * |
9429 | * The handler for doing any required cleanup or initialization. | 9297 | * The handler for doing any required cleanup or initialization. |
9298 | */ | ||
9299 | void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc) | ||
9300 | { | ||
9301 | dtmprintk(ioc, pr_info(MPT3SAS_FMT | ||
9302 | "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__)); | ||
9303 | } | ||
9304 | |||
9305 | /** | ||
9306 | * mpt3sas_scsih_after_reset_handler - reset callback handler (for scsih) | ||
9307 | * @ioc: per adapter object | ||
9430 | * | 9308 | * |
9431 | * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET, | 9309 | * The handler for doing any required cleanup or initialization. |
9432 | * MPT3_IOC_DONE_RESET | ||
9433 | * | ||
9434 | * Return nothing. | ||
9435 | */ | 9310 | */ |
9436 | void | 9311 | void |
9437 | mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase) | 9312 | mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc) |
9438 | { | 9313 | { |
9439 | switch (reset_phase) { | 9314 | dtmprintk(ioc, pr_info(MPT3SAS_FMT |
9440 | case MPT3_IOC_PRE_RESET: | ||
9441 | dtmprintk(ioc, pr_info(MPT3SAS_FMT | ||
9442 | "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__)); | ||
9443 | break; | ||
9444 | case MPT3_IOC_AFTER_RESET: | ||
9445 | dtmprintk(ioc, pr_info(MPT3SAS_FMT | ||
9446 | "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__)); | 9315 | "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__)); |
9447 | if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) { | 9316 | if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) { |
9448 | ioc->scsih_cmds.status |= MPT3_CMD_RESET; | 9317 | ioc->scsih_cmds.status |= MPT3_CMD_RESET; |
9449 | mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid); | 9318 | mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid); |
9450 | complete(&ioc->scsih_cmds.done); | 9319 | complete(&ioc->scsih_cmds.done); |
9451 | } | 9320 | } |
9452 | if (ioc->tm_cmds.status & MPT3_CMD_PENDING) { | 9321 | if (ioc->tm_cmds.status & MPT3_CMD_PENDING) { |
9453 | ioc->tm_cmds.status |= MPT3_CMD_RESET; | 9322 | ioc->tm_cmds.status |= MPT3_CMD_RESET; |
9454 | mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid); | 9323 | mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid); |
9455 | complete(&ioc->tm_cmds.done); | 9324 | complete(&ioc->tm_cmds.done); |
9456 | } | 9325 | } |
9457 | 9326 | ||
9458 | memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz); | 9327 | memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz); |
9459 | memset(ioc->device_remove_in_progress, 0, | 9328 | memset(ioc->device_remove_in_progress, 0, |
9460 | ioc->device_remove_in_progress_sz); | 9329 | ioc->device_remove_in_progress_sz); |
9461 | _scsih_fw_event_cleanup_queue(ioc); | 9330 | _scsih_fw_event_cleanup_queue(ioc); |
9462 | _scsih_flush_running_cmds(ioc); | 9331 | _scsih_flush_running_cmds(ioc); |
9463 | break; | 9332 | } |
9464 | case MPT3_IOC_DONE_RESET: | 9333 | |
9465 | dtmprintk(ioc, pr_info(MPT3SAS_FMT | 9334 | /** |
9335 | * mpt3sas_scsih_reset_handler - reset callback handler (for scsih) | ||
9336 | * @ioc: per adapter object | ||
9337 | * | ||
9338 | * The handler for doing any required cleanup or initialization. | ||
9339 | */ | ||
9340 | void | ||
9341 | mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc) | ||
9342 | { | ||
9343 | dtmprintk(ioc, pr_info(MPT3SAS_FMT | ||
9466 | "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__)); | 9344 | "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__)); |
9467 | if ((!ioc->is_driver_loading) && !(disable_discovery > 0 && | 9345 | if ((!ioc->is_driver_loading) && !(disable_discovery > 0 && |
9468 | !ioc->sas_hba.num_phys)) { | 9346 | !ioc->sas_hba.num_phys)) { |
9469 | _scsih_prep_device_scan(ioc); | 9347 | _scsih_prep_device_scan(ioc); |
9470 | _scsih_create_enclosure_list_after_reset(ioc); | 9348 | _scsih_create_enclosure_list_after_reset(ioc); |
9471 | _scsih_search_responding_sas_devices(ioc); | 9349 | _scsih_search_responding_sas_devices(ioc); |
9472 | _scsih_search_responding_pcie_devices(ioc); | 9350 | _scsih_search_responding_pcie_devices(ioc); |
9473 | _scsih_search_responding_raid_devices(ioc); | 9351 | _scsih_search_responding_raid_devices(ioc); |
9474 | _scsih_search_responding_expanders(ioc); | 9352 | _scsih_search_responding_expanders(ioc); |
9475 | _scsih_error_recovery_delete_devices(ioc); | 9353 | _scsih_error_recovery_delete_devices(ioc); |
9476 | } | ||
9477 | break; | ||
9478 | } | 9354 | } |
9479 | } | 9355 | } |
9480 | 9356 | ||
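The three hunks above replace the single mpt3sas_scsih_reset_handler(ioc, reset_phase) switch with dedicated pre/after/done callbacks. Assuming the base layer drives them in the obvious order around a hard reset, the call sequence looks roughly like this sketch (the wrapper name is illustrative; only the three handlers come from the diff):

    /* Illustrative ordering only; the real sequencing lives in mpt3sas_base.c. */
    static void example_hard_reset_flow(struct MPT3SAS_ADAPTER *ioc)
    {
            mpt3sas_scsih_pre_reset_handler(ioc);   /* quiesce before the reset */
            /* ... bring the IOC down and re-initialize firmware ... */
            mpt3sas_scsih_after_reset_handler(ioc); /* fail pending tm/scsih cmds, flush events */
            /* ... IOC operational again ... */
            mpt3sas_scsih_reset_done_handler(ioc);  /* rescan and clean up stale devices */
    }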
@@ -9483,8 +9359,6 @@ mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase) | |||
9483 | * @ioc: per adapter object | 9359 | * @ioc: per adapter object |
9484 | * @fw_event: The fw_event_work object | 9360 | * @fw_event: The fw_event_work object |
9485 | * Context: user. | 9361 | * Context: user. |
9486 | * | ||
9487 | * Return nothing. | ||
9488 | */ | 9362 | */ |
9489 | static void | 9363 | static void |
9490 | _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) | 9364 | _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) |
@@ -9519,7 +9393,7 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) | |||
9519 | break; | 9393 | break; |
9520 | case MPT3SAS_PORT_ENABLE_COMPLETE: | 9394 | case MPT3SAS_PORT_ENABLE_COMPLETE: |
9521 | ioc->start_scan = 0; | 9395 | ioc->start_scan = 0; |
9522 | if (missing_delay[0] != -1 && missing_delay[1] != -1) | 9396 | if (missing_delay[0] != -1 && missing_delay[1] != -1) |
9523 | mpt3sas_base_update_missing_delay(ioc, missing_delay[0], | 9397 | mpt3sas_base_update_missing_delay(ioc, missing_delay[0], |
9524 | missing_delay[1]); | 9398 | missing_delay[1]); |
9525 | dewtprintk(ioc, pr_info(MPT3SAS_FMT | 9399 | dewtprintk(ioc, pr_info(MPT3SAS_FMT |
@@ -9577,13 +9451,10 @@ out: | |||
9577 | 9451 | ||
9578 | /** | 9452 | /** |
9579 | * _firmware_event_work | 9453 | * _firmware_event_work |
9580 | * @ioc: per adapter object | ||
9581 | * @work: The fw_event_work object | 9454 | * @work: The fw_event_work object |
9582 | * Context: user. | 9455 | * Context: user. |
9583 | * | 9456 | * |
9584 | * wrappers for the work thread handling firmware events | 9457 | * wrappers for the work thread handling firmware events |
9585 | * | ||
9586 | * Return nothing. | ||
9587 | */ | 9458 | */ |
9588 | 9459 | ||
9589 | static void | 9460 | static void |
@@ -9605,8 +9476,8 @@ _firmware_event_work(struct work_struct *work) | |||
9605 | * This function merely adds a new work task into ioc->firmware_event_thread. | 9476 | * This function merely adds a new work task into ioc->firmware_event_thread. |
9606 | * The tasks are worked from _firmware_event_work in user context. | 9477 | * The tasks are worked from _firmware_event_work in user context. |
9607 | * | 9478 | * |
9608 | * Return 1 meaning mf should be freed from _base_interrupt | 9479 | * Return: 1 meaning mf should be freed from _base_interrupt |
9609 | * 0 means the mf is freed from this function. | 9480 | * 0 means the mf is freed from this function. |
9610 | */ | 9481 | */ |
9611 | u8 | 9482 | u8 |
9612 | mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, | 9483 | mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, |
@@ -9791,8 +9662,6 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, | |||
9791 | * | 9662 | * |
9792 | * Removing object and freeing associated memory from the | 9663 | * Removing object and freeing associated memory from the |
9793 | * ioc->sas_expander_list. | 9664 | * ioc->sas_expander_list. |
9794 | * | ||
9795 | * Return nothing. | ||
9796 | */ | 9665 | */ |
9797 | static void | 9666 | static void |
9798 | _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, | 9667 | _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, |
@@ -9841,8 +9710,6 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, | |||
9841 | * | 9710 | * |
9842 | * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that | 9711 | * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that |
9843 | * the host system is shutting down. | 9712 | * the host system is shutting down. |
9844 | * | ||
9845 | * Return nothing. | ||
9846 | */ | 9713 | */ |
9847 | static void | 9714 | static void |
9848 | _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc) | 9715 | _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc) |
@@ -9914,7 +9781,6 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc) | |||
9914 | * @pdev: PCI device struct | 9781 | * @pdev: PCI device struct |
9915 | * | 9782 | * |
9916 | * Routine called when unloading the driver. | 9783 | * Routine called when unloading the driver. |
9917 | * Return nothing. | ||
9918 | */ | 9784 | */ |
9919 | static void scsih_remove(struct pci_dev *pdev) | 9785 | static void scsih_remove(struct pci_dev *pdev) |
9920 | { | 9786 | { |
@@ -9996,8 +9862,6 @@ static void scsih_remove(struct pci_dev *pdev) | |||
9996 | /** | 9862 | /** |
9997 | * scsih_shutdown - routine call during system shutdown | 9863 | * scsih_shutdown - routine call during system shutdown |
9998 | * @pdev: PCI device struct | 9864 | * @pdev: PCI device struct |
9999 | * | ||
10000 | * Return nothing. | ||
10001 | */ | 9865 | */ |
10002 | static void | 9866 | static void |
10003 | scsih_shutdown(struct pci_dev *pdev) | 9867 | scsih_shutdown(struct pci_dev *pdev) |
@@ -10220,7 +10084,7 @@ _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc) | |||
10220 | * | 10084 | * |
10221 | * Get the next pcie device from pcie_device_init_list list. | 10085 | * Get the next pcie device from pcie_device_init_list list. |
10222 | * | 10086 | * |
10223 | * Returns pcie device structure if pcie_device_init_list list is not empty | 10087 | * Return: pcie device structure if pcie_device_init_list list is not empty |
10224 | * otherwise returns NULL | 10088 | * otherwise returns NULL |
10225 | */ | 10089 | */ |
10226 | static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc) | 10090 | static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc) |
@@ -10390,7 +10254,7 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time) | |||
10390 | } | 10254 | } |
10391 | 10255 | ||
10392 | if (time >= (300 * HZ)) { | 10256 | if (time >= (300 * HZ)) { |
10393 | ioc->base_cmds.status = MPT3_CMD_NOT_USED; | 10257 | ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; |
10394 | pr_info(MPT3SAS_FMT | 10258 | pr_info(MPT3SAS_FMT |
10395 | "port enable: FAILED with timeout (timeout=300s)\n", | 10259 | "port enable: FAILED with timeout (timeout=300s)\n", |
10396 | ioc->name); | 10260 | ioc->name); |
@@ -10412,7 +10276,7 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time) | |||
10412 | } | 10276 | } |
10413 | 10277 | ||
10414 | pr_info(MPT3SAS_FMT "port enable: SUCCESS\n", ioc->name); | 10278 | pr_info(MPT3SAS_FMT "port enable: SUCCESS\n", ioc->name); |
10415 | ioc->base_cmds.status = MPT3_CMD_NOT_USED; | 10279 | ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; |
10416 | 10280 | ||
10417 | if (ioc->wait_for_discovery_to_complete) { | 10281 | if (ioc->wait_for_discovery_to_complete) { |
10418 | ioc->wait_for_discovery_to_complete = 0; | 10282 | ioc->wait_for_discovery_to_complete = 0; |
@@ -10568,7 +10432,7 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev) | |||
10568 | * @pdev: PCI device struct | 10432 | * @pdev: PCI device struct |
10569 | * @id: pci device id | 10433 | * @id: pci device id |
10570 | * | 10434 | * |
10571 | * Returns 0 success, anything else error. | 10435 | * Return: 0 success, anything else error. |
10572 | */ | 10436 | */ |
10573 | static int | 10437 | static int |
10574 | _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) | 10438 | _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
@@ -10818,7 +10682,7 @@ out_add_shost_fail: | |||
10818 | * @pdev: PCI device struct | 10682 | * @pdev: PCI device struct |
10819 | * @state: PM state change to (usually PCI_D3) | 10683 | * @state: PM state change to (usually PCI_D3) |
10820 | * | 10684 | * |
10821 | * Returns 0 success, anything else error. | 10685 | * Return: 0 success, anything else error. |
10822 | */ | 10686 | */ |
10823 | static int | 10687 | static int |
10824 | scsih_suspend(struct pci_dev *pdev, pm_message_t state) | 10688 | scsih_suspend(struct pci_dev *pdev, pm_message_t state) |
@@ -10845,7 +10709,7 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state) | |||
10845 | * scsih_resume - power management resume main entry point | 10709 | * scsih_resume - power management resume main entry point |
10846 | * @pdev: PCI device struct | 10710 | * @pdev: PCI device struct |
10847 | * | 10711 | * |
10848 | * Returns 0 success, anything else error. | 10712 | * Return: 0 success, anything else error. |
10849 | */ | 10713 | */ |
10850 | static int | 10714 | static int |
10851 | scsih_resume(struct pci_dev *pdev) | 10715 | scsih_resume(struct pci_dev *pdev) |
@@ -10881,8 +10745,7 @@ scsih_resume(struct pci_dev *pdev) | |||
10881 | * | 10745 | * |
10882 | * Description: Called when a PCI error is detected. | 10746 | * Description: Called when a PCI error is detected. |
10883 | * | 10747 | * |
10884 | * Return value: | 10748 | * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT. |
10885 | * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT | ||
10886 | */ | 10749 | */ |
10887 | static pci_ers_result_t | 10750 | static pci_ers_result_t |
10888 | scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) | 10751 | scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) |
@@ -11143,7 +11006,7 @@ static struct pci_driver mpt3sas_driver = { | |||
11143 | /** | 11006 | /** |
11144 | * scsih_init - main entry point for this driver. | 11007 | * scsih_init - main entry point for this driver. |
11145 | * | 11008 | * |
11146 | * Returns 0 success, anything else error. | 11009 | * Return: 0 success, anything else error. |
11147 | */ | 11010 | */ |
11148 | static int | 11011 | static int |
11149 | scsih_init(void) | 11012 | scsih_init(void) |
@@ -11193,7 +11056,7 @@ scsih_init(void) | |||
11193 | /** | 11056 | /** |
11194 | * scsih_exit - exit point for this driver (when it is a module). | 11057 | * scsih_exit - exit point for this driver (when it is a module). |
11195 | * | 11058 | * |
11196 | * Returns 0 success, anything else error. | 11059 | * Return: 0 success, anything else error. |
11197 | */ | 11060 | */ |
11198 | static void | 11061 | static void |
11199 | scsih_exit(void) | 11062 | scsih_exit(void) |
@@ -11223,7 +11086,7 @@ scsih_exit(void) | |||
11223 | /** | 11086 | /** |
11224 | * _mpt3sas_init - main entry point for this driver. | 11087 | * _mpt3sas_init - main entry point for this driver. |
11225 | * | 11088 | * |
11226 | * Returns 0 success, anything else error. | 11089 | * Return: 0 success, anything else error. |
11227 | */ | 11090 | */ |
11228 | static int __init | 11091 | static int __init |
11229 | _mpt3sas_init(void) | 11092 | _mpt3sas_init(void) |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index 3a143bb5ca72..f8cc2677c1cd 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c | |||
@@ -134,7 +134,7 @@ _transport_convert_phy_link_rate(u8 link_rate) | |||
134 | * | 134 | * |
135 | * Populates sas identify info. | 135 | * Populates sas identify info. |
136 | * | 136 | * |
137 | * Returns 0 for success, non-zero for failure. | 137 | * Return: 0 for success, non-zero for failure. |
138 | */ | 138 | */ |
139 | static int | 139 | static int |
140 | _transport_set_identify(struct MPT3SAS_ADAPTER *ioc, u16 handle, | 140 | _transport_set_identify(struct MPT3SAS_ADAPTER *ioc, u16 handle, |
@@ -226,8 +226,8 @@ _transport_set_identify(struct MPT3SAS_ADAPTER *ioc, u16 handle, | |||
226 | * Callback handler when sending internal generated transport cmds. | 226 | * Callback handler when sending internal generated transport cmds. |
227 | * The callback index passed is `ioc->transport_cb_idx` | 227 | * The callback index passed is `ioc->transport_cb_idx` |
228 | * | 228 | * |
229 | * Return 1 meaning mf should be freed from _base_interrupt | 229 | * Return: 1 meaning mf should be freed from _base_interrupt |
230 | * 0 means the mf is freed from this function. | 230 | * 0 means the mf is freed from this function. |
231 | */ | 231 | */ |
232 | u8 | 232 | u8 |
233 | mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, | 233 | mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, |
@@ -287,7 +287,7 @@ struct rep_manu_reply { | |||
287 | * | 287 | * |
288 | * Fills in the sas_expander_device object when SMP port is created. | 288 | * Fills in the sas_expander_device object when SMP port is created. |
289 | * | 289 | * |
290 | * Returns 0 for success, non-zero for failure. | 290 | * Return: 0 for success, non-zero for failure. |
291 | */ | 291 | */ |
292 | static int | 292 | static int |
293 | _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc, | 293 | _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc, |
@@ -460,8 +460,6 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc, | |||
460 | * _transport_delete_port - helper function to removing a port | 460 | * _transport_delete_port - helper function to removing a port |
461 | * @ioc: per adapter object | 461 | * @ioc: per adapter object |
462 | * @mpt3sas_port: mpt3sas per port object | 462 | * @mpt3sas_port: mpt3sas per port object |
463 | * | ||
464 | * Returns nothing. | ||
465 | */ | 463 | */ |
466 | static void | 464 | static void |
467 | _transport_delete_port(struct MPT3SAS_ADAPTER *ioc, | 465 | _transport_delete_port(struct MPT3SAS_ADAPTER *ioc, |
@@ -489,8 +487,6 @@ _transport_delete_port(struct MPT3SAS_ADAPTER *ioc, | |||
489 | * @ioc: per adapter object | 487 | * @ioc: per adapter object |
490 | * @mpt3sas_port: mpt3sas per port object | 488 | * @mpt3sas_port: mpt3sas per port object |
491 | * @mpt3sas_phy: mpt3sas per phy object | 489 | * @mpt3sas_phy: mpt3sas per phy object |
492 | * | ||
493 | * Returns nothing. | ||
494 | */ | 490 | */ |
495 | static void | 491 | static void |
496 | _transport_delete_phy(struct MPT3SAS_ADAPTER *ioc, | 492 | _transport_delete_phy(struct MPT3SAS_ADAPTER *ioc, |
@@ -513,8 +509,6 @@ _transport_delete_phy(struct MPT3SAS_ADAPTER *ioc, | |||
513 | * @ioc: per adapter object | 509 | * @ioc: per adapter object |
514 | * @mpt3sas_port: mpt3sas per port object | 510 | * @mpt3sas_port: mpt3sas per port object |
515 | * @mpt3sas_phy: mpt3sas per phy object | 511 | * @mpt3sas_phy: mpt3sas per phy object |
516 | * | ||
517 | * Returns nothing. | ||
518 | */ | 512 | */ |
519 | static void | 513 | static void |
520 | _transport_add_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_port *mpt3sas_port, | 514 | _transport_add_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_port *mpt3sas_port, |
@@ -538,8 +532,6 @@ _transport_add_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_port *mpt3sas_port, | |||
538 | * @sas_node: sas node object (either expander or sas host) | 532 | * @sas_node: sas node object (either expander or sas host) |
539 | * @mpt3sas_phy: mpt3sas per phy object | 533 | * @mpt3sas_phy: mpt3sas per phy object |
540 | * @sas_address: sas address of device/expander were phy needs to be added to | 534 | * @sas_address: sas address of device/expander were phy needs to be added to |
541 | * | ||
542 | * Returns nothing. | ||
543 | */ | 535 | */ |
544 | static void | 536 | static void |
545 | _transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc, | 537 | _transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc, |
@@ -563,7 +555,7 @@ _transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc, | |||
563 | return; | 555 | return; |
564 | } | 556 | } |
565 | _transport_add_phy(ioc, mpt3sas_port, mpt3sas_phy); | 557 | _transport_add_phy(ioc, mpt3sas_port, mpt3sas_phy); |
566 | return; | 558 | return; |
567 | } | 559 | } |
568 | 560 | ||
569 | } | 561 | } |
@@ -573,8 +565,6 @@ _transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc, | |||
573 | * @ioc: per adapter object | 565 | * @ioc: per adapter object |
574 | * @sas_node: sas node object (either expander or sas host) | 566 | * @sas_node: sas node object (either expander or sas host) |
575 | * @mpt3sas_phy: mpt3sas per phy object | 567 | * @mpt3sas_phy: mpt3sas per phy object |
576 | * | ||
577 | * Returns nothing. | ||
578 | */ | 568 | */ |
579 | static void | 569 | static void |
580 | _transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc, | 570 | _transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc, |
@@ -635,7 +625,7 @@ _transport_sanity_check(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_node, | |||
635 | * | 625 | * |
636 | * Adding new port object to the sas_node->sas_port_list. | 626 | * Adding new port object to the sas_node->sas_port_list. |
637 | * | 627 | * |
638 | * Returns mpt3sas_port. | 628 | * Return: mpt3sas_port. |
639 | */ | 629 | */ |
640 | struct _sas_port * | 630 | struct _sas_port * |
641 | mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, | 631 | mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, |
@@ -794,8 +784,6 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, | |||
794 | * | 784 | * |
795 | * Removing object and freeing associated memory from the | 785 | * Removing object and freeing associated memory from the |
796 | * ioc->sas_port_list. | 786 | * ioc->sas_port_list. |
797 | * | ||
798 | * Return nothing. | ||
799 | */ | 787 | */ |
800 | void | 788 | void |
801 | mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, | 789 | mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, |
@@ -860,7 +848,7 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, | |||
860 | * @phy_pg0: sas phy page 0 | 848 | * @phy_pg0: sas phy page 0 |
861 | * @parent_dev: parent device class object | 849 | * @parent_dev: parent device class object |
862 | * | 850 | * |
863 | * Returns 0 for success, non-zero for failure. | 851 | * Return: 0 for success, non-zero for failure. |
864 | */ | 852 | */ |
865 | int | 853 | int |
866 | mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy | 854 | mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy |
@@ -928,7 +916,7 @@ mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy | |||
928 | * @expander_pg1: expander page 1 | 916 | * @expander_pg1: expander page 1 |
929 | * @parent_dev: parent device class object | 917 | * @parent_dev: parent device class object |
930 | * | 918 | * |
931 | * Returns 0 for success, non-zero for failure. | 919 | * Return: 0 for success, non-zero for failure. |
932 | */ | 920 | */ |
933 | int | 921 | int |
934 | mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy | 922 | mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy |
@@ -995,10 +983,8 @@ mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy | |||
995 | * @ioc: per adapter object | 983 | * @ioc: per adapter object |
996 | * @sas_address: sas address of parent expander or sas host | 984 | * @sas_address: sas address of parent expander or sas host |
997 | * @handle: attached device handle | 985 | * @handle: attached device handle |
998 | * @phy_numberv: phy number | 986 | * @phy_number: phy number |
999 | * @link_rate: new link rate | 987 | * @link_rate: new link rate |
1000 | * | ||
1001 | * Returns nothing. | ||
1002 | */ | 988 | */ |
1003 | void | 989 | void |
1004 | mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc, | 990 | mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc, |
@@ -1090,7 +1076,7 @@ struct phy_error_log_reply { | |||
1090 | * @ioc: per adapter object | 1076 | * @ioc: per adapter object |
1091 | * @phy: The sas phy object | 1077 | * @phy: The sas phy object |
1092 | * | 1078 | * |
1093 | * Returns 0 for success, non-zero for failure. | 1079 | * Return: 0 for success, non-zero for failure. |
1094 | * | 1080 | * |
1095 | */ | 1081 | */ |
1096 | static int | 1082 | static int |
@@ -1262,7 +1248,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc, | |||
1262 | * _transport_get_linkerrors - return phy counters for both hba and expanders | 1248 | * _transport_get_linkerrors - return phy counters for both hba and expanders |
1263 | * @phy: The sas phy object | 1249 | * @phy: The sas phy object |
1264 | * | 1250 | * |
1265 | * Returns 0 for success, non-zero for failure. | 1251 | * Return: 0 for success, non-zero for failure. |
1266 | * | 1252 | * |
1267 | */ | 1253 | */ |
1268 | static int | 1254 | static int |
@@ -1311,10 +1297,11 @@ _transport_get_linkerrors(struct sas_phy *phy) | |||
1311 | 1297 | ||
1312 | /** | 1298 | /** |
1313 | * _transport_get_enclosure_identifier - | 1299 | * _transport_get_enclosure_identifier - |
1314 | * @phy: The sas phy object | 1300 | * @rphy: The sas phy object |
1301 | * @identifier: ? | ||
1315 | * | 1302 | * |
1316 | * Obtain the enclosure logical id for an expander. | 1303 | * Obtain the enclosure logical id for an expander. |
1317 | * Returns 0 for success, non-zero for failure. | 1304 | * Return: 0 for success, non-zero for failure. |
1318 | */ | 1305 | */ |
1319 | static int | 1306 | static int |
1320 | _transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) | 1307 | _transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) |
@@ -1342,9 +1329,9 @@ _transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) | |||
1342 | 1329 | ||
1343 | /** | 1330 | /** |
1344 | * _transport_get_bay_identifier - | 1331 | * _transport_get_bay_identifier - |
1345 | * @phy: The sas phy object | 1332 | * @rphy: The sas phy object |
1346 | * | 1333 | * |
1347 | * Returns the slot id for a device that resides inside an enclosure. | 1334 | * Return: the slot id for a device that resides inside an enclosure. |
1348 | */ | 1335 | */ |
1349 | static int | 1336 | static int |
1350 | _transport_get_bay_identifier(struct sas_rphy *rphy) | 1337 | _transport_get_bay_identifier(struct sas_rphy *rphy) |
@@ -1400,8 +1387,9 @@ struct phy_control_reply { | |||
1400 | * _transport_expander_phy_control - expander phy control | 1387 | * _transport_expander_phy_control - expander phy control |
1401 | * @ioc: per adapter object | 1388 | * @ioc: per adapter object |
1402 | * @phy: The sas phy object | 1389 | * @phy: The sas phy object |
1390 | * @phy_operation: ? | ||
1403 | * | 1391 | * |
1404 | * Returns 0 for success, non-zero for failure. | 1392 | * Return: 0 for success, non-zero for failure. |
1405 | * | 1393 | * |
1406 | */ | 1394 | */ |
1407 | static int | 1395 | static int |
@@ -1571,7 +1559,7 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc, | |||
1571 | * @phy: The sas phy object | 1559 | * @phy: The sas phy object |
1572 | * @hard_reset: | 1560 | * @hard_reset: |
1573 | * | 1561 | * |
1574 | * Returns 0 for success, non-zero for failure. | 1562 | * Return: 0 for success, non-zero for failure. |
1575 | */ | 1563 | */ |
1576 | static int | 1564 | static int |
1577 | _transport_phy_reset(struct sas_phy *phy, int hard_reset) | 1565 | _transport_phy_reset(struct sas_phy *phy, int hard_reset) |
@@ -1623,7 +1611,7 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset) | |||
1623 | * @enable: enable phy when true | 1611 | * @enable: enable phy when true |
1624 | * | 1612 | * |
1625 | * Only support sas_host direct attached phys. | 1613 | * Only support sas_host direct attached phys. |
1626 | * Returns 0 for success, non-zero for failure. | 1614 | * Return: 0 for success, non-zero for failure. |
1627 | */ | 1615 | */ |
1628 | static int | 1616 | static int |
1629 | _transport_phy_enable(struct sas_phy *phy, int enable) | 1617 | _transport_phy_enable(struct sas_phy *phy, int enable) |
@@ -1761,7 +1749,8 @@ _transport_phy_enable(struct sas_phy *phy, int enable) | |||
1761 | * @rates: rates defined in sas_phy_linkrates | 1749 | * @rates: rates defined in sas_phy_linkrates |
1762 | * | 1750 | * |
1763 | * Only support sas_host direct attached phys. | 1751 | * Only support sas_host direct attached phys. |
1764 | * Returns 0 for success, non-zero for failure. | 1752 | * |
1753 | * Return: 0 for success, non-zero for failure. | ||
1765 | */ | 1754 | */ |
1766 | static int | 1755 | static int |
1767 | _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates) | 1756 | _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates) |
@@ -1904,9 +1893,9 @@ _transport_unmap_smp_buffer(struct device *dev, struct bsg_buffer *buf, | |||
1904 | 1893 | ||
1905 | /** | 1894 | /** |
1906 | * _transport_smp_handler - transport portal for smp passthru | 1895 | * _transport_smp_handler - transport portal for smp passthru |
1896 | * @job: ? | ||
1907 | * @shost: shost object | 1897 | * @shost: shost object |
1908 | * @rphy: sas transport rphy object | 1898 | * @rphy: sas transport rphy object |
1909 | * @req: | ||
1910 | * | 1899 | * |
1911 | * This used primarily for smp_utils. | 1900 | * This used primarily for smp_utils. |
1912 | * Example: | 1901 | * Example: |
@@ -1936,12 +1925,12 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, | |||
1936 | pr_info(MPT3SAS_FMT "%s: host reset in progress!\n", | 1925 | pr_info(MPT3SAS_FMT "%s: host reset in progress!\n", |
1937 | __func__, ioc->name); | 1926 | __func__, ioc->name); |
1938 | rc = -EFAULT; | 1927 | rc = -EFAULT; |
1939 | goto out; | 1928 | goto job_done; |
1940 | } | 1929 | } |
1941 | 1930 | ||
1942 | rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex); | 1931 | rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex); |
1943 | if (rc) | 1932 | if (rc) |
1944 | goto out; | 1933 | goto job_done; |
1945 | 1934 | ||
1946 | if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) { | 1935 | if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) { |
1947 | pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name, | 1936 | pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name, |
@@ -2066,6 +2055,7 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, | |||
2066 | out: | 2055 | out: |
2067 | ioc->transport_cmds.status = MPT3_CMD_NOT_USED; | 2056 | ioc->transport_cmds.status = MPT3_CMD_NOT_USED; |
2068 | mutex_unlock(&ioc->transport_cmds.mutex); | 2057 | mutex_unlock(&ioc->transport_cmds.mutex); |
2058 | job_done: | ||
2069 | bsg_job_done(job, rc, reslen); | 2059 | bsg_job_done(job, rc, reslen); |
2070 | } | 2060 | } |
2071 | 2061 | ||
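The _transport_smp_handler hunks above add a job_done label so that failures taken before the transport_cmds mutex is acquired still complete the bsg job without unlocking a mutex that was never taken. The resulting error-path shape, sketched with placeholder bodies:

    /* Shape of the error paths only; the real function is far larger. */
    static void example_smp_handler_shape(struct bsg_job *job,
            struct MPT3SAS_ADAPTER *ioc)
    {
            unsigned int reslen = 0;
            int rc;

            if (ioc->shost_recovery || ioc->pci_error_recovery) {
                    rc = -EFAULT;
                    goto job_done;          /* mutex not held yet */
            }

            rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex);
            if (rc)
                    goto job_done;          /* mutex not held yet */

            rc = 0;                         /* assume the passthrough succeeded */
            /* ... build and send the SMP request ... */

            ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
            mutex_unlock(&ioc->transport_cmds.mutex);
    job_done:
            bsg_job_done(job, rc, reslen);
    }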
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c index b60fd7a3b571..cae7c1eaef34 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c +++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c | |||
@@ -62,7 +62,7 @@ | |||
62 | /** | 62 | /** |
63 | * _mpt3sas_raise_sigio - notifiy app | 63 | * _mpt3sas_raise_sigio - notifiy app |
64 | * @ioc: per adapter object | 64 | * @ioc: per adapter object |
65 | * @event_data: | 65 | * @event_data: ? |
66 | */ | 66 | */ |
67 | static void | 67 | static void |
68 | _mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc, | 68 | _mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc, |
@@ -107,7 +107,7 @@ _mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc, | |||
107 | /** | 107 | /** |
108 | * mpt3sas_process_trigger_data - process the event data for the trigger | 108 | * mpt3sas_process_trigger_data - process the event data for the trigger |
109 | * @ioc: per adapter object | 109 | * @ioc: per adapter object |
110 | * @event_data: | 110 | * @event_data: ? |
111 | */ | 111 | */ |
112 | void | 112 | void |
113 | mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc, | 113 | mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc, |
@@ -209,8 +209,8 @@ mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask) | |||
209 | /** | 209 | /** |
210 | * mpt3sas_trigger_event - Event trigger handler | 210 | * mpt3sas_trigger_event - Event trigger handler |
211 | * @ioc: per adapter object | 211 | * @ioc: per adapter object |
212 | * @event: | 212 | * @event: ? |
213 | * @log_entry_qualifier: | 213 | * @log_entry_qualifier: ? |
214 | * | 214 | * |
215 | */ | 215 | */ |
216 | void | 216 | void |
@@ -288,9 +288,9 @@ mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event, | |||
288 | /** | 288 | /** |
289 | * mpt3sas_trigger_scsi - SCSI trigger handler | 289 | * mpt3sas_trigger_scsi - SCSI trigger handler |
290 | * @ioc: per adapter object | 290 | * @ioc: per adapter object |
291 | * @sense_key: | 291 | * @sense_key: ? |
292 | * @asc: | 292 | * @asc: ? |
293 | * @ascq: | 293 | * @ascq: ? |
294 | * | 294 | * |
295 | */ | 295 | */ |
296 | void | 296 | void |
@@ -364,8 +364,8 @@ mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc, | |||
364 | /** | 364 | /** |
365 | * mpt3sas_trigger_mpi - MPI trigger handler | 365 | * mpt3sas_trigger_mpi - MPI trigger handler |
366 | * @ioc: per adapter object | 366 | * @ioc: per adapter object |
367 | * @ioc_status: | 367 | * @ioc_status: ? |
368 | * @loginfo: | 368 | * @loginfo: ? |
369 | * | 369 | * |
370 | */ | 370 | */ |
371 | void | 371 | void |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c index 45aa94915cbf..b4927f2b7677 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c +++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c | |||
@@ -267,9 +267,6 @@ out_error: | |||
267 | * @scmd: pointer to scsi command object | 267 | * @scmd: pointer to scsi command object |
268 | * @raid_device: pointer to raid device data structure | 268 | * @raid_device: pointer to raid device data structure |
269 | * @mpi_request: pointer to the SCSI_IO reqest message frame | 269 | * @mpi_request: pointer to the SCSI_IO reqest message frame |
270 | * @smid: system request message index | ||
271 | * | ||
272 | * Returns nothing | ||
273 | */ | 270 | */ |
274 | void | 271 | void |
275 | mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, | 272 | mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, |
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c index dc4e801b2cef..6cd3e289ef99 100644 --- a/drivers/scsi/ncr53c8xx.c +++ b/drivers/scsi/ncr53c8xx.c | |||
@@ -4611,7 +4611,7 @@ static int ncr_reset_bus (struct ncb *np, struct scsi_cmnd *cmd, int sync_reset) | |||
4611 | * in order to keep it alive. | 4611 | * in order to keep it alive. |
4612 | */ | 4612 | */ |
4613 | if (!found && sync_reset && !retrieve_from_waiting_list(0, np, cmd)) { | 4613 | if (!found && sync_reset && !retrieve_from_waiting_list(0, np, cmd)) { |
4614 | cmd->result = ScsiResult(DID_RESET, 0); | 4614 | cmd->result = DID_RESET << 16; |
4615 | ncr_queue_done_cmd(np, cmd); | 4615 | ncr_queue_done_cmd(np, cmd); |
4616 | } | 4616 | } |
4617 | 4617 | ||
@@ -4957,7 +4957,7 @@ void ncr_complete (struct ncb *np, struct ccb *cp) | |||
4957 | /* | 4957 | /* |
4958 | ** Check condition code | 4958 | ** Check condition code |
4959 | */ | 4959 | */ |
4960 | cmd->result = ScsiResult(DID_OK, S_CHECK_COND); | 4960 | cmd->result = DID_OK << 16 | S_CHECK_COND; |
4961 | 4961 | ||
4962 | /* | 4962 | /* |
4963 | ** Copy back sense data to caller's buffer. | 4963 | ** Copy back sense data to caller's buffer. |
@@ -4978,7 +4978,7 @@ void ncr_complete (struct ncb *np, struct ccb *cp) | |||
4978 | /* | 4978 | /* |
4979 | ** Reservation Conflict condition code | 4979 | ** Reservation Conflict condition code |
4980 | */ | 4980 | */ |
4981 | cmd->result = ScsiResult(DID_OK, S_CONFLICT); | 4981 | cmd->result = DID_OK << 16 | S_CONFLICT; |
4982 | 4982 | ||
4983 | } else if ((cp->host_status == HS_COMPLETE) | 4983 | } else if ((cp->host_status == HS_COMPLETE) |
4984 | && (cp->scsi_status == S_BUSY || | 4984 | && (cp->scsi_status == S_BUSY || |
@@ -8043,7 +8043,7 @@ printk("ncr53c8xx_queue_command\n"); | |||
8043 | spin_lock_irqsave(&np->smp_lock, flags); | 8043 | spin_lock_irqsave(&np->smp_lock, flags); |
8044 | 8044 | ||
8045 | if ((sts = ncr_queue_command(np, cmd)) != DID_OK) { | 8045 | if ((sts = ncr_queue_command(np, cmd)) != DID_OK) { |
8046 | cmd->result = ScsiResult(sts, 0); | 8046 | cmd->result = sts << 16; |
8047 | #ifdef DEBUG_NCR53C8XX | 8047 | #ifdef DEBUG_NCR53C8XX |
8048 | printk("ncr53c8xx : command not queued - result=%d\n", sts); | 8048 | printk("ncr53c8xx : command not queued - result=%d\n", sts); |
8049 | #endif | 8049 | #endif |
@@ -8234,7 +8234,7 @@ static void process_waiting_list(struct ncb *np, int sts) | |||
8234 | #ifdef DEBUG_WAITING_LIST | 8234 | #ifdef DEBUG_WAITING_LIST |
8235 | printk("%s: cmd %lx done forced sts=%d\n", ncr_name(np), (u_long) wcmd, sts); | 8235 | printk("%s: cmd %lx done forced sts=%d\n", ncr_name(np), (u_long) wcmd, sts); |
8236 | #endif | 8236 | #endif |
8237 | wcmd->result = ScsiResult(sts, 0); | 8237 | wcmd->result = sts << 16; |
8238 | ncr_queue_done_cmd(np, wcmd); | 8238 | ncr_queue_done_cmd(np, wcmd); |
8239 | } | 8239 | } |
8240 | } | 8240 | } |
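The ncr53c8xx hunks above drop the driver-local ScsiResult() macro and open-code the SCSI midlayer result word, in which the host byte sits in bits 16-23 and the SCSI status byte in bits 0-7. A small sketch of the encoding the new code relies on:

    /* Result-word layout sketch; host code in bits 16-23, status in bits 0-7. */
    static void example_result_encoding(struct scsi_cmnd *cmd,
            int host_code, int scsi_status)
    {
            cmd->result = (host_code << 16) | scsi_status;
    }

So DID_RESET << 16 marks a command terminated by a bus reset with no status byte, and DID_OK << 16 | S_CHECK_COND reports a successfully delivered command that returned CHECK CONDITION.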
diff --git a/drivers/scsi/nsp32_debug.c b/drivers/scsi/nsp32_debug.c index 58806f432a16..4f1d4bf9c775 100644 --- a/drivers/scsi/nsp32_debug.c +++ b/drivers/scsi/nsp32_debug.c | |||
@@ -137,7 +137,7 @@ static void print_commandk (unsigned char *command) | |||
137 | printk("\n"); | 137 | printk("\n"); |
138 | } | 138 | } |
139 | 139 | ||
140 | static void show_command(Scsi_Cmnd *SCpnt) | 140 | static void show_command(struct scsi_cmnd *SCpnt) |
141 | { | 141 | { |
142 | print_commandk(SCpnt->cmnd); | 142 | print_commandk(SCpnt->cmnd); |
143 | } | 143 | } |
diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c index a269da1a6c75..387dc87e4d22 100644 --- a/drivers/scsi/qedi/qedi_fw_api.c +++ b/drivers/scsi/qedi/qedi_fw_api.c | |||
@@ -126,22 +126,24 @@ static void init_sqe(struct iscsi_task_params *task_params, | |||
126 | sgl_task_params, | 126 | sgl_task_params, |
127 | dif_task_params); | 127 | dif_task_params); |
128 | 128 | ||
129 | if (scsi_is_slow_sgl(sgl_task_params->num_sges, | 129 | if (scsi_is_slow_sgl(sgl_task_params->num_sges, |
130 | sgl_task_params->small_mid_sge)) | 130 | sgl_task_params->small_mid_sge)) |
131 | num_sges = ISCSI_WQE_NUM_SGES_SLOWIO; | 131 | num_sges = ISCSI_WQE_NUM_SGES_SLOWIO; |
132 | else | 132 | else |
133 | num_sges = min(sgl_task_params->num_sges, | 133 | num_sges = min(sgl_task_params->num_sges, |
134 | (u16)SCSI_NUM_SGES_SLOW_SGL_THR); | 134 | (u16)SCSI_NUM_SGES_SLOW_SGL_THR); |
135 | } | 135 | } |
136 | 136 | ||
137 | SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES, num_sges); | 137 | SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES, |
138 | SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN, | 138 | num_sges); |
139 | buf_size); | 139 | SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN, |
140 | buf_size); | ||
140 | 141 | ||
141 | if (GET_FIELD(pdu_header->hdr_second_dword, | 142 | if (GET_FIELD(pdu_header->hdr_second_dword, |
142 | ISCSI_CMD_HDR_TOTAL_AHS_LEN)) | 143 | ISCSI_CMD_HDR_TOTAL_AHS_LEN)) |
143 | SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CDB_SIZE, | 144 | SET_FIELD(task_params->sqe->contlen_cdbsize, |
144 | cmd_params->extended_cdb_sge.sge_len); | 145 | ISCSI_WQE_CDB_SIZE, |
146 | cmd_params->extended_cdb_sge.sge_len); | ||
145 | } | 147 | } |
146 | break; | 148 | break; |
147 | case ISCSI_TASK_TYPE_INITIATOR_READ: | 149 | case ISCSI_TASK_TYPE_INITIATOR_READ: |
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index cff83b9457f7..aa96bccb5a96 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c | |||
@@ -524,7 +524,7 @@ static int qedi_init_id_tbl(struct qedi_portid_tbl *id_tbl, u16 size, | |||
524 | id_tbl->max = size; | 524 | id_tbl->max = size; |
525 | id_tbl->next = next; | 525 | id_tbl->next = next; |
526 | spin_lock_init(&id_tbl->lock); | 526 | spin_lock_init(&id_tbl->lock); |
527 | id_tbl->table = kcalloc(DIV_ROUND_UP(size, 32), 4, GFP_KERNEL); | 527 | id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL); |
528 | if (!id_tbl->table) | 528 | if (!id_tbl->table) |
529 | return -ENOMEM; | 529 | return -ENOMEM; |
530 | 530 | ||
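The qedi hunk above sizes the port-ID bitmap as BITS_TO_LONGS(size) longs rather than a hand-rolled DIV_ROUND_UP(size, 32) * 4 bytes, so the backing store matches the unsigned long granularity that the bitmap helpers operate on. A hedged sketch of the sizing:

    #include <linux/bitops.h>
    #include <linux/slab.h>

    /* Sketch: allocate zeroed backing store for a bitmap of 'size' bits. */
    static unsigned long *example_alloc_id_bitmap(u16 size)
    {
            /* BITS_TO_LONGS(size) == DIV_ROUND_UP(size, BITS_PER_LONG) */
            return kcalloc(BITS_TO_LONGS(size), sizeof(unsigned long), GFP_KERNEL);
    }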
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index c8731568f9c4..4888b999e82f 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
@@ -518,6 +518,9 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj, | |||
518 | if (unlikely(pci_channel_offline(ha->pdev))) | 518 | if (unlikely(pci_channel_offline(ha->pdev))) |
519 | return 0; | 519 | return 0; |
520 | 520 | ||
521 | if (qla2x00_chip_is_down(vha)) | ||
522 | return 0; | ||
523 | |||
521 | if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size || | 524 | if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size || |
522 | !ha->isp_ops->write_nvram) | 525 | !ha->isp_ops->write_nvram) |
523 | return 0; | 526 | return 0; |
@@ -570,7 +573,7 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj, | |||
570 | if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE) | 573 | if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE) |
571 | return 0; | 574 | return 0; |
572 | 575 | ||
573 | if (qla2x00_reset_active(vha)) | 576 | if (qla2x00_chip_is_down(vha)) |
574 | return 0; | 577 | return 0; |
575 | 578 | ||
576 | rval = qla2x00_read_sfp_dev(vha, buf, count); | 579 | rval = qla2x00_read_sfp_dev(vha, buf, count); |
@@ -733,6 +736,15 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj, | |||
733 | int type; | 736 | int type; |
734 | port_id_t did; | 737 | port_id_t did; |
735 | 738 | ||
739 | if (!capable(CAP_SYS_ADMIN)) | ||
740 | return 0; | ||
741 | |||
742 | if (unlikely(pci_channel_offline(vha->hw->pdev))) | ||
743 | return 0; | ||
744 | |||
745 | if (qla2x00_chip_is_down(vha)) | ||
746 | return 0; | ||
747 | |||
736 | type = simple_strtol(buf, NULL, 10); | 748 | type = simple_strtol(buf, NULL, 10); |
737 | 749 | ||
738 | did.b.domain = (type & 0x00ff0000) >> 16; | 750 | did.b.domain = (type & 0x00ff0000) >> 16; |
@@ -771,6 +783,12 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj, | |||
771 | if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE) | 783 | if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE) |
772 | return 0; | 784 | return 0; |
773 | 785 | ||
786 | if (unlikely(pci_channel_offline(ha->pdev))) | ||
787 | return 0; | ||
788 | |||
789 | if (qla2x00_chip_is_down(vha)) | ||
790 | return 0; | ||
791 | |||
774 | if (ha->xgmac_data) | 792 | if (ha->xgmac_data) |
775 | goto do_read; | 793 | goto do_read; |
776 | 794 | ||
@@ -825,6 +843,9 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj, | |||
825 | if (ha->dcbx_tlv) | 843 | if (ha->dcbx_tlv) |
826 | goto do_read; | 844 | goto do_read; |
827 | 845 | ||
846 | if (qla2x00_chip_is_down(vha)) | ||
847 | return 0; | ||
848 | |||
828 | ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, | 849 | ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, |
829 | &ha->dcbx_tlv_dma, GFP_KERNEL); | 850 | &ha->dcbx_tlv_dma, GFP_KERNEL); |
830 | if (!ha->dcbx_tlv) { | 851 | if (!ha->dcbx_tlv) { |
@@ -1036,7 +1057,7 @@ qla2x00_link_state_show(struct device *dev, struct device_attribute *attr, | |||
1036 | vha->device_flags & DFLG_NO_CABLE) | 1057 | vha->device_flags & DFLG_NO_CABLE) |
1037 | len = scnprintf(buf, PAGE_SIZE, "Link Down\n"); | 1058 | len = scnprintf(buf, PAGE_SIZE, "Link Down\n"); |
1038 | else if (atomic_read(&vha->loop_state) != LOOP_READY || | 1059 | else if (atomic_read(&vha->loop_state) != LOOP_READY || |
1039 | qla2x00_reset_active(vha)) | 1060 | qla2x00_chip_is_down(vha)) |
1040 | len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n"); | 1061 | len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n"); |
1041 | else { | 1062 | else { |
1042 | len = scnprintf(buf, PAGE_SIZE, "Link Up - "); | 1063 | len = scnprintf(buf, PAGE_SIZE, "Link Up - "); |
@@ -1163,7 +1184,7 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr, | |||
1163 | if (IS_QLA2100(ha) || IS_QLA2200(ha)) | 1184 | if (IS_QLA2100(ha) || IS_QLA2200(ha)) |
1164 | return -EPERM; | 1185 | return -EPERM; |
1165 | 1186 | ||
1166 | if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { | 1187 | if (qla2x00_chip_is_down(vha)) { |
1167 | ql_log(ql_log_warn, vha, 0x707a, | 1188 | ql_log(ql_log_warn, vha, 0x707a, |
1168 | "Abort ISP active -- ignoring beacon request.\n"); | 1189 | "Abort ISP active -- ignoring beacon request.\n"); |
1169 | return -EBUSY; | 1190 | return -EBUSY; |
@@ -1350,7 +1371,7 @@ qla2x00_thermal_temp_show(struct device *dev, | |||
1350 | scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); | 1371 | scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); |
1351 | uint16_t temp = 0; | 1372 | uint16_t temp = 0; |
1352 | 1373 | ||
1353 | if (qla2x00_reset_active(vha)) { | 1374 | if (qla2x00_chip_is_down(vha)) { |
1354 | ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n"); | 1375 | ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n"); |
1355 | goto done; | 1376 | goto done; |
1356 | } | 1377 | } |
@@ -1381,7 +1402,7 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr, | |||
1381 | return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate); | 1402 | return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate); |
1382 | } | 1403 | } |
1383 | 1404 | ||
1384 | if (qla2x00_reset_active(vha)) | 1405 | if (qla2x00_chip_is_down(vha)) |
1385 | ql_log(ql_log_warn, vha, 0x707c, | 1406 | ql_log(ql_log_warn, vha, 0x707c, |
1386 | "ISP reset active.\n"); | 1407 | "ISP reset active.\n"); |
1387 | else if (!vha->hw->flags.eeh_busy) | 1408 | else if (!vha->hw->flags.eeh_busy) |
@@ -1840,7 +1861,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) | |||
1840 | if (unlikely(pci_channel_offline(ha->pdev))) | 1861 | if (unlikely(pci_channel_offline(ha->pdev))) |
1841 | goto done; | 1862 | goto done; |
1842 | 1863 | ||
1843 | if (qla2x00_reset_active(vha)) | 1864 | if (qla2x00_chip_is_down(vha)) |
1844 | goto done; | 1865 | goto done; |
1845 | 1866 | ||
1846 | stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats), | 1867 | stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats), |
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 5fd44c50bbac..c7533fa7f46e 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c | |||
@@ -1130,6 +1130,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
1130 | ha->fw_dump); | 1130 | ha->fw_dump); |
1131 | goto qla24xx_fw_dump_failed; | 1131 | goto qla24xx_fw_dump_failed; |
1132 | } | 1132 | } |
1133 | QLA_FW_STOPPED(ha); | ||
1133 | fw = &ha->fw_dump->isp.isp24; | 1134 | fw = &ha->fw_dump->isp.isp24; |
1134 | qla2xxx_prep_dump(ha, ha->fw_dump); | 1135 | qla2xxx_prep_dump(ha, ha->fw_dump); |
1135 | 1136 | ||
@@ -1384,6 +1385,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
1384 | ha->fw_dump); | 1385 | ha->fw_dump); |
1385 | goto qla25xx_fw_dump_failed; | 1386 | goto qla25xx_fw_dump_failed; |
1386 | } | 1387 | } |
1388 | QLA_FW_STOPPED(ha); | ||
1387 | fw = &ha->fw_dump->isp.isp25; | 1389 | fw = &ha->fw_dump->isp.isp25; |
1388 | qla2xxx_prep_dump(ha, ha->fw_dump); | 1390 | qla2xxx_prep_dump(ha, ha->fw_dump); |
1389 | ha->fw_dump->version = htonl(2); | 1391 | ha->fw_dump->version = htonl(2); |
@@ -2036,6 +2038,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) | |||
2036 | "request...\n", ha->fw_dump); | 2038 | "request...\n", ha->fw_dump); |
2037 | goto qla83xx_fw_dump_failed; | 2039 | goto qla83xx_fw_dump_failed; |
2038 | } | 2040 | } |
2041 | QLA_FW_STOPPED(ha); | ||
2039 | fw = &ha->fw_dump->isp.isp83; | 2042 | fw = &ha->fw_dump->isp.isp83; |
2040 | qla2xxx_prep_dump(ha, ha->fw_dump); | 2043 | qla2xxx_prep_dump(ha, ha->fw_dump); |
2041 | 2044 | ||
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 0f94b1d62d3f..a9dc9c4a6382 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
@@ -313,6 +313,7 @@ struct srb_cmd { | |||
313 | #define SRB_CRC_CTX_DMA_VALID BIT_2 /* DIF: context DMA valid */ | 313 | #define SRB_CRC_CTX_DMA_VALID BIT_2 /* DIF: context DMA valid */ |
314 | #define SRB_CRC_PROT_DMA_VALID BIT_4 /* DIF: prot DMA valid */ | 314 | #define SRB_CRC_PROT_DMA_VALID BIT_4 /* DIF: prot DMA valid */ |
315 | #define SRB_CRC_CTX_DSD_VALID BIT_5 /* DIF: dsd_list valid */ | 315 | #define SRB_CRC_CTX_DSD_VALID BIT_5 /* DIF: dsd_list valid */ |
316 | #define SRB_WAKEUP_ON_COMP BIT_6 | ||
316 | 317 | ||
317 | /* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */ | 318 | /* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */ |
318 | #define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID) | 319 | #define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID) |
@@ -379,6 +380,7 @@ struct srb_iocb { | |||
379 | #define SRB_LOGIN_COND_PLOGI BIT_1 | 380 | #define SRB_LOGIN_COND_PLOGI BIT_1 |
380 | #define SRB_LOGIN_SKIP_PRLI BIT_2 | 381 | #define SRB_LOGIN_SKIP_PRLI BIT_2 |
381 | #define SRB_LOGIN_NVME_PRLI BIT_3 | 382 | #define SRB_LOGIN_NVME_PRLI BIT_3 |
383 | #define SRB_LOGIN_PRLI_ONLY BIT_4 | ||
382 | uint16_t data[2]; | 384 | uint16_t data[2]; |
383 | u32 iop[2]; | 385 | u32 iop[2]; |
384 | } logio; | 386 | } logio; |
@@ -398,6 +400,8 @@ struct srb_iocb { | |||
398 | struct completion comp; | 400 | struct completion comp; |
399 | struct els_plogi_payload *els_plogi_pyld; | 401 | struct els_plogi_payload *els_plogi_pyld; |
400 | struct els_plogi_payload *els_resp_pyld; | 402 | struct els_plogi_payload *els_resp_pyld; |
403 | u32 tx_size; | ||
404 | u32 rx_size; | ||
401 | dma_addr_t els_plogi_pyld_dma; | 405 | dma_addr_t els_plogi_pyld_dma; |
402 | dma_addr_t els_resp_pyld_dma; | 406 | dma_addr_t els_resp_pyld_dma; |
403 | uint32_t fw_status[3]; | 407 | uint32_t fw_status[3]; |
@@ -2312,6 +2316,7 @@ enum fcport_mgt_event { | |||
2312 | FCME_ADISC_DONE, | 2316 | FCME_ADISC_DONE, |
2313 | FCME_GNNID_DONE, | 2317 | FCME_GNNID_DONE, |
2314 | FCME_GFPNID_DONE, | 2318 | FCME_GFPNID_DONE, |
2319 | FCME_ELS_PLOGI_DONE, | ||
2315 | }; | 2320 | }; |
2316 | 2321 | ||
2317 | enum rscn_addr_format { | 2322 | enum rscn_addr_format { |
@@ -2408,6 +2413,7 @@ typedef struct fc_port { | |||
2408 | struct ct_sns_desc ct_desc; | 2413 | struct ct_sns_desc ct_desc; |
2409 | enum discovery_state disc_state; | 2414 | enum discovery_state disc_state; |
2410 | enum login_state fw_login_state; | 2415 | enum login_state fw_login_state; |
2416 | unsigned long dm_login_expire; | ||
2411 | unsigned long plogi_nack_done_deadline; | 2417 | unsigned long plogi_nack_done_deadline; |
2412 | 2418 | ||
2413 | u32 login_gen, last_login_gen; | 2419 | u32 login_gen, last_login_gen; |
@@ -2418,7 +2424,8 @@ typedef struct fc_port { | |||
2418 | u8 iocb[IOCB_SIZE]; | 2424 | u8 iocb[IOCB_SIZE]; |
2419 | u8 current_login_state; | 2425 | u8 current_login_state; |
2420 | u8 last_login_state; | 2426 | u8 last_login_state; |
2421 | struct completion n2n_done; | 2427 | u16 n2n_link_reset_cnt; |
2428 | u16 n2n_chip_reset; | ||
2422 | } fc_port_t; | 2429 | } fc_port_t; |
2423 | 2430 | ||
2424 | #define QLA_FCPORT_SCAN 1 | 2431 | #define QLA_FCPORT_SCAN 1 |
@@ -3228,6 +3235,7 @@ enum qla_work_type { | |||
3228 | QLA_EVT_GFPNID, | 3235 | QLA_EVT_GFPNID, |
3229 | QLA_EVT_SP_RETRY, | 3236 | QLA_EVT_SP_RETRY, |
3230 | QLA_EVT_IIDMA, | 3237 | QLA_EVT_IIDMA, |
3238 | QLA_EVT_ELS_PLOGI, | ||
3231 | }; | 3239 | }; |
3232 | 3240 | ||
3233 | 3241 | ||
@@ -3599,6 +3607,8 @@ struct qla_hw_data { | |||
3599 | uint32_t detected_lr_sfp:1; | 3607 | uint32_t detected_lr_sfp:1; |
3600 | uint32_t using_lr_setting:1; | 3608 | uint32_t using_lr_setting:1; |
3601 | uint32_t rida_fmt2:1; | 3609 | uint32_t rida_fmt2:1; |
3610 | uint32_t purge_mbox:1; | ||
3611 | uint32_t n2n_bigger:1; | ||
3602 | } flags; | 3612 | } flags; |
3603 | 3613 | ||
3604 | uint16_t max_exchg; | 3614 | uint16_t max_exchg; |
@@ -3844,6 +3854,10 @@ struct qla_hw_data { | |||
3844 | int port_down_retry_count; | 3854 | int port_down_retry_count; |
3845 | uint8_t mbx_count; | 3855 | uint8_t mbx_count; |
3846 | uint8_t aen_mbx_count; | 3856 | uint8_t aen_mbx_count; |
3857 | atomic_t num_pend_mbx_stage1; | ||
3858 | atomic_t num_pend_mbx_stage2; | ||
3859 | atomic_t num_pend_mbx_stage3; | ||
3860 | uint16_t frame_payload_size; | ||
3847 | 3861 | ||
3848 | uint32_t login_retry_count; | 3862 | uint32_t login_retry_count; |
3849 | /* SNS command interfaces. */ | 3863 | /* SNS command interfaces. */ |
@@ -3903,6 +3917,9 @@ struct qla_hw_data { | |||
3903 | int exchoffld_size; | 3917 | int exchoffld_size; |
3904 | int exchoffld_count; | 3918 | int exchoffld_count; |
3905 | 3919 | ||
3920 | /* n2n */ | ||
3921 | struct els_plogi_payload plogi_els_payld; | ||
3922 | |||
3906 | void *swl; | 3923 | void *swl; |
3907 | 3924 | ||
3908 | /* These are used by mailbox operations. */ | 3925 | /* These are used by mailbox operations. */ |
@@ -4157,6 +4174,7 @@ struct qla_hw_data { | |||
4157 | struct work_struct board_disable; | 4174 | struct work_struct board_disable; |
4158 | 4175 | ||
4159 | struct mr_data_fx00 mr; | 4176 | struct mr_data_fx00 mr; |
4177 | uint32_t chip_reset; | ||
4160 | 4178 | ||
4161 | struct qlt_hw_data tgt; | 4179 | struct qlt_hw_data tgt; |
4162 | int allow_cna_fw_dump; | 4180 | int allow_cna_fw_dump; |
@@ -4238,7 +4256,7 @@ typedef struct scsi_qla_host { | |||
4238 | #define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */ | 4256 | #define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */ |
4239 | #define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */ | 4257 | #define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */ |
4240 | #define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */ | 4258 | #define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */ |
4241 | #define FREE_BIT 21 | 4259 | #define N2N_LINK_RESET 21 |
4242 | #define PORT_UPDATE_NEEDED 22 | 4260 | #define PORT_UPDATE_NEEDED 22 |
4243 | #define FX00_RESET_RECOVERY 23 | 4261 | #define FX00_RESET_RECOVERY 23 |
4244 | #define FX00_TARGET_SCAN 24 | 4262 | #define FX00_TARGET_SCAN 24 |
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 5d8688e5bc7c..50c1e6c62e31 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h | |||
@@ -1366,6 +1366,11 @@ struct vp_rpt_id_entry_24xx { | |||
1366 | /* format 1 fabric */ | 1366 | /* format 1 fabric */ |
1367 | uint8_t vpstat1_subcode; /* vp_status=1 subcode */ | 1367 | uint8_t vpstat1_subcode; /* vp_status=1 subcode */ |
1368 | uint8_t flags; | 1368 | uint8_t flags; |
1369 | #define TOPO_MASK 0xE | ||
1370 | #define TOPO_FL 0x2 | ||
1371 | #define TOPO_N2N 0x4 | ||
1372 | #define TOPO_F 0x6 | ||
1373 | |||
1369 | uint16_t fip_flags; | 1374 | uint16_t fip_flags; |
1370 | uint8_t rsv2[12]; | 1375 | uint8_t rsv2[12]; |
1371 | 1376 | ||
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 2660a48d918a..178974896b5c 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h | |||
@@ -45,8 +45,7 @@ extern int qla2x00_fabric_login(scsi_qla_host_t *, fc_port_t *, uint16_t *); | |||
45 | extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *); | 45 | extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *); |
46 | 46 | ||
47 | extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t); | 47 | extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t); |
48 | extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *, | 48 | extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *, bool); |
49 | port_id_t); | ||
50 | 49 | ||
51 | extern void qla2x00_update_fcports(scsi_qla_host_t *); | 50 | extern void qla2x00_update_fcports(scsi_qla_host_t *); |
52 | 51 | ||
@@ -118,6 +117,7 @@ extern int qla2x00_post_async_prlo_done_work(struct scsi_qla_host *, | |||
118 | fc_port_t *, uint16_t *); | 117 | fc_port_t *, uint16_t *); |
119 | int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport); | 118 | int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport); |
120 | void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport); | 119 | void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport); |
120 | int qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *); | ||
121 | /* | 121 | /* |
122 | * Global Data in qla_os.c source file. | 122 | * Global Data in qla_os.c source file. |
123 | */ | 123 | */ |
@@ -212,7 +212,7 @@ extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *); | |||
212 | void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *, | 212 | void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *, |
213 | uint16_t *); | 213 | uint16_t *); |
214 | int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *); | 214 | int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *); |
215 | int qla24xx_async_abort_cmd(srb_t *); | 215 | int qla24xx_async_abort_cmd(srb_t *, bool); |
216 | int qla24xx_post_relogin_work(struct scsi_qla_host *vha); | 216 | int qla24xx_post_relogin_work(struct scsi_qla_host *vha); |
217 | void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *); | 217 | void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *); |
218 | 218 | ||
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 7a3744006419..a0038d879b9d 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c | |||
@@ -1962,7 +1962,6 @@ qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha) | |||
1962 | void *entries; | 1962 | void *entries; |
1963 | struct ct_fdmiv2_hba_attr *eiter; | 1963 | struct ct_fdmiv2_hba_attr *eiter; |
1964 | struct qla_hw_data *ha = vha->hw; | 1964 | struct qla_hw_data *ha = vha->hw; |
1965 | struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb; | ||
1966 | struct new_utsname *p_sysid = NULL; | 1965 | struct new_utsname *p_sysid = NULL; |
1967 | 1966 | ||
1968 | /* Issue RHBA */ | 1967 | /* Issue RHBA */ |
@@ -2142,9 +2141,7 @@ qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha) | |||
2142 | /* MAX CT Payload Length */ | 2141 | /* MAX CT Payload Length */ |
2143 | eiter = entries + size; | 2142 | eiter = entries + size; |
2144 | eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH); | 2143 | eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH); |
2145 | eiter->a.max_ct_len = IS_FWI2_CAPABLE(ha) ? | 2144 | eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size); |
2146 | le16_to_cpu(icb24->frame_payload_size) : | ||
2147 | le16_to_cpu(ha->init_cb->frame_payload_size); | ||
2148 | eiter->a.max_ct_len = cpu_to_be32(eiter->a.max_ct_len); | 2145 | eiter->a.max_ct_len = cpu_to_be32(eiter->a.max_ct_len); |
2149 | eiter->len = cpu_to_be16(4 + 4); | 2146 | eiter->len = cpu_to_be16(4 + 4); |
2150 | size += 4 + 4; | 2147 | size += 4 + 4; |
@@ -3394,19 +3391,40 @@ int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id) | |||
3394 | 3391 | ||
3395 | void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp) | 3392 | void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp) |
3396 | { | 3393 | { |
3397 | if (sp->u.iocb_cmd.u.ctarg.req) { | 3394 | struct srb_iocb *c = &sp->u.iocb_cmd; |
3398 | dma_free_coherent(&vha->hw->pdev->dev, | 3395 | |
3399 | sp->u.iocb_cmd.u.ctarg.req_allocated_size, | 3396 | switch (sp->type) { |
3400 | sp->u.iocb_cmd.u.ctarg.req, | 3397 | case SRB_ELS_DCMD: |
3401 | sp->u.iocb_cmd.u.ctarg.req_dma); | 3398 | if (c->u.els_plogi.els_plogi_pyld) |
3402 | sp->u.iocb_cmd.u.ctarg.req = NULL; | 3399 | dma_free_coherent(&vha->hw->pdev->dev, |
3403 | } | 3400 | c->u.els_plogi.tx_size, |
3404 | if (sp->u.iocb_cmd.u.ctarg.rsp) { | 3401 | c->u.els_plogi.els_plogi_pyld, |
3405 | dma_free_coherent(&vha->hw->pdev->dev, | 3402 | c->u.els_plogi.els_plogi_pyld_dma); |
3406 | sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, | 3403 | |
3407 | sp->u.iocb_cmd.u.ctarg.rsp, | 3404 | if (c->u.els_plogi.els_resp_pyld) |
3408 | sp->u.iocb_cmd.u.ctarg.rsp_dma); | 3405 | dma_free_coherent(&vha->hw->pdev->dev, |
3409 | sp->u.iocb_cmd.u.ctarg.rsp = NULL; | 3406 | c->u.els_plogi.rx_size, |
3407 | c->u.els_plogi.els_resp_pyld, | ||
3408 | c->u.els_plogi.els_resp_pyld_dma); | ||
3409 | break; | ||
3410 | case SRB_CT_PTHRU_CMD: | ||
3411 | default: | ||
3412 | if (sp->u.iocb_cmd.u.ctarg.req) { | ||
3413 | dma_free_coherent(&vha->hw->pdev->dev, | ||
3414 | sp->u.iocb_cmd.u.ctarg.req_allocated_size, | ||
3415 | sp->u.iocb_cmd.u.ctarg.req, | ||
3416 | sp->u.iocb_cmd.u.ctarg.req_dma); | ||
3417 | sp->u.iocb_cmd.u.ctarg.req = NULL; | ||
3418 | } | ||
3419 | |||
3420 | if (sp->u.iocb_cmd.u.ctarg.rsp) { | ||
3421 | dma_free_coherent(&vha->hw->pdev->dev, | ||
3422 | sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, | ||
3423 | sp->u.iocb_cmd.u.ctarg.rsp, | ||
3424 | sp->u.iocb_cmd.u.ctarg.rsp_dma); | ||
3425 | sp->u.iocb_cmd.u.ctarg.rsp = NULL; | ||
3426 | } | ||
3427 | break; | ||
3410 | } | 3428 | } |
3411 | 3429 | ||
3412 | sp->free(sp); | 3430 | sp->free(sp); |
@@ -3483,6 +3501,14 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea) | |||
3483 | fcport->rscn_gen++; | 3501 | fcport->rscn_gen++; |
3484 | fcport->scan_state = QLA_FCPORT_FOUND; | 3502 | fcport->scan_state = QLA_FCPORT_FOUND; |
3485 | fcport->flags |= FCF_FABRIC_DEVICE; | 3503 | fcport->flags |= FCF_FABRIC_DEVICE; |
3504 | if (fcport->login_retry == 0) { | ||
3505 | fcport->login_retry = | ||
3506 | vha->hw->login_retry_count; | ||
3507 | ql_dbg(ql_dbg_disc, vha, 0xffff, | ||
3508 | "Port login retry %8phN, lid 0x%04x cnt=%d.\n", | ||
3509 | fcport->port_name, fcport->loop_id, | ||
3510 | fcport->login_retry); | ||
3511 | } | ||
3486 | switch (fcport->disc_state) { | 3512 | switch (fcport->disc_state) { |
3487 | case DSC_LOGIN_COMPLETE: | 3513 | case DSC_LOGIN_COMPLETE: |
3488 | /* recheck session is still intact. */ | 3514 | /* recheck session is still intact. */ |
@@ -3981,6 +4007,14 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) | |||
3981 | } else { | 4007 | } else { |
3982 | if (fcport->rscn_rcvd || | 4008 | if (fcport->rscn_rcvd || |
3983 | fcport->disc_state != DSC_LOGIN_COMPLETE) { | 4009 | fcport->disc_state != DSC_LOGIN_COMPLETE) { |
4010 | if (fcport->login_retry == 0) { | ||
4011 | fcport->login_retry = | ||
4012 | vha->hw->login_retry_count; | ||
4013 | ql_dbg(ql_dbg_disc, vha, 0x20a3, | ||
4014 | "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n", | ||
4015 | fcport->port_name, fcport->loop_id, | ||
4016 | fcport->login_retry); | ||
4017 | } | ||
3984 | fcport->rscn_rcvd = 0; | 4018 | fcport->rscn_rcvd = 0; |
3985 | qla24xx_fcport_handle_login(vha, fcport); | 4019 | qla24xx_fcport_handle_login(vha, fcport); |
3986 | } | 4020 | } |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 1b19b954bbae..b934977c5c26 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -50,16 +50,15 @@ qla2x00_sp_timeout(struct timer_list *t) | |||
50 | { | 50 | { |
51 | srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer); | 51 | srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer); |
52 | struct srb_iocb *iocb; | 52 | struct srb_iocb *iocb; |
53 | scsi_qla_host_t *vha = sp->vha; | ||
54 | struct req_que *req; | 53 | struct req_que *req; |
55 | unsigned long flags; | 54 | unsigned long flags; |
56 | 55 | ||
57 | spin_lock_irqsave(&vha->hw->hardware_lock, flags); | 56 | spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); |
58 | req = vha->hw->req_q_map[0]; | 57 | req = sp->qpair->req; |
59 | req->outstanding_cmds[sp->handle] = NULL; | 58 | req->outstanding_cmds[sp->handle] = NULL; |
60 | iocb = &sp->u.iocb_cmd; | 59 | iocb = &sp->u.iocb_cmd; |
60 | spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); | ||
61 | iocb->timeout(sp); | 61 | iocb->timeout(sp); |
62 | spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); | ||
63 | } | 62 | } |
64 | 63 | ||
65 | void | 64 | void |
@@ -100,6 +99,8 @@ qla2x00_async_iocb_timeout(void *data) | |||
100 | srb_t *sp = data; | 99 | srb_t *sp = data; |
101 | fc_port_t *fcport = sp->fcport; | 100 | fc_port_t *fcport = sp->fcport; |
102 | struct srb_iocb *lio = &sp->u.iocb_cmd; | 101 | struct srb_iocb *lio = &sp->u.iocb_cmd; |
102 | int rc, h; | ||
103 | unsigned long flags; | ||
103 | 104 | ||
104 | if (fcport) { | 105 | if (fcport) { |
105 | ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, | 106 | ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, |
@@ -114,11 +115,26 @@ qla2x00_async_iocb_timeout(void *data) | |||
114 | 115 | ||
115 | switch (sp->type) { | 116 | switch (sp->type) { |
116 | case SRB_LOGIN_CMD: | 117 | case SRB_LOGIN_CMD: |
117 | /* Retry as needed. */ | 118 | rc = qla24xx_async_abort_cmd(sp, false); |
118 | lio->u.logio.data[0] = MBS_COMMAND_ERROR; | 119 | if (rc) { |
119 | lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? | 120 | /* Retry as needed. */ |
120 | QLA_LOGIO_LOGIN_RETRIED : 0; | 121 | lio->u.logio.data[0] = MBS_COMMAND_ERROR; |
121 | sp->done(sp, QLA_FUNCTION_TIMEOUT); | 122 | lio->u.logio.data[1] = |
123 | lio->u.logio.flags & SRB_LOGIN_RETRIED ? | ||
124 | QLA_LOGIO_LOGIN_RETRIED : 0; | ||
125 | spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); | ||
126 | for (h = 1; h < sp->qpair->req->num_outstanding_cmds; | ||
127 | h++) { | ||
128 | if (sp->qpair->req->outstanding_cmds[h] == | ||
129 | sp) { | ||
130 | sp->qpair->req->outstanding_cmds[h] = | ||
131 | NULL; | ||
132 | break; | ||
133 | } | ||
134 | } | ||
135 | spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); | ||
136 | sp->done(sp, QLA_FUNCTION_TIMEOUT); | ||
137 | } | ||
122 | break; | 138 | break; |
123 | case SRB_LOGOUT_CMD: | 139 | case SRB_LOGOUT_CMD: |
124 | case SRB_CT_PTHRU_CMD: | 140 | case SRB_CT_PTHRU_CMD: |
@@ -127,7 +143,21 @@ qla2x00_async_iocb_timeout(void *data) | |||
127 | case SRB_NACK_PRLI: | 143 | case SRB_NACK_PRLI: |
128 | case SRB_NACK_LOGO: | 144 | case SRB_NACK_LOGO: |
129 | case SRB_CTRL_VP: | 145 | case SRB_CTRL_VP: |
130 | sp->done(sp, QLA_FUNCTION_TIMEOUT); | 146 | rc = qla24xx_async_abort_cmd(sp, false); |
147 | if (rc) { | ||
148 | spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); | ||
149 | for (h = 1; h < sp->qpair->req->num_outstanding_cmds; | ||
150 | h++) { | ||
151 | if (sp->qpair->req->outstanding_cmds[h] == | ||
152 | sp) { | ||
153 | sp->qpair->req->outstanding_cmds[h] = | ||
154 | NULL; | ||
155 | break; | ||
156 | } | ||
157 | } | ||
158 | spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); | ||
159 | sp->done(sp, QLA_FUNCTION_TIMEOUT); | ||
160 | } | ||
131 | break; | 161 | break; |
132 | } | 162 | } |
133 | } | 163 | } |
@@ -160,6 +190,22 @@ qla2x00_async_login_sp_done(void *ptr, int res) | |||
160 | sp->free(sp); | 190 | sp->free(sp); |
161 | } | 191 | } |
162 | 192 | ||
193 | static inline bool | ||
194 | fcport_is_smaller(fc_port_t *fcport) | ||
195 | { | ||
196 | if (wwn_to_u64(fcport->port_name) < | ||
197 | wwn_to_u64(fcport->vha->port_name)) | ||
198 | return true; | ||
199 | else | ||
200 | return false; | ||
201 | } | ||
202 | |||
203 | static inline bool | ||
204 | fcport_is_bigger(fc_port_t *fcport) | ||
205 | { | ||
206 | return !fcport_is_smaller(fcport); | ||
207 | } | ||
208 | |||
163 | int | 209 | int |
164 | qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, | 210 | qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, |
165 | uint16_t *data) | 211 | uint16_t *data) |
@@ -189,13 +235,16 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, | |||
189 | qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); | 235 | qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); |
190 | 236 | ||
191 | sp->done = qla2x00_async_login_sp_done; | 237 | sp->done = qla2x00_async_login_sp_done; |
192 | lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI; | 238 | if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) { |
239 | lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY; | ||
240 | } else { | ||
241 | lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI; | ||
193 | 242 | ||
194 | if (fcport->fc4f_nvme) | 243 | if (fcport->fc4f_nvme) |
195 | lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI; | 244 | lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI; |
245 | |||
246 | } | ||
196 | 247 | ||
197 | if (data[1] & QLA_LOGIO_LOGIN_RETRIED) | ||
198 | lio->u.logio.flags |= SRB_LOGIN_RETRIED; | ||
199 | rval = qla2x00_start_sp(sp); | 248 | rval = qla2x00_start_sp(sp); |
200 | if (rval != QLA_SUCCESS) { | 249 | if (rval != QLA_SUCCESS) { |
201 | fcport->flags |= FCF_LOGIN_NEEDED; | 250 | fcport->flags |= FCF_LOGIN_NEEDED; |
@@ -370,6 +419,19 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea) | |||
370 | __qla24xx_handle_gpdb_event(vha, ea); | 419 | __qla24xx_handle_gpdb_event(vha, ea); |
371 | } | 420 | } |
372 | 421 | ||
422 | int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport) | ||
423 | { | ||
424 | struct qla_work_evt *e; | ||
425 | |||
426 | e = qla2x00_alloc_work(vha, QLA_EVT_ELS_PLOGI); | ||
427 | if (!e) | ||
428 | return QLA_FUNCTION_FAILED; | ||
429 | |||
430 | e->u.fcport.fcport = fcport; | ||
431 | fcport->flags |= FCF_ASYNC_ACTIVE; | ||
432 | return qla2x00_post_work(vha, e); | ||
433 | } | ||
434 | |||
373 | static void | 435 | static void |
374 | qla2x00_async_adisc_sp_done(void *ptr, int res) | 436 | qla2x00_async_adisc_sp_done(void *ptr, int res) |
375 | { | 437 | { |
@@ -382,7 +444,7 @@ qla2x00_async_adisc_sp_done(void *ptr, int res) | |||
382 | "Async done-%s res %x %8phC\n", | 444 | "Async done-%s res %x %8phC\n", |
383 | sp->name, res, sp->fcport->port_name); | 445 | sp->name, res, sp->fcport->port_name); |
384 | 446 | ||
385 | sp->fcport->flags &= ~FCF_ASYNC_SENT; | 447 | sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); |
386 | 448 | ||
387 | memset(&ea, 0, sizeof(ea)); | 449 | memset(&ea, 0, sizeof(ea)); |
388 | ea.event = FCME_ADISC_DONE; | 450 | ea.event = FCME_ADISC_DONE; |
@@ -418,6 +480,8 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, | |||
418 | 480 | ||
419 | lio = &sp->u.iocb_cmd; | 481 | lio = &sp->u.iocb_cmd; |
420 | lio->timeout = qla2x00_async_iocb_timeout; | 482 | lio->timeout = qla2x00_async_iocb_timeout; |
483 | sp->gen1 = fcport->rscn_gen; | ||
484 | sp->gen2 = fcport->login_gen; | ||
421 | qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); | 485 | qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); |
422 | 486 | ||
423 | sp->done = qla2x00_async_adisc_sp_done; | 487 | sp->done = qla2x00_async_adisc_sp_done; |
@@ -464,7 +528,6 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, | |||
464 | 528 | ||
465 | if (ea->rc) { /* rval */ | 529 | if (ea->rc) { /* rval */ |
466 | if (fcport->login_retry == 0) { | 530 | if (fcport->login_retry == 0) { |
467 | fcport->login_retry = vha->hw->login_retry_count; | ||
468 | ql_dbg(ql_dbg_disc, vha, 0x20de, | 531 | ql_dbg(ql_dbg_disc, vha, 0x20de, |
469 | "GNL failed Port login retry %8phN, retry cnt=%d.\n", | 532 | "GNL failed Port login retry %8phN, retry cnt=%d.\n", |
470 | fcport->port_name, fcport->login_retry); | 533 | fcport->port_name, fcport->login_retry); |
@@ -497,35 +560,51 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, | |||
497 | for (i = 0; i < n; i++) { | 560 | for (i = 0; i < n; i++) { |
498 | e = &vha->gnl.l[i]; | 561 | e = &vha->gnl.l[i]; |
499 | wwn = wwn_to_u64(e->port_name); | 562 | wwn = wwn_to_u64(e->port_name); |
563 | id.b.domain = e->port_id[2]; | ||
564 | id.b.area = e->port_id[1]; | ||
565 | id.b.al_pa = e->port_id[0]; | ||
566 | id.b.rsvd_1 = 0; | ||
500 | 567 | ||
501 | if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE)) | 568 | if (memcmp((u8 *)&wwn, fcport->port_name, WWN_SIZE)) |
502 | continue; | 569 | continue; |
503 | 570 | ||
571 | if (IS_SW_RESV_ADDR(id)) | ||
572 | continue; | ||
573 | |||
504 | found = 1; | 574 | found = 1; |
505 | id.b.domain = e->port_id[2]; | ||
506 | id.b.area = e->port_id[1]; | ||
507 | id.b.al_pa = e->port_id[0]; | ||
508 | id.b.rsvd_1 = 0; | ||
509 | 575 | ||
510 | loop_id = le16_to_cpu(e->nport_handle); | 576 | loop_id = le16_to_cpu(e->nport_handle); |
511 | loop_id = (loop_id & 0x7fff); | 577 | loop_id = (loop_id & 0x7fff); |
578 | if (fcport->fc4f_nvme) | ||
579 | current_login_state = e->current_login_state >> 4; | ||
580 | else | ||
581 | current_login_state = e->current_login_state & 0xf; | ||
582 | |||
512 | 583 | ||
513 | ql_dbg(ql_dbg_disc, vha, 0x20e2, | 584 | ql_dbg(ql_dbg_disc, vha, 0x20e2, |
514 | "%s found %8phC CLS [%d|%d] ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n", | 585 | "%s found %8phC CLS [%x|%x] nvme %d ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n", |
515 | __func__, fcport->port_name, | 586 | __func__, fcport->port_name, |
516 | e->current_login_state, fcport->fw_login_state, | 587 | e->current_login_state, fcport->fw_login_state, |
517 | id.b.domain, id.b.area, id.b.al_pa, | 588 | fcport->fc4f_nvme, id.b.domain, id.b.area, id.b.al_pa, |
518 | fcport->d_id.b.domain, fcport->d_id.b.area, | 589 | fcport->d_id.b.domain, fcport->d_id.b.area, |
519 | fcport->d_id.b.al_pa, loop_id, fcport->loop_id); | 590 | fcport->d_id.b.al_pa, loop_id, fcport->loop_id); |
520 | 591 | ||
521 | if ((id.b24 != fcport->d_id.b24) || | 592 | switch (fcport->disc_state) { |
522 | ((fcport->loop_id != FC_NO_LOOP_ID) && | 593 | case DSC_DELETE_PEND: |
523 | (fcport->loop_id != loop_id))) { | 594 | case DSC_DELETED: |
524 | ql_dbg(ql_dbg_disc, vha, 0x20e3, | 595 | break; |
525 | "%s %d %8phC post del sess\n", | 596 | default: |
526 | __func__, __LINE__, fcport->port_name); | 597 | if ((id.b24 != fcport->d_id.b24 && |
527 | qlt_schedule_sess_for_deletion(fcport); | 598 | fcport->d_id.b24) || |
528 | return; | 599 | (fcport->loop_id != FC_NO_LOOP_ID && |
600 | fcport->loop_id != loop_id)) { | ||
601 | ql_dbg(ql_dbg_disc, vha, 0x20e3, | ||
602 | "%s %d %8phC post del sess\n", | ||
603 | __func__, __LINE__, fcport->port_name); | ||
604 | qlt_schedule_sess_for_deletion(fcport); | ||
605 | return; | ||
606 | } | ||
607 | break; | ||
529 | } | 608 | } |
530 | 609 | ||
531 | fcport->loop_id = loop_id; | 610 | fcport->loop_id = loop_id; |
@@ -544,68 +623,148 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, | |||
544 | fcport->login_pause = 1; | 623 | fcport->login_pause = 1; |
545 | } | 624 | } |
546 | 625 | ||
547 | if (fcport->fc4f_nvme) | 626 | switch (vha->hw->current_topology) { |
548 | current_login_state = e->current_login_state >> 4; | 627 | default: |
549 | else | 628 | switch (current_login_state) { |
550 | current_login_state = e->current_login_state & 0xf; | 629 | case DSC_LS_PRLI_COMP: |
551 | 630 | ql_dbg(ql_dbg_disc + ql_dbg_verbose, | |
552 | switch (current_login_state) { | 631 | vha, 0x20e4, "%s %d %8phC post gpdb\n", |
553 | case DSC_LS_PRLI_COMP: | 632 | __func__, __LINE__, fcport->port_name); |
554 | ql_dbg(ql_dbg_disc, vha, 0x20e4, | ||
555 | "%s %d %8phC post gpdb\n", | ||
556 | __func__, __LINE__, fcport->port_name); | ||
557 | 633 | ||
558 | if ((e->prli_svc_param_word_3[0] & BIT_4) == 0) | 634 | if ((e->prli_svc_param_word_3[0] & BIT_4) == 0) |
559 | fcport->port_type = FCT_INITIATOR; | 635 | fcport->port_type = FCT_INITIATOR; |
560 | else | 636 | else |
561 | fcport->port_type = FCT_TARGET; | 637 | fcport->port_type = FCT_TARGET; |
638 | data[0] = data[1] = 0; | ||
639 | qla2x00_post_async_adisc_work(vha, fcport, | ||
640 | data); | ||
641 | break; | ||
642 | case DSC_LS_PORT_UNAVAIL: | ||
643 | default: | ||
644 | if (fcport->loop_id != FC_NO_LOOP_ID) | ||
645 | qla2x00_clear_loop_id(fcport); | ||
562 | 646 | ||
563 | data[0] = data[1] = 0; | 647 | fcport->loop_id = loop_id; |
564 | qla2x00_post_async_adisc_work(vha, fcport, data); | ||
565 | break; | ||
566 | case DSC_LS_PORT_UNAVAIL: | ||
567 | default: | ||
568 | if (fcport->loop_id == FC_NO_LOOP_ID) { | ||
569 | qla2x00_find_new_loop_id(vha, fcport); | ||
570 | fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; | 648 | fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; |
649 | qla24xx_fcport_handle_login(vha, fcport); | ||
650 | break; | ||
571 | } | 651 | } |
572 | ql_dbg(ql_dbg_disc, vha, 0x20e5, | ||
573 | "%s %d %8phC\n", | ||
574 | __func__, __LINE__, fcport->port_name); | ||
575 | qla24xx_fcport_handle_login(vha, fcport); | ||
576 | break; | 652 | break; |
577 | } | 653 | case ISP_CFG_N: |
654 | fcport->fw_login_state = current_login_state; | ||
655 | fcport->d_id = id; | ||
656 | switch (current_login_state) { | ||
657 | case DSC_LS_PRLI_COMP: | ||
658 | if ((e->prli_svc_param_word_3[0] & BIT_4) == 0) | ||
659 | fcport->port_type = FCT_INITIATOR; | ||
660 | else | ||
661 | fcport->port_type = FCT_TARGET; | ||
662 | |||
663 | data[0] = data[1] = 0; | ||
664 | qla2x00_post_async_adisc_work(vha, fcport, | ||
665 | data); | ||
666 | break; | ||
667 | case DSC_LS_PLOGI_COMP: | ||
668 | if (fcport_is_bigger(fcport)) { | ||
669 | /* local adapter is smaller */ | ||
670 | if (fcport->loop_id != FC_NO_LOOP_ID) | ||
671 | qla2x00_clear_loop_id(fcport); | ||
672 | |||
673 | fcport->loop_id = loop_id; | ||
674 | qla24xx_fcport_handle_login(vha, | ||
675 | fcport); | ||
676 | break; | ||
677 | } | ||
678 | /* drop through */ | ||
679 | default: | ||
680 | if (fcport_is_smaller(fcport)) { | ||
681 | /* local adapter is bigger */ | ||
682 | if (fcport->loop_id != FC_NO_LOOP_ID) | ||
683 | qla2x00_clear_loop_id(fcport); | ||
684 | |||
685 | fcport->loop_id = loop_id; | ||
686 | qla24xx_fcport_handle_login(vha, | ||
687 | fcport); | ||
688 | } | ||
689 | break; | ||
690 | } | ||
691 | break; | ||
692 | } /* switch (ha->current_topology) */ | ||
578 | } | 693 | } |
579 | 694 | ||
580 | if (!found) { | 695 | if (!found) { |
581 | /* fw has no record of this port */ | 696 | switch (vha->hw->current_topology) { |
582 | for (i = 0; i < n; i++) { | 697 | case ISP_CFG_F: |
583 | e = &vha->gnl.l[i]; | 698 | case ISP_CFG_FL: |
584 | id.b.domain = e->port_id[0]; | 699 | for (i = 0; i < n; i++) { |
585 | id.b.area = e->port_id[1]; | 700 | e = &vha->gnl.l[i]; |
586 | id.b.al_pa = e->port_id[2]; | 701 | id.b.domain = e->port_id[0]; |
587 | id.b.rsvd_1 = 0; | 702 | id.b.area = e->port_id[1]; |
588 | loop_id = le16_to_cpu(e->nport_handle); | 703 | id.b.al_pa = e->port_id[2]; |
589 | 704 | id.b.rsvd_1 = 0; | |
590 | if (fcport->d_id.b24 == id.b24) { | 705 | loop_id = le16_to_cpu(e->nport_handle); |
591 | conflict_fcport = | 706 | |
592 | qla2x00_find_fcport_by_wwpn(vha, | 707 | if (fcport->d_id.b24 == id.b24) { |
593 | e->port_name, 0); | 708 | conflict_fcport = |
594 | if (conflict_fcport) { | 709 | qla2x00_find_fcport_by_wwpn(vha, |
595 | qlt_schedule_sess_for_deletion | 710 | e->port_name, 0); |
596 | (conflict_fcport); | 711 | if (conflict_fcport) { |
597 | ql_dbg(ql_dbg_disc, vha, 0x20e6, | 712 | ql_dbg(ql_dbg_disc + ql_dbg_verbose, |
598 | "%s %d %8phC post del sess\n", | 713 | vha, 0x20e5, |
599 | __func__, __LINE__, | 714 | "%s %d %8phC post del sess\n", |
600 | conflict_fcport->port_name); | 715 | __func__, __LINE__, |
716 | conflict_fcport->port_name); | ||
717 | qlt_schedule_sess_for_deletion | ||
718 | (conflict_fcport); | ||
719 | } | ||
601 | } | 720 | } |
721 | /* | ||
722 | * FW already picked this loop id for | ||
723 | * another fcport | ||
724 | */ | ||
725 | if (fcport->loop_id == loop_id) | ||
726 | fcport->loop_id = FC_NO_LOOP_ID; | ||
602 | } | 727 | } |
603 | 728 | qla24xx_fcport_handle_login(vha, fcport); | |
604 | /* FW already picked this loop id for another fcport */ | 729 | break; |
605 | if (fcport->loop_id == loop_id) | 730 | case ISP_CFG_N: |
606 | fcport->loop_id = FC_NO_LOOP_ID; | 731 | fcport->disc_state = DSC_DELETED; |
732 | if (time_after_eq(jiffies, fcport->dm_login_expire)) { | ||
733 | if (fcport->n2n_link_reset_cnt < 2) { | ||
734 | fcport->n2n_link_reset_cnt++; | ||
735 | /* | ||
736 | * remote port is not sending PLOGI. | ||
737 | * Reset link to kick start his state | ||
738 | * machine | ||
739 | */ | ||
740 | set_bit(N2N_LINK_RESET, | ||
741 | &vha->dpc_flags); | ||
742 | } else { | ||
743 | if (fcport->n2n_chip_reset < 1) { | ||
744 | ql_log(ql_log_info, vha, 0x705d, | ||
745 | "Chip reset to bring laser down"); | ||
746 | set_bit(ISP_ABORT_NEEDED, | ||
747 | &vha->dpc_flags); | ||
748 | fcport->n2n_chip_reset++; | ||
749 | } else { | ||
750 | ql_log(ql_log_info, vha, 0x705d, | ||
751 | "Remote port %8ph is not coming back\n", | ||
752 | fcport->port_name); | ||
753 | fcport->scan_state = 0; | ||
754 | } | ||
755 | } | ||
756 | qla2xxx_wake_dpc(vha); | ||
757 | } else { | ||
758 | /* | ||
759 | * remote port is supposed to do PLOGI. Give | ||
760 | * it more time. FW will catch it. | ||
761 | */ | ||
762 | set_bit(RELOGIN_NEEDED, &vha->dpc_flags); | ||
763 | } | ||
764 | break; | ||
765 | default: | ||
766 | break; | ||
607 | } | 767 | } |
608 | qla24xx_fcport_handle_login(vha, fcport); | ||
609 | } | 768 | } |
610 | } /* gnl_event */ | 769 | } /* gnl_event */ |
611 | 770 | ||
@@ -911,9 +1070,9 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport) | |||
911 | } | 1070 | } |
912 | 1071 | ||
913 | ql_dbg(ql_dbg_disc, vha, 0x211b, | 1072 | ql_dbg(ql_dbg_disc, vha, 0x211b, |
914 | "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d.\n", | 1073 | "Async-prli - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n", |
915 | fcport->port_name, sp->handle, fcport->loop_id, | 1074 | fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24, |
916 | fcport->d_id.b24, fcport->login_retry); | 1075 | fcport->login_retry, fcport->fc4f_nvme ? "nvme" : "fc"); |
917 | 1076 | ||
918 | return rval; | 1077 | return rval; |
919 | 1078 | ||
@@ -1055,8 +1214,9 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) | |||
1055 | fcport->flags &= ~FCF_ASYNC_SENT; | 1214 | fcport->flags &= ~FCF_ASYNC_SENT; |
1056 | 1215 | ||
1057 | ql_dbg(ql_dbg_disc, vha, 0x20d2, | 1216 | ql_dbg(ql_dbg_disc, vha, 0x20d2, |
1058 | "%s %8phC DS %d LS %d rc %d\n", __func__, fcport->port_name, | 1217 | "%s %8phC DS %d LS %d nvme %x rc %d\n", __func__, fcport->port_name, |
1059 | fcport->disc_state, pd->current_login_state, ea->rc); | 1218 | fcport->disc_state, pd->current_login_state, fcport->fc4f_nvme, |
1219 | ea->rc); | ||
1060 | 1220 | ||
1061 | if (fcport->disc_state == DSC_DELETE_PEND) | 1221 | if (fcport->disc_state == DSC_DELETE_PEND) |
1062 | return; | 1222 | return; |
@@ -1074,9 +1234,12 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) | |||
1074 | case PDS_PLOGI_COMPLETE: | 1234 | case PDS_PLOGI_COMPLETE: |
1075 | case PDS_PRLI_PENDING: | 1235 | case PDS_PRLI_PENDING: |
1076 | case PDS_PRLI2_PENDING: | 1236 | case PDS_PRLI2_PENDING: |
1077 | ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC relogin needed\n", | 1237 | /* Set discovery state back to GNL for relogin attempt */ |
1078 | __func__, __LINE__, fcport->port_name); | 1238 | if (qla_dual_mode_enabled(vha) || |
1079 | set_bit(RELOGIN_NEEDED, &vha->dpc_flags); | 1239 | qla_ini_mode_enabled(vha)) { |
1240 | fcport->disc_state = DSC_GNL; | ||
1241 | set_bit(RELOGIN_NEEDED, &vha->dpc_flags); | ||
1242 | } | ||
1080 | return; | 1243 | return; |
1081 | case PDS_LOGO_PENDING: | 1244 | case PDS_LOGO_PENDING: |
1082 | case PDS_PORT_UNAVAILABLE: | 1245 | case PDS_PORT_UNAVAILABLE: |
@@ -1174,39 +1337,80 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) | |||
1174 | return 0; | 1337 | return 0; |
1175 | } | 1338 | } |
1176 | 1339 | ||
1177 | if (fcport->login_retry > 0) | ||
1178 | fcport->login_retry--; | ||
1179 | |||
1180 | switch (fcport->disc_state) { | 1340 | switch (fcport->disc_state) { |
1181 | case DSC_DELETED: | 1341 | case DSC_DELETED: |
1182 | wwn = wwn_to_u64(fcport->node_name); | 1342 | wwn = wwn_to_u64(fcport->node_name); |
1183 | if (wwn == 0) { | 1343 | switch (vha->hw->current_topology) { |
1184 | ql_dbg(ql_dbg_disc, vha, 0xffff, | 1344 | case ISP_CFG_N: |
1185 | "%s %d %8phC post GNNID\n", | 1345 | if (fcport_is_smaller(fcport)) { |
1186 | __func__, __LINE__, fcport->port_name); | 1346 | /* this adapter is bigger */ |
1187 | qla24xx_post_gnnid_work(vha, fcport); | 1347 | if (fcport->login_retry) { |
1188 | } else if (fcport->loop_id == FC_NO_LOOP_ID) { | 1348 | if (fcport->loop_id == FC_NO_LOOP_ID) { |
1189 | ql_dbg(ql_dbg_disc, vha, 0x20bd, | 1349 | qla2x00_find_new_loop_id(vha, |
1190 | "%s %d %8phC post gnl\n", | 1350 | fcport); |
1191 | __func__, __LINE__, fcport->port_name); | 1351 | fcport->fw_login_state = |
1192 | qla24xx_post_gnl_work(vha, fcport); | 1352 | DSC_LS_PORT_UNAVAIL; |
1193 | } else { | 1353 | } |
1194 | qla_chk_n2n_b4_login(vha, fcport); | 1354 | fcport->login_retry--; |
1355 | qla_post_els_plogi_work(vha, fcport); | ||
1356 | } else { | ||
1357 | ql_log(ql_log_info, vha, 0x705d, | ||
1358 | "Unable to reach remote port %8phC", | ||
1359 | fcport->port_name); | ||
1360 | } | ||
1361 | } else { | ||
1362 | qla24xx_post_gnl_work(vha, fcport); | ||
1363 | } | ||
1364 | break; | ||
1365 | default: | ||
1366 | if (wwn == 0) { | ||
1367 | ql_dbg(ql_dbg_disc, vha, 0xffff, | ||
1368 | "%s %d %8phC post GNNID\n", | ||
1369 | __func__, __LINE__, fcport->port_name); | ||
1370 | qla24xx_post_gnnid_work(vha, fcport); | ||
1371 | } else if (fcport->loop_id == FC_NO_LOOP_ID) { | ||
1372 | ql_dbg(ql_dbg_disc, vha, 0x20bd, | ||
1373 | "%s %d %8phC post gnl\n", | ||
1374 | __func__, __LINE__, fcport->port_name); | ||
1375 | qla24xx_post_gnl_work(vha, fcport); | ||
1376 | } else { | ||
1377 | qla_chk_n2n_b4_login(vha, fcport); | ||
1378 | } | ||
1379 | break; | ||
1195 | } | 1380 | } |
1196 | break; | 1381 | break; |
1197 | 1382 | ||
1198 | case DSC_GNL: | 1383 | case DSC_GNL: |
1199 | if (fcport->login_pause) { | 1384 | switch (vha->hw->current_topology) { |
1200 | fcport->last_rscn_gen = fcport->rscn_gen; | 1385 | case ISP_CFG_N: |
1201 | fcport->last_login_gen = fcport->login_gen; | 1386 | if ((fcport->current_login_state & 0xf) == 0x6) { |
1202 | set_bit(RELOGIN_NEEDED, &vha->dpc_flags); | 1387 | ql_dbg(ql_dbg_disc, vha, 0x2118, |
1388 | "%s %d %8phC post GPDB work\n", | ||
1389 | __func__, __LINE__, fcport->port_name); | ||
1390 | fcport->chip_reset = | ||
1391 | vha->hw->base_qpair->chip_reset; | ||
1392 | qla24xx_post_gpdb_work(vha, fcport, 0); | ||
1393 | } else { | ||
1394 | ql_dbg(ql_dbg_disc, vha, 0x2118, | ||
1395 | "%s %d %8phC post NVMe PRLI\n", | ||
1396 | __func__, __LINE__, fcport->port_name); | ||
1397 | qla24xx_post_prli_work(vha, fcport); | ||
1398 | } | ||
1399 | break; | ||
1400 | default: | ||
1401 | if (fcport->login_pause) { | ||
1402 | fcport->last_rscn_gen = fcport->rscn_gen; | ||
1403 | fcport->last_login_gen = fcport->login_gen; | ||
1404 | set_bit(RELOGIN_NEEDED, &vha->dpc_flags); | ||
1405 | break; | ||
1406 | } | ||
1407 | qla_chk_n2n_b4_login(vha, fcport); | ||
1203 | break; | 1408 | break; |
1204 | } | 1409 | } |
1205 | |||
1206 | qla_chk_n2n_b4_login(vha, fcport); | ||
1207 | break; | 1410 | break; |
1208 | 1411 | ||
1209 | case DSC_LOGIN_FAILED: | 1412 | case DSC_LOGIN_FAILED: |
1413 | fcport->login_retry--; | ||
1210 | ql_dbg(ql_dbg_disc, vha, 0x20d0, | 1414 | ql_dbg(ql_dbg_disc, vha, 0x20d0, |
1211 | "%s %d %8phC post gidpn\n", | 1415 | "%s %d %8phC post gidpn\n", |
1212 | __func__, __LINE__, fcport->port_name); | 1416 | __func__, __LINE__, fcport->port_name); |
@@ -1221,6 +1425,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) | |||
1221 | ql_dbg(ql_dbg_disc, vha, 0x20d1, | 1425 | ql_dbg(ql_dbg_disc, vha, 0x20d1, |
1222 | "%s %d %8phC post adisc\n", | 1426 | "%s %d %8phC post adisc\n", |
1223 | __func__, __LINE__, fcport->port_name); | 1427 | __func__, __LINE__, fcport->port_name); |
1428 | fcport->login_retry--; | ||
1224 | data[0] = data[1] = 0; | 1429 | data[0] = data[1] = 0; |
1225 | qla2x00_post_async_adisc_work(vha, fcport, data); | 1430 | qla2x00_post_async_adisc_work(vha, fcport, data); |
1226 | break; | 1431 | break; |
@@ -1304,17 +1509,6 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, | |||
1304 | } | 1509 | } |
1305 | } | 1510 | } |
1306 | 1511 | ||
1307 | if (fcport->flags & FCF_ASYNC_SENT) { | ||
1308 | fcport->login_retry++; | ||
1309 | set_bit(RELOGIN_NEEDED, &vha->dpc_flags); | ||
1310 | return; | ||
1311 | } | ||
1312 | |||
1313 | if (fcport->disc_state == DSC_DELETE_PEND) { | ||
1314 | fcport->login_retry++; | ||
1315 | return; | ||
1316 | } | ||
1317 | |||
1318 | if (fcport->last_rscn_gen != fcport->rscn_gen) { | 1512 | if (fcport->last_rscn_gen != fcport->rscn_gen) { |
1319 | ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n", | 1513 | ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n", |
1320 | __func__, __LINE__, fcport->port_name); | 1514 | __func__, __LINE__, fcport->port_name); |
@@ -1326,6 +1520,15 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, | |||
1326 | qla24xx_fcport_handle_login(vha, fcport); | 1520 | qla24xx_fcport_handle_login(vha, fcport); |
1327 | } | 1521 | } |
1328 | 1522 | ||
1523 | |||
1524 | void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea) | ||
1525 | { | ||
1526 | ql_dbg(ql_dbg_disc, vha, 0x2118, | ||
1527 | "%s %d %8phC post PRLI\n", | ||
1528 | __func__, __LINE__, ea->fcport->port_name); | ||
1529 | qla24xx_post_prli_work(vha, ea->fcport); | ||
1530 | } | ||
1531 | |||
1329 | void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea) | 1532 | void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea) |
1330 | { | 1533 | { |
1331 | fc_port_t *f, *tf; | 1534 | fc_port_t *f, *tf; |
@@ -1427,6 +1630,9 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea) | |||
1427 | case FCME_GFPNID_DONE: | 1630 | case FCME_GFPNID_DONE: |
1428 | qla24xx_handle_gfpnid_event(vha, ea); | 1631 | qla24xx_handle_gfpnid_event(vha, ea); |
1429 | break; | 1632 | break; |
1633 | case FCME_ELS_PLOGI_DONE: | ||
1634 | qla_handle_els_plogi_done(vha, ea); | ||
1635 | break; | ||
1430 | default: | 1636 | default: |
1431 | BUG_ON(1); | 1637 | BUG_ON(1); |
1432 | break; | 1638 | break; |
@@ -1520,7 +1726,7 @@ qla24xx_abort_iocb_timeout(void *data) | |||
1520 | struct srb_iocb *abt = &sp->u.iocb_cmd; | 1726 | struct srb_iocb *abt = &sp->u.iocb_cmd; |
1521 | 1727 | ||
1522 | abt->u.abt.comp_status = CS_TIMEOUT; | 1728 | abt->u.abt.comp_status = CS_TIMEOUT; |
1523 | complete(&abt->u.abt.comp); | 1729 | sp->done(sp, QLA_FUNCTION_TIMEOUT); |
1524 | } | 1730 | } |
1525 | 1731 | ||
1526 | static void | 1732 | static void |
@@ -1529,12 +1735,16 @@ qla24xx_abort_sp_done(void *ptr, int res) | |||
1529 | srb_t *sp = ptr; | 1735 | srb_t *sp = ptr; |
1530 | struct srb_iocb *abt = &sp->u.iocb_cmd; | 1736 | struct srb_iocb *abt = &sp->u.iocb_cmd; |
1531 | 1737 | ||
1532 | if (del_timer(&sp->u.iocb_cmd.timer)) | 1738 | if (del_timer(&sp->u.iocb_cmd.timer)) { |
1533 | complete(&abt->u.abt.comp); | 1739 | if (sp->flags & SRB_WAKEUP_ON_COMP) |
1740 | complete(&abt->u.abt.comp); | ||
1741 | else | ||
1742 | sp->free(sp); | ||
1743 | } | ||
1534 | } | 1744 | } |
1535 | 1745 | ||
1536 | int | 1746 | int |
1537 | qla24xx_async_abort_cmd(srb_t *cmd_sp) | 1747 | qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) |
1538 | { | 1748 | { |
1539 | scsi_qla_host_t *vha = cmd_sp->vha; | 1749 | scsi_qla_host_t *vha = cmd_sp->vha; |
1540 | fc_port_t *fcport = cmd_sp->fcport; | 1750 | fc_port_t *fcport = cmd_sp->fcport; |
@@ -1549,6 +1759,8 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp) | |||
1549 | abt_iocb = &sp->u.iocb_cmd; | 1759 | abt_iocb = &sp->u.iocb_cmd; |
1550 | sp->type = SRB_ABT_CMD; | 1760 | sp->type = SRB_ABT_CMD; |
1551 | sp->name = "abort"; | 1761 | sp->name = "abort"; |
1762 | if (wait) | ||
1763 | sp->flags = SRB_WAKEUP_ON_COMP; | ||
1552 | 1764 | ||
1553 | abt_iocb->timeout = qla24xx_abort_iocb_timeout; | 1765 | abt_iocb->timeout = qla24xx_abort_iocb_timeout; |
1554 | init_completion(&abt_iocb->u.abt.comp); | 1766 | init_completion(&abt_iocb->u.abt.comp); |
@@ -1572,10 +1784,11 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp) | |||
1572 | "Abort command issued - hdl=%x, target_id=%x\n", | 1784 | "Abort command issued - hdl=%x, target_id=%x\n", |
1573 | cmd_sp->handle, fcport->tgt_id); | 1785 | cmd_sp->handle, fcport->tgt_id); |
1574 | 1786 | ||
1575 | wait_for_completion(&abt_iocb->u.abt.comp); | 1787 | if (wait) { |
1576 | 1788 | wait_for_completion(&abt_iocb->u.abt.comp); | |
1577 | rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ? | 1789 | rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ? |
1578 | QLA_SUCCESS : QLA_FUNCTION_FAILED; | 1790 | QLA_SUCCESS : QLA_FUNCTION_FAILED; |
1791 | } | ||
1579 | 1792 | ||
1580 | done_free_sp: | 1793 | done_free_sp: |
1581 | sp->free(sp); | 1794 | sp->free(sp); |
@@ -1611,7 +1824,7 @@ qla24xx_async_abort_command(srb_t *sp) | |||
1611 | return qlafx00_fx_disc(vha, &vha->hw->mr.fcport, | 1824 | return qlafx00_fx_disc(vha, &vha->hw->mr.fcport, |
1612 | FXDISC_ABORT_IOCTL); | 1825 | FXDISC_ABORT_IOCTL); |
1613 | 1826 | ||
1614 | return qla24xx_async_abort_cmd(sp); | 1827 | return qla24xx_async_abort_cmd(sp, true); |
1615 | } | 1828 | } |
1616 | 1829 | ||
1617 | static void | 1830 | static void |
@@ -1799,7 +2012,6 @@ void | |||
1799 | qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport, | 2012 | qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport, |
1800 | uint16_t *data) | 2013 | uint16_t *data) |
1801 | { | 2014 | { |
1802 | qla2x00_mark_device_lost(vha, fcport, 1, 0); | ||
1803 | qlt_logo_completion_handler(fcport, data[0]); | 2015 | qlt_logo_completion_handler(fcport, data[0]); |
1804 | fcport->login_gen++; | 2016 | fcport->login_gen++; |
1805 | fcport->flags &= ~FCF_ASYNC_ACTIVE; | 2017 | fcport->flags &= ~FCF_ASYNC_ACTIVE; |
@@ -4050,7 +4262,8 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) | |||
4050 | id.b.al_pa = al_pa; | 4262 | id.b.al_pa = al_pa; |
4051 | id.b.rsvd_1 = 0; | 4263 | id.b.rsvd_1 = 0; |
4052 | spin_lock_irqsave(&ha->hardware_lock, flags); | 4264 | spin_lock_irqsave(&ha->hardware_lock, flags); |
4053 | qlt_update_host_map(vha, id); | 4265 | if (!(topo == 2 && ha->flags.n2n_bigger)) |
4266 | qlt_update_host_map(vha, id); | ||
4054 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 4267 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
4055 | 4268 | ||
4056 | if (!vha->flags.init_done) | 4269 | if (!vha->flags.init_done) |
@@ -4308,7 +4521,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha) | |||
4308 | cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options; | 4521 | cnt = (uint8_t *)icb->reserved_3 - (uint8_t *)icb->add_firmware_options; |
4309 | while (cnt--) | 4522 | while (cnt--) |
4310 | *dptr1++ = *dptr2++; | 4523 | *dptr1++ = *dptr2++; |
4311 | 4524 | ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size); | |
4312 | /* Use alternate WWN? */ | 4525 | /* Use alternate WWN? */ |
4313 | if (nv->host_p[1] & BIT_7) { | 4526 | if (nv->host_p[1] & BIT_7) { |
4314 | memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); | 4527 | memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); |
@@ -4591,20 +4804,10 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) | |||
4591 | 4804 | ||
4592 | } else if (ha->current_topology == ISP_CFG_N) { | 4805 | } else if (ha->current_topology == ISP_CFG_N) { |
4593 | clear_bit(RSCN_UPDATE, &flags); | 4806 | clear_bit(RSCN_UPDATE, &flags); |
4594 | if (ha->flags.rida_fmt2) { | 4807 | if (qla_tgt_mode_enabled(vha)) { |
4595 | /* With Rida Format 2, the login is already triggered. | 4808 | /* allow the other side to start the login */ |
4596 | * We know who is on the other side of the wire. | ||
4597 | * No need to login to do login to find out or drop into | ||
4598 | * qla2x00_configure_local_loop(). | ||
4599 | */ | ||
4600 | clear_bit(LOCAL_LOOP_UPDATE, &flags); | 4809 | clear_bit(LOCAL_LOOP_UPDATE, &flags); |
4601 | set_bit(RELOGIN_NEEDED, &vha->dpc_flags); | 4810 | set_bit(RELOGIN_NEEDED, &vha->dpc_flags); |
4602 | } else { | ||
4603 | if (qla_tgt_mode_enabled(vha)) { | ||
4604 | /* allow the other side to start the login */ | ||
4605 | clear_bit(LOCAL_LOOP_UPDATE, &flags); | ||
4606 | set_bit(RELOGIN_NEEDED, &vha->dpc_flags); | ||
4607 | } | ||
4608 | } | 4811 | } |
4609 | } else if (ha->current_topology == ISP_CFG_NL) { | 4812 | } else if (ha->current_topology == ISP_CFG_NL) { |
4610 | clear_bit(RSCN_UPDATE, &flags); | 4813 | clear_bit(RSCN_UPDATE, &flags); |
@@ -4688,110 +4891,6 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) | |||
4688 | } | 4891 | } |
4689 | 4892 | ||
4690 | /* | 4893 | /* |
4691 | * N2N Login | ||
4692 | * Updates Fibre Channel Device Database with local loop devices. | ||
4693 | * | ||
4694 | * Input: | ||
4695 | * ha = adapter block pointer. | ||
4696 | * | ||
4697 | * Returns: | ||
4698 | */ | ||
4699 | static int qla24xx_n2n_handle_login(struct scsi_qla_host *vha, | ||
4700 | fc_port_t *fcport) | ||
4701 | { | ||
4702 | struct qla_hw_data *ha = vha->hw; | ||
4703 | int res = QLA_SUCCESS, rval; | ||
4704 | int greater_wwpn = 0; | ||
4705 | int logged_in = 0; | ||
4706 | |||
4707 | if (ha->current_topology != ISP_CFG_N) | ||
4708 | return res; | ||
4709 | |||
4710 | if (wwn_to_u64(vha->port_name) > | ||
4711 | wwn_to_u64(vha->n2n_port_name)) { | ||
4712 | ql_dbg(ql_dbg_disc, vha, 0x2002, | ||
4713 | "HBA WWPN is greater %llx > target %llx\n", | ||
4714 | wwn_to_u64(vha->port_name), | ||
4715 | wwn_to_u64(vha->n2n_port_name)); | ||
4716 | greater_wwpn = 1; | ||
4717 | fcport->d_id.b24 = vha->n2n_id; | ||
4718 | } | ||
4719 | |||
4720 | fcport->loop_id = vha->loop_id; | ||
4721 | fcport->fc4f_nvme = 0; | ||
4722 | fcport->query = 1; | ||
4723 | |||
4724 | ql_dbg(ql_dbg_disc, vha, 0x4001, | ||
4725 | "Initiate N2N login handler: HBA port_id=%06x loopid=%d\n", | ||
4726 | fcport->d_id.b24, vha->loop_id); | ||
4727 | |||
4728 | /* Fill in member data. */ | ||
4729 | if (!greater_wwpn) { | ||
4730 | rval = qla2x00_get_port_database(vha, fcport, 0); | ||
4731 | ql_dbg(ql_dbg_disc, vha, 0x1051, | ||
4732 | "Remote login-state (%x/%x) port_id=%06x loop_id=%x, rval=%d\n", | ||
4733 | fcport->current_login_state, fcport->last_login_state, | ||
4734 | fcport->d_id.b24, fcport->loop_id, rval); | ||
4735 | |||
4736 | if (((fcport->current_login_state & 0xf) == 0x4) || | ||
4737 | ((fcport->current_login_state & 0xf) == 0x6)) | ||
4738 | logged_in = 1; | ||
4739 | } | ||
4740 | |||
4741 | if (logged_in || greater_wwpn) { | ||
4742 | if (!vha->nvme_local_port && vha->flags.nvme_enabled) | ||
4743 | qla_nvme_register_hba(vha); | ||
4744 | |||
4745 | /* Set connected N_Port d_id */ | ||
4746 | if (vha->flags.nvme_enabled) | ||
4747 | fcport->fc4f_nvme = 1; | ||
4748 | |||
4749 | fcport->scan_state = QLA_FCPORT_FOUND; | ||
4750 | fcport->fw_login_state = DSC_LS_PORT_UNAVAIL; | ||
4751 | fcport->disc_state = DSC_GNL; | ||
4752 | fcport->n2n_flag = 1; | ||
4753 | fcport->flags = 3; | ||
4754 | vha->hw->flags.gpsc_supported = 0; | ||
4755 | |||
4756 | if (greater_wwpn) { | ||
4757 | ql_dbg(ql_dbg_disc, vha, 0x20e5, | ||
4758 | "%s %d PLOGI ELS %8phC\n", | ||
4759 | __func__, __LINE__, fcport->port_name); | ||
4760 | |||
4761 | res = qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI, | ||
4762 | fcport, fcport->d_id); | ||
4763 | } | ||
4764 | |||
4765 | if (res != QLA_SUCCESS) { | ||
4766 | ql_log(ql_log_info, vha, 0xd04d, | ||
4767 | "PLOGI Failed: portid=%06x - retrying\n", | ||
4768 | fcport->d_id.b24); | ||
4769 | res = QLA_SUCCESS; | ||
4770 | } else { | ||
4771 | /* State 0x6 means FCP PRLI complete */ | ||
4772 | if ((fcport->current_login_state & 0xf) == 0x6) { | ||
4773 | ql_dbg(ql_dbg_disc, vha, 0x2118, | ||
4774 | "%s %d %8phC post GPDB work\n", | ||
4775 | __func__, __LINE__, fcport->port_name); | ||
4776 | fcport->chip_reset = | ||
4777 | vha->hw->base_qpair->chip_reset; | ||
4778 | qla24xx_post_gpdb_work(vha, fcport, 0); | ||
4779 | } else { | ||
4780 | ql_dbg(ql_dbg_disc, vha, 0x2118, | ||
4781 | "%s %d %8phC post NVMe PRLI\n", | ||
4782 | __func__, __LINE__, fcport->port_name); | ||
4783 | qla24xx_post_prli_work(vha, fcport); | ||
4784 | } | ||
4785 | } | ||
4786 | } else { | ||
4787 | /* Wait for next database change */ | ||
4788 | set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags); | ||
4789 | } | ||
4790 | |||
4791 | return res; | ||
4792 | } | ||
4793 | |||
4794 | /* | ||
4795 | * qla2x00_configure_local_loop | 4894 | * qla2x00_configure_local_loop |
4796 | * Updates Fibre Channel Device Database with local loop devices. | 4895 | * Updates Fibre Channel Device Database with local loop devices. |
4797 | * | 4896 | * |
@@ -4817,6 +4916,31 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) | |||
4817 | struct qla_hw_data *ha = vha->hw; | 4916 | struct qla_hw_data *ha = vha->hw; |
4818 | unsigned long flags; | 4917 | unsigned long flags; |
4819 | 4918 | ||
4919 | /* Initiate N2N login. */ | ||
4920 | if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) { | ||
4921 | /* borrowing */ | ||
4922 | u32 *bp, i, sz; | ||
4923 | |||
4924 | memset(ha->init_cb, 0, ha->init_cb_size); | ||
4925 | sz = min_t(int, sizeof(struct els_plogi_payload), | ||
4926 | ha->init_cb_size); | ||
4927 | rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma, | ||
4928 | (void *)ha->init_cb, sz); | ||
4929 | if (rval == QLA_SUCCESS) { | ||
4930 | bp = (uint32_t *)ha->init_cb; | ||
4931 | for (i = 0; i < sz/4 ; i++, bp++) | ||
4932 | *bp = cpu_to_be32(*bp); | ||
4933 | |||
4934 | memcpy(&ha->plogi_els_payld.data, (void *)ha->init_cb, | ||
4935 | sizeof(ha->plogi_els_payld.data)); | ||
4936 | set_bit(RELOGIN_NEEDED, &vha->dpc_flags); | ||
4937 | } else { | ||
4938 | ql_dbg(ql_dbg_init, vha, 0x00d1, | ||
4939 | "PLOGI ELS param read fail.\n"); | ||
4940 | } | ||
4941 | return QLA_SUCCESS; | ||
4942 | } | ||
4943 | |||
4820 | found_devs = 0; | 4944 | found_devs = 0; |
4821 | new_fcport = NULL; | 4945 | new_fcport = NULL; |
4822 | entries = MAX_FIBRE_DEVICES_LOOP; | 4946 | entries = MAX_FIBRE_DEVICES_LOOP; |
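The block added above borrows ha->init_cb as scratch space, asks the firmware for the port-login (PLOGI payload) template, converts each 32-bit word, and caches the result in ha->plogi_els_payld for later driver-initiated PLOGI ELS IOCBs. A standalone sketch of the word-wise conversion step; on a little-endian host cpu_to_be32() amounts to the swap shown here, and the buffer contents are made up.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Byte-swap every 32-bit word of a buffer in place.  This models the
 * "bp = (uint32_t *)ha->init_cb; *bp = cpu_to_be32(*bp)" loop above;
 * on a little-endian host cpu_to_be32() is exactly this swap, on a
 * big-endian host it would be a no-op. */
static void swap_words32(uint32_t *buf, size_t bytes)
{
	for (size_t i = 0; i < bytes / 4; i++) {
		uint32_t v = buf[i];

		buf[i] = (v >> 24) | ((v >> 8) & 0x0000ff00) |
			 ((v << 8) & 0x00ff0000) | (v << 24);
	}
}

int main(void)
{
	uint32_t templ[2] = { 0x11223344, 0xaabbccdd };

	swap_words32(templ, sizeof(templ));
	printf("%08x %08x\n", (unsigned)templ[0], (unsigned)templ[1]);
	return 0;
}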
@@ -4848,14 +4972,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) | |||
4848 | } | 4972 | } |
4849 | new_fcport->flags &= ~FCF_FABRIC_DEVICE; | 4973 | new_fcport->flags &= ~FCF_FABRIC_DEVICE; |
4850 | 4974 | ||
4851 | /* Inititae N2N login. */ | ||
4852 | if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) { | ||
4853 | rval = qla24xx_n2n_handle_login(vha, new_fcport); | ||
4854 | if (rval != QLA_SUCCESS) | ||
4855 | goto cleanup_allocation; | ||
4856 | return QLA_SUCCESS; | ||
4857 | } | ||
4858 | |||
4859 | /* Add devices to port list. */ | 4975 | /* Add devices to port list. */ |
4860 | id_iter = (char *)ha->gid_list; | 4976 | id_iter = (char *)ha->gid_list; |
4861 | for (index = 0; index < entries; index++) { | 4977 | for (index = 0; index < entries; index++) { |
@@ -5054,6 +5170,9 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) | |||
5054 | struct fc_rport *rport; | 5170 | struct fc_rport *rport; |
5055 | unsigned long flags; | 5171 | unsigned long flags; |
5056 | 5172 | ||
5173 | if (atomic_read(&fcport->state) == FCS_ONLINE) | ||
5174 | return; | ||
5175 | |||
5057 | rport_ids.node_name = wwn_to_u64(fcport->node_name); | 5176 | rport_ids.node_name = wwn_to_u64(fcport->node_name); |
5058 | rport_ids.port_name = wwn_to_u64(fcport->port_name); | 5177 | rport_ids.port_name = wwn_to_u64(fcport->port_name); |
5059 | rport_ids.port_id = fcport->d_id.b.domain << 16 | | 5178 | rport_ids.port_id = fcport->d_id.b.domain << 16 | |
@@ -5109,25 +5228,28 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) | |||
5109 | if (IS_SW_RESV_ADDR(fcport->d_id)) | 5228 | if (IS_SW_RESV_ADDR(fcport->d_id)) |
5110 | return; | 5229 | return; |
5111 | 5230 | ||
5112 | ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n", | 5231 | fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); |
5113 | __func__, fcport->port_name); | 5232 | fcport->disc_state = DSC_LOGIN_COMPLETE; |
5114 | 5233 | fcport->deleted = 0; | |
5115 | if (IS_QLAFX00(vha->hw)) { | 5234 | fcport->logout_on_delete = 1; |
5116 | qla2x00_set_fcport_state(fcport, FCS_ONLINE); | 5235 | fcport->login_retry = vha->hw->login_retry_count; |
5117 | } else { | 5236 | fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0; |
5118 | fcport->login_retry = 0; | ||
5119 | fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); | ||
5120 | fcport->disc_state = DSC_LOGIN_COMPLETE; | ||
5121 | fcport->deleted = 0; | ||
5122 | fcport->logout_on_delete = 1; | ||
5123 | qla2x00_set_fcport_state(fcport, FCS_ONLINE); | ||
5124 | } | ||
5125 | 5237 | ||
5126 | qla2x00_set_fcport_state(fcport, FCS_ONLINE); | ||
5127 | qla2x00_iidma_fcport(vha, fcport); | 5238 | qla2x00_iidma_fcport(vha, fcport); |
5128 | 5239 | ||
5240 | switch (vha->hw->current_topology) { | ||
5241 | case ISP_CFG_N: | ||
5242 | case ISP_CFG_NL: | ||
5243 | fcport->keep_nport_handle = 1; | ||
5244 | break; | ||
5245 | default: | ||
5246 | break; | ||
5247 | } | ||
5248 | |||
5129 | if (fcport->fc4f_nvme) { | 5249 | if (fcport->fc4f_nvme) { |
5130 | qla_nvme_register_remote(vha, fcport); | 5250 | qla_nvme_register_remote(vha, fcport); |
5251 | fcport->disc_state = DSC_LOGIN_COMPLETE; | ||
5252 | qla2x00_set_fcport_state(fcport, FCS_ONLINE); | ||
5131 | return; | 5253 | return; |
5132 | } | 5254 | } |
5133 | 5255 | ||
@@ -5168,6 +5290,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) | |||
5168 | qla24xx_post_gpsc_work(vha, fcport); | 5290 | qla24xx_post_gpsc_work(vha, fcport); |
5169 | } | 5291 | } |
5170 | } | 5292 | } |
5293 | qla2x00_set_fcport_state(fcport, FCS_ONLINE); | ||
5171 | } | 5294 | } |
5172 | 5295 | ||
5173 | /* | 5296 | /* |
@@ -5668,6 +5791,34 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) | |||
5668 | } | 5791 | } |
5669 | 5792 | ||
5670 | 5793 | ||
5794 | /* FW does not set aside Loop id for MGMT Server/FFFFFAh */ | ||
5795 | int | ||
5796 | qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *vha) | ||
5797 | { | ||
5798 | int loop_id = FC_NO_LOOP_ID; | ||
5799 | int lid = NPH_MGMT_SERVER - vha->vp_idx; | ||
5800 | unsigned long flags; | ||
5801 | struct qla_hw_data *ha = vha->hw; | ||
5802 | |||
5803 | if (vha->vp_idx == 0) { | ||
5804 | set_bit(NPH_MGMT_SERVER, ha->loop_id_map); | ||
5805 | return NPH_MGMT_SERVER; | ||
5806 | } | ||
5807 | |||
5808 | /* pick id from high and work down to low */ | ||
5809 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
5810 | for (; lid > 0; lid--) { | ||
5811 | if (!test_bit(lid, vha->hw->loop_id_map)) { | ||
5812 | set_bit(lid, vha->hw->loop_id_map); | ||
5813 | loop_id = lid; | ||
5814 | break; | ||
5815 | } | ||
5816 | } | ||
5817 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
5818 | |||
5819 | return loop_id; | ||
5820 | } | ||
5821 | |||
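qla2x00_reserve_mgmt_server_loop_id() above keeps the fixed NPH_MGMT_SERVER handle for the physical port (vp_idx 0) and, for NPIV vports, walks the loop-ID bitmap downward from NPH_MGMT_SERVER - vp_idx until it can claim a free bit under the vport lock. A simplified user-space model of that high-to-low scan; the bitmap size and start value here are illustrative, not the driver's constants.

#include <stdint.h>
#include <stdio.h>

#define MAX_LOOP_IDS	2048		/* illustrative size of the map */
#define NO_LOOP_ID	(-1)

static uint8_t loop_id_map[MAX_LOOP_IDS / 8];

/* Set the bit for 'id' and report whether it was already set. */
static int test_and_set(int id)
{
	int byte = id / 8, bit = id % 8, was = (loop_id_map[byte] >> bit) & 1;

	loop_id_map[byte] |= 1u << bit;
	return was;
}

/* Scan downward from 'start' and claim the first free loop ID,
 * mirroring the high-to-low walk in the function above. */
static int reserve_loop_id_from(int start)
{
	for (int lid = start; lid > 0; lid--)
		if (!test_and_set(lid))
			return lid;
	return NO_LOOP_ID;
}

int main(void)
{
	int a = reserve_loop_id_from(100);
	int b = reserve_loop_id_from(100);	/* 100 is taken now, expect 99 */

	printf("%d %d\n", a, b);
	return 0;
}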
5671 | /* | 5822 | /* |
5672 | * qla2x00_fabric_login | 5823 | * qla2x00_fabric_login |
5673 | * Issue fabric login command. | 5824 | * Issue fabric login command. |
@@ -6335,6 +6486,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) | |||
6335 | ql_log(ql_log_info, vha, 0x00af, | 6486 | ql_log(ql_log_info, vha, 0x00af, |
6336 | "Performing ISP error recovery - ha=%p.\n", ha); | 6487 | "Performing ISP error recovery - ha=%p.\n", ha); |
6337 | 6488 | ||
6489 | ha->flags.purge_mbox = 1; | ||
6338 | /* For ISP82XX, reset_chip is just disabling interrupts. | 6490 | /* For ISP82XX, reset_chip is just disabling interrupts. |
6339 | * Driver waits for the completion of the commands. | 6491 | * Driver waits for the completion of the commands. |
6340 | * the interrupts need to be enabled. | 6492 | * the interrupts need to be enabled. |
@@ -6349,13 +6501,31 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) | |||
6349 | ha->current_topology = 0; | 6501 | ha->current_topology = 0; |
6350 | ha->flags.fw_started = 0; | 6502 | ha->flags.fw_started = 0; |
6351 | ha->flags.fw_init_done = 0; | 6503 | ha->flags.fw_init_done = 0; |
6352 | ha->base_qpair->chip_reset++; | 6504 | ha->chip_reset++; |
6505 | ha->base_qpair->chip_reset = ha->chip_reset; | ||
6353 | for (i = 0; i < ha->max_qpairs; i++) { | 6506 | for (i = 0; i < ha->max_qpairs; i++) { |
6354 | if (ha->queue_pair_map[i]) | 6507 | if (ha->queue_pair_map[i]) |
6355 | ha->queue_pair_map[i]->chip_reset = | 6508 | ha->queue_pair_map[i]->chip_reset = |
6356 | ha->base_qpair->chip_reset; | 6509 | ha->base_qpair->chip_reset; |
6357 | } | 6510 | } |
6358 | 6511 | ||
6512 | /* purge MBox commands */ | ||
6513 | if (atomic_read(&ha->num_pend_mbx_stage3)) { | ||
6514 | clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); | ||
6515 | complete(&ha->mbx_intr_comp); | ||
6516 | } | ||
6517 | |||
6518 | i = 0; | ||
6519 | while (atomic_read(&ha->num_pend_mbx_stage3) || | ||
6520 | atomic_read(&ha->num_pend_mbx_stage2) || | ||
6521 | atomic_read(&ha->num_pend_mbx_stage1)) { | ||
6522 | msleep(20); | ||
6523 | i++; | ||
6524 | if (i > 50) | ||
6525 | break; | ||
6526 | } | ||
6527 | ha->flags.purge_mbox = 0; | ||
6528 | |||
6359 | atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); | 6529 | atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); |
6360 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { | 6530 | if (atomic_read(&vha->loop_state) != LOOP_DOWN) { |
6361 | atomic_set(&vha->loop_state, LOOP_DOWN); | 6531 | atomic_set(&vha->loop_state, LOOP_DOWN); |
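The recovery path above now advances a single chip-reset generation counter (ha->chip_reset), copies it into every queue pair, raises ha->flags.purge_mbox, completes any sleeping mailbox waiter, and then polls the three stage counters for roughly a second before dropping the flag. A compressed, hypothetical model of that drain sequence; the structure and callbacks below are invented for the sketch, only the counter names mirror the driver.

#include <stdatomic.h>
#include <stdio.h>

struct hw_model {
	atomic_int pend_stage1, pend_stage2, pend_stage3;
	unsigned int chip_reset;	/* reset generation */
	int purge_mbox;			/* set while draining commands */
};

/* Drain side of the purge handshake: bump the generation, signal
 * purging, wake a waiter stuck on the interrupt completion, then wait
 * (bounded, about 50 x 20 ms) for in-flight commands to unwind. */
static void purge_pending_mbx(struct hw_model *ha,
			      void (*complete_waiter)(struct hw_model *),
			      void (*sleep_20ms)(void))
{
	ha->purge_mbox = 1;
	ha->chip_reset++;

	if (atomic_load(&ha->pend_stage3))
		complete_waiter(ha);

	for (int i = 0; i <= 50; i++) {
		if (!atomic_load(&ha->pend_stage1) &&
		    !atomic_load(&ha->pend_stage2) &&
		    !atomic_load(&ha->pend_stage3))
			break;
		sleep_20ms();
	}
	ha->purge_mbox = 0;
}

static void stub_complete(struct hw_model *ha) { (void)ha; }
static void stub_sleep(void) { }

int main(void)
{
	struct hw_model ha = { .chip_reset = 1 };

	purge_pending_mbx(&ha, stub_complete, stub_sleep);
	printf("generation %u, purge_mbox %d\n", ha.chip_reset, ha.purge_mbox);
	return 0;
}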
@@ -6861,7 +7031,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) | |||
6861 | (uint8_t *)&icb->interrupt_delay_timer; | 7031 | (uint8_t *)&icb->interrupt_delay_timer; |
6862 | while (cnt--) | 7032 | while (cnt--) |
6863 | *dptr1++ = *dptr2++; | 7033 | *dptr1++ = *dptr2++; |
6864 | 7034 | ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size); | |
6865 | /* | 7035 | /* |
6866 | * Setup driver NVRAM options. | 7036 | * Setup driver NVRAM options. |
6867 | */ | 7037 | */ |
@@ -6960,6 +7130,9 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) | |||
6960 | if (ql2xloginretrycount) | 7130 | if (ql2xloginretrycount) |
6961 | ha->login_retry_count = ql2xloginretrycount; | 7131 | ha->login_retry_count = ql2xloginretrycount; |
6962 | 7132 | ||
7133 | /* N2N: driver will initiate Login instead of FW */ | ||
7134 | icb->firmware_options_3 |= BIT_8; | ||
7135 | |||
6963 | /* Enable ZIO. */ | 7136 | /* Enable ZIO. */ |
6964 | if (!vha->flags.init_done) { | 7137 | if (!vha->flags.init_done) { |
6965 | ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & | 7138 | ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & |
@@ -7069,7 +7242,7 @@ check_valid_image: | |||
7069 | ha->active_image = QLA27XX_SECONDARY_IMAGE; | 7242 | ha->active_image = QLA27XX_SECONDARY_IMAGE; |
7070 | } | 7243 | } |
7071 | 7244 | ||
7072 | ql_dbg(ql_dbg_init, vha, 0x018f, "%s image\n", | 7245 | ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x018f, "%s image\n", |
7073 | ha->active_image == 0 ? "default bootld and fw" : | 7246 | ha->active_image == 0 ? "default bootld and fw" : |
7074 | ha->active_image == 1 ? "primary" : | 7247 | ha->active_image == 1 ? "primary" : |
7075 | ha->active_image == 2 ? "secondary" : | 7248 | ha->active_image == 2 ? "secondary" : |
@@ -7917,7 +8090,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) | |||
7917 | 8090 | ||
7918 | /* Use extended-initialization control block. */ | 8091 | /* Use extended-initialization control block. */ |
7919 | memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb)); | 8092 | memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb)); |
7920 | 8093 | ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size); | |
7921 | /* | 8094 | /* |
7922 | * Setup driver NVRAM options. | 8095 | * Setup driver NVRAM options. |
7923 | */ | 8096 | */ |
@@ -8042,8 +8215,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) | |||
8042 | } | 8215 | } |
8043 | 8216 | ||
8044 | /* enable RIDA Format2 */ | 8217 | /* enable RIDA Format2 */ |
8045 | if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) | 8218 | icb->firmware_options_3 |= BIT_0; |
8046 | icb->firmware_options_3 |= BIT_0; | 8219 | |
8220 | /* N2N: driver will initiate Login instead of FW */ | ||
8221 | icb->firmware_options_3 |= BIT_8; | ||
8047 | 8222 | ||
8048 | if (IS_QLA27XX(ha)) { | 8223 | if (IS_QLA27XX(ha)) { |
8049 | icb->firmware_options_3 |= BIT_8; | 8224 | icb->firmware_options_3 |= BIT_8; |
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 59fd5a9dfeb8..4351736b2426 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h | |||
@@ -58,14 +58,12 @@ qla2x00_debounce_register(volatile uint16_t __iomem *addr) | |||
58 | static inline void | 58 | static inline void |
59 | qla2x00_poll(struct rsp_que *rsp) | 59 | qla2x00_poll(struct rsp_que *rsp) |
60 | { | 60 | { |
61 | unsigned long flags; | ||
62 | struct qla_hw_data *ha = rsp->hw; | 61 | struct qla_hw_data *ha = rsp->hw; |
63 | local_irq_save(flags); | 62 | |
64 | if (IS_P3P_TYPE(ha)) | 63 | if (IS_P3P_TYPE(ha)) |
65 | qla82xx_poll(0, rsp); | 64 | qla82xx_poll(0, rsp); |
66 | else | 65 | else |
67 | ha->isp_ops->intr_handler(0, rsp); | 66 | ha->isp_ops->intr_handler(0, rsp); |
68 | local_irq_restore(flags); | ||
69 | } | 67 | } |
70 | 68 | ||
71 | static inline uint8_t * | 69 | static inline uint8_t * |
@@ -204,6 +202,12 @@ qla2x00_reset_active(scsi_qla_host_t *vha) | |||
204 | test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); | 202 | test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); |
205 | } | 203 | } |
206 | 204 | ||
205 | static inline int | ||
206 | qla2x00_chip_is_down(scsi_qla_host_t *vha) | ||
207 | { | ||
208 | return (qla2x00_reset_active(vha) || !vha->hw->flags.fw_started); | ||
209 | } | ||
210 | |||
207 | static inline srb_t * | 211 | static inline srb_t * |
208 | qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag) | 212 | qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag) |
209 | { | 213 | { |
@@ -278,8 +282,6 @@ qla2x00_init_timer(srb_t *sp, unsigned long tmo) | |||
278 | init_completion(&sp->comp); | 282 | init_completion(&sp->comp); |
279 | if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD)) | 283 | if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD)) |
280 | init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp); | 284 | init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp); |
281 | if (sp->type == SRB_ELS_DCMD) | ||
282 | init_completion(&sp->u.iocb_cmd.u.els_logo.comp); | ||
283 | add_timer(&sp->u.iocb_cmd.timer); | 285 | add_timer(&sp->u.iocb_cmd.timer); |
284 | } | 286 | } |
285 | 287 | ||
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index dd93a22fe843..42ac8e097419 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c | |||
@@ -2241,12 +2241,15 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio) | |||
2241 | struct srb_iocb *lio = &sp->u.iocb_cmd; | 2241 | struct srb_iocb *lio = &sp->u.iocb_cmd; |
2242 | 2242 | ||
2243 | logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; | 2243 | logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; |
2244 | logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); | 2244 | if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) { |
2245 | 2245 | logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI); | |
2246 | if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI) | 2246 | } else { |
2247 | logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI); | 2247 | logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); |
2248 | if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI) | 2248 | if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI) |
2249 | logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); | 2249 | logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI); |
2250 | if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI) | ||
2251 | logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); | ||
2252 | } | ||
2250 | logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); | 2253 | logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); |
2251 | logio->port_id[0] = sp->fcport->d_id.b.al_pa; | 2254 | logio->port_id[0] = sp->fcport->d_id.b.al_pa; |
2252 | logio->port_id[1] = sp->fcport->d_id.b.area; | 2255 | logio->port_id[1] = sp->fcport->d_id.b.area; |
@@ -2463,6 +2466,7 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, | |||
2463 | sp->fcport = fcport; | 2466 | sp->fcport = fcport; |
2464 | elsio->timeout = qla2x00_els_dcmd_iocb_timeout; | 2467 | elsio->timeout = qla2x00_els_dcmd_iocb_timeout; |
2465 | qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT); | 2468 | qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT); |
2469 | init_completion(&sp->u.iocb_cmd.u.els_logo.comp); | ||
2466 | sp->done = qla2x00_els_dcmd_sp_done; | 2470 | sp->done = qla2x00_els_dcmd_sp_done; |
2467 | sp->free = qla2x00_els_dcmd_sp_free; | 2471 | sp->free = qla2x00_els_dcmd_sp_free; |
2468 | 2472 | ||
@@ -2510,7 +2514,6 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) | |||
2510 | { | 2514 | { |
2511 | scsi_qla_host_t *vha = sp->vha; | 2515 | scsi_qla_host_t *vha = sp->vha; |
2512 | struct srb_iocb *elsio = &sp->u.iocb_cmd; | 2516 | struct srb_iocb *elsio = &sp->u.iocb_cmd; |
2513 | uint32_t dsd_len = 24; | ||
2514 | 2517 | ||
2515 | els_iocb->entry_type = ELS_IOCB_TYPE; | 2518 | els_iocb->entry_type = ELS_IOCB_TYPE; |
2516 | els_iocb->entry_count = 1; | 2519 | els_iocb->entry_count = 1; |
@@ -2533,20 +2536,21 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) | |||
2533 | els_iocb->control_flags = 0; | 2536 | els_iocb->control_flags = 0; |
2534 | 2537 | ||
2535 | if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) { | 2538 | if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) { |
2536 | els_iocb->tx_byte_count = sizeof(struct els_plogi_payload); | 2539 | els_iocb->tx_byte_count = els_iocb->tx_len = |
2540 | sizeof(struct els_plogi_payload); | ||
2537 | els_iocb->tx_address[0] = | 2541 | els_iocb->tx_address[0] = |
2538 | cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma)); | 2542 | cpu_to_le32(LSD(elsio->u.els_plogi.els_plogi_pyld_dma)); |
2539 | els_iocb->tx_address[1] = | 2543 | els_iocb->tx_address[1] = |
2540 | cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma)); | 2544 | cpu_to_le32(MSD(elsio->u.els_plogi.els_plogi_pyld_dma)); |
2541 | els_iocb->tx_len = dsd_len; | ||
2542 | 2545 | ||
2543 | els_iocb->rx_dsd_count = 1; | 2546 | els_iocb->rx_dsd_count = 1; |
2544 | els_iocb->rx_byte_count = sizeof(struct els_plogi_payload); | 2547 | els_iocb->rx_byte_count = els_iocb->rx_len = |
2548 | sizeof(struct els_plogi_payload); | ||
2545 | els_iocb->rx_address[0] = | 2549 | els_iocb->rx_address[0] = |
2546 | cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma)); | 2550 | cpu_to_le32(LSD(elsio->u.els_plogi.els_resp_pyld_dma)); |
2547 | els_iocb->rx_address[1] = | 2551 | els_iocb->rx_address[1] = |
2548 | cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma)); | 2552 | cpu_to_le32(MSD(elsio->u.els_plogi.els_resp_pyld_dma)); |
2549 | els_iocb->rx_len = dsd_len; | 2553 | |
2550 | ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, | 2554 | ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, |
2551 | "PLOGI ELS IOCB:\n"); | 2555 | "PLOGI ELS IOCB:\n"); |
2552 | ql_dump_buffer(ql_log_info, vha, 0x0109, | 2556 | ql_dump_buffer(ql_log_info, vha, 0x0109, |
@@ -2569,33 +2573,12 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) | |||
2569 | } | 2573 | } |
2570 | 2574 | ||
2571 | static void | 2575 | static void |
2572 | qla2x00_els_dcmd2_sp_free(void *data) | ||
2573 | { | ||
2574 | srb_t *sp = data; | ||
2575 | struct srb_iocb *elsio = &sp->u.iocb_cmd; | ||
2576 | |||
2577 | if (elsio->u.els_plogi.els_plogi_pyld) | ||
2578 | dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE, | ||
2579 | elsio->u.els_plogi.els_plogi_pyld, | ||
2580 | elsio->u.els_plogi.els_plogi_pyld_dma); | ||
2581 | |||
2582 | if (elsio->u.els_plogi.els_resp_pyld) | ||
2583 | dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE, | ||
2584 | elsio->u.els_plogi.els_resp_pyld, | ||
2585 | elsio->u.els_plogi.els_resp_pyld_dma); | ||
2586 | |||
2587 | del_timer(&elsio->timer); | ||
2588 | qla2x00_rel_sp(sp); | ||
2589 | } | ||
2590 | |||
2591 | static void | ||
2592 | qla2x00_els_dcmd2_iocb_timeout(void *data) | 2576 | qla2x00_els_dcmd2_iocb_timeout(void *data) |
2593 | { | 2577 | { |
2594 | srb_t *sp = data; | 2578 | srb_t *sp = data; |
2595 | fc_port_t *fcport = sp->fcport; | 2579 | fc_port_t *fcport = sp->fcport; |
2596 | struct scsi_qla_host *vha = sp->vha; | 2580 | struct scsi_qla_host *vha = sp->vha; |
2597 | struct qla_hw_data *ha = vha->hw; | 2581 | struct qla_hw_data *ha = vha->hw; |
2598 | struct srb_iocb *lio = &sp->u.iocb_cmd; | ||
2599 | unsigned long flags = 0; | 2582 | unsigned long flags = 0; |
2600 | int res; | 2583 | int res; |
2601 | 2584 | ||
@@ -2611,7 +2594,7 @@ qla2x00_els_dcmd2_iocb_timeout(void *data) | |||
2611 | (res == QLA_SUCCESS) ? "successful" : "failed"); | 2594 | (res == QLA_SUCCESS) ? "successful" : "failed"); |
2612 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 2595 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
2613 | 2596 | ||
2614 | complete(&lio->u.els_plogi.comp); | 2597 | sp->done(sp, QLA_FUNCTION_TIMEOUT); |
2615 | } | 2598 | } |
2616 | 2599 | ||
2617 | static void | 2600 | static void |
@@ -2621,17 +2604,55 @@ qla2x00_els_dcmd2_sp_done(void *ptr, int res) | |||
2621 | fc_port_t *fcport = sp->fcport; | 2604 | fc_port_t *fcport = sp->fcport; |
2622 | struct srb_iocb *lio = &sp->u.iocb_cmd; | 2605 | struct srb_iocb *lio = &sp->u.iocb_cmd; |
2623 | struct scsi_qla_host *vha = sp->vha; | 2606 | struct scsi_qla_host *vha = sp->vha; |
2607 | struct event_arg ea; | ||
2608 | struct qla_work_evt *e; | ||
2609 | |||
2610 | ql_dbg(ql_dbg_disc, vha, 0x3072, | ||
2611 | "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n", | ||
2612 | sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name); | ||
2624 | 2613 | ||
2625 | ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3072, | 2614 | fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE); |
2626 | "%s ELS hdl=%x, portid=%06x done %8phC\n", | 2615 | del_timer(&sp->u.iocb_cmd.timer); |
2627 | sp->name, sp->handle, fcport->d_id.b24, fcport->port_name); | ||
2628 | 2616 | ||
2629 | complete(&lio->u.els_plogi.comp); | 2617 | if (sp->flags & SRB_WAKEUP_ON_COMP) |
2618 | complete(&lio->u.els_plogi.comp); | ||
2619 | else { | ||
2620 | if (res) { | ||
2621 | set_bit(RELOGIN_NEEDED, &vha->dpc_flags); | ||
2622 | } else { | ||
2623 | memset(&ea, 0, sizeof(ea)); | ||
2624 | ea.fcport = fcport; | ||
2625 | ea.rc = res; | ||
2626 | ea.event = FCME_ELS_PLOGI_DONE; | ||
2627 | qla2x00_fcport_event_handler(vha, &ea); | ||
2628 | } | ||
2629 | |||
2630 | e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP); | ||
2631 | if (!e) { | ||
2632 | struct srb_iocb *elsio = &sp->u.iocb_cmd; | ||
2633 | |||
2634 | if (elsio->u.els_plogi.els_plogi_pyld) | ||
2635 | dma_free_coherent(&sp->vha->hw->pdev->dev, | ||
2636 | elsio->u.els_plogi.tx_size, | ||
2637 | elsio->u.els_plogi.els_plogi_pyld, | ||
2638 | elsio->u.els_plogi.els_plogi_pyld_dma); | ||
2639 | |||
2640 | if (elsio->u.els_plogi.els_resp_pyld) | ||
2641 | dma_free_coherent(&sp->vha->hw->pdev->dev, | ||
2642 | elsio->u.els_plogi.rx_size, | ||
2643 | elsio->u.els_plogi.els_resp_pyld, | ||
2644 | elsio->u.els_plogi.els_resp_pyld_dma); | ||
2645 | sp->free(sp); | ||
2646 | return; | ||
2647 | } | ||
2648 | e->u.iosb.sp = sp; | ||
2649 | qla2x00_post_work(vha, e); | ||
2650 | } | ||
2630 | } | 2651 | } |
2631 | 2652 | ||
2632 | int | 2653 | int |
2633 | qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, | 2654 | qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, |
2634 | fc_port_t *fcport, port_id_t remote_did) | 2655 | fc_port_t *fcport, bool wait) |
2635 | { | 2656 | { |
2636 | srb_t *sp; | 2657 | srb_t *sp; |
2637 | struct srb_iocb *elsio = NULL; | 2658 | struct srb_iocb *elsio = NULL; |
@@ -2649,23 +2670,23 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, | |||
2649 | } | 2670 | } |
2650 | 2671 | ||
2651 | elsio = &sp->u.iocb_cmd; | 2672 | elsio = &sp->u.iocb_cmd; |
2652 | fcport->d_id.b.domain = remote_did.b.domain; | ||
2653 | fcport->d_id.b.area = remote_did.b.area; | ||
2654 | fcport->d_id.b.al_pa = remote_did.b.al_pa; | ||
2655 | |||
2656 | ql_dbg(ql_dbg_io, vha, 0x3073, | 2673 | ql_dbg(ql_dbg_io, vha, 0x3073, |
2657 | "Enter: PLOGI portid=%06x\n", fcport->d_id.b24); | 2674 | "Enter: PLOGI portid=%06x\n", fcport->d_id.b24); |
2658 | 2675 | ||
2676 | fcport->flags |= FCF_ASYNC_SENT; | ||
2659 | sp->type = SRB_ELS_DCMD; | 2677 | sp->type = SRB_ELS_DCMD; |
2660 | sp->name = "ELS_DCMD"; | 2678 | sp->name = "ELS_DCMD"; |
2661 | sp->fcport = fcport; | 2679 | sp->fcport = fcport; |
2662 | 2680 | ||
2663 | elsio->timeout = qla2x00_els_dcmd2_iocb_timeout; | 2681 | elsio->timeout = qla2x00_els_dcmd2_iocb_timeout; |
2664 | init_completion(&elsio->u.els_plogi.comp); | 2682 | init_completion(&elsio->u.els_plogi.comp); |
2665 | qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT); | 2683 | if (wait) |
2684 | sp->flags = SRB_WAKEUP_ON_COMP; | ||
2685 | |||
2686 | qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2); | ||
2666 | 2687 | ||
2667 | sp->done = qla2x00_els_dcmd2_sp_done; | 2688 | sp->done = qla2x00_els_dcmd2_sp_done; |
2668 | sp->free = qla2x00_els_dcmd2_sp_free; | 2689 | elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE; |
2669 | 2690 | ||
2670 | ptr = elsio->u.els_plogi.els_plogi_pyld = | 2691 | ptr = elsio->u.els_plogi.els_plogi_pyld = |
2671 | dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE, | 2692 | dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE, |
@@ -2690,33 +2711,52 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, | |||
2690 | 2711 | ||
2691 | memset(ptr, 0, sizeof(struct els_plogi_payload)); | 2712 | memset(ptr, 0, sizeof(struct els_plogi_payload)); |
2692 | memset(resp_ptr, 0, sizeof(struct els_plogi_payload)); | 2713 | memset(resp_ptr, 0, sizeof(struct els_plogi_payload)); |
2714 | memcpy(elsio->u.els_plogi.els_plogi_pyld->data, | ||
2715 | &ha->plogi_els_payld.data, | ||
2716 | sizeof(elsio->u.els_plogi.els_plogi_pyld->data)); | ||
2717 | |||
2693 | elsio->u.els_plogi.els_cmd = els_opcode; | 2718 | elsio->u.els_plogi.els_cmd = els_opcode; |
2694 | elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode; | 2719 | elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode; |
2695 | qla24xx_get_port_login_templ(vha, ptr_dma + 4, | ||
2696 | &elsio->u.els_plogi.els_plogi_pyld->data[0], | ||
2697 | sizeof(struct els_plogi_payload)); | ||
2698 | 2720 | ||
2699 | ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n"); | 2721 | ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n"); |
2700 | ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x0109, | 2722 | ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109, |
2701 | (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70); | 2723 | (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70); |
2702 | 2724 | ||
2703 | rval = qla2x00_start_sp(sp); | 2725 | rval = qla2x00_start_sp(sp); |
2704 | if (rval != QLA_SUCCESS) { | 2726 | if (rval != QLA_SUCCESS) { |
2705 | rval = QLA_FUNCTION_FAILED; | 2727 | rval = QLA_FUNCTION_FAILED; |
2706 | goto out; | 2728 | } else { |
2729 | ql_dbg(ql_dbg_disc, vha, 0x3074, | ||
2730 | "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n", | ||
2731 | sp->name, sp->handle, fcport->loop_id, | ||
2732 | fcport->d_id.b24, vha->d_id.b24); | ||
2707 | } | 2733 | } |
2708 | 2734 | ||
2709 | ql_dbg(ql_dbg_io, vha, 0x3074, | 2735 | if (wait) { |
2710 | "%s PLOGI sent, hdl=%x, loopid=%x, portid=%06x\n", | 2736 | wait_for_completion(&elsio->u.els_plogi.comp); |
2711 | sp->name, sp->handle, fcport->loop_id, fcport->d_id.b24); | ||
2712 | |||
2713 | wait_for_completion(&elsio->u.els_plogi.comp); | ||
2714 | 2737 | ||
2715 | if (elsio->u.els_plogi.comp_status != CS_COMPLETE) | 2738 | if (elsio->u.els_plogi.comp_status != CS_COMPLETE) |
2716 | rval = QLA_FUNCTION_FAILED; | 2739 | rval = QLA_FUNCTION_FAILED; |
2740 | } else { | ||
2741 | goto done; | ||
2742 | } | ||
2717 | 2743 | ||
2718 | out: | 2744 | out: |
2745 | fcport->flags &= ~(FCF_ASYNC_SENT); | ||
2746 | if (elsio->u.els_plogi.els_plogi_pyld) | ||
2747 | dma_free_coherent(&sp->vha->hw->pdev->dev, | ||
2748 | elsio->u.els_plogi.tx_size, | ||
2749 | elsio->u.els_plogi.els_plogi_pyld, | ||
2750 | elsio->u.els_plogi.els_plogi_pyld_dma); | ||
2751 | |||
2752 | if (elsio->u.els_plogi.els_resp_pyld) | ||
2753 | dma_free_coherent(&sp->vha->hw->pdev->dev, | ||
2754 | elsio->u.els_plogi.rx_size, | ||
2755 | elsio->u.els_plogi.els_resp_pyld, | ||
2756 | elsio->u.els_plogi.els_resp_pyld_dma); | ||
2757 | |||
2719 | sp->free(sp); | 2758 | sp->free(sp); |
2759 | done: | ||
2720 | return rval; | 2760 | return rval; |
2721 | } | 2761 | } |
2722 | 2762 | ||
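qla24xx_els_dcmd2_iocb() now takes a wait flag instead of a remote port ID: a caller that needs a synchronous answer sets SRB_WAKEUP_ON_COMP and blocks on the completion, while an asynchronous caller (the QLA_EVT_ELS_PLOGI work path) returns at once and lets the done handler, or a follow-up work item, free the DMA payload buffers. The pthread sketch below models only that wait/no-wait split; every name in it is invented for the example and none of it is driver API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct plogi_req {
	pthread_mutex_t lock;
	pthread_cond_t  done_cv;
	bool            wakeup_on_comp;	/* analogue of SRB_WAKEUP_ON_COMP */
	bool            done;
	int             status;
};

/* Completion callback: wake a waiter if one exists, otherwise clean
 * up here (the driver defers the free to a work item instead). */
static void plogi_done(struct plogi_req *req, int status)
{
	bool waiter;

	pthread_mutex_lock(&req->lock);
	req->status = status;
	req->done = true;
	waiter = req->wakeup_on_comp;
	if (waiter)
		pthread_cond_signal(&req->done_cv);
	pthread_mutex_unlock(&req->lock);

	if (!waiter) {
		printf("async completion, status %d, freeing request\n", status);
		free(req);
	}
}

static void *fake_firmware(void *arg)
{
	usleep(10000);			/* pretend the ELS round trip */
	plogi_done(arg, 0);
	return NULL;
}

static int issue_plogi(bool wait)
{
	struct plogi_req *req = calloc(1, sizeof(*req));
	pthread_t fw;
	int status;

	pthread_mutex_init(&req->lock, NULL);
	pthread_cond_init(&req->done_cv, NULL);
	req->wakeup_on_comp = wait;

	pthread_create(&fw, NULL, fake_firmware, req);
	pthread_detach(fw);

	if (!wait)
		return 0;		/* fire and forget: done path cleans up */

	pthread_mutex_lock(&req->lock);
	while (!req->done)
		pthread_cond_wait(&req->done_cv, &req->lock);
	status = req->status;
	pthread_mutex_unlock(&req->lock);
	free(req);
	return status;
}

int main(void)
{
	printf("sync PLOGI status %d\n", issue_plogi(true));
	issue_plogi(false);
	usleep(50000);			/* give the async completion time to run */
	return 0;
}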
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 7756106d4555..36cbb29c84f6 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -911,7 +911,8 @@ skip_rio: | |||
911 | if (!atomic_read(&vha->loop_down_timer)) | 911 | if (!atomic_read(&vha->loop_down_timer)) |
912 | atomic_set(&vha->loop_down_timer, | 912 | atomic_set(&vha->loop_down_timer, |
913 | LOOP_DOWN_TIME); | 913 | LOOP_DOWN_TIME); |
914 | qla2x00_mark_all_devices_lost(vha, 1); | 914 | if (!N2N_TOPO(ha)) |
915 | qla2x00_mark_all_devices_lost(vha, 1); | ||
915 | } | 916 | } |
916 | 917 | ||
917 | if (vha->vp_idx) { | 918 | if (vha->vp_idx) { |
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index f0ec13d48bf3..2c6c2cd5a0d0 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
@@ -59,6 +59,7 @@ static struct rom_cmd { | |||
59 | { MBC_IOCB_COMMAND_A64 }, | 59 | { MBC_IOCB_COMMAND_A64 }, |
60 | { MBC_GET_ADAPTER_LOOP_ID }, | 60 | { MBC_GET_ADAPTER_LOOP_ID }, |
61 | { MBC_READ_SFP }, | 61 | { MBC_READ_SFP }, |
62 | { MBC_GET_RNID_PARAMS }, | ||
62 | }; | 63 | }; |
63 | 64 | ||
64 | static int is_rom_cmd(uint16_t cmd) | 65 | static int is_rom_cmd(uint16_t cmd) |
@@ -110,6 +111,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
110 | unsigned long wait_time; | 111 | unsigned long wait_time; |
111 | struct qla_hw_data *ha = vha->hw; | 112 | struct qla_hw_data *ha = vha->hw; |
112 | scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); | 113 | scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); |
114 | u32 chip_reset; | ||
113 | 115 | ||
114 | 116 | ||
115 | ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__); | 117 | ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__); |
@@ -140,7 +142,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
140 | 142 | ||
141 | rval = QLA_SUCCESS; | 143 | rval = QLA_SUCCESS; |
142 | abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); | 144 | abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); |
143 | 145 | chip_reset = ha->chip_reset; | |
144 | 146 | ||
145 | if (ha->flags.pci_channel_io_perm_failure) { | 147 | if (ha->flags.pci_channel_io_perm_failure) { |
146 | ql_log(ql_log_warn, vha, 0x1003, | 148 | ql_log(ql_log_warn, vha, 0x1003, |
@@ -167,6 +169,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
167 | return QLA_FUNCTION_TIMEOUT; | 169 | return QLA_FUNCTION_TIMEOUT; |
168 | } | 170 | } |
169 | 171 | ||
172 | atomic_inc(&ha->num_pend_mbx_stage1); | ||
170 | /* | 173 | /* |
171 | * Wait for active mailbox commands to finish by waiting at most tov | 174 | * Wait for active mailbox commands to finish by waiting at most tov |
172 | * seconds. This is to serialize actual issuing of mailbox cmds during | 175 | * seconds. This is to serialize actual issuing of mailbox cmds during |
@@ -177,8 +180,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
177 | ql_log(ql_log_warn, vha, 0xd035, | 180 | ql_log(ql_log_warn, vha, 0xd035, |
178 | "Cmd access timeout, cmd=0x%x, Exiting.\n", | 181 | "Cmd access timeout, cmd=0x%x, Exiting.\n", |
179 | mcp->mb[0]); | 182 | mcp->mb[0]); |
183 | atomic_dec(&ha->num_pend_mbx_stage1); | ||
180 | return QLA_FUNCTION_TIMEOUT; | 184 | return QLA_FUNCTION_TIMEOUT; |
181 | } | 185 | } |
186 | atomic_dec(&ha->num_pend_mbx_stage1); | ||
187 | if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) { | ||
188 | rval = QLA_ABORTED; | ||
189 | goto premature_exit; | ||
190 | } | ||
182 | 191 | ||
183 | ha->flags.mbox_busy = 1; | 192 | ha->flags.mbox_busy = 1; |
184 | /* Save mailbox command for debug */ | 193 | /* Save mailbox command for debug */ |
@@ -189,6 +198,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
189 | 198 | ||
190 | spin_lock_irqsave(&ha->hardware_lock, flags); | 199 | spin_lock_irqsave(&ha->hardware_lock, flags); |
191 | 200 | ||
201 | if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) { | ||
202 | rval = QLA_ABORTED; | ||
203 | ha->flags.mbox_busy = 0; | ||
204 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
205 | goto premature_exit; | ||
206 | } | ||
207 | |||
192 | /* Load mailbox registers. */ | 208 | /* Load mailbox registers. */ |
193 | if (IS_P3P_TYPE(ha)) | 209 | if (IS_P3P_TYPE(ha)) |
194 | optr = (uint16_t __iomem *)®->isp82.mailbox_in[0]; | 210 | optr = (uint16_t __iomem *)®->isp82.mailbox_in[0]; |
@@ -231,7 +247,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
231 | "jiffies=%lx.\n", jiffies); | 247 | "jiffies=%lx.\n", jiffies); |
232 | 248 | ||
233 | /* Wait for mbx cmd completion until timeout */ | 249 | /* Wait for mbx cmd completion until timeout */ |
234 | 250 | atomic_inc(&ha->num_pend_mbx_stage2); | |
235 | if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) { | 251 | if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) { |
236 | set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); | 252 | set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); |
237 | 253 | ||
@@ -241,6 +257,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
241 | spin_unlock_irqrestore(&ha->hardware_lock, | 257 | spin_unlock_irqrestore(&ha->hardware_lock, |
242 | flags); | 258 | flags); |
243 | ha->flags.mbox_busy = 0; | 259 | ha->flags.mbox_busy = 0; |
260 | atomic_dec(&ha->num_pend_mbx_stage2); | ||
244 | ql_dbg(ql_dbg_mbx, vha, 0x1010, | 261 | ql_dbg(ql_dbg_mbx, vha, 0x1010, |
245 | "Pending mailbox timeout, exiting.\n"); | 262 | "Pending mailbox timeout, exiting.\n"); |
246 | rval = QLA_FUNCTION_TIMEOUT; | 263 | rval = QLA_FUNCTION_TIMEOUT; |
@@ -254,6 +271,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
254 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 271 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
255 | 272 | ||
256 | wait_time = jiffies; | 273 | wait_time = jiffies; |
274 | atomic_inc(&ha->num_pend_mbx_stage3); | ||
257 | if (!wait_for_completion_timeout(&ha->mbx_intr_comp, | 275 | if (!wait_for_completion_timeout(&ha->mbx_intr_comp, |
258 | mcp->tov * HZ)) { | 276 | mcp->tov * HZ)) { |
259 | ql_dbg(ql_dbg_mbx, vha, 0x117a, | 277 | ql_dbg(ql_dbg_mbx, vha, 0x117a, |
@@ -261,7 +279,17 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
261 | spin_lock_irqsave(&ha->hardware_lock, flags); | 279 | spin_lock_irqsave(&ha->hardware_lock, flags); |
262 | clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); | 280 | clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); |
263 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 281 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
282 | |||
283 | } else if (ha->flags.purge_mbox || | ||
284 | chip_reset != ha->chip_reset) { | ||
285 | ha->flags.mbox_busy = 0; | ||
286 | atomic_dec(&ha->num_pend_mbx_stage2); | ||
287 | atomic_dec(&ha->num_pend_mbx_stage3); | ||
288 | rval = QLA_ABORTED; | ||
289 | goto premature_exit; | ||
264 | } | 290 | } |
291 | atomic_dec(&ha->num_pend_mbx_stage3); | ||
292 | |||
265 | if (time_after(jiffies, wait_time + 5 * HZ)) | 293 | if (time_after(jiffies, wait_time + 5 * HZ)) |
266 | ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n", | 294 | ql_log(ql_log_warn, vha, 0x1015, "cmd=0x%x, waited %d msecs\n", |
267 | command, jiffies_to_msecs(jiffies - wait_time)); | 295 | command, jiffies_to_msecs(jiffies - wait_time)); |
@@ -275,6 +303,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
275 | spin_unlock_irqrestore(&ha->hardware_lock, | 303 | spin_unlock_irqrestore(&ha->hardware_lock, |
276 | flags); | 304 | flags); |
277 | ha->flags.mbox_busy = 0; | 305 | ha->flags.mbox_busy = 0; |
306 | atomic_dec(&ha->num_pend_mbx_stage2); | ||
278 | ql_dbg(ql_dbg_mbx, vha, 0x1012, | 307 | ql_dbg(ql_dbg_mbx, vha, 0x1012, |
279 | "Pending mailbox timeout, exiting.\n"); | 308 | "Pending mailbox timeout, exiting.\n"); |
280 | rval = QLA_FUNCTION_TIMEOUT; | 309 | rval = QLA_FUNCTION_TIMEOUT; |
@@ -289,6 +318,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
289 | 318 | ||
290 | wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */ | 319 | wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */ |
291 | while (!ha->flags.mbox_int) { | 320 | while (!ha->flags.mbox_int) { |
321 | if (ha->flags.purge_mbox || | ||
322 | chip_reset != ha->chip_reset) { | ||
323 | ha->flags.mbox_busy = 0; | ||
324 | atomic_dec(&ha->num_pend_mbx_stage2); | ||
325 | rval = QLA_ABORTED; | ||
326 | goto premature_exit; | ||
327 | } | ||
328 | |||
292 | if (time_after(jiffies, wait_time)) | 329 | if (time_after(jiffies, wait_time)) |
293 | break; | 330 | break; |
294 | 331 | ||
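On the issuing side, qla2x00_mailbox_command() snapshots ha->chip_reset before queuing the command and re-checks it, together with purge_mbox, at each place it can wake up: after the interrupt completion, inside the polling loop above, and under the hardware lock before loading the registers. A mismatch means ISP recovery ran underneath the command, so it unwinds with QLA_ABORTED rather than interpreting stale mailbox registers. A minimal sketch of that generation-token check, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct hw_state {
	unsigned int chip_reset;	/* bumped by the recovery path */
	bool purge_mbox;		/* set while recovery drains commands */
};

enum { CMD_OK, CMD_ABORTED };

/* Returns CMD_ABORTED when a reset generation change (or an active
 * purge) is observed between snapshotting the token and re-checking
 * it after a wait; callers then skip result processing entirely. */
static int mbx_check_after_wait(const struct hw_state *hw,
				unsigned int token_at_entry)
{
	if (hw->purge_mbox || token_at_entry != hw->chip_reset)
		return CMD_ABORTED;
	return CMD_OK;
}

int main(void)
{
	struct hw_state hw = { .chip_reset = 7 };
	unsigned int token = hw.chip_reset;	/* snapshot before waiting */

	hw.chip_reset++;			/* recovery ran meanwhile */
	printf("%s\n", mbx_check_after_wait(&hw, token) == CMD_ABORTED ?
	       "aborted" : "ok");
	return 0;
}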
@@ -312,6 +349,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
312 | "Waited %d sec.\n", | 349 | "Waited %d sec.\n", |
313 | (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)); | 350 | (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)); |
314 | } | 351 | } |
352 | atomic_dec(&ha->num_pend_mbx_stage2); | ||
315 | 353 | ||
316 | /* Check whether we timed out */ | 354 | /* Check whether we timed out */ |
317 | if (ha->flags.mbox_int) { | 355 | if (ha->flags.mbox_int) { |
@@ -390,7 +428,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) | |||
390 | /* Capture FW dump only, if PCI device active */ | 428 | /* Capture FW dump only, if PCI device active */ |
391 | if (!pci_channel_offline(vha->hw->pdev)) { | 429 | if (!pci_channel_offline(vha->hw->pdev)) { |
392 | pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); | 430 | pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); |
393 | if (w == 0xffff || ictrl == 0xffffffff) { | 431 | if (w == 0xffff || ictrl == 0xffffffff || |
432 | (chip_reset != ha->chip_reset)) { | ||
394 | /* This is special case if there is unload | 433 | /* This is special case if there is unload |
395 | * of driver happening and if PCI device go | 434 | * of driver happening and if PCI device go |
396 | * into bad state due to PCI error condition | 435 | * into bad state due to PCI error condition |
@@ -497,7 +536,11 @@ premature_exit: | |||
497 | complete(&ha->mbx_cmd_comp); | 536 | complete(&ha->mbx_cmd_comp); |
498 | 537 | ||
499 | mbx_done: | 538 | mbx_done: |
500 | if (rval) { | 539 | if (rval == QLA_ABORTED) { |
540 | ql_log(ql_log_info, vha, 0xd035, | ||
541 | "Chip Reset in progress. Purging Mbox cmd=0x%x.\n", | ||
542 | mcp->mb[0]); | ||
543 | } else if (rval) { | ||
501 | if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) { | 544 | if (ql2xextended_error_logging & (ql_dbg_disc|ql_dbg_mbx)) { |
502 | pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR, | 545 | pr_warn("%s [%s]-%04x:%ld: **** Failed", QL_MSGHDR, |
503 | dev_name(&ha->pdev->dev), 0x1020+0x800, | 546 | dev_name(&ha->pdev->dev), 0x1020+0x800, |
@@ -2177,7 +2220,10 @@ qla2x00_lip_reset(scsi_qla_host_t *vha) | |||
2177 | mcp->out_mb = MBX_2|MBX_1|MBX_0; | 2220 | mcp->out_mb = MBX_2|MBX_1|MBX_0; |
2178 | } else if (IS_FWI2_CAPABLE(vha->hw)) { | 2221 | } else if (IS_FWI2_CAPABLE(vha->hw)) { |
2179 | mcp->mb[0] = MBC_LIP_FULL_LOGIN; | 2222 | mcp->mb[0] = MBC_LIP_FULL_LOGIN; |
2180 | mcp->mb[1] = BIT_6; | 2223 | if (N2N_TOPO(vha->hw)) |
2224 | mcp->mb[1] = BIT_4; /* re-init */ | ||
2225 | else | ||
2226 | mcp->mb[1] = BIT_6; /* LIP */ | ||
2181 | mcp->mb[2] = 0; | 2227 | mcp->mb[2] = 0; |
2182 | mcp->mb[3] = vha->hw->loop_reset_delay; | 2228 | mcp->mb[3] = vha->hw->loop_reset_delay; |
2183 | mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; | 2229 | mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; |
@@ -3797,30 +3843,68 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, | |||
3797 | "Format 1: WWPN %8phC.\n", | 3843 | "Format 1: WWPN %8phC.\n", |
3798 | vha->port_name); | 3844 | vha->port_name); |
3799 | 3845 | ||
3800 | /* N2N. direct connect */ | 3846 | switch (rptid_entry->u.f1.flags & TOPO_MASK) { |
3801 | if (IS_QLA27XX(ha) && | 3847 | case TOPO_N2N: |
3802 | ((rptid_entry->u.f1.flags>>1) & 0x7) == 2) { | 3848 | ha->current_topology = ISP_CFG_N; |
3803 | /* if our portname is higher then initiate N2N login */ | 3849 | spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); |
3804 | if (wwn_to_u64(vha->port_name) > | 3850 | fcport = qla2x00_find_fcport_by_wwpn(vha, |
3805 | wwn_to_u64(rptid_entry->u.f1.port_name)) { | 3851 | rptid_entry->u.f1.port_name, 1); |
3806 | // ??? qlt_update_host_map(vha, id); | 3852 | spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); |
3807 | vha->n2n_id = 0x1; | 3853 | |
3808 | ql_dbg(ql_dbg_async, vha, 0x5075, | 3854 | if (fcport) { |
3809 | "Format 1: Setting n2n_update_needed for id %d\n", | 3855 | fcport->plogi_nack_done_deadline = jiffies + HZ; |
3810 | vha->n2n_id); | 3856 | fcport->dm_login_expire = jiffies + 3*HZ; |
3857 | fcport->scan_state = QLA_FCPORT_FOUND; | ||
3858 | switch (fcport->disc_state) { | ||
3859 | case DSC_DELETED: | ||
3860 | set_bit(RELOGIN_NEEDED, | ||
3861 | &vha->dpc_flags); | ||
3862 | break; | ||
3863 | case DSC_DELETE_PEND: | ||
3864 | break; | ||
3865 | default: | ||
3866 | qlt_schedule_sess_for_deletion(fcport); | ||
3867 | break; | ||
3868 | } | ||
3811 | } else { | 3869 | } else { |
3812 | ql_dbg(ql_dbg_async, vha, 0x5075, | 3870 | id.b24 = 0; |
3813 | "Format 1: Remote login - Waiting for WWPN %8phC.\n", | 3871 | if (wwn_to_u64(vha->port_name) > |
3814 | rptid_entry->u.f1.port_name); | 3872 | wwn_to_u64(rptid_entry->u.f1.port_name)) { |
3873 | vha->d_id.b24 = 0; | ||
3874 | vha->d_id.b.al_pa = 1; | ||
3875 | ha->flags.n2n_bigger = 1; | ||
3876 | |||
3877 | id.b.al_pa = 2; | ||
3878 | ql_dbg(ql_dbg_async, vha, 0x5075, | ||
3879 | "Format 1: assign local id %x remote id %x\n", | ||
3880 | vha->d_id.b24, id.b24); | ||
3881 | } else { | ||
3882 | ql_dbg(ql_dbg_async, vha, 0x5075, | ||
3883 | "Format 1: Remote login - Waiting for WWPN %8phC.\n", | ||
3884 | rptid_entry->u.f1.port_name); | ||
3885 | ha->flags.n2n_bigger = 0; | ||
3886 | } | ||
3887 | qla24xx_post_newsess_work(vha, &id, | ||
3888 | rptid_entry->u.f1.port_name, | ||
3889 | rptid_entry->u.f1.node_name, | ||
3890 | NULL, | ||
3891 | FC4_TYPE_UNKNOWN); | ||
3815 | } | 3892 | } |
3816 | 3893 | ||
3817 | memcpy(vha->n2n_port_name, rptid_entry->u.f1.port_name, | 3894 | /* if our portname is higher then initiate N2N login */ |
3818 | WWN_SIZE); | 3895 | |
3819 | set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags); | 3896 | set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags); |
3820 | set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); | ||
3821 | set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); | ||
3822 | ha->flags.n2n_ae = 1; | 3897 | ha->flags.n2n_ae = 1; |
3823 | return; | 3898 | return; |
3899 | break; | ||
3900 | case TOPO_FL: | ||
3901 | ha->current_topology = ISP_CFG_FL; | ||
3902 | break; | ||
3903 | case TOPO_F: | ||
3904 | ha->current_topology = ISP_CFG_F; | ||
3905 | break; | ||
3906 | default: | ||
3907 | break; | ||
3824 | } | 3908 | } |
3825 | 3909 | ||
3826 | ha->flags.gpsc_supported = 1; | 3910 | ha->flags.gpsc_supported = 1; |
@@ -3909,30 +3993,9 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, | |||
3909 | rptid_entry->u.f2.port_name, 1); | 3993 | rptid_entry->u.f2.port_name, 1); |
3910 | 3994 | ||
3911 | if (fcport) { | 3995 | if (fcport) { |
3996 | fcport->login_retry = vha->hw->login_retry_count; | ||
3912 | fcport->plogi_nack_done_deadline = jiffies + HZ; | 3997 | fcport->plogi_nack_done_deadline = jiffies + HZ; |
3913 | fcport->scan_state = QLA_FCPORT_FOUND; | 3998 | fcport->scan_state = QLA_FCPORT_FOUND; |
3914 | switch (fcport->disc_state) { | ||
3915 | case DSC_DELETED: | ||
3916 | ql_dbg(ql_dbg_disc, vha, 0x210d, | ||
3917 | "%s %d %8phC login\n", | ||
3918 | __func__, __LINE__, fcport->port_name); | ||
3919 | qla24xx_fcport_handle_login(vha, fcport); | ||
3920 | break; | ||
3921 | case DSC_DELETE_PEND: | ||
3922 | break; | ||
3923 | default: | ||
3924 | qlt_schedule_sess_for_deletion(fcport); | ||
3925 | break; | ||
3926 | } | ||
3927 | } else { | ||
3928 | id.b.al_pa = rptid_entry->u.f2.remote_nport_id[0]; | ||
3929 | id.b.area = rptid_entry->u.f2.remote_nport_id[1]; | ||
3930 | id.b.domain = rptid_entry->u.f2.remote_nport_id[2]; | ||
3931 | qla24xx_post_newsess_work(vha, &id, | ||
3932 | rptid_entry->u.f2.port_name, | ||
3933 | rptid_entry->u.f2.node_name, | ||
3934 | NULL, | ||
3935 | FC4_TYPE_UNKNOWN); | ||
3936 | } | 3999 | } |
3937 | } | 4000 | } |
3938 | } | 4001 | } |
@@ -4663,7 +4726,7 @@ qla24xx_get_port_login_templ(scsi_qla_host_t *vha, dma_addr_t buf_dma, | |||
4663 | "Done %s.\n", __func__); | 4726 | "Done %s.\n", __func__); |
4664 | bp = (uint32_t *) buf; | 4727 | bp = (uint32_t *) buf; |
4665 | for (i = 0; i < (bufsiz-4)/4; i++, bp++) | 4728 | for (i = 0; i < (bufsiz-4)/4; i++, bp++) |
4666 | *bp = cpu_to_be32(*bp); | 4729 | *bp = le32_to_cpu(*bp); |
4667 | } | 4730 | } |
4668 | 4731 | ||
4669 | return rval; | 4732 | return rval; |
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index aa727d07b702..d620f4bebcd0 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c | |||
@@ -492,7 +492,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport) | |||
492 | "Couldn't allocate vp_id.\n"); | 492 | "Couldn't allocate vp_id.\n"); |
493 | goto create_vhost_failed; | 493 | goto create_vhost_failed; |
494 | } | 494 | } |
495 | vha->mgmt_svr_loop_id = NPH_MGMT_SERVER; | 495 | vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha); |
496 | 496 | ||
497 | vha->dpc_flags = 0L; | 497 | vha->dpc_flags = 0L; |
498 | 498 | ||
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index c5a963c2c86e..20d9dc39f0fb 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c | |||
@@ -30,6 +30,9 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport) | |||
30 | return 0; | 30 | return 0; |
31 | } | 31 | } |
32 | 32 | ||
33 | if (!vha->nvme_local_port && qla_nvme_register_hba(vha)) | ||
34 | return 0; | ||
35 | |||
33 | if (!(fcport->nvme_prli_service_param & | 36 | if (!(fcport->nvme_prli_service_param & |
34 | (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) || | 37 | (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) || |
35 | (fcport->nvme_flag & NVME_FLAG_REGISTERED)) | 38 | (fcport->nvme_flag & NVME_FLAG_REGISTERED)) |
@@ -676,15 +679,15 @@ void qla_nvme_delete(struct scsi_qla_host *vha) | |||
676 | } | 679 | } |
677 | } | 680 | } |
678 | 681 | ||
679 | void qla_nvme_register_hba(struct scsi_qla_host *vha) | 682 | int qla_nvme_register_hba(struct scsi_qla_host *vha) |
680 | { | 683 | { |
681 | struct nvme_fc_port_template *tmpl; | 684 | struct nvme_fc_port_template *tmpl; |
682 | struct qla_hw_data *ha; | 685 | struct qla_hw_data *ha; |
683 | struct nvme_fc_port_info pinfo; | 686 | struct nvme_fc_port_info pinfo; |
684 | int ret; | 687 | int ret = EINVAL; |
685 | 688 | ||
686 | if (!IS_ENABLED(CONFIG_NVME_FC)) | 689 | if (!IS_ENABLED(CONFIG_NVME_FC)) |
687 | return; | 690 | return ret; |
688 | 691 | ||
689 | ha = vha->hw; | 692 | ha = vha->hw; |
690 | tmpl = &qla_nvme_fc_transport; | 693 | tmpl = &qla_nvme_fc_transport; |
@@ -711,7 +714,9 @@ void qla_nvme_register_hba(struct scsi_qla_host *vha) | |||
711 | if (ret) { | 714 | if (ret) { |
712 | ql_log(ql_log_warn, vha, 0xffff, | 715 | ql_log(ql_log_warn, vha, 0xffff, |
713 | "register_localport failed: ret=%x\n", ret); | 716 | "register_localport failed: ret=%x\n", ret); |
714 | return; | 717 | } else { |
718 | vha->nvme_local_port->private = vha; | ||
715 | } | 719 | } |
716 | vha->nvme_local_port->private = vha; | 720 | |
721 | return ret; | ||
717 | } | 722 | } |
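qla_nvme_register_hba() now reports success or failure, which lets qla_nvme_register_remote() create the local NVMe port on demand before it adds a remote one; if local registration fails, the remote registration is quietly skipped. A small sketch of that lazy-registration guard; the types and names are invented for the example.

#include <stdbool.h>
#include <stdio.h>

struct host_model {
	bool nvme_enabled;
	void *local_port;		/* NULL until registered */
};

static int register_local_port(struct host_model *h)
{
	static int dummy;

	if (!h->nvme_enabled)
		return -1;
	h->local_port = &dummy;		/* pretend the transport gave us a handle */
	return 0;
}

/* Register a remote port; create the local port first if it does not
 * exist yet, and bail out quietly if that fails (mirrors the new
 * "if (!vha->nvme_local_port && qla_nvme_register_hba(vha)) return 0;"). */
static int register_remote_port(struct host_model *h, const char *name)
{
	if (!h->local_port && register_local_port(h))
		return 0;
	printf("registered remote %s\n", name);
	return 0;
}

int main(void)
{
	struct host_model h = { .nvme_enabled = true };

	register_remote_port(&h, "rport0");
	return 0;
}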
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h index 816854ada654..4941d107fb1c 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.h +++ b/drivers/scsi/qla2xxx/qla_nvme.h | |||
@@ -142,7 +142,7 @@ struct pt_ls4_rx_unsol { | |||
142 | /* | 142 | /* |
143 | * Global functions prototype in qla_nvme.c source file. | 143 | * Global functions prototype in qla_nvme.c source file. |
144 | */ | 144 | */ |
145 | void qla_nvme_register_hba(struct scsi_qla_host *); | 145 | int qla_nvme_register_hba(struct scsi_qla_host *); |
146 | int qla_nvme_register_remote(struct scsi_qla_host *, struct fc_port *); | 146 | int qla_nvme_register_remote(struct scsi_qla_host *, struct fc_port *); |
147 | void qla_nvme_delete(struct scsi_qla_host *); | 147 | void qla_nvme_delete(struct scsi_qla_host *); |
148 | void qla_nvme_abort(struct qla_hw_data *, struct srb *sp, int res); | 148 | void qla_nvme_abort(struct qla_hw_data *, struct srb *sp, int res); |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 1fbd16c8c9a7..42b8f0d3e580 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -2816,6 +2816,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
2816 | ha->link_data_rate = PORT_SPEED_UNKNOWN; | 2816 | ha->link_data_rate = PORT_SPEED_UNKNOWN; |
2817 | ha->optrom_size = OPTROM_SIZE_2300; | 2817 | ha->optrom_size = OPTROM_SIZE_2300; |
2818 | ha->max_exchg = FW_MAX_EXCHANGES_CNT; | 2818 | ha->max_exchg = FW_MAX_EXCHANGES_CNT; |
2819 | atomic_set(&ha->num_pend_mbx_stage1, 0); | ||
2820 | atomic_set(&ha->num_pend_mbx_stage2, 0); | ||
2821 | atomic_set(&ha->num_pend_mbx_stage3, 0); | ||
2819 | 2822 | ||
2820 | /* Assign ISP specific operations. */ | 2823 | /* Assign ISP specific operations. */ |
2821 | if (IS_QLA2100(ha)) { | 2824 | if (IS_QLA2100(ha)) { |
@@ -3046,7 +3049,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
3046 | host = base_vha->host; | 3049 | host = base_vha->host; |
3047 | base_vha->req = req; | 3050 | base_vha->req = req; |
3048 | if (IS_QLA2XXX_MIDTYPE(ha)) | 3051 | if (IS_QLA2XXX_MIDTYPE(ha)) |
3049 | base_vha->mgmt_svr_loop_id = NPH_MGMT_SERVER; | 3052 | base_vha->mgmt_svr_loop_id = |
3053 | qla2x00_reserve_mgmt_server_loop_id(base_vha); | ||
3050 | else | 3054 | else |
3051 | base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER + | 3055 | base_vha->mgmt_svr_loop_id = MANAGEMENT_SERVER + |
3052 | base_vha->vp_idx; | 3056 | base_vha->vp_idx; |
@@ -3830,14 +3834,6 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, | |||
3830 | return; | 3834 | return; |
3831 | 3835 | ||
3832 | set_bit(RELOGIN_NEEDED, &vha->dpc_flags); | 3836 | set_bit(RELOGIN_NEEDED, &vha->dpc_flags); |
3833 | |||
3834 | if (fcport->login_retry == 0) { | ||
3835 | fcport->login_retry = vha->hw->login_retry_count; | ||
3836 | |||
3837 | ql_dbg(ql_dbg_disc, vha, 0x20a3, | ||
3838 | "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n", | ||
3839 | fcport->port_name, fcport->loop_id, fcport->login_retry); | ||
3840 | } | ||
3841 | } | 3837 | } |
3842 | 3838 | ||
3843 | /* | 3839 | /* |
@@ -4785,7 +4781,6 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) | |||
4785 | struct qlt_plogi_ack_t *pla = | 4781 | struct qlt_plogi_ack_t *pla = |
4786 | (struct qlt_plogi_ack_t *)e->u.new_sess.pla; | 4782 | (struct qlt_plogi_ack_t *)e->u.new_sess.pla; |
4787 | uint8_t free_fcport = 0; | 4783 | uint8_t free_fcport = 0; |
4788 | u64 wwn; | ||
4789 | 4784 | ||
4790 | ql_dbg(ql_dbg_disc, vha, 0xffff, | 4785 | ql_dbg(ql_dbg_disc, vha, 0xffff, |
4791 | "%s %d %8phC enter\n", | 4786 | "%s %d %8phC enter\n", |
@@ -4813,10 +4808,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) | |||
4813 | fcport->d_id = e->u.new_sess.id; | 4808 | fcport->d_id = e->u.new_sess.id; |
4814 | fcport->flags |= FCF_FABRIC_DEVICE; | 4809 | fcport->flags |= FCF_FABRIC_DEVICE; |
4815 | fcport->fw_login_state = DSC_LS_PLOGI_PEND; | 4810 | fcport->fw_login_state = DSC_LS_PLOGI_PEND; |
4816 | if (e->u.new_sess.fc4_type & FS_FC4TYPE_FCP) | 4811 | if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP) |
4817 | fcport->fc4_type = FC4_TYPE_FCP_SCSI; | 4812 | fcport->fc4_type = FC4_TYPE_FCP_SCSI; |
4818 | 4813 | ||
4819 | if (e->u.new_sess.fc4_type & FS_FC4TYPE_NVME) { | 4814 | if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) { |
4820 | fcport->fc4_type = FC4_TYPE_OTHER; | 4815 | fcport->fc4_type = FC4_TYPE_OTHER; |
4821 | fcport->fc4f_nvme = FC4_TYPE_NVME; | 4816 | fcport->fc4f_nvme = FC4_TYPE_NVME; |
4822 | } | 4817 | } |
@@ -4858,9 +4853,6 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) | |||
4858 | spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); | 4853 | spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); |
4859 | 4854 | ||
4860 | if (fcport) { | 4855 | if (fcport) { |
4861 | if (N2N_TOPO(vha->hw)) | ||
4862 | fcport->flags &= ~FCF_FABRIC_DEVICE; | ||
4863 | |||
4864 | fcport->id_changed = 1; | 4856 | fcport->id_changed = 1; |
4865 | fcport->scan_state = QLA_FCPORT_FOUND; | 4857 | fcport->scan_state = QLA_FCPORT_FOUND; |
4866 | memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE); | 4858 | memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE); |
@@ -4921,12 +4913,22 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) | |||
4921 | if (dfcp) | 4913 | if (dfcp) |
4922 | qlt_schedule_sess_for_deletion(tfcp); | 4914 | qlt_schedule_sess_for_deletion(tfcp); |
4923 | 4915 | ||
4924 | wwn = wwn_to_u64(fcport->node_name); | ||
4925 | 4916 | ||
4926 | if (!wwn) | 4917 | if (N2N_TOPO(vha->hw)) |
4927 | qla24xx_async_gnnid(vha, fcport); | 4918 | fcport->flags &= ~FCF_FABRIC_DEVICE; |
4928 | else | 4919 | |
4929 | qla24xx_async_gnl(vha, fcport); | 4920 | if (N2N_TOPO(vha->hw)) { |
4921 | if (vha->flags.nvme_enabled) { | ||
4922 | fcport->fc4f_nvme = 1; | ||
4923 | fcport->n2n_flag = 1; | ||
4924 | } | ||
4925 | fcport->fw_login_state = 0; | ||
4926 | /* | ||
4927 | * wait link init done before sending login | ||
4928 | */ | ||
4929 | } else { | ||
4930 | qla24xx_fcport_handle_login(vha, fcport); | ||
4931 | } | ||
4930 | } | 4932 | } |
4931 | } | 4933 | } |
4932 | 4934 | ||
@@ -5061,6 +5063,10 @@ qla2x00_do_work(struct scsi_qla_host *vha) | |||
5061 | case QLA_EVT_IIDMA: | 5063 | case QLA_EVT_IIDMA: |
5062 | qla_do_iidma_work(vha, e->u.fcport.fcport); | 5064 | qla_do_iidma_work(vha, e->u.fcport.fcport); |
5063 | break; | 5065 | break; |
5066 | case QLA_EVT_ELS_PLOGI: | ||
5067 | qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI, | ||
5068 | e->u.fcport.fcport, false); | ||
5069 | break; | ||
5064 | } | 5070 | } |
5065 | if (e->flags & QLA_EVT_FLAG_FREE) | 5071 | if (e->flags & QLA_EVT_FLAG_FREE) |
5066 | kfree(e); | 5072 | kfree(e); |
@@ -5090,7 +5096,7 @@ int qla24xx_post_relogin_work(struct scsi_qla_host *vha) | |||
5090 | void qla2x00_relogin(struct scsi_qla_host *vha) | 5096 | void qla2x00_relogin(struct scsi_qla_host *vha) |
5091 | { | 5097 | { |
5092 | fc_port_t *fcport; | 5098 | fc_port_t *fcport; |
5093 | int status; | 5099 | int status, relogin_needed = 0; |
5094 | struct event_arg ea; | 5100 | struct event_arg ea; |
5095 | 5101 | ||
5096 | list_for_each_entry(fcport, &vha->vp_fcports, list) { | 5102 | list_for_each_entry(fcport, &vha->vp_fcports, list) { |
@@ -5099,47 +5105,59 @@ void qla2x00_relogin(struct scsi_qla_host *vha) | |||
5099 | * to it if we haven't run out of retries. | 5105 | * to it if we haven't run out of retries. |
5100 | */ | 5106 | */ |
5101 | if (atomic_read(&fcport->state) != FCS_ONLINE && | 5107 | if (atomic_read(&fcport->state) != FCS_ONLINE && |
5102 | fcport->login_retry && | 5108 | fcport->login_retry) { |
5103 | !(fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE))) { | 5109 | if (fcport->scan_state != QLA_FCPORT_FOUND || |
5104 | if (vha->hw->current_topology != ISP_CFG_NL) { | 5110 | fcport->disc_state == DSC_LOGIN_COMPLETE) |
5105 | ql_dbg(ql_dbg_disc, fcport->vha, 0x2108, | 5111 | continue; |
5106 | "%s %8phC DS %d LS %d\n", __func__, | 5112 | |
5107 | fcport->port_name, fcport->disc_state, | 5113 | if (fcport->flags & (FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE) || |
5108 | fcport->fw_login_state); | 5114 | fcport->disc_state == DSC_DELETE_PEND) { |
5109 | memset(&ea, 0, sizeof(ea)); | 5115 | relogin_needed = 1; |
5110 | ea.event = FCME_RELOGIN; | 5116 | } else { |
5111 | ea.fcport = fcport; | 5117 | if (vha->hw->current_topology != ISP_CFG_NL) { |
5112 | qla2x00_fcport_event_handler(vha, &ea); | 5118 | memset(&ea, 0, sizeof(ea)); |
5113 | } else if (vha->hw->current_topology == ISP_CFG_NL) { | 5119 | ea.event = FCME_RELOGIN; |
5114 | fcport->login_retry--; | 5120 | ea.fcport = fcport; |
5115 | status = qla2x00_local_device_login(vha, | 5121 | qla2x00_fcport_event_handler(vha, &ea); |
5116 | fcport); | 5122 | } else if (vha->hw->current_topology == |
5117 | if (status == QLA_SUCCESS) { | 5123 | ISP_CFG_NL) { |
5118 | fcport->old_loop_id = fcport->loop_id; | 5124 | fcport->login_retry--; |
5119 | ql_dbg(ql_dbg_disc, vha, 0x2003, | 5125 | status = |
5120 | "Port login OK: logged in ID 0x%x.\n", | 5126 | qla2x00_local_device_login(vha, |
5121 | fcport->loop_id); | 5127 | fcport); |
5122 | qla2x00_update_fcport(vha, fcport); | 5128 | if (status == QLA_SUCCESS) { |
5123 | } else if (status == 1) { | 5129 | fcport->old_loop_id = |
5124 | set_bit(RELOGIN_NEEDED, &vha->dpc_flags); | 5130 | fcport->loop_id; |
5125 | /* retry the login again */ | 5131 | ql_dbg(ql_dbg_disc, vha, 0x2003, |
5126 | ql_dbg(ql_dbg_disc, vha, 0x2007, | 5132 | "Port login OK: logged in ID 0x%x.\n", |
5127 | "Retrying %d login again loop_id 0x%x.\n", | 5133 | fcport->loop_id); |
5128 | fcport->login_retry, | 5134 | qla2x00_update_fcport |
5129 | fcport->loop_id); | 5135 | (vha, fcport); |
5130 | } else { | 5136 | } else if (status == 1) { |
5131 | fcport->login_retry = 0; | 5137 | set_bit(RELOGIN_NEEDED, |
5132 | } | 5138 | &vha->dpc_flags); |
5139 | /* retry the login again */ | ||
5140 | ql_dbg(ql_dbg_disc, vha, 0x2007, | ||
5141 | "Retrying %d login again loop_id 0x%x.\n", | ||
5142 | fcport->login_retry, | ||
5143 | fcport->loop_id); | ||
5144 | } else { | ||
5145 | fcport->login_retry = 0; | ||
5146 | } | ||
5133 | 5147 | ||
5134 | if (fcport->login_retry == 0 && | 5148 | if (fcport->login_retry == 0 && |
5135 | status != QLA_SUCCESS) | 5149 | status != QLA_SUCCESS) |
5136 | qla2x00_clear_loop_id(fcport); | 5150 | qla2x00_clear_loop_id(fcport); |
5151 | } | ||
5137 | } | 5152 | } |
5138 | } | 5153 | } |
5139 | if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) | 5154 | if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) |
5140 | break; | 5155 | break; |
5141 | } | 5156 | } |
5142 | 5157 | ||
5158 | if (relogin_needed) | ||
5159 | set_bit(RELOGIN_NEEDED, &vha->dpc_flags); | ||
5160 | |||
5143 | ql_dbg(ql_dbg_disc, vha, 0x400e, | 5161 | ql_dbg(ql_dbg_disc, vha, 0x400e, |
5144 | "Relogin end.\n"); | 5162 | "Relogin end.\n"); |
5145 | } | 5163 | } |
@@ -6179,6 +6197,11 @@ intr_on_check: | |||
6179 | if (!IS_QLAFX00(ha)) | 6197 | if (!IS_QLAFX00(ha)) |
6180 | qla2x00_do_dpc_all_vps(base_vha); | 6198 | qla2x00_do_dpc_all_vps(base_vha); |
6181 | 6199 | ||
6200 | if (test_and_clear_bit(N2N_LINK_RESET, | ||
6201 | &base_vha->dpc_flags)) { | ||
6202 | qla2x00_lip_reset(base_vha); | ||
6203 | } | ||
6204 | |||
6182 | ha->dpc_active = 0; | 6205 | ha->dpc_active = 0; |
6183 | end_loop: | 6206 | end_loop: |
6184 | set_current_state(TASK_INTERRUPTIBLE); | 6207 | set_current_state(TASK_INTERRUPTIBLE); |
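
The qla_os.c hunks above rework relogin handling: when a session still has an async login in flight or is pending deletion, qla2x00_relogin() no longer drops the port silently; it records that a retry is outstanding and re-arms the DPC thread via RELOGIN_NEEDED, and the DPC loop now also issues a LIP reset once N2N_LINK_RESET is set. A minimal sketch of that defer-and-retry shape, using hypothetical names (struct port, issue_login(), WORK_RELOGIN) rather than the driver's real symbols:

	/* Sketch only: defer work that cannot run now, let a worker retry. */
	static void relogin_pass(struct list_head *ports, unsigned long *dpc_flags)
	{
		struct port *p;
		int relogin_needed = 0;

		list_for_each_entry(p, ports, list) {
			if (p->async_active || p->delete_pending)
				relogin_needed = 1;	/* busy now, retry later */
			else
				issue_login(p);		/* normal login path */
		}

		if (relogin_needed)
			set_bit(WORK_RELOGIN, dpc_flags); /* DPC thread re-runs us */
	}
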
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 1027b0cb7fa3..8c811b251d42 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c | |||
@@ -805,6 +805,10 @@ qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id, | |||
805 | 805 | ||
806 | list_for_each_entry(pla, &vha->plogi_ack_list, list) { | 806 | list_for_each_entry(pla, &vha->plogi_ack_list, list) { |
807 | if (pla->id.b24 == id->b24) { | 807 | if (pla->id.b24 == id->b24) { |
808 | ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d, | ||
809 | "%s %d %8phC Term INOT due to new INOT", | ||
810 | __func__, __LINE__, | ||
811 | pla->iocb.u.isp24.port_name); | ||
808 | qlt_send_term_imm_notif(vha, &pla->iocb, 1); | 812 | qlt_send_term_imm_notif(vha, &pla->iocb, 1); |
809 | memcpy(&pla->iocb, iocb, sizeof(pla->iocb)); | 813 | memcpy(&pla->iocb, iocb, sizeof(pla->iocb)); |
810 | return pla; | 814 | return pla; |
@@ -982,8 +986,9 @@ void qlt_free_session_done(struct work_struct *work) | |||
982 | 986 | ||
983 | logo.id = sess->d_id; | 987 | logo.id = sess->d_id; |
984 | logo.cmd_count = 0; | 988 | logo.cmd_count = 0; |
989 | if (!own) | ||
990 | qlt_send_first_logo(vha, &logo); | ||
985 | sess->send_els_logo = 0; | 991 | sess->send_els_logo = 0; |
986 | qlt_send_first_logo(vha, &logo); | ||
987 | } | 992 | } |
988 | 993 | ||
989 | if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) { | 994 | if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) { |
@@ -1053,7 +1058,6 @@ void qlt_free_session_done(struct work_struct *work) | |||
1053 | sess->disc_state = DSC_DELETED; | 1058 | sess->disc_state = DSC_DELETED; |
1054 | sess->fw_login_state = DSC_LS_PORT_UNAVAIL; | 1059 | sess->fw_login_state = DSC_LS_PORT_UNAVAIL; |
1055 | sess->deleted = QLA_SESS_DELETED; | 1060 | sess->deleted = QLA_SESS_DELETED; |
1056 | sess->login_retry = vha->hw->login_retry_count; | ||
1057 | 1061 | ||
1058 | if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) { | 1062 | if (sess->login_succ && !IS_SW_RESV_ADDR(sess->d_id)) { |
1059 | vha->fcport_count--; | 1063 | vha->fcport_count--; |
@@ -1073,6 +1077,7 @@ void qlt_free_session_done(struct work_struct *work) | |||
1073 | struct qlt_plogi_ack_t *con = | 1077 | struct qlt_plogi_ack_t *con = |
1074 | sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]; | 1078 | sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]; |
1075 | struct imm_ntfy_from_isp *iocb; | 1079 | struct imm_ntfy_from_isp *iocb; |
1080 | own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]; | ||
1076 | 1081 | ||
1077 | if (con) { | 1082 | if (con) { |
1078 | iocb = &con->iocb; | 1083 | iocb = &con->iocb; |
@@ -1156,7 +1161,7 @@ void qlt_unreg_sess(struct fc_port *sess) | |||
1156 | if (sess->se_sess) | 1161 | if (sess->se_sess) |
1157 | vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); | 1162 | vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); |
1158 | 1163 | ||
1159 | qla2x00_mark_device_lost(vha, sess, 1, 1); | 1164 | qla2x00_mark_device_lost(vha, sess, 0, 0); |
1160 | 1165 | ||
1161 | sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; | 1166 | sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; |
1162 | sess->disc_state = DSC_DELETE_PEND; | 1167 | sess->disc_state = DSC_DELETE_PEND; |
@@ -3782,7 +3787,7 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd) | |||
3782 | return; | 3787 | return; |
3783 | } | 3788 | } |
3784 | cmd->jiffies_at_free = get_jiffies_64(); | 3789 | cmd->jiffies_at_free = get_jiffies_64(); |
3785 | percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); | 3790 | target_free_tag(sess->se_sess, &cmd->se_cmd); |
3786 | } | 3791 | } |
3787 | EXPORT_SYMBOL(qlt_free_cmd); | 3792 | EXPORT_SYMBOL(qlt_free_cmd); |
3788 | 3793 | ||
@@ -4145,7 +4150,7 @@ out_term: | |||
4145 | qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0); | 4150 | qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0); |
4146 | 4151 | ||
4147 | qlt_decr_num_pend_cmds(vha); | 4152 | qlt_decr_num_pend_cmds(vha); |
4148 | percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); | 4153 | target_free_tag(sess->se_sess, &cmd->se_cmd); |
4149 | spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); | 4154 | spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); |
4150 | 4155 | ||
4151 | spin_lock_irqsave(&ha->tgt.sess_lock, flags); | 4156 | spin_lock_irqsave(&ha->tgt.sess_lock, flags); |
@@ -4276,9 +4281,9 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, | |||
4276 | { | 4281 | { |
4277 | struct se_session *se_sess = sess->se_sess; | 4282 | struct se_session *se_sess = sess->se_sess; |
4278 | struct qla_tgt_cmd *cmd; | 4283 | struct qla_tgt_cmd *cmd; |
4279 | int tag; | 4284 | int tag, cpu; |
4280 | 4285 | ||
4281 | tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); | 4286 | tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu); |
4282 | if (tag < 0) | 4287 | if (tag < 0) |
4283 | return NULL; | 4288 | return NULL; |
4284 | 4289 | ||
@@ -4291,6 +4296,7 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, | |||
4291 | qlt_incr_num_pend_cmds(vha); | 4296 | qlt_incr_num_pend_cmds(vha); |
4292 | cmd->vha = vha; | 4297 | cmd->vha = vha; |
4293 | cmd->se_cmd.map_tag = tag; | 4298 | cmd->se_cmd.map_tag = tag; |
4299 | cmd->se_cmd.map_cpu = cpu; | ||
4294 | cmd->sess = sess; | 4300 | cmd->sess = sess; |
4295 | cmd->loop_id = sess->loop_id; | 4301 | cmd->loop_id = sess->loop_id; |
4296 | cmd->conf_compl_supported = sess->conf_compl_supported; | 4302 | cmd->conf_compl_supported = sess->conf_compl_supported; |
@@ -4714,6 +4720,10 @@ static int qlt_handle_login(struct scsi_qla_host *vha, | |||
4714 | 4720 | ||
4715 | pla = qlt_plogi_ack_find_add(vha, &port_id, iocb); | 4721 | pla = qlt_plogi_ack_find_add(vha, &port_id, iocb); |
4716 | if (!pla) { | 4722 | if (!pla) { |
4723 | ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, | ||
4724 | "%s %d %8phC Term INOT due to mem alloc fail", | ||
4725 | __func__, __LINE__, | ||
4726 | iocb->u.isp24.port_name); | ||
4717 | qlt_send_term_imm_notif(vha, iocb, 1); | 4727 | qlt_send_term_imm_notif(vha, iocb, 1); |
4718 | goto out; | 4728 | goto out; |
4719 | } | 4729 | } |
@@ -5293,7 +5303,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, | |||
5293 | struct fc_port *sess; | 5303 | struct fc_port *sess; |
5294 | struct se_session *se_sess; | 5304 | struct se_session *se_sess; |
5295 | struct qla_tgt_cmd *cmd; | 5305 | struct qla_tgt_cmd *cmd; |
5296 | int tag; | 5306 | int tag, cpu; |
5297 | unsigned long flags; | 5307 | unsigned long flags; |
5298 | 5308 | ||
5299 | if (unlikely(tgt->tgt_stop)) { | 5309 | if (unlikely(tgt->tgt_stop)) { |
@@ -5325,7 +5335,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, | |||
5325 | 5335 | ||
5326 | se_sess = sess->se_sess; | 5336 | se_sess = sess->se_sess; |
5327 | 5337 | ||
5328 | tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); | 5338 | tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu); |
5329 | if (tag < 0) | 5339 | if (tag < 0) |
5330 | return; | 5340 | return; |
5331 | 5341 | ||
@@ -5356,6 +5366,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, | |||
5356 | cmd->reset_count = ha->base_qpair->chip_reset; | 5366 | cmd->reset_count = ha->base_qpair->chip_reset; |
5357 | cmd->q_full = 1; | 5367 | cmd->q_full = 1; |
5358 | cmd->qpair = ha->base_qpair; | 5368 | cmd->qpair = ha->base_qpair; |
5369 | cmd->se_cmd.map_cpu = cpu; | ||
5359 | 5370 | ||
5360 | if (qfull) { | 5371 | if (qfull) { |
5361 | cmd->q_full = 1; | 5372 | cmd->q_full = 1; |
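
The qla_target.c hunks switch the per-session command tag pool from percpu_ida to the sbitmap-backed pool provided by the target core: sbitmap_queue_get() returns both a tag and a CPU hint, and both must be stored in the se_cmd so target_free_tag() can return the tag later. A condensed sketch of the allocate/use/free pairing; indexing into the session's preallocated command array via sess_cmd_map is an assumption (only the tag bookkeeping is visible in the hunks above), and initialisation is trimmed:

	static struct qla_tgt_cmd *get_cmd(struct fc_port *sess)
	{
		struct se_session *se_sess = sess->se_sess;
		struct qla_tgt_cmd *cmd;
		int tag, cpu;

		tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
		if (tag < 0)
			return NULL;		/* pool exhausted */

		/* assumed layout: one preallocated qla_tgt_cmd per tag */
		cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
		cmd->se_cmd.map_tag = tag;	/* both values are needed ... */
		cmd->se_cmd.map_cpu = cpu;	/* ... to free the tag again */
		return cmd;
	}

	static void put_cmd(struct fc_port *sess, struct qla_tgt_cmd *cmd)
	{
		target_free_tag(sess->se_sess, &cmd->se_cmd);
	}
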
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index 731ca0d8520a..0ccd06f11f12 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c | |||
@@ -571,6 +571,15 @@ qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha, | |||
571 | } | 571 | } |
572 | break; | 572 | break; |
573 | 573 | ||
574 | case T268_BUF_TYPE_REQ_MIRROR: | ||
575 | case T268_BUF_TYPE_RSP_MIRROR: | ||
576 | /* | ||
577 | * Mirror pointers are not implemented in the | ||
578 | * driver, instead shadow pointers are used by | ||
579 | * the drier. Skip these entries. | ||
580 | */ | ||
581 | qla27xx_skip_entry(ent, buf); | ||
582 | break; | ||
574 | default: | 583 | default: |
575 | ql_dbg(ql_dbg_async, vha, 0xd02b, | 584 | ql_dbg(ql_dbg_async, vha, 0xd02b, |
576 | "%s: unknown buffer %x\n", __func__, ent->t268.buf_type); | 585 | "%s: unknown buffer %x\n", __func__, ent->t268.buf_type); |
@@ -1028,8 +1037,10 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked) | |||
1028 | ql_log(ql_log_warn, vha, 0xd300, | 1037 | ql_log(ql_log_warn, vha, 0xd300, |
1029 | "Firmware has been previously dumped (%p)," | 1038 | "Firmware has been previously dumped (%p)," |
1030 | " -- ignoring request\n", vha->hw->fw_dump); | 1039 | " -- ignoring request\n", vha->hw->fw_dump); |
1031 | else | 1040 | else { |
1041 | QLA_FW_STOPPED(vha->hw); | ||
1032 | qla27xx_execute_fwdt_template(vha); | 1042 | qla27xx_execute_fwdt_template(vha); |
1043 | } | ||
1033 | 1044 | ||
1034 | #ifndef __CHECKER__ | 1045 | #ifndef __CHECKER__ |
1035 | if (!hardware_locked) | 1046 | if (!hardware_locked) |
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 1ad7582220c3..3850b28518e5 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
@@ -7,7 +7,7 @@ | |||
7 | /* | 7 | /* |
8 | * Driver version | 8 | * Driver version |
9 | */ | 9 | */ |
10 | #define QLA2XXX_VERSION "10.00.00.07-k" | 10 | #define QLA2XXX_VERSION "10.00.00.08-k" |
11 | 11 | ||
12 | #define QLA_DRIVER_MAJOR_VER 10 | 12 | #define QLA_DRIVER_MAJOR_VER 10 |
13 | #define QLA_DRIVER_MINOR_VER 0 | 13 | #define QLA_DRIVER_MINOR_VER 0 |
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 7732e9336d43..e03d12a5f986 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c | |||
@@ -1049,10 +1049,8 @@ static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = { | |||
1049 | NULL, | 1049 | NULL, |
1050 | }; | 1050 | }; |
1051 | 1051 | ||
1052 | static struct se_portal_group *tcm_qla2xxx_make_tpg( | 1052 | static struct se_portal_group *tcm_qla2xxx_make_tpg(struct se_wwn *wwn, |
1053 | struct se_wwn *wwn, | 1053 | const char *name) |
1054 | struct config_group *group, | ||
1055 | const char *name) | ||
1056 | { | 1054 | { |
1057 | struct tcm_qla2xxx_lport *lport = container_of(wwn, | 1055 | struct tcm_qla2xxx_lport *lport = container_of(wwn, |
1058 | struct tcm_qla2xxx_lport, lport_wwn); | 1056 | struct tcm_qla2xxx_lport, lport_wwn); |
@@ -1171,10 +1169,8 @@ static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = { | |||
1171 | NULL, | 1169 | NULL, |
1172 | }; | 1170 | }; |
1173 | 1171 | ||
1174 | static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg( | 1172 | static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(struct se_wwn *wwn, |
1175 | struct se_wwn *wwn, | 1173 | const char *name) |
1176 | struct config_group *group, | ||
1177 | const char *name) | ||
1178 | { | 1174 | { |
1179 | struct tcm_qla2xxx_lport *lport = container_of(wwn, | 1175 | struct tcm_qla2xxx_lport *lport = container_of(wwn, |
1180 | struct tcm_qla2xxx_lport, lport_wwn); | 1176 | struct tcm_qla2xxx_lport, lport_wwn); |
@@ -1465,8 +1461,7 @@ static void tcm_qla2xxx_free_session(struct fc_port *sess) | |||
1465 | } | 1461 | } |
1466 | target_wait_for_sess_cmds(se_sess); | 1462 | target_wait_for_sess_cmds(se_sess); |
1467 | 1463 | ||
1468 | transport_deregister_session_configfs(sess->se_sess); | 1464 | target_remove_session(se_sess); |
1469 | transport_deregister_session(sess->se_sess); | ||
1470 | } | 1465 | } |
1471 | 1466 | ||
1472 | static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg, | 1467 | static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg, |
@@ -1543,7 +1538,7 @@ static int tcm_qla2xxx_check_initiator_node_acl( | |||
1543 | * Locate our struct se_node_acl either from an explicit NodeACL created | 1538 | * Locate our struct se_node_acl either from an explicit NodeACL created
1544 | * via ConfigFS, or via running in TPG demo mode. | 1539 | * via ConfigFS, or via running in TPG demo mode. |
1545 | */ | 1540 | */ |
1546 | se_sess = target_alloc_session(&tpg->se_tpg, num_tags, | 1541 | se_sess = target_setup_session(&tpg->se_tpg, num_tags, |
1547 | sizeof(struct qla_tgt_cmd), | 1542 | sizeof(struct qla_tgt_cmd), |
1548 | TARGET_PROT_ALL, port_name, | 1543 | TARGET_PROT_ALL, port_name, |
1549 | qlat_sess, tcm_qla2xxx_session_cb); | 1544 | qlat_sess, tcm_qla2xxx_session_cb); |
@@ -1624,9 +1619,6 @@ static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id, | |||
1624 | 1619 | ||
1625 | sess->conf_compl_supported = conf_compl_supported; | 1620 | sess->conf_compl_supported = conf_compl_supported; |
1626 | 1621 | ||
1627 | /* Reset logout parameters to default */ | ||
1628 | sess->logout_on_delete = 1; | ||
1629 | sess->keep_nport_handle = 0; | ||
1630 | } | 1622 | } |
1631 | 1623 | ||
1632 | /* | 1624 | /* |
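
tcm_qla2xxx.c moves to the consolidated target-core session API: target_setup_session() replaces target_alloc_session() with the same callback-based shape, and a single target_remove_session() replaces the transport_deregister_session_configfs()/transport_deregister_session() pair. A hedged sketch of the new pairing; tcm_qla2xxx_session_cb, num_tags and the surrounding locking are taken as given from the hunks above:

	static int setup_sess(struct se_portal_group *se_tpg, char *port_name,
			      struct fc_port *qlat_sess, int num_tags)
	{
		struct se_session *se_sess;

		se_sess = target_setup_session(se_tpg, num_tags,
					       sizeof(struct qla_tgt_cmd),
					       TARGET_PROT_ALL, port_name,
					       qlat_sess, tcm_qla2xxx_session_cb);
		return IS_ERR(se_sess) ? PTR_ERR(se_sess) : 0;
	}

	static void teardown_sess(struct se_session *se_sess)
	{
		target_wait_for_sess_cmds(se_sess);
		target_remove_session(se_sess);	/* replaces the two deregister calls */
	}
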
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index 8578e566ab41..9d09228eee28 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c | |||
@@ -959,7 +959,7 @@ static inline void update_can_queue(struct Scsi_Host *host, u_int in_ptr, u_int | |||
959 | /* Temporary workaround until bug is found and fixed (one bug has been found | 959 | /* Temporary workaround until bug is found and fixed (one bug has been found |
960 | already, but fixing it makes things even worse) -jj */ | 960 | already, but fixing it makes things even worse) -jj */ |
961 | int num_free = QLOGICPTI_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr) - 64; | 961 | int num_free = QLOGICPTI_REQ_QUEUE_LEN - REQ_QUEUE_DEPTH(in_ptr, out_ptr) - 64; |
962 | host->can_queue = atomic_read(&host->host_busy) + num_free; | 962 | host->can_queue = scsi_host_busy(host) + num_free; |
963 | host->sg_tablesize = QLOGICPTI_MAX_SG(num_free); | 963 | host->sg_tablesize = QLOGICPTI_MAX_SG(num_free); |
964 | } | 964 | } |
965 | 965 | ||
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 4c60c260c5da..fc1356d101b0 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
@@ -162,12 +162,12 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) | |||
162 | (level > 1)) { | 162 | (level > 1)) { |
163 | scsi_print_result(cmd, "Done", disposition); | 163 | scsi_print_result(cmd, "Done", disposition); |
164 | scsi_print_command(cmd); | 164 | scsi_print_command(cmd); |
165 | if (status_byte(cmd->result) & CHECK_CONDITION) | 165 | if (status_byte(cmd->result) == CHECK_CONDITION) |
166 | scsi_print_sense(cmd); | 166 | scsi_print_sense(cmd); |
167 | if (level > 3) | 167 | if (level > 3) |
168 | scmd_printk(KERN_INFO, cmd, | 168 | scmd_printk(KERN_INFO, cmd, |
169 | "scsi host busy %d failed %d\n", | 169 | "scsi host busy %d failed %d\n", |
170 | atomic_read(&cmd->device->host->host_busy), | 170 | scsi_host_busy(cmd->device->host), |
171 | cmd->device->host->host_failed); | 171 | cmd->device->host->host_failed); |
172 | } | 172 | } |
173 | } | 173 | } |
diff --git a/drivers/scsi/scsi.h b/drivers/scsi/scsi.h index 6dcc4c685d1d..4fd75a3aff66 100644 --- a/drivers/scsi/scsi.h +++ b/drivers/scsi/scsi.h | |||
@@ -43,7 +43,4 @@ struct scsi_device; | |||
43 | struct scsi_target; | 43 | struct scsi_target; |
44 | struct scatterlist; | 44 | struct scatterlist; |
45 | 45 | ||
46 | /* obsolete typedef junk. */ | ||
47 | #include "scsi_typedefs.h" | ||
48 | |||
49 | #endif /* _SCSI_H */ | 46 | #endif /* _SCSI_H */ |
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 364e71861bfd..60bcc6df97a9 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
@@ -164,29 +164,29 @@ static const char *sdebug_version_date = "20180128"; | |||
164 | #define SDEBUG_OPT_RESET_NOISE 0x2000 | 164 | #define SDEBUG_OPT_RESET_NOISE 0x2000 |
165 | #define SDEBUG_OPT_NO_CDB_NOISE 0x4000 | 165 | #define SDEBUG_OPT_NO_CDB_NOISE 0x4000 |
166 | #define SDEBUG_OPT_HOST_BUSY 0x8000 | 166 | #define SDEBUG_OPT_HOST_BUSY 0x8000 |
167 | #define SDEBUG_OPT_CMD_ABORT 0x10000 | ||
167 | #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \ | 168 | #define SDEBUG_OPT_ALL_NOISE (SDEBUG_OPT_NOISE | SDEBUG_OPT_Q_NOISE | \ |
168 | SDEBUG_OPT_RESET_NOISE) | 169 | SDEBUG_OPT_RESET_NOISE) |
169 | #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \ | 170 | #define SDEBUG_OPT_ALL_INJECTING (SDEBUG_OPT_RECOVERED_ERR | \ |
170 | SDEBUG_OPT_TRANSPORT_ERR | \ | 171 | SDEBUG_OPT_TRANSPORT_ERR | \ |
171 | SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \ | 172 | SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR | \ |
172 | SDEBUG_OPT_SHORT_TRANSFER | \ | 173 | SDEBUG_OPT_SHORT_TRANSFER | \ |
173 | SDEBUG_OPT_HOST_BUSY) | 174 | SDEBUG_OPT_HOST_BUSY | \ |
175 | SDEBUG_OPT_CMD_ABORT) | ||
174 | /* When "every_nth" > 0 then modulo "every_nth" commands: | 176 | /* When "every_nth" > 0 then modulo "every_nth" commands: |
175 | * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set | 177 | * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set |
176 | * - a RECOVERED_ERROR is simulated on successful read and write | 178 | * - a RECOVERED_ERROR is simulated on successful read and write |
177 | * commands if SDEBUG_OPT_RECOVERED_ERR is set. | 179 | * commands if SDEBUG_OPT_RECOVERED_ERR is set. |
178 | * - a TRANSPORT_ERROR is simulated on successful read and write | 180 | * - a TRANSPORT_ERROR is simulated on successful read and write |
179 | * commands if SDEBUG_OPT_TRANSPORT_ERR is set. | 181 | * commands if SDEBUG_OPT_TRANSPORT_ERR is set. |
182 | * - similarly for DIF_ERR, DIX_ERR, SHORT_TRANSFER, HOST_BUSY and | ||
183 | * CMD_ABORT | ||
180 | * | 184 | * |
181 | * When "every_nth" < 0 then after "- every_nth" commands: | 185 | * When "every_nth" < 0 then after "- every_nth" commands the selected |
182 | * - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set | 186 | * error will be injected. The error will be injected on every subsequent |
183 | * - a RECOVERED_ERROR is simulated on successful read and write | 187 | * command until some other action occurs; for example, the user writing |
184 | * commands if SDEBUG_OPT_RECOVERED_ERR is set. | 188 | * a new value (other than -1 or 1) to every_nth: |
185 | * - a TRANSPORT_ERROR is simulated on successful read and write | 189 | * echo 0 > /sys/bus/pseudo/drivers/scsi_debug/every_nth |
186 | * commands if _DEBUG_OPT_TRANSPORT_ERR is set. | ||
187 | * This will continue on every subsequent command until some other action | ||
188 | * occurs (e.g. the user * writing a new value (other than -1 or 1) to | ||
189 | * every_nth via sysfs). | ||
190 | */ | 190 | */ |
191 | 191 | ||
192 | /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in | 192 | /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in |
@@ -281,6 +281,7 @@ struct sdebug_defer { | |||
281 | int issuing_cpu; | 281 | int issuing_cpu; |
282 | bool init_hrt; | 282 | bool init_hrt; |
283 | bool init_wq; | 283 | bool init_wq; |
284 | bool aborted; /* true when blk_abort_request() already called */ | ||
284 | enum sdeb_defer_type defer_t; | 285 | enum sdeb_defer_type defer_t; |
285 | }; | 286 | }; |
286 | 287 | ||
@@ -296,6 +297,7 @@ struct sdebug_queued_cmd { | |||
296 | unsigned int inj_dix:1; | 297 | unsigned int inj_dix:1; |
297 | unsigned int inj_short:1; | 298 | unsigned int inj_short:1; |
298 | unsigned int inj_host_busy:1; | 299 | unsigned int inj_host_busy:1; |
300 | unsigned int inj_cmd_abort:1; | ||
299 | }; | 301 | }; |
300 | 302 | ||
301 | struct sdebug_queue { | 303 | struct sdebug_queue { |
@@ -3792,6 +3794,7 @@ static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd) | |||
3792 | /* Queued (deferred) command completions converge here. */ | 3794 | /* Queued (deferred) command completions converge here. */ |
3793 | static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp) | 3795 | static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp) |
3794 | { | 3796 | { |
3797 | bool aborted = sd_dp->aborted; | ||
3795 | int qc_idx; | 3798 | int qc_idx; |
3796 | int retiring = 0; | 3799 | int retiring = 0; |
3797 | unsigned long iflags; | 3800 | unsigned long iflags; |
@@ -3801,6 +3804,8 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp) | |||
3801 | struct sdebug_dev_info *devip; | 3804 | struct sdebug_dev_info *devip; |
3802 | 3805 | ||
3803 | sd_dp->defer_t = SDEB_DEFER_NONE; | 3806 | sd_dp->defer_t = SDEB_DEFER_NONE; |
3807 | if (unlikely(aborted)) | ||
3808 | sd_dp->aborted = false; | ||
3804 | qc_idx = sd_dp->qc_idx; | 3809 | qc_idx = sd_dp->qc_idx; |
3805 | sqp = sdebug_q_arr + sd_dp->sqa_idx; | 3810 | sqp = sdebug_q_arr + sd_dp->sqa_idx; |
3806 | if (sdebug_statistics) { | 3811 | if (sdebug_statistics) { |
@@ -3852,6 +3857,11 @@ static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp) | |||
3852 | atomic_set(&retired_max_queue, k + 1); | 3857 | atomic_set(&retired_max_queue, k + 1); |
3853 | } | 3858 | } |
3854 | spin_unlock_irqrestore(&sqp->qc_lock, iflags); | 3859 | spin_unlock_irqrestore(&sqp->qc_lock, iflags); |
3860 | if (unlikely(aborted)) { | ||
3861 | if (sdebug_verbose) | ||
3862 | pr_info("bypassing scsi_done() due to aborted cmd\n"); | ||
3863 | return; | ||
3864 | } | ||
3855 | scp->scsi_done(scp); /* callback to mid level */ | 3865 | scp->scsi_done(scp); /* callback to mid level */ |
3856 | } | 3866 | } |
3857 | 3867 | ||
@@ -4312,7 +4322,8 @@ static void setup_inject(struct sdebug_queue *sqp, | |||
4312 | if (sdebug_every_nth > 0) | 4322 | if (sdebug_every_nth > 0) |
4313 | sqcp->inj_recovered = sqcp->inj_transport | 4323 | sqcp->inj_recovered = sqcp->inj_transport |
4314 | = sqcp->inj_dif | 4324 | = sqcp->inj_dif |
4315 | = sqcp->inj_dix = sqcp->inj_short = 0; | 4325 | = sqcp->inj_dix = sqcp->inj_short |
4326 | = sqcp->inj_host_busy = sqcp->inj_cmd_abort = 0; | ||
4316 | return; | 4327 | return; |
4317 | } | 4328 | } |
4318 | sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts); | 4329 | sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts); |
@@ -4321,6 +4332,7 @@ static void setup_inject(struct sdebug_queue *sqp, | |||
4321 | sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts); | 4332 | sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts); |
4322 | sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts); | 4333 | sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts); |
4323 | sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts); | 4334 | sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts); |
4335 | sqcp->inj_cmd_abort = !!(SDEBUG_OPT_CMD_ABORT & sdebug_opts); | ||
4324 | } | 4336 | } |
4325 | 4337 | ||
4326 | /* Complete the processing of the thread that queued a SCSI command to this | 4338 | /* Complete the processing of the thread that queued a SCSI command to this |
@@ -4458,7 +4470,14 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, | |||
4458 | if (sdebug_statistics) | 4470 | if (sdebug_statistics) |
4459 | sd_dp->issuing_cpu = raw_smp_processor_id(); | 4471 | sd_dp->issuing_cpu = raw_smp_processor_id(); |
4460 | sd_dp->defer_t = SDEB_DEFER_WQ; | 4472 | sd_dp->defer_t = SDEB_DEFER_WQ; |
4473 | if (unlikely(sqcp->inj_cmd_abort)) | ||
4474 | sd_dp->aborted = true; | ||
4461 | schedule_work(&sd_dp->ew.work); | 4475 | schedule_work(&sd_dp->ew.work); |
4476 | if (unlikely(sqcp->inj_cmd_abort)) { | ||
4477 | sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", | ||
4478 | cmnd->request->tag); | ||
4479 | blk_abort_request(cmnd->request); | ||
4480 | } | ||
4462 | } | 4481 | } |
4463 | if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && | 4482 | if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && |
4464 | (scsi_result == device_qfull_result))) | 4483 | (scsi_result == device_qfull_result))) |
@@ -4844,12 +4863,11 @@ static ssize_t fake_rw_store(struct device_driver *ddp, const char *buf, | |||
4844 | (unsigned long)sdebug_dev_size_mb * | 4863 | (unsigned long)sdebug_dev_size_mb * |
4845 | 1048576; | 4864 | 1048576; |
4846 | 4865 | ||
4847 | fake_storep = vmalloc(sz); | 4866 | fake_storep = vzalloc(sz); |
4848 | if (NULL == fake_storep) { | 4867 | if (NULL == fake_storep) { |
4849 | pr_err("out of memory, 9\n"); | 4868 | pr_err("out of memory, 9\n"); |
4850 | return -ENOMEM; | 4869 | return -ENOMEM; |
4851 | } | 4870 | } |
4852 | memset(fake_storep, 0, sz); | ||
4853 | } | 4871 | } |
4854 | sdebug_fake_rw = n; | 4872 | sdebug_fake_rw = n; |
4855 | } | 4873 | } |
@@ -5391,13 +5409,12 @@ static int __init scsi_debug_init(void) | |||
5391 | } | 5409 | } |
5392 | 5410 | ||
5393 | if (sdebug_fake_rw == 0) { | 5411 | if (sdebug_fake_rw == 0) { |
5394 | fake_storep = vmalloc(sz); | 5412 | fake_storep = vzalloc(sz); |
5395 | if (NULL == fake_storep) { | 5413 | if (NULL == fake_storep) { |
5396 | pr_err("out of memory, 1\n"); | 5414 | pr_err("out of memory, 1\n"); |
5397 | ret = -ENOMEM; | 5415 | ret = -ENOMEM; |
5398 | goto free_q_arr; | 5416 | goto free_q_arr; |
5399 | } | 5417 | } |
5400 | memset(fake_storep, 0, sz); | ||
5401 | if (sdebug_num_parts > 0) | 5418 | if (sdebug_num_parts > 0) |
5402 | sdebug_build_parts(fake_storep, sz); | 5419 | sdebug_build_parts(fake_storep, sz); |
5403 | } | 5420 | } |
@@ -5790,11 +5807,13 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost, | |||
5790 | fini: | 5807 | fini: |
5791 | if (F_DELAY_OVERR & flags) | 5808 | if (F_DELAY_OVERR & flags) |
5792 | return schedule_resp(scp, devip, errsts, pfp, 0, 0); | 5809 | return schedule_resp(scp, devip, errsts, pfp, 0, 0); |
5793 | else if ((sdebug_jdelay || sdebug_ndelay) && (flags & F_LONG_DELAY)) { | 5810 | else if ((flags & F_LONG_DELAY) && (sdebug_jdelay > 0 || |
5811 | sdebug_ndelay > 10000)) { | ||
5794 | /* | 5812 | /* |
5795 | * If any delay is active, for F_SSU_DELAY want at least 1 | 5813 | * Skip long delays if ndelay <= 10 microseconds. Otherwise |
5796 | * second and if sdebug_jdelay>0 want a long delay of that | 5814 | * for Start Stop Unit (SSU) want at least 1 second delay and |
5797 | * many seconds; for F_SYNC_DELAY want 1/20 of that. | 5815 | * if sdebug_jdelay>1 want a long delay of that many seconds. |
5816 | * For Synchronize Cache want 1/20 of SSU's delay. | ||
5798 | */ | 5817 | */ |
5799 | int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay; | 5818 | int jdelay = (sdebug_jdelay < 2) ? 1 : sdebug_jdelay; |
5800 | int denom = (flags & F_SYNC_DELAY) ? 20 : 1; | 5819 | int denom = (flags & F_SYNC_DELAY) ? 20 : 1; |
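
scsi_debug gains an abort-injection option: when SDEBUG_OPT_CMD_ABORT (0x10000) is selected and every_nth fires, schedule_resp() marks the deferred command as aborted and calls blk_abort_request(), and the deferred completion then skips scsi_done() so the midlayer's timeout/abort machinery is exercised instead of a normal completion. Condensing the two hunks above into one flow, with names as in the diff:

	/* Issue side (schedule_resp): inject the abort after queueing the work. */
	if (unlikely(sqcp->inj_cmd_abort))
		sd_dp->aborted = true;
	schedule_work(&sd_dp->ew.work);
	if (unlikely(sqcp->inj_cmd_abort)) {
		sdev_printk(KERN_INFO, sdp, "abort request tag %d\n",
			    cmnd->request->tag);
		blk_abort_request(cmnd->request);
	}

	/* Completion side (sdebug_q_cmd_complete): do not complete aborted cmds. */
	if (unlikely(aborted)) {
		if (sdebug_verbose)
			pr_info("bypassing scsi_done() due to aborted cmd\n");
		return;
	}
	scp->scsi_done(scp);	/* callback to mid level */

At runtime the option bit is OR-ed into opts and combined with every_nth through the driver's sysfs files, the same /sys/bus/pseudo/drivers/scsi_debug/ interface the every_nth comment above refers to.
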
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 2715cdaa669c..b7a8fdfeb2f4 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c | |||
@@ -66,7 +66,7 @@ void scsi_eh_wakeup(struct Scsi_Host *shost) | |||
66 | { | 66 | { |
67 | lockdep_assert_held(shost->host_lock); | 67 | lockdep_assert_held(shost->host_lock); |
68 | 68 | ||
69 | if (atomic_read(&shost->host_busy) == shost->host_failed) { | 69 | if (scsi_host_busy(shost) == shost->host_failed) { |
70 | trace_scsi_eh_wakeup(shost); | 70 | trace_scsi_eh_wakeup(shost); |
71 | wake_up_process(shost->ehandler); | 71 | wake_up_process(shost->ehandler); |
72 | SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost, | 72 | SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost, |
@@ -2169,7 +2169,7 @@ int scsi_error_handler(void *data) | |||
2169 | break; | 2169 | break; |
2170 | 2170 | ||
2171 | if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) || | 2171 | if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) || |
2172 | shost->host_failed != atomic_read(&shost->host_busy)) { | 2172 | shost->host_failed != scsi_host_busy(shost)) { |
2173 | SCSI_LOG_ERROR_RECOVERY(1, | 2173 | SCSI_LOG_ERROR_RECOVERY(1, |
2174 | shost_printk(KERN_INFO, shost, | 2174 | shost_printk(KERN_INFO, shost, |
2175 | "scsi_eh_%d: sleeping\n", | 2175 | "scsi_eh_%d: sleeping\n", |
@@ -2184,7 +2184,7 @@ int scsi_error_handler(void *data) | |||
2184 | "scsi_eh_%d: waking up %d/%d/%d\n", | 2184 | "scsi_eh_%d: waking up %d/%d/%d\n", |
2185 | shost->host_no, shost->host_eh_scheduled, | 2185 | shost->host_no, shost->host_eh_scheduled, |
2186 | shost->host_failed, | 2186 | shost->host_failed, |
2187 | atomic_read(&shost->host_busy))); | 2187 | scsi_host_busy(shost))); |
2188 | 2188 | ||
2189 | /* | 2189 | /* |
2190 | * We have a host that is failing for some reason. Figure out | 2190 | * We have a host that is failing for some reason. Figure out |
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index 0a875491f5a7..cc30fccc1a2e 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c | |||
@@ -100,8 +100,8 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd, | |||
100 | SCSI_LOG_IOCTL(2, sdev_printk(KERN_INFO, sdev, | 100 | SCSI_LOG_IOCTL(2, sdev_printk(KERN_INFO, sdev, |
101 | "Ioctl returned 0x%x\n", result)); | 101 | "Ioctl returned 0x%x\n", result)); |
102 | 102 | ||
103 | if ((driver_byte(result) & DRIVER_SENSE) && | 103 | if (driver_byte(result) == DRIVER_SENSE && |
104 | (scsi_sense_valid(&sshdr))) { | 104 | scsi_sense_valid(&sshdr)) { |
105 | switch (sshdr.sense_key) { | 105 | switch (sshdr.sense_key) { |
106 | case ILLEGAL_REQUEST: | 106 | case ILLEGAL_REQUEST: |
107 | if (cmd[0] == ALLOW_MEDIUM_REMOVAL) | 107 | if (cmd[0] == ALLOW_MEDIUM_REMOVAL) |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 9cb9a166fa0c..0adfb3bce0fd 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -345,7 +345,8 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost) | |||
345 | unsigned long flags; | 345 | unsigned long flags; |
346 | 346 | ||
347 | rcu_read_lock(); | 347 | rcu_read_lock(); |
348 | atomic_dec(&shost->host_busy); | 348 | if (!shost->use_blk_mq) |
349 | atomic_dec(&shost->host_busy); | ||
349 | if (unlikely(scsi_host_in_recovery(shost))) { | 350 | if (unlikely(scsi_host_in_recovery(shost))) { |
350 | spin_lock_irqsave(shost->host_lock, flags); | 351 | spin_lock_irqsave(shost->host_lock, flags); |
351 | if (shost->host_failed || shost->host_eh_scheduled) | 352 | if (shost->host_failed || shost->host_eh_scheduled) |
@@ -371,7 +372,7 @@ void scsi_device_unbusy(struct scsi_device *sdev) | |||
371 | static void scsi_kick_queue(struct request_queue *q) | 372 | static void scsi_kick_queue(struct request_queue *q) |
372 | { | 373 | { |
373 | if (q->mq_ops) | 374 | if (q->mq_ops) |
374 | blk_mq_start_hw_queues(q); | 375 | blk_mq_run_hw_queues(q, false); |
375 | else | 376 | else |
376 | blk_run_queue(q); | 377 | blk_run_queue(q); |
377 | } | 378 | } |
@@ -444,7 +445,12 @@ static inline bool scsi_target_is_busy(struct scsi_target *starget) | |||
444 | 445 | ||
445 | static inline bool scsi_host_is_busy(struct Scsi_Host *shost) | 446 | static inline bool scsi_host_is_busy(struct Scsi_Host *shost) |
446 | { | 447 | { |
447 | if (shost->can_queue > 0 && | 448 | /* |
449 | * blk-mq can handle host queue busy efficiently via host-wide driver | ||
450 | * tag allocation | ||
451 | */ | ||
452 | |||
453 | if (!shost->use_blk_mq && shost->can_queue > 0 && | ||
448 | atomic_read(&shost->host_busy) >= shost->can_queue) | 454 | atomic_read(&shost->host_busy) >= shost->can_queue) |
449 | return true; | 455 | return true; |
450 | if (atomic_read(&shost->host_blocked) > 0) | 456 | if (atomic_read(&shost->host_blocked) > 0) |
@@ -662,6 +668,7 @@ static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd) | |||
662 | cmd->request->next_rq->special = NULL; | 668 | cmd->request->next_rq->special = NULL; |
663 | } | 669 | } |
664 | 670 | ||
671 | /* Returns false when no more bytes to process, true if there are more */ | ||
665 | static bool scsi_end_request(struct request *req, blk_status_t error, | 672 | static bool scsi_end_request(struct request *req, blk_status_t error, |
666 | unsigned int bytes, unsigned int bidi_bytes) | 673 | unsigned int bytes, unsigned int bidi_bytes) |
667 | { | 674 | { |
@@ -760,161 +767,39 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result) | |||
760 | } | 767 | } |
761 | } | 768 | } |
762 | 769 | ||
763 | /* | 770 | /* Helper for scsi_io_completion() when "reprep" action required. */ |
764 | * Function: scsi_io_completion() | 771 | static void scsi_io_completion_reprep(struct scsi_cmnd *cmd, |
765 | * | 772 | struct request_queue *q) |
766 | * Purpose: Completion processing for block device I/O requests. | 773 | { |
767 | * | 774 | /* A new command will be prepared and issued. */ |
768 | * Arguments: cmd - command that is finished. | 775 | if (q->mq_ops) { |
769 | * | 776 | scsi_mq_requeue_cmd(cmd); |
770 | * Lock status: Assumed that no lock is held upon entry. | 777 | } else { |
771 | * | 778 | /* Unprep request and put it back at head of the queue. */ |
772 | * Returns: Nothing | 779 | scsi_release_buffers(cmd); |
773 | * | 780 | scsi_requeue_command(q, cmd); |
774 | * Notes: We will finish off the specified number of sectors. If we | 781 | } |
775 | * are done, the command block will be released and the queue | 782 | } |
776 | * function will be goosed. If we are not done then we have to | 783 | |
777 | * figure out what to do next: | 784 | /* Helper for scsi_io_completion() when special action required. */ |
778 | * | 785 | static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result) |
779 | * a) We can call scsi_requeue_command(). The request | ||
780 | * will be unprepared and put back on the queue. Then | ||
781 | * a new command will be created for it. This should | ||
782 | * be used if we made forward progress, or if we want | ||
783 | * to switch from READ(10) to READ(6) for example. | ||
784 | * | ||
785 | * b) We can call __scsi_queue_insert(). The request will | ||
786 | * be put back on the queue and retried using the same | ||
787 | * command as before, possibly after a delay. | ||
788 | * | ||
789 | * c) We can call scsi_end_request() with -EIO to fail | ||
790 | * the remainder of the request. | ||
791 | */ | ||
792 | void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | ||
793 | { | 786 | { |
794 | int result = cmd->result; | ||
795 | struct request_queue *q = cmd->device->request_queue; | 787 | struct request_queue *q = cmd->device->request_queue; |
796 | struct request *req = cmd->request; | 788 | struct request *req = cmd->request; |
797 | blk_status_t error = BLK_STS_OK; | 789 | int level = 0; |
798 | struct scsi_sense_hdr sshdr; | ||
799 | bool sense_valid = false; | ||
800 | int sense_deferred = 0, level = 0; | ||
801 | enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY, | 790 | enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY, |
802 | ACTION_DELAYED_RETRY} action; | 791 | ACTION_DELAYED_RETRY} action; |
803 | unsigned long wait_for = (cmd->allowed + 1) * req->timeout; | 792 | unsigned long wait_for = (cmd->allowed + 1) * req->timeout; |
793 | struct scsi_sense_hdr sshdr; | ||
794 | bool sense_valid; | ||
795 | bool sense_current = true; /* false implies "deferred sense" */ | ||
796 | blk_status_t blk_stat; | ||
804 | 797 | ||
805 | if (result) { | 798 | sense_valid = scsi_command_normalize_sense(cmd, &sshdr); |
806 | sense_valid = scsi_command_normalize_sense(cmd, &sshdr); | 799 | if (sense_valid) |
807 | if (sense_valid) | 800 | sense_current = !scsi_sense_is_deferred(&sshdr); |
808 | sense_deferred = scsi_sense_is_deferred(&sshdr); | ||
809 | } | ||
810 | |||
811 | if (blk_rq_is_passthrough(req)) { | ||
812 | if (result) { | ||
813 | if (sense_valid) { | ||
814 | /* | ||
815 | * SG_IO wants current and deferred errors | ||
816 | */ | ||
817 | scsi_req(req)->sense_len = | ||
818 | min(8 + cmd->sense_buffer[7], | ||
819 | SCSI_SENSE_BUFFERSIZE); | ||
820 | } | ||
821 | if (!sense_deferred) | ||
822 | error = scsi_result_to_blk_status(cmd, result); | ||
823 | } | ||
824 | /* | ||
825 | * scsi_result_to_blk_status may have reset the host_byte | ||
826 | */ | ||
827 | scsi_req(req)->result = cmd->result; | ||
828 | scsi_req(req)->resid_len = scsi_get_resid(cmd); | ||
829 | |||
830 | if (scsi_bidi_cmnd(cmd)) { | ||
831 | /* | ||
832 | * Bidi commands Must be complete as a whole, | ||
833 | * both sides at once. | ||
834 | */ | ||
835 | scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid; | ||
836 | if (scsi_end_request(req, BLK_STS_OK, blk_rq_bytes(req), | ||
837 | blk_rq_bytes(req->next_rq))) | ||
838 | BUG(); | ||
839 | return; | ||
840 | } | ||
841 | } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) { | ||
842 | /* | ||
843 | * Flush commands do not transfers any data, and thus cannot use | ||
844 | * good_bytes != blk_rq_bytes(req) as the signal for an error. | ||
845 | * This sets the error explicitly for the problem case. | ||
846 | */ | ||
847 | error = scsi_result_to_blk_status(cmd, result); | ||
848 | } | ||
849 | |||
850 | /* no bidi support for !blk_rq_is_passthrough yet */ | ||
851 | BUG_ON(blk_bidi_rq(req)); | ||
852 | |||
853 | /* | ||
854 | * Next deal with any sectors which we were able to correctly | ||
855 | * handle. | ||
856 | */ | ||
857 | SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd, | ||
858 | "%u sectors total, %d bytes done.\n", | ||
859 | blk_rq_sectors(req), good_bytes)); | ||
860 | |||
861 | /* | ||
862 | * Recovered errors need reporting, but they're always treated as | ||
863 | * success, so fiddle the result code here. For passthrough requests | ||
864 | * we already took a copy of the original into sreq->result which | ||
865 | * is what gets returned to the user | ||
866 | */ | ||
867 | if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) { | ||
868 | /* if ATA PASS-THROUGH INFORMATION AVAILABLE skip | ||
869 | * print since caller wants ATA registers. Only occurs on | ||
870 | * SCSI ATA PASS_THROUGH commands when CK_COND=1 | ||
871 | */ | ||
872 | if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d)) | ||
873 | ; | ||
874 | else if (!(req->rq_flags & RQF_QUIET)) | ||
875 | scsi_print_sense(cmd); | ||
876 | result = 0; | ||
877 | /* for passthrough error may be set */ | ||
878 | error = BLK_STS_OK; | ||
879 | } | ||
880 | /* | ||
881 | * Another corner case: the SCSI status byte is non-zero but 'good'. | ||
882 | * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when | ||
883 | * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD | ||
884 | * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related | ||
885 | * intermediate statuses (both obsolete in SAM-4) as good. | ||
886 | */ | ||
887 | if (status_byte(result) && scsi_status_is_good(result)) { | ||
888 | result = 0; | ||
889 | error = BLK_STS_OK; | ||
890 | } | ||
891 | |||
892 | /* | ||
893 | * special case: failed zero length commands always need to | ||
894 | * drop down into the retry code. Otherwise, if we finished | ||
895 | * all bytes in the request we are done now. | ||
896 | */ | ||
897 | if (!(blk_rq_bytes(req) == 0 && error) && | ||
898 | !scsi_end_request(req, error, good_bytes, 0)) | ||
899 | return; | ||
900 | |||
901 | /* | ||
902 | * Kill remainder if no retrys. | ||
903 | */ | ||
904 | if (error && scsi_noretry_cmd(cmd)) { | ||
905 | if (scsi_end_request(req, error, blk_rq_bytes(req), 0)) | ||
906 | BUG(); | ||
907 | return; | ||
908 | } | ||
909 | |||
910 | /* | ||
911 | * If there had been no error, but we have leftover bytes in the | ||
912 | * requeues just queue the command up again. | ||
913 | */ | ||
914 | if (result == 0) | ||
915 | goto requeue; | ||
916 | 801 | ||
917 | error = scsi_result_to_blk_status(cmd, result); | 802 | blk_stat = scsi_result_to_blk_status(cmd, result); |
918 | 803 | ||
919 | if (host_byte(result) == DID_RESET) { | 804 | if (host_byte(result) == DID_RESET) { |
920 | /* Third party bus reset or reset for error recovery | 805 | /* Third party bus reset or reset for error recovery |
@@ -922,7 +807,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
922 | * happens. | 807 | * happens. |
923 | */ | 808 | */ |
924 | action = ACTION_RETRY; | 809 | action = ACTION_RETRY; |
925 | } else if (sense_valid && !sense_deferred) { | 810 | } else if (sense_valid && sense_current) { |
926 | switch (sshdr.sense_key) { | 811 | switch (sshdr.sense_key) { |
927 | case UNIT_ATTENTION: | 812 | case UNIT_ATTENTION: |
928 | if (cmd->device->removable) { | 813 | if (cmd->device->removable) { |
@@ -958,18 +843,18 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
958 | action = ACTION_REPREP; | 843 | action = ACTION_REPREP; |
959 | } else if (sshdr.asc == 0x10) /* DIX */ { | 844 | } else if (sshdr.asc == 0x10) /* DIX */ { |
960 | action = ACTION_FAIL; | 845 | action = ACTION_FAIL; |
961 | error = BLK_STS_PROTECTION; | 846 | blk_stat = BLK_STS_PROTECTION; |
962 | /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ | 847 | /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ |
963 | } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) { | 848 | } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) { |
964 | action = ACTION_FAIL; | 849 | action = ACTION_FAIL; |
965 | error = BLK_STS_TARGET; | 850 | blk_stat = BLK_STS_TARGET; |
966 | } else | 851 | } else |
967 | action = ACTION_FAIL; | 852 | action = ACTION_FAIL; |
968 | break; | 853 | break; |
969 | case ABORTED_COMMAND: | 854 | case ABORTED_COMMAND: |
970 | action = ACTION_FAIL; | 855 | action = ACTION_FAIL; |
971 | if (sshdr.asc == 0x10) /* DIF */ | 856 | if (sshdr.asc == 0x10) /* DIF */ |
972 | error = BLK_STS_PROTECTION; | 857 | blk_stat = BLK_STS_PROTECTION; |
973 | break; | 858 | break; |
974 | case NOT_READY: | 859 | case NOT_READY: |
975 | /* If the device is in the process of becoming | 860 | /* If the device is in the process of becoming |
@@ -1022,8 +907,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
1022 | DEFAULT_RATELIMIT_BURST); | 907 | DEFAULT_RATELIMIT_BURST); |
1023 | 908 | ||
1024 | if (unlikely(scsi_logging_level)) | 909 | if (unlikely(scsi_logging_level)) |
1025 | level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT, | 910 | level = |
1026 | SCSI_LOG_MLCOMPLETE_BITS); | 911 | SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT, |
912 | SCSI_LOG_MLCOMPLETE_BITS); | ||
1027 | 913 | ||
1028 | /* | 914 | /* |
1029 | * if logging is enabled the failure will be printed | 915 | * if logging is enabled the failure will be printed |
@@ -1031,25 +917,16 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
1031 | */ | 917 | */ |
1032 | if (!level && __ratelimit(&_rs)) { | 918 | if (!level && __ratelimit(&_rs)) { |
1033 | scsi_print_result(cmd, NULL, FAILED); | 919 | scsi_print_result(cmd, NULL, FAILED); |
1034 | if (driver_byte(result) & DRIVER_SENSE) | 920 | if (driver_byte(result) == DRIVER_SENSE) |
1035 | scsi_print_sense(cmd); | 921 | scsi_print_sense(cmd); |
1036 | scsi_print_command(cmd); | 922 | scsi_print_command(cmd); |
1037 | } | 923 | } |
1038 | } | 924 | } |
1039 | if (!scsi_end_request(req, error, blk_rq_err_bytes(req), 0)) | 925 | if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req), 0)) |
1040 | return; | 926 | return; |
1041 | /*FALLTHRU*/ | 927 | /*FALLTHRU*/ |
1042 | case ACTION_REPREP: | 928 | case ACTION_REPREP: |
1043 | requeue: | 929 | scsi_io_completion_reprep(cmd, q); |
1044 | /* Unprep the request and put it back at the head of the queue. | ||
1045 | * A new command will be prepared and issued. | ||
1046 | */ | ||
1047 | if (q->mq_ops) { | ||
1048 | scsi_mq_requeue_cmd(cmd); | ||
1049 | } else { | ||
1050 | scsi_release_buffers(cmd); | ||
1051 | scsi_requeue_command(q, cmd); | ||
1052 | } | ||
1053 | break; | 930 | break; |
1054 | case ACTION_RETRY: | 931 | case ACTION_RETRY: |
1055 | /* Retry the same command immediately */ | 932 | /* Retry the same command immediately */ |
@@ -1062,6 +939,185 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | |||
1062 | } | 939 | } |
1063 | } | 940 | } |
1064 | 941 | ||
942 | /* | ||
943 | * Helper for scsi_io_completion() when cmd->result is non-zero. Returns a | ||
944 | * new result that may suppress further error checking. Also modifies | ||
945 | * *blk_statp in some cases. | ||
946 | */ | ||
947 | static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result, | ||
948 | blk_status_t *blk_statp) | ||
949 | { | ||
950 | bool sense_valid; | ||
951 | bool sense_current = true; /* false implies "deferred sense" */ | ||
952 | struct request *req = cmd->request; | ||
953 | struct scsi_sense_hdr sshdr; | ||
954 | |||
955 | sense_valid = scsi_command_normalize_sense(cmd, &sshdr); | ||
956 | if (sense_valid) | ||
957 | sense_current = !scsi_sense_is_deferred(&sshdr); | ||
958 | |||
959 | if (blk_rq_is_passthrough(req)) { | ||
960 | if (sense_valid) { | ||
961 | /* | ||
962 | * SG_IO wants current and deferred errors | ||
963 | */ | ||
964 | scsi_req(req)->sense_len = | ||
965 | min(8 + cmd->sense_buffer[7], | ||
966 | SCSI_SENSE_BUFFERSIZE); | ||
967 | } | ||
968 | if (sense_current) | ||
969 | *blk_statp = scsi_result_to_blk_status(cmd, result); | ||
970 | } else if (blk_rq_bytes(req) == 0 && sense_current) { | ||
971 | /* | ||
972 | * Flush commands do not transfer any data, and thus cannot use | ||
973 | * good_bytes != blk_rq_bytes(req) as the signal for an error. | ||
974 | * This sets *blk_statp explicitly for the problem case. | ||
975 | */ | ||
976 | *blk_statp = scsi_result_to_blk_status(cmd, result); | ||
977 | } | ||
978 | /* | ||
979 | * Recovered errors need reporting, but they're always treated as | ||
980 | * success, so fiddle the result code here. For passthrough requests | ||
981 | * we already took a copy of the original into sreq->result which | ||
982 | * is what gets returned to the user | ||
983 | */ | ||
984 | if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) { | ||
985 | bool do_print = true; | ||
986 | /* | ||
987 | * if ATA PASS-THROUGH INFORMATION AVAILABLE [0x0, 0x1d] | ||
988 | * skip print since caller wants ATA registers. Only occurs | ||
989 | * on SCSI ATA PASS_THROUGH commands when CK_COND=1 | ||
990 | */ | ||
991 | if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d)) | ||
992 | do_print = false; | ||
993 | else if (req->rq_flags & RQF_QUIET) | ||
994 | do_print = false; | ||
995 | if (do_print) | ||
996 | scsi_print_sense(cmd); | ||
997 | result = 0; | ||
998 | /* for passthrough, *blk_statp may be set */ | ||
999 | *blk_statp = BLK_STS_OK; | ||
1000 | } | ||
1001 | /* | ||
1002 | * Another corner case: the SCSI status byte is non-zero but 'good'. | ||
1003 | * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when | ||
1004 | * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD | ||
1005 | * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related | ||
1006 | * intermediate statuses (both obsolete in SAM-4) as good. | ||
1007 | */ | ||
1008 | if (status_byte(result) && scsi_status_is_good(result)) { | ||
1009 | result = 0; | ||
1010 | *blk_statp = BLK_STS_OK; | ||
1011 | } | ||
1012 | return result; | ||
1013 | } | ||
1014 | |||
1015 | /* | ||
1016 | * Function: scsi_io_completion() | ||
1017 | * | ||
1018 | * Purpose: Completion processing for block device I/O requests. | ||
1019 | * | ||
1020 | * Arguments: cmd - command that is finished. | ||
1021 | * | ||
1022 | * Lock status: Assumed that no lock is held upon entry. | ||
1023 | * | ||
1024 | * Returns: Nothing | ||
1025 | * | ||
1026 | * Notes: We will finish off the specified number of sectors. If we | ||
1027 | * are done, the command block will be released and the queue | ||
1028 | * function will be goosed. If we are not done then we have to | ||
1029 | * figure out what to do next: | ||
1030 | * | ||
1031 | * a) We can call scsi_requeue_command(). The request | ||
1032 | * will be unprepared and put back on the queue. Then | ||
1033 | * a new command will be created for it. This should | ||
1034 | * be used if we made forward progress, or if we want | ||
1035 | * to switch from READ(10) to READ(6) for example. | ||
1036 | * | ||
1037 | * b) We can call __scsi_queue_insert(). The request will | ||
1038 | * be put back on the queue and retried using the same | ||
1039 | * command as before, possibly after a delay. | ||
1040 | * | ||
1041 | * c) We can call scsi_end_request() with blk_stat other than | ||
1042 | * BLK_STS_OK, to fail the remainder of the request. | ||
1043 | */ | ||
1044 | void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) | ||
1045 | { | ||
1046 | int result = cmd->result; | ||
1047 | struct request_queue *q = cmd->device->request_queue; | ||
1048 | struct request *req = cmd->request; | ||
1049 | blk_status_t blk_stat = BLK_STS_OK; | ||
1050 | |||
1051 | if (unlikely(result)) /* a nz result may or may not be an error */ | ||
1052 | result = scsi_io_completion_nz_result(cmd, result, &blk_stat); | ||
1053 | |||
1054 | if (unlikely(blk_rq_is_passthrough(req))) { | ||
1055 | /* | ||
1056 | * scsi_result_to_blk_status may have reset the host_byte | ||
1057 | */ | ||
1058 | scsi_req(req)->result = cmd->result; | ||
1059 | scsi_req(req)->resid_len = scsi_get_resid(cmd); | ||
1060 | |||
1061 | if (unlikely(scsi_bidi_cmnd(cmd))) { | ||
1062 | /* | ||
1063 | * Bidi commands must be completed as a whole, | ||
1064 | * both sides at once. | ||
1065 | */ | ||
1066 | scsi_req(req->next_rq)->resid_len = scsi_in(cmd)->resid; | ||
1067 | if (scsi_end_request(req, BLK_STS_OK, blk_rq_bytes(req), | ||
1068 | blk_rq_bytes(req->next_rq))) | ||
1069 | WARN_ONCE(true, | ||
1070 | "Bidi command with remaining bytes"); | ||
1071 | return; | ||
1072 | } | ||
1073 | } | ||
1074 | |||
1075 | /* no bidi support yet, other than in pass-through */ | ||
1076 | if (unlikely(blk_bidi_rq(req))) { | ||
1077 | WARN_ONCE(true, "Only support bidi command in passthrough"); | ||
1078 | scmd_printk(KERN_ERR, cmd, "Killing bidi command\n"); | ||
1079 | if (scsi_end_request(req, BLK_STS_IOERR, blk_rq_bytes(req), | ||
1080 | blk_rq_bytes(req->next_rq))) | ||
1081 | WARN_ONCE(true, "Bidi command with remaining bytes"); | ||
1082 | return; | ||
1083 | } | ||
1084 | |||
1085 | /* | ||
1086 | * Next deal with any sectors which we were able to correctly | ||
1087 | * handle. | ||
1088 | */ | ||
1089 | SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd, | ||
1090 | "%u sectors total, %d bytes done.\n", | ||
1091 | blk_rq_sectors(req), good_bytes)); | ||
1092 | |||
1093 | /* | ||
1094 | * Next deal with any sectors which we were able to correctly | ||
1095 | * handle. Failed, zero length commands always need to drop down | ||
1096 | * to retry code. Fast path should return in this block. | ||
1097 | */ | ||
1098 | if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) { | ||
1099 | if (likely(!scsi_end_request(req, blk_stat, good_bytes, 0))) | ||
1100 | return; /* no bytes remaining */ | ||
1101 | } | ||
1102 | |||
1103 | /* Kill remainder if no retries. */ | ||
1104 | if (unlikely(blk_stat && scsi_noretry_cmd(cmd))) { | ||
1105 | if (scsi_end_request(req, blk_stat, blk_rq_bytes(req), 0)) | ||
1106 | WARN_ONCE(true, | ||
1107 | "Bytes remaining after failed, no-retry command"); | ||
1108 | return; | ||
1109 | } | ||
1110 | |||
1111 | /* | ||
1112 | * If there had been no error, but we have leftover bytes in the | ||
1113 | * requeues just queue the command up again. | ||
1114 | */ | ||
1115 | if (likely(result == 0)) | ||
1116 | scsi_io_completion_reprep(cmd, q); | ||
1117 | else | ||
1118 | scsi_io_completion_action(cmd, result); | ||
1119 | } | ||
1120 | |||
1065 | static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb) | 1121 | static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb) |
1066 | { | 1122 | { |
1067 | int count; | 1123 | int count; |
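
The scsi_io_completion() rewrite above splits one long function into three helpers: scsi_io_completion_nz_result() normalizes a non-zero cmd->result (sense handling, recovered errors, non-zero but "good" statuses) and may adjust the blk_status_t; scsi_io_completion_reprep() requeues the request for a fresh prepare on either the mq or legacy path; and scsi_io_completion_action() keeps the retry/fail decision tree. The new top-level function then reduces to roughly the following (a sketch with the passthrough, bidi and no-retry handling trimmed):

	void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
	{
		int result = cmd->result;
		struct request_queue *q = cmd->device->request_queue;
		struct request *req = cmd->request;
		blk_status_t blk_stat = BLK_STS_OK;

		if (unlikely(result))	/* non-zero result may or may not be an error */
			result = scsi_io_completion_nz_result(cmd, result, &blk_stat);

		/* fast path: bytes were transferred, or a benign zero-length command */
		if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK))
			if (likely(!scsi_end_request(req, blk_stat, good_bytes, 0)))
				return;		/* no bytes remaining */

		if (likely(result == 0))
			scsi_io_completion_reprep(cmd, q);	/* leftover bytes, no error */
		else
			scsi_io_completion_action(cmd, result);	/* decide retry vs fail */
	}
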
@@ -1550,7 +1606,10 @@ static inline int scsi_host_queue_ready(struct request_queue *q, | |||
1550 | if (scsi_host_in_recovery(shost)) | 1606 | if (scsi_host_in_recovery(shost)) |
1551 | return 0; | 1607 | return 0; |
1552 | 1608 | ||
1553 | busy = atomic_inc_return(&shost->host_busy) - 1; | 1609 | if (!shost->use_blk_mq) |
1610 | busy = atomic_inc_return(&shost->host_busy) - 1; | ||
1611 | else | ||
1612 | busy = 0; | ||
1554 | if (atomic_read(&shost->host_blocked) > 0) { | 1613 | if (atomic_read(&shost->host_blocked) > 0) { |
1555 | if (busy) | 1614 | if (busy) |
1556 | goto starved; | 1615 | goto starved; |
@@ -1566,7 +1625,7 @@ static inline int scsi_host_queue_ready(struct request_queue *q, | |||
1566 | "unblocking host at zero depth\n")); | 1625 | "unblocking host at zero depth\n")); |
1567 | } | 1626 | } |
1568 | 1627 | ||
1569 | if (shost->can_queue > 0 && busy >= shost->can_queue) | 1628 | if (!shost->use_blk_mq && shost->can_queue > 0 && busy >= shost->can_queue) |
1570 | goto starved; | 1629 | goto starved; |
1571 | if (shost->host_self_blocked) | 1630 | if (shost->host_self_blocked) |
1572 | goto starved; | 1631 | goto starved; |
@@ -1652,7 +1711,9 @@ static void scsi_kill_request(struct request *req, struct request_queue *q) | |||
1652 | * with the locks as normal issue path does. | 1711 | * with the locks as normal issue path does. |
1653 | */ | 1712 | */ |
1654 | atomic_inc(&sdev->device_busy); | 1713 | atomic_inc(&sdev->device_busy); |
1655 | atomic_inc(&shost->host_busy); | 1714 | |
1715 | if (!shost->use_blk_mq) | ||
1716 | atomic_inc(&shost->host_busy); | ||
1656 | if (starget->can_queue > 0) | 1717 | if (starget->can_queue > 0) |
1657 | atomic_inc(&starget->target_busy); | 1718 | atomic_inc(&starget->target_busy); |
1658 | 1719 | ||
@@ -2555,7 +2616,7 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, | |||
2555 | * ILLEGAL REQUEST if the code page isn't supported */ | 2616 | * ILLEGAL REQUEST if the code page isn't supported */ |
2556 | 2617 | ||
2557 | if (use_10_for_ms && !scsi_status_is_good(result) && | 2618 | if (use_10_for_ms && !scsi_status_is_good(result) && |
2558 | (driver_byte(result) & DRIVER_SENSE)) { | 2619 | driver_byte(result) == DRIVER_SENSE) { |
2559 | if (scsi_sense_valid(sshdr)) { | 2620 | if (scsi_sense_valid(sshdr)) { |
2560 | if ((sshdr->sense_key == ILLEGAL_REQUEST) && | 2621 | if ((sshdr->sense_key == ILLEGAL_REQUEST) && |
2561 | (sshdr->asc == 0x20) && (sshdr->ascq == 0)) { | 2622 | (sshdr->asc == 0x20) && (sshdr->ascq == 0)) { |
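
The remaining scsi_lib.c hunks stop maintaining shost->host_busy on the blk-mq path altogether: scsi_dec_host_busy(), scsi_host_queue_ready(), scsi_kill_request() and scsi_host_is_busy() all gate the atomic on !shost->use_blk_mq, because blk-mq already limits outstanding commands through host-wide driver-tag allocation. A simplified sketch of the submission-side check (the real scsi_host_queue_ready() also handles host_blocked and host_self_blocked, which are omitted here):

	/* Only the legacy path counts per-host busy commands by hand. */
	static inline int host_queue_ready(struct Scsi_Host *shost)
	{
		int busy = 0;

		if (!shost->use_blk_mq)
			busy = atomic_inc_return(&shost->host_busy) - 1;

		/* blk-mq: busy stays 0 here; the tag set already enforces can_queue */
		if (!shost->use_blk_mq && shost->can_queue > 0 &&
		    busy >= shost->can_queue)
			return 0;	/* starved */

		return 1;
	}
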
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 0880d975eed3..78ca63dfba4a 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
@@ -614,7 +614,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result, | |||
614 | * INQUIRY should not yield UNIT_ATTENTION | 614 | * INQUIRY should not yield UNIT_ATTENTION |
615 | * but many buggy devices do so anyway. | 615 | * but many buggy devices do so anyway. |
616 | */ | 616 | */ |
617 | if ((driver_byte(result) & DRIVER_SENSE) && | 617 | if (driver_byte(result) == DRIVER_SENSE && |
618 | scsi_sense_valid(&sshdr)) { | 618 | scsi_sense_valid(&sshdr)) { |
619 | if ((sshdr.sense_key == UNIT_ATTENTION) && | 619 | if ((sshdr.sense_key == UNIT_ATTENTION) && |
620 | ((sshdr.asc == 0x28) || | 620 | ((sshdr.asc == 0x28) || |
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 7943b762c12d..3aee9464a7bf 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c | |||
@@ -382,7 +382,7 @@ static ssize_t | |||
382 | show_host_busy(struct device *dev, struct device_attribute *attr, char *buf) | 382 | show_host_busy(struct device *dev, struct device_attribute *attr, char *buf) |
383 | { | 383 | { |
384 | struct Scsi_Host *shost = class_to_shost(dev); | 384 | struct Scsi_Host *shost = class_to_shost(dev); |
385 | return snprintf(buf, 20, "%d\n", atomic_read(&shost->host_busy)); | 385 | return snprintf(buf, 20, "%d\n", scsi_host_busy(shost)); |
386 | } | 386 | } |
387 | static DEVICE_ATTR(host_busy, S_IRUGO, show_host_busy, NULL); | 387 | static DEVICE_ATTR(host_busy, S_IRUGO, show_host_busy, NULL); |
388 | 388 | ||
@@ -722,8 +722,24 @@ static ssize_t | |||
722 | sdev_store_delete(struct device *dev, struct device_attribute *attr, | 722 | sdev_store_delete(struct device *dev, struct device_attribute *attr, |
723 | const char *buf, size_t count) | 723 | const char *buf, size_t count) |
724 | { | 724 | { |
725 | if (device_remove_file_self(dev, attr)) | 725 | struct kernfs_node *kn; |
726 | scsi_remove_device(to_scsi_device(dev)); | 726 | |
727 | kn = sysfs_break_active_protection(&dev->kobj, &attr->attr); | ||
728 | WARN_ON_ONCE(!kn); | ||
729 | /* | ||
730 | * Concurrent writes into the "delete" sysfs attribute may trigger | ||
731 | * concurrent calls to device_remove_file() and scsi_remove_device(). | ||
732 | * device_remove_file() handles concurrent removal calls by | ||
733 | * serializing these and by ignoring the second and later removal | ||
734 | * attempts. Concurrent calls of scsi_remove_device() are | ||
735 | * serialized. The second and later calls of scsi_remove_device() are | ||
736 | * ignored because the first call of that function changes the device | ||
737 | * state into SDEV_DEL. | ||
738 | */ | ||
739 | device_remove_file(dev, attr); | ||
740 | scsi_remove_device(to_scsi_device(dev)); | ||
741 | if (kn) | ||
742 | sysfs_unbreak_active_protection(kn); | ||
727 | return count; | 743 | return count; |
728 | }; | 744 | }; |
729 | static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete); | 745 | static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete); |
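The sdev_store_delete() change above follows the general pattern for a sysfs attribute that removes its own device: pin the kernfs node and break its active protection, perform the removal, then release the node. A condensed, hedged sketch of that pattern for a hypothetical attribute (only the two sysfs_*_active_protection() helpers and device_remove_file() are real APIs here; my_remove_object() stands in for scsi_remove_device() and is assumed to tolerate repeat calls):

	#include <linux/device.h>
	#include <linux/sysfs.h>

	static void my_remove_object(struct device *dev);	/* hypothetical */

	static ssize_t my_delete_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
	{
		struct kernfs_node *kn;

		/* drop active protection so removal cannot deadlock against
		 * this very write() */
		kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);

		device_remove_file(dev, attr);	/* serializes concurrent callers */
		my_remove_object(dev);		/* must ignore second and later calls */

		if (kn)
			sysfs_unbreak_active_protection(kn);
		return count;
	}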
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 13948102ca29..381668fa135d 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
@@ -567,7 +567,7 @@ fc_host_post_event(struct Scsi_Host *shost, u32 event_number, | |||
567 | 567 | ||
568 | INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC, | 568 | INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC, |
569 | FC_NL_ASYNC_EVENT, len); | 569 | FC_NL_ASYNC_EVENT, len); |
570 | event->seconds = get_seconds(); | 570 | event->seconds = ktime_get_real_seconds(); |
571 | event->vendor_id = 0; | 571 | event->vendor_id = 0; |
572 | event->host_no = shost->host_no; | 572 | event->host_no = shost->host_no; |
573 | event->event_datalen = sizeof(u32); /* bytes */ | 573 | event->event_datalen = sizeof(u32); /* bytes */ |
@@ -635,7 +635,7 @@ fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number, | |||
635 | 635 | ||
636 | INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC, | 636 | INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC, |
637 | FC_NL_ASYNC_EVENT, len); | 637 | FC_NL_ASYNC_EVENT, len); |
638 | event->seconds = get_seconds(); | 638 | event->seconds = ktime_get_real_seconds(); |
639 | event->vendor_id = vendor_id; | 639 | event->vendor_id = vendor_id; |
640 | event->host_no = shost->host_no; | 640 | event->host_no = shost->host_no; |
641 | event->event_datalen = data_len; /* bytes */ | 641 | event->event_datalen = data_len; /* bytes */ |
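Both fc_host event hunks above replace get_seconds(), whose unsigned long return truncates on 32-bit builds after 2038, with ktime_get_real_seconds(), which returns a time64_t. A small sketch of stamping an event record the new way (the demo_event structure is invented for illustration; only ktime_get_real_seconds() is the real API):

	#include <linux/timekeeping.h>
	#include <linux/types.h>

	struct demo_event {
		u64 seconds;
	};

	static void demo_event_stamp(struct demo_event *ev)
	{
		/* 64-bit wall-clock seconds, valid past 2038 on all arches */
		ev->seconds = ktime_get_real_seconds();
	}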
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c index 2ca150b16764..40b85b752b79 100644 --- a/drivers/scsi/scsi_transport_spi.c +++ b/drivers/scsi/scsi_transport_spi.c | |||
@@ -136,7 +136,7 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd, | |||
136 | REQ_FAILFAST_TRANSPORT | | 136 | REQ_FAILFAST_TRANSPORT | |
137 | REQ_FAILFAST_DRIVER, | 137 | REQ_FAILFAST_DRIVER, |
138 | 0, NULL); | 138 | 0, NULL); |
139 | if (!(driver_byte(result) & DRIVER_SENSE) || | 139 | if (driver_byte(result) != DRIVER_SENSE || |
140 | sshdr->sense_key != UNIT_ATTENTION) | 140 | sshdr->sense_key != UNIT_ATTENTION) |
141 | break; | 141 | break; |
142 | } | 142 | } |
diff --git a/drivers/scsi/scsi_typedefs.h b/drivers/scsi/scsi_typedefs.h deleted file mode 100644 index 2ed4c5cb7088..000000000000 --- a/drivers/scsi/scsi_typedefs.h +++ /dev/null | |||
@@ -1,2 +0,0 @@ | |||
1 | |||
2 | typedef struct scsi_cmnd Scsi_Cmnd; | ||
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index bbebdc3769b0..a58cee7a85f2 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -1635,7 +1635,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr) | |||
1635 | if (res) { | 1635 | if (res) { |
1636 | sd_print_result(sdkp, "Synchronize Cache(10) failed", res); | 1636 | sd_print_result(sdkp, "Synchronize Cache(10) failed", res); |
1637 | 1637 | ||
1638 | if (driver_byte(res) & DRIVER_SENSE) | 1638 | if (driver_byte(res) == DRIVER_SENSE) |
1639 | sd_print_sense_hdr(sdkp, sshdr); | 1639 | sd_print_sense_hdr(sdkp, sshdr); |
1640 | 1640 | ||
1641 | /* we need to evaluate the error return */ | 1641 | /* we need to evaluate the error return */ |
@@ -1737,8 +1737,8 @@ static int sd_pr_command(struct block_device *bdev, u8 sa, | |||
1737 | result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data), | 1737 | result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data), |
1738 | &sshdr, SD_TIMEOUT, SD_MAX_RETRIES, NULL); | 1738 | &sshdr, SD_TIMEOUT, SD_MAX_RETRIES, NULL); |
1739 | 1739 | ||
1740 | if ((driver_byte(result) & DRIVER_SENSE) && | 1740 | if (driver_byte(result) == DRIVER_SENSE && |
1741 | (scsi_sense_valid(&sshdr))) { | 1741 | scsi_sense_valid(&sshdr)) { |
1742 | sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result); | 1742 | sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result); |
1743 | scsi_print_sense_hdr(sdev, NULL, &sshdr); | 1743 | scsi_print_sense_hdr(sdev, NULL, &sshdr); |
1744 | } | 1744 | } |
@@ -2028,7 +2028,6 @@ static int sd_done(struct scsi_cmnd *SCpnt) | |||
2028 | } else { | 2028 | } else { |
2029 | sdkp->device->no_write_same = 1; | 2029 | sdkp->device->no_write_same = 1; |
2030 | sd_config_write_same(sdkp); | 2030 | sd_config_write_same(sdkp); |
2031 | req->__data_len = blk_rq_bytes(req); | ||
2032 | req->rq_flags |= RQF_QUIET; | 2031 | req->rq_flags |= RQF_QUIET; |
2033 | } | 2032 | } |
2034 | break; | 2033 | break; |
@@ -2097,10 +2096,10 @@ sd_spinup_disk(struct scsi_disk *sdkp) | |||
2097 | retries++; | 2096 | retries++; |
2098 | } while (retries < 3 && | 2097 | } while (retries < 3 && |
2099 | (!scsi_status_is_good(the_result) || | 2098 | (!scsi_status_is_good(the_result) || |
2100 | ((driver_byte(the_result) & DRIVER_SENSE) && | 2099 | ((driver_byte(the_result) == DRIVER_SENSE) && |
2101 | sense_valid && sshdr.sense_key == UNIT_ATTENTION))); | 2100 | sense_valid && sshdr.sense_key == UNIT_ATTENTION))); |
2102 | 2101 | ||
2103 | if ((driver_byte(the_result) & DRIVER_SENSE) == 0) { | 2102 | if (driver_byte(the_result) != DRIVER_SENSE) { |
2104 | /* no sense, TUR either succeeded or failed | 2103 | /* no sense, TUR either succeeded or failed |
2105 | * with a status error */ | 2104 | * with a status error */ |
2106 | if(!spintime && !scsi_status_is_good(the_result)) { | 2105 | if(!spintime && !scsi_status_is_good(the_result)) { |
@@ -2226,7 +2225,7 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp, | |||
2226 | struct scsi_sense_hdr *sshdr, int sense_valid, | 2225 | struct scsi_sense_hdr *sshdr, int sense_valid, |
2227 | int the_result) | 2226 | int the_result) |
2228 | { | 2227 | { |
2229 | if (driver_byte(the_result) & DRIVER_SENSE) | 2228 | if (driver_byte(the_result) == DRIVER_SENSE) |
2230 | sd_print_sense_hdr(sdkp, sshdr); | 2229 | sd_print_sense_hdr(sdkp, sshdr); |
2231 | else | 2230 | else |
2232 | sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n"); | 2231 | sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n"); |
@@ -3492,7 +3491,7 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start) | |||
3492 | SD_TIMEOUT, SD_MAX_RETRIES, 0, RQF_PM, NULL); | 3491 | SD_TIMEOUT, SD_MAX_RETRIES, 0, RQF_PM, NULL); |
3493 | if (res) { | 3492 | if (res) { |
3494 | sd_print_result(sdkp, "Start/Stop Unit failed", res); | 3493 | sd_print_result(sdkp, "Start/Stop Unit failed", res); |
3495 | if (driver_byte(res) & DRIVER_SENSE) | 3494 | if (driver_byte(res) == DRIVER_SENSE) |
3496 | sd_print_sense_hdr(sdkp, &sshdr); | 3495 | sd_print_sense_hdr(sdkp, &sshdr); |
3497 | if (scsi_sense_valid(&sshdr) && | 3496 | if (scsi_sense_valid(&sshdr) && |
3498 | /* 0x3a is medium not present */ | 3497 | /* 0x3a is medium not present */ |
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 2bf3bf73886e..412c1787dcd9 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c | |||
@@ -148,12 +148,6 @@ int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd) | |||
148 | cmd->transfersize = sdkp->device->sector_size; | 148 | cmd->transfersize = sdkp->device->sector_size; |
149 | cmd->allowed = 0; | 149 | cmd->allowed = 0; |
150 | 150 | ||
151 | /* | ||
152 | * Report may return less bytes than requested. Make sure | ||
153 | * to report completion on the entire initial request. | ||
154 | */ | ||
155 | rq->__data_len = nr_bytes; | ||
156 | |||
157 | return BLKPREP_OK; | 151 | return BLKPREP_OK; |
158 | } | 152 | } |
159 | 153 | ||
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 139e13c73b41..8a254bb46a9b 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
@@ -1875,7 +1875,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) | |||
1875 | int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems; | 1875 | int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems; |
1876 | int sg_tablesize = sfp->parentdp->sg_tablesize; | 1876 | int sg_tablesize = sfp->parentdp->sg_tablesize; |
1877 | int blk_size = buff_size, order; | 1877 | int blk_size = buff_size, order; |
1878 | gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN; | 1878 | gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN | __GFP_ZERO; |
1879 | struct sg_device *sdp = sfp->parentdp; | 1879 | struct sg_device *sdp = sfp->parentdp; |
1880 | 1880 | ||
1881 | if (blk_size < 0) | 1881 | if (blk_size < 0) |
@@ -1905,9 +1905,6 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) | |||
1905 | if (sdp->device->host->unchecked_isa_dma) | 1905 | if (sdp->device->host->unchecked_isa_dma) |
1906 | gfp_mask |= GFP_DMA; | 1906 | gfp_mask |= GFP_DMA; |
1907 | 1907 | ||
1908 | if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) | ||
1909 | gfp_mask |= __GFP_ZERO; | ||
1910 | |||
1911 | order = get_order(num); | 1908 | order = get_order(num); |
1912 | retry: | 1909 | retry: |
1913 | ret_sz = 1 << (PAGE_SHIFT + order); | 1910 | ret_sz = 1 << (PAGE_SHIFT + order); |
@@ -1918,7 +1915,7 @@ retry: | |||
1918 | num = (rem_sz > scatter_elem_sz_prev) ? | 1915 | num = (rem_sz > scatter_elem_sz_prev) ? |
1919 | scatter_elem_sz_prev : rem_sz; | 1916 | scatter_elem_sz_prev : rem_sz; |
1920 | 1917 | ||
1921 | schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order); | 1918 | schp->pages[k] = alloc_pages(gfp_mask, order); |
1922 | if (!schp->pages[k]) | 1919 | if (!schp->pages[k]) |
1923 | goto out; | 1920 | goto out; |
1924 | 1921 | ||
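With __GFP_ZERO folded into the base mask, every sg buffer page now comes back zeroed regardless of the caller's capabilities, closing the window the old CAP_SYS_ADMIN/CAP_SYS_RAWIO conditional left open. A trimmed sketch of the allocation as it now behaves (flag values as in the hunk; the function name and error handling are illustrative):

	#include <linux/gfp.h>
	#include <linux/mm.h>

	static struct page *demo_alloc_sg_pages(int order, bool isa_dma)
	{
		gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN | __GFP_ZERO;

		if (isa_dma)
			gfp_mask |= GFP_DMA;

		/* pages are zero-filled by the allocator, no capability check */
		return alloc_pages(gfp_mask, order);
	}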
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index dc3a0542a2e8..e97bf2670315 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h | |||
@@ -483,6 +483,8 @@ struct pqi_raid_error_info { | |||
483 | #define CISS_CMD_STATUS_TMF 0xd | 483 | #define CISS_CMD_STATUS_TMF 0xd |
484 | #define CISS_CMD_STATUS_AIO_DISABLED 0xe | 484 | #define CISS_CMD_STATUS_AIO_DISABLED 0xe |
485 | 485 | ||
486 | #define PQI_CMD_STATUS_ABORTED CISS_CMD_STATUS_ABORTED | ||
487 | |||
486 | #define PQI_NUM_EVENT_QUEUE_ELEMENTS 32 | 488 | #define PQI_NUM_EVENT_QUEUE_ELEMENTS 32 |
487 | #define PQI_EVENT_OQ_ELEMENT_LENGTH sizeof(struct pqi_event_response) | 489 | #define PQI_EVENT_OQ_ELEMENT_LENGTH sizeof(struct pqi_event_response) |
488 | 490 | ||
@@ -581,8 +583,8 @@ struct pqi_admin_queues_aligned { | |||
581 | struct pqi_admin_queues { | 583 | struct pqi_admin_queues { |
582 | void *iq_element_array; | 584 | void *iq_element_array; |
583 | void *oq_element_array; | 585 | void *oq_element_array; |
584 | volatile pqi_index_t *iq_ci; | 586 | pqi_index_t *iq_ci; |
585 | volatile pqi_index_t *oq_pi; | 587 | pqi_index_t __iomem *oq_pi; |
586 | dma_addr_t iq_element_array_bus_addr; | 588 | dma_addr_t iq_element_array_bus_addr; |
587 | dma_addr_t oq_element_array_bus_addr; | 589 | dma_addr_t oq_element_array_bus_addr; |
588 | dma_addr_t iq_ci_bus_addr; | 590 | dma_addr_t iq_ci_bus_addr; |
@@ -606,8 +608,8 @@ struct pqi_queue_group { | |||
606 | dma_addr_t oq_element_array_bus_addr; | 608 | dma_addr_t oq_element_array_bus_addr; |
607 | __le32 __iomem *iq_pi[2]; | 609 | __le32 __iomem *iq_pi[2]; |
608 | pqi_index_t iq_pi_copy[2]; | 610 | pqi_index_t iq_pi_copy[2]; |
609 | volatile pqi_index_t *iq_ci[2]; | 611 | pqi_index_t __iomem *iq_ci[2]; |
610 | volatile pqi_index_t *oq_pi; | 612 | pqi_index_t __iomem *oq_pi; |
611 | dma_addr_t iq_ci_bus_addr[2]; | 613 | dma_addr_t iq_ci_bus_addr[2]; |
612 | dma_addr_t oq_pi_bus_addr; | 614 | dma_addr_t oq_pi_bus_addr; |
613 | __le32 __iomem *oq_ci; | 615 | __le32 __iomem *oq_ci; |
@@ -620,7 +622,7 @@ struct pqi_event_queue { | |||
620 | u16 oq_id; | 622 | u16 oq_id; |
621 | u16 int_msg_num; | 623 | u16 int_msg_num; |
622 | void *oq_element_array; | 624 | void *oq_element_array; |
623 | volatile pqi_index_t *oq_pi; | 625 | pqi_index_t __iomem *oq_pi; |
624 | dma_addr_t oq_element_array_bus_addr; | 626 | dma_addr_t oq_element_array_bus_addr; |
625 | dma_addr_t oq_pi_bus_addr; | 627 | dma_addr_t oq_pi_bus_addr; |
626 | __le32 __iomem *oq_ci; | 628 | __le32 __iomem *oq_ci; |
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index b78d20b74ed8..2112ea6723c6 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c | |||
@@ -40,11 +40,11 @@ | |||
40 | #define BUILD_TIMESTAMP | 40 | #define BUILD_TIMESTAMP |
41 | #endif | 41 | #endif |
42 | 42 | ||
43 | #define DRIVER_VERSION "1.1.4-115" | 43 | #define DRIVER_VERSION "1.1.4-130" |
44 | #define DRIVER_MAJOR 1 | 44 | #define DRIVER_MAJOR 1 |
45 | #define DRIVER_MINOR 1 | 45 | #define DRIVER_MINOR 1 |
46 | #define DRIVER_RELEASE 4 | 46 | #define DRIVER_RELEASE 4 |
47 | #define DRIVER_REVISION 115 | 47 | #define DRIVER_REVISION 130 |
48 | 48 | ||
49 | #define DRIVER_NAME "Microsemi PQI Driver (v" \ | 49 | #define DRIVER_NAME "Microsemi PQI Driver (v" \ |
50 | DRIVER_VERSION BUILD_TIMESTAMP ")" | 50 | DRIVER_VERSION BUILD_TIMESTAMP ")" |
@@ -1197,20 +1197,30 @@ no_buffer: | |||
1197 | device->volume_offline = volume_offline; | 1197 | device->volume_offline = volume_offline; |
1198 | } | 1198 | } |
1199 | 1199 | ||
1200 | #define PQI_INQUIRY_PAGE0_RETRIES 3 | ||
1201 | |||
1200 | static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, | 1202 | static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, |
1201 | struct pqi_scsi_dev *device) | 1203 | struct pqi_scsi_dev *device) |
1202 | { | 1204 | { |
1203 | int rc; | 1205 | int rc; |
1204 | u8 *buffer; | 1206 | u8 *buffer; |
1207 | unsigned int retries; | ||
1205 | 1208 | ||
1206 | buffer = kmalloc(64, GFP_KERNEL); | 1209 | buffer = kmalloc(64, GFP_KERNEL); |
1207 | if (!buffer) | 1210 | if (!buffer) |
1208 | return -ENOMEM; | 1211 | return -ENOMEM; |
1209 | 1212 | ||
1210 | /* Send an inquiry to the device to see what it is. */ | 1213 | /* Send an inquiry to the device to see what it is. */ |
1211 | rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64); | 1214 | for (retries = 0;;) { |
1212 | if (rc) | 1215 | rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, |
1213 | goto out; | 1216 | buffer, 64); |
1217 | if (rc == 0) | ||
1218 | break; | ||
1219 | if (pqi_is_logical_device(device) || | ||
1220 | rc != PQI_CMD_STATUS_ABORTED || | ||
1221 | ++retries > PQI_INQUIRY_PAGE0_RETRIES) | ||
1222 | goto out; | ||
1223 | } | ||
1214 | 1224 | ||
1215 | scsi_sanitize_inquiry_string(&buffer[8], 8); | 1225 | scsi_sanitize_inquiry_string(&buffer[8], 8); |
1216 | scsi_sanitize_inquiry_string(&buffer[16], 16); | 1226 | scsi_sanitize_inquiry_string(&buffer[16], 16); |
@@ -2693,7 +2703,7 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, | |||
2693 | oq_ci = queue_group->oq_ci_copy; | 2703 | oq_ci = queue_group->oq_ci_copy; |
2694 | 2704 | ||
2695 | while (1) { | 2705 | while (1) { |
2696 | oq_pi = *queue_group->oq_pi; | 2706 | oq_pi = readl(queue_group->oq_pi); |
2697 | if (oq_pi == oq_ci) | 2707 | if (oq_pi == oq_ci) |
2698 | break; | 2708 | break; |
2699 | 2709 | ||
@@ -2784,7 +2794,7 @@ static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info, | |||
2784 | spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); | 2794 | spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags); |
2785 | 2795 | ||
2786 | iq_pi = queue_group->iq_pi_copy[RAID_PATH]; | 2796 | iq_pi = queue_group->iq_pi_copy[RAID_PATH]; |
2787 | iq_ci = *queue_group->iq_ci[RAID_PATH]; | 2797 | iq_ci = readl(queue_group->iq_ci[RAID_PATH]); |
2788 | 2798 | ||
2789 | if (pqi_num_elements_free(iq_pi, iq_ci, | 2799 | if (pqi_num_elements_free(iq_pi, iq_ci, |
2790 | ctrl_info->num_elements_per_iq)) | 2800 | ctrl_info->num_elements_per_iq)) |
@@ -2943,7 +2953,7 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) | |||
2943 | oq_ci = event_queue->oq_ci_copy; | 2953 | oq_ci = event_queue->oq_ci_copy; |
2944 | 2954 | ||
2945 | while (1) { | 2955 | while (1) { |
2946 | oq_pi = *event_queue->oq_pi; | 2956 | oq_pi = readl(event_queue->oq_pi); |
2947 | if (oq_pi == oq_ci) | 2957 | if (oq_pi == oq_ci) |
2948 | break; | 2958 | break; |
2949 | 2959 | ||
@@ -3167,7 +3177,7 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) | |||
3167 | size_t element_array_length_per_iq; | 3177 | size_t element_array_length_per_iq; |
3168 | size_t element_array_length_per_oq; | 3178 | size_t element_array_length_per_oq; |
3169 | void *element_array; | 3179 | void *element_array; |
3170 | void *next_queue_index; | 3180 | void __iomem *next_queue_index; |
3171 | void *aligned_pointer; | 3181 | void *aligned_pointer; |
3172 | unsigned int num_inbound_queues; | 3182 | unsigned int num_inbound_queues; |
3173 | unsigned int num_outbound_queues; | 3183 | unsigned int num_outbound_queues; |
@@ -3263,7 +3273,7 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) | |||
3263 | element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS * | 3273 | element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS * |
3264 | PQI_EVENT_OQ_ELEMENT_LENGTH; | 3274 | PQI_EVENT_OQ_ELEMENT_LENGTH; |
3265 | 3275 | ||
3266 | next_queue_index = PTR_ALIGN(element_array, | 3276 | next_queue_index = (void __iomem *)PTR_ALIGN(element_array, |
3267 | PQI_OPERATIONAL_INDEX_ALIGNMENT); | 3277 | PQI_OPERATIONAL_INDEX_ALIGNMENT); |
3268 | 3278 | ||
3269 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { | 3279 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { |
@@ -3271,21 +3281,24 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) | |||
3271 | queue_group->iq_ci[RAID_PATH] = next_queue_index; | 3281 | queue_group->iq_ci[RAID_PATH] = next_queue_index; |
3272 | queue_group->iq_ci_bus_addr[RAID_PATH] = | 3282 | queue_group->iq_ci_bus_addr[RAID_PATH] = |
3273 | ctrl_info->queue_memory_base_dma_handle + | 3283 | ctrl_info->queue_memory_base_dma_handle + |
3274 | (next_queue_index - ctrl_info->queue_memory_base); | 3284 | (next_queue_index - |
3285 | (void __iomem *)ctrl_info->queue_memory_base); | ||
3275 | next_queue_index += sizeof(pqi_index_t); | 3286 | next_queue_index += sizeof(pqi_index_t); |
3276 | next_queue_index = PTR_ALIGN(next_queue_index, | 3287 | next_queue_index = PTR_ALIGN(next_queue_index, |
3277 | PQI_OPERATIONAL_INDEX_ALIGNMENT); | 3288 | PQI_OPERATIONAL_INDEX_ALIGNMENT); |
3278 | queue_group->iq_ci[AIO_PATH] = next_queue_index; | 3289 | queue_group->iq_ci[AIO_PATH] = next_queue_index; |
3279 | queue_group->iq_ci_bus_addr[AIO_PATH] = | 3290 | queue_group->iq_ci_bus_addr[AIO_PATH] = |
3280 | ctrl_info->queue_memory_base_dma_handle + | 3291 | ctrl_info->queue_memory_base_dma_handle + |
3281 | (next_queue_index - ctrl_info->queue_memory_base); | 3292 | (next_queue_index - |
3293 | (void __iomem *)ctrl_info->queue_memory_base); | ||
3282 | next_queue_index += sizeof(pqi_index_t); | 3294 | next_queue_index += sizeof(pqi_index_t); |
3283 | next_queue_index = PTR_ALIGN(next_queue_index, | 3295 | next_queue_index = PTR_ALIGN(next_queue_index, |
3284 | PQI_OPERATIONAL_INDEX_ALIGNMENT); | 3296 | PQI_OPERATIONAL_INDEX_ALIGNMENT); |
3285 | queue_group->oq_pi = next_queue_index; | 3297 | queue_group->oq_pi = next_queue_index; |
3286 | queue_group->oq_pi_bus_addr = | 3298 | queue_group->oq_pi_bus_addr = |
3287 | ctrl_info->queue_memory_base_dma_handle + | 3299 | ctrl_info->queue_memory_base_dma_handle + |
3288 | (next_queue_index - ctrl_info->queue_memory_base); | 3300 | (next_queue_index - |
3301 | (void __iomem *)ctrl_info->queue_memory_base); | ||
3289 | next_queue_index += sizeof(pqi_index_t); | 3302 | next_queue_index += sizeof(pqi_index_t); |
3290 | next_queue_index = PTR_ALIGN(next_queue_index, | 3303 | next_queue_index = PTR_ALIGN(next_queue_index, |
3291 | PQI_OPERATIONAL_INDEX_ALIGNMENT); | 3304 | PQI_OPERATIONAL_INDEX_ALIGNMENT); |
@@ -3294,7 +3307,8 @@ static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info) | |||
3294 | ctrl_info->event_queue.oq_pi = next_queue_index; | 3307 | ctrl_info->event_queue.oq_pi = next_queue_index; |
3295 | ctrl_info->event_queue.oq_pi_bus_addr = | 3308 | ctrl_info->event_queue.oq_pi_bus_addr = |
3296 | ctrl_info->queue_memory_base_dma_handle + | 3309 | ctrl_info->queue_memory_base_dma_handle + |
3297 | (next_queue_index - ctrl_info->queue_memory_base); | 3310 | (next_queue_index - |
3311 | (void __iomem *)ctrl_info->queue_memory_base); | ||
3298 | 3312 | ||
3299 | return 0; | 3313 | return 0; |
3300 | } | 3314 | } |
@@ -3368,7 +3382,8 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) | |||
3368 | admin_queues->oq_element_array = | 3382 | admin_queues->oq_element_array = |
3369 | &admin_queues_aligned->oq_element_array; | 3383 | &admin_queues_aligned->oq_element_array; |
3370 | admin_queues->iq_ci = &admin_queues_aligned->iq_ci; | 3384 | admin_queues->iq_ci = &admin_queues_aligned->iq_ci; |
3371 | admin_queues->oq_pi = &admin_queues_aligned->oq_pi; | 3385 | admin_queues->oq_pi = |
3386 | (pqi_index_t __iomem *)&admin_queues_aligned->oq_pi; | ||
3372 | 3387 | ||
3373 | admin_queues->iq_element_array_bus_addr = | 3388 | admin_queues->iq_element_array_bus_addr = |
3374 | ctrl_info->admin_queue_memory_base_dma_handle + | 3389 | ctrl_info->admin_queue_memory_base_dma_handle + |
@@ -3384,8 +3399,8 @@ static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info) | |||
3384 | ctrl_info->admin_queue_memory_base); | 3399 | ctrl_info->admin_queue_memory_base); |
3385 | admin_queues->oq_pi_bus_addr = | 3400 | admin_queues->oq_pi_bus_addr = |
3386 | ctrl_info->admin_queue_memory_base_dma_handle + | 3401 | ctrl_info->admin_queue_memory_base_dma_handle + |
3387 | ((void *)admin_queues->oq_pi - | 3402 | ((void __iomem *)admin_queues->oq_pi - |
3388 | ctrl_info->admin_queue_memory_base); | 3403 | (void __iomem *)ctrl_info->admin_queue_memory_base); |
3389 | 3404 | ||
3390 | return 0; | 3405 | return 0; |
3391 | } | 3406 | } |
@@ -3486,7 +3501,7 @@ static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info, | |||
3486 | timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies; | 3501 | timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * HZ) + jiffies; |
3487 | 3502 | ||
3488 | while (1) { | 3503 | while (1) { |
3489 | oq_pi = *admin_queues->oq_pi; | 3504 | oq_pi = readl(admin_queues->oq_pi); |
3490 | if (oq_pi != oq_ci) | 3505 | if (oq_pi != oq_ci) |
3491 | break; | 3506 | break; |
3492 | if (time_after(jiffies, timeout)) { | 3507 | if (time_after(jiffies, timeout)) { |
@@ -3545,7 +3560,7 @@ static void pqi_start_io(struct pqi_ctrl_info *ctrl_info, | |||
3545 | DIV_ROUND_UP(iu_length, | 3560 | DIV_ROUND_UP(iu_length, |
3546 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); | 3561 | PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); |
3547 | 3562 | ||
3548 | iq_ci = *queue_group->iq_ci[path]; | 3563 | iq_ci = readl(queue_group->iq_ci[path]); |
3549 | 3564 | ||
3550 | if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci, | 3565 | if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci, |
3551 | ctrl_info->num_elements_per_iq)) | 3566 | ctrl_info->num_elements_per_iq)) |
@@ -3621,29 +3636,24 @@ static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request, | |||
3621 | complete(waiting); | 3636 | complete(waiting); |
3622 | } | 3637 | } |
3623 | 3638 | ||
3624 | static int pqi_submit_raid_request_synchronous_with_io_request( | 3639 | static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info |
3625 | struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request, | 3640 | *error_info) |
3626 | unsigned long timeout_msecs) | ||
3627 | { | 3641 | { |
3628 | int rc = 0; | 3642 | int rc = -EIO; |
3629 | DECLARE_COMPLETION_ONSTACK(wait); | ||
3630 | 3643 | ||
3631 | io_request->io_complete_callback = pqi_raid_synchronous_complete; | 3644 | switch (error_info->data_out_result) { |
3632 | io_request->context = &wait; | 3645 | case PQI_DATA_IN_OUT_GOOD: |
3633 | 3646 | if (error_info->status == SAM_STAT_GOOD) | |
3634 | pqi_start_io(ctrl_info, | 3647 | rc = 0; |
3635 | &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, | 3648 | break; |
3636 | io_request); | 3649 | case PQI_DATA_IN_OUT_UNDERFLOW: |
3637 | 3650 | if (error_info->status == SAM_STAT_GOOD || | |
3638 | if (timeout_msecs == NO_TIMEOUT) { | 3651 | error_info->status == SAM_STAT_CHECK_CONDITION) |
3639 | pqi_wait_for_completion_io(ctrl_info, &wait); | 3652 | rc = 0; |
3640 | } else { | 3653 | break; |
3641 | if (!wait_for_completion_io_timeout(&wait, | 3654 | case PQI_DATA_IN_OUT_ABORTED: |
3642 | msecs_to_jiffies(timeout_msecs))) { | 3655 | rc = PQI_CMD_STATUS_ABORTED; |
3643 | dev_warn(&ctrl_info->pci_dev->dev, | 3656 | break; |
3644 | "command timed out\n"); | ||
3645 | rc = -ETIMEDOUT; | ||
3646 | } | ||
3647 | } | 3657 | } |
3648 | 3658 | ||
3649 | return rc; | 3659 | return rc; |
@@ -3653,11 +3663,12 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, | |||
3653 | struct pqi_iu_header *request, unsigned int flags, | 3663 | struct pqi_iu_header *request, unsigned int flags, |
3654 | struct pqi_raid_error_info *error_info, unsigned long timeout_msecs) | 3664 | struct pqi_raid_error_info *error_info, unsigned long timeout_msecs) |
3655 | { | 3665 | { |
3656 | int rc; | 3666 | int rc = 0; |
3657 | struct pqi_io_request *io_request; | 3667 | struct pqi_io_request *io_request; |
3658 | unsigned long start_jiffies; | 3668 | unsigned long start_jiffies; |
3659 | unsigned long msecs_blocked; | 3669 | unsigned long msecs_blocked; |
3660 | size_t iu_length; | 3670 | size_t iu_length; |
3671 | DECLARE_COMPLETION_ONSTACK(wait); | ||
3661 | 3672 | ||
3662 | /* | 3673 | /* |
3663 | * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value | 3674 | * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value |
@@ -3686,11 +3697,13 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, | |||
3686 | pqi_ctrl_busy(ctrl_info); | 3697 | pqi_ctrl_busy(ctrl_info); |
3687 | timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs); | 3698 | timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs); |
3688 | if (timeout_msecs == 0) { | 3699 | if (timeout_msecs == 0) { |
3700 | pqi_ctrl_unbusy(ctrl_info); | ||
3689 | rc = -ETIMEDOUT; | 3701 | rc = -ETIMEDOUT; |
3690 | goto out; | 3702 | goto out; |
3691 | } | 3703 | } |
3692 | 3704 | ||
3693 | if (pqi_ctrl_offline(ctrl_info)) { | 3705 | if (pqi_ctrl_offline(ctrl_info)) { |
3706 | pqi_ctrl_unbusy(ctrl_info); | ||
3694 | rc = -ENXIO; | 3707 | rc = -ENXIO; |
3695 | goto out; | 3708 | goto out; |
3696 | } | 3709 | } |
@@ -3708,8 +3721,25 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, | |||
3708 | PQI_REQUEST_HEADER_LENGTH; | 3721 | PQI_REQUEST_HEADER_LENGTH; |
3709 | memcpy(io_request->iu, request, iu_length); | 3722 | memcpy(io_request->iu, request, iu_length); |
3710 | 3723 | ||
3711 | rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info, | 3724 | io_request->io_complete_callback = pqi_raid_synchronous_complete; |
3712 | io_request, timeout_msecs); | 3725 | io_request->context = &wait; |
3726 | |||
3727 | pqi_start_io(ctrl_info, | ||
3728 | &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, | ||
3729 | io_request); | ||
3730 | |||
3731 | pqi_ctrl_unbusy(ctrl_info); | ||
3732 | |||
3733 | if (timeout_msecs == NO_TIMEOUT) { | ||
3734 | pqi_wait_for_completion_io(ctrl_info, &wait); | ||
3735 | } else { | ||
3736 | if (!wait_for_completion_io_timeout(&wait, | ||
3737 | msecs_to_jiffies(timeout_msecs))) { | ||
3738 | dev_warn(&ctrl_info->pci_dev->dev, | ||
3739 | "command timed out\n"); | ||
3740 | rc = -ETIMEDOUT; | ||
3741 | } | ||
3742 | } | ||
3713 | 3743 | ||
3714 | if (error_info) { | 3744 | if (error_info) { |
3715 | if (io_request->error_info) | 3745 | if (io_request->error_info) |
@@ -3718,25 +3748,13 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, | |||
3718 | else | 3748 | else |
3719 | memset(error_info, 0, sizeof(*error_info)); | 3749 | memset(error_info, 0, sizeof(*error_info)); |
3720 | } else if (rc == 0 && io_request->error_info) { | 3750 | } else if (rc == 0 && io_request->error_info) { |
3721 | u8 scsi_status; | 3751 | rc = pqi_process_raid_io_error_synchronous( |
3722 | struct pqi_raid_error_info *raid_error_info; | 3752 | io_request->error_info); |
3723 | |||
3724 | raid_error_info = io_request->error_info; | ||
3725 | scsi_status = raid_error_info->status; | ||
3726 | |||
3727 | if (scsi_status == SAM_STAT_CHECK_CONDITION && | ||
3728 | raid_error_info->data_out_result == | ||
3729 | PQI_DATA_IN_OUT_UNDERFLOW) | ||
3730 | scsi_status = SAM_STAT_GOOD; | ||
3731 | |||
3732 | if (scsi_status != SAM_STAT_GOOD) | ||
3733 | rc = -EIO; | ||
3734 | } | 3753 | } |
3735 | 3754 | ||
3736 | pqi_free_io_request(io_request); | 3755 | pqi_free_io_request(io_request); |
3737 | 3756 | ||
3738 | out: | 3757 | out: |
3739 | pqi_ctrl_unbusy(ctrl_info); | ||
3740 | up(&ctrl_info->sync_request_sem); | 3758 | up(&ctrl_info->sync_request_sem); |
3741 | 3759 | ||
3742 | return rc; | 3760 | return rc; |
@@ -5041,7 +5059,7 @@ static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info) | |||
5041 | iq_pi = queue_group->iq_pi_copy[path]; | 5059 | iq_pi = queue_group->iq_pi_copy[path]; |
5042 | 5060 | ||
5043 | while (1) { | 5061 | while (1) { |
5044 | iq_ci = *queue_group->iq_ci[path]; | 5062 | iq_ci = readl(queue_group->iq_ci[path]); |
5045 | if (iq_ci == iq_pi) | 5063 | if (iq_ci == iq_pi) |
5046 | break; | 5064 | break; |
5047 | pqi_check_ctrl_health(ctrl_info); | 5065 | pqi_check_ctrl_health(ctrl_info); |
@@ -6230,20 +6248,20 @@ static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info) | |||
6230 | admin_queues = &ctrl_info->admin_queues; | 6248 | admin_queues = &ctrl_info->admin_queues; |
6231 | admin_queues->iq_pi_copy = 0; | 6249 | admin_queues->iq_pi_copy = 0; |
6232 | admin_queues->oq_ci_copy = 0; | 6250 | admin_queues->oq_ci_copy = 0; |
6233 | *admin_queues->oq_pi = 0; | 6251 | writel(0, admin_queues->oq_pi); |
6234 | 6252 | ||
6235 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { | 6253 | for (i = 0; i < ctrl_info->num_queue_groups; i++) { |
6236 | ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; | 6254 | ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0; |
6237 | ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; | 6255 | ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0; |
6238 | ctrl_info->queue_groups[i].oq_ci_copy = 0; | 6256 | ctrl_info->queue_groups[i].oq_ci_copy = 0; |
6239 | 6257 | ||
6240 | *ctrl_info->queue_groups[i].iq_ci[RAID_PATH] = 0; | 6258 | writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]); |
6241 | *ctrl_info->queue_groups[i].iq_ci[AIO_PATH] = 0; | 6259 | writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]); |
6242 | *ctrl_info->queue_groups[i].oq_pi = 0; | 6260 | writel(0, ctrl_info->queue_groups[i].oq_pi); |
6243 | } | 6261 | } |
6244 | 6262 | ||
6245 | event_queue = &ctrl_info->event_queue; | 6263 | event_queue = &ctrl_info->event_queue; |
6246 | *event_queue->oq_pi = 0; | 6264 | writel(0, event_queue->oq_pi); |
6247 | event_queue->oq_ci_copy = 0; | 6265 | event_queue->oq_ci_copy = 0; |
6248 | } | 6266 | } |
6249 | 6267 | ||
@@ -6826,6 +6844,18 @@ static const struct pci_device_id pqi_pci_id_table[] = { | |||
6826 | }, | 6844 | }, |
6827 | { | 6845 | { |
6828 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | 6846 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
6847 | 0x1bd4, 0x004a) | ||
6848 | }, | ||
6849 | { | ||
6850 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | ||
6851 | 0x1bd4, 0x004b) | ||
6852 | }, | ||
6853 | { | ||
6854 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | ||
6855 | 0x1bd4, 0x004c) | ||
6856 | }, | ||
6857 | { | ||
6858 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | ||
6829 | PCI_VENDOR_ID_ADAPTEC2, 0x0110) | 6859 | PCI_VENDOR_ID_ADAPTEC2, 0x0110) |
6830 | }, | 6860 | }, |
6831 | { | 6861 | { |
@@ -6950,6 +6980,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { | |||
6950 | }, | 6980 | }, |
6951 | { | 6981 | { |
6952 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | 6982 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, |
6983 | PCI_VENDOR_ID_ADVANTECH, 0x8312) | ||
6984 | }, | ||
6985 | { | ||
6986 | PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, | ||
6953 | PCI_VENDOR_ID_DELL, 0x1fe0) | 6987 | PCI_VENDOR_ID_DELL, 0x1fe0) |
6954 | }, | 6988 | }, |
6955 | { | 6989 | { |
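The smartpqi hunks above annotate the device-written queue index pointers as __iomem and replace plain dereferences with readl()/writel(); sparse can then check the address space and the accessors supply the required access semantics. A minimal sketch of polling a producer index that way (structure and field names are illustrative, only readl() is the real API):

	#include <linux/io.h>
	#include <linux/types.h>

	struct demo_queue {
		u32 __iomem *oq_pi;	/* device-written producer index */
		u32 oq_ci_copy;		/* driver-side consumer index */
	};

	static bool demo_queue_has_work(struct demo_queue *q)
	{
		u32 oq_pi = readl(q->oq_pi);	/* never a plain *q->oq_pi */

		return oq_pi != q->oq_ci_copy;
	}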
diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c index 269ddf791a73..0abe17c1a73b 100644 --- a/drivers/scsi/snic/snic_debugfs.c +++ b/drivers/scsi/snic/snic_debugfs.c | |||
@@ -200,7 +200,7 @@ snic_stats_show(struct seq_file *sfp, void *data) | |||
200 | { | 200 | { |
201 | struct snic *snic = (struct snic *) sfp->private; | 201 | struct snic *snic = (struct snic *) sfp->private; |
202 | struct snic_stats *stats = &snic->s_stats; | 202 | struct snic_stats *stats = &snic->s_stats; |
203 | struct timespec last_isr_tms, last_ack_tms; | 203 | struct timespec64 last_isr_tms, last_ack_tms; |
204 | u64 maxio_tm; | 204 | u64 maxio_tm; |
205 | int i; | 205 | int i; |
206 | 206 | ||
@@ -312,12 +312,12 @@ snic_stats_show(struct seq_file *sfp, void *data) | |||
312 | "\t\t Other Statistics\n" | 312 | "\t\t Other Statistics\n" |
313 | "\n---------------------------------------------\n"); | 313 | "\n---------------------------------------------\n"); |
314 | 314 | ||
315 | jiffies_to_timespec(stats->misc.last_isr_time, &last_isr_tms); | 315 | jiffies_to_timespec64(stats->misc.last_isr_time, &last_isr_tms); |
316 | jiffies_to_timespec(stats->misc.last_ack_time, &last_ack_tms); | 316 | jiffies_to_timespec64(stats->misc.last_ack_time, &last_ack_tms); |
317 | 317 | ||
318 | seq_printf(sfp, | 318 | seq_printf(sfp, |
319 | "Last ISR Time : %llu (%8lu.%8lu)\n" | 319 | "Last ISR Time : %llu (%8llu.%09lu)\n" |
320 | "Last Ack Time : %llu (%8lu.%8lu)\n" | 320 | "Last Ack Time : %llu (%8llu.%09lu)\n" |
321 | "Ack ISRs : %llu\n" | 321 | "Ack ISRs : %llu\n" |
322 | "IO Cmpl ISRs : %llu\n" | 322 | "IO Cmpl ISRs : %llu\n" |
323 | "Err Notify ISRs : %llu\n" | 323 | "Err Notify ISRs : %llu\n" |
diff --git a/drivers/scsi/snic/snic_trc.c b/drivers/scsi/snic/snic_trc.c index f00ebf4717e0..fc60c933d6c0 100644 --- a/drivers/scsi/snic/snic_trc.c +++ b/drivers/scsi/snic/snic_trc.c | |||
@@ -65,12 +65,12 @@ static int | |||
65 | snic_fmt_trc_data(struct snic_trc_data *td, char *buf, int buf_sz) | 65 | snic_fmt_trc_data(struct snic_trc_data *td, char *buf, int buf_sz) |
66 | { | 66 | { |
67 | int len = 0; | 67 | int len = 0; |
68 | struct timespec tmspec; | 68 | struct timespec64 tmspec; |
69 | 69 | ||
70 | jiffies_to_timespec(td->ts, &tmspec); | 70 | jiffies_to_timespec64(td->ts, &tmspec); |
71 | 71 | ||
72 | len += snprintf(buf, buf_sz, | 72 | len += snprintf(buf, buf_sz, |
73 | "%lu.%10lu %-25s %3d %4x %16llx %16llx %16llx %16llx %16llx\n", | 73 | "%llu.%09lu %-25s %3d %4x %16llx %16llx %16llx %16llx %16llx\n", |
74 | tmspec.tv_sec, | 74 | tmspec.tv_sec, |
75 | tmspec.tv_nsec, | 75 | tmspec.tv_nsec, |
76 | td->fn, | 76 | td->fn, |
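Besides the timespec64 conversion, both snic hunks fix the fractional-seconds formatting: the nanoseconds field needs "%09lu" so that, for example, 5 ms prints as .005000000 instead of a space-padded raw count. A tiny userspace check of the difference (values are made up):

	#include <stdio.h>

	int main(void)
	{
		long long tv_sec = 12;
		long tv_nsec = 5000000;	/* 0.005 s */

		printf("old: %8llu.%8lu\n", (unsigned long long)tv_sec, tv_nsec);
		printf("new: %8llu.%09lu\n", (unsigned long long)tv_sec, tv_nsec);
		return 0;
	}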
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 50c66ccc4b41..307df2fa39a3 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c | |||
@@ -828,11 +828,8 @@ static int st_flush_write_buffer(struct scsi_tape * STp) | |||
828 | static int flush_buffer(struct scsi_tape *STp, int seek_next) | 828 | static int flush_buffer(struct scsi_tape *STp, int seek_next) |
829 | { | 829 | { |
830 | int backspace, result; | 830 | int backspace, result; |
831 | struct st_buffer *STbuffer; | ||
832 | struct st_partstat *STps; | 831 | struct st_partstat *STps; |
833 | 832 | ||
834 | STbuffer = STp->buffer; | ||
835 | |||
836 | /* | 833 | /* |
837 | * If there was a bus reset, block further access | 834 | * If there was a bus reset, block further access |
838 | * to this device. | 835 | * to this device. |
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw.c b/drivers/scsi/sym53c8xx_2/sym_fw.c index 190770bdc194..91db17727963 100644 --- a/drivers/scsi/sym53c8xx_2/sym_fw.c +++ b/drivers/scsi/sym53c8xx_2/sym_fw.c | |||
@@ -295,10 +295,8 @@ static void | |||
295 | sym_fw1_setup(struct sym_hcb *np, struct sym_fw *fw) | 295 | sym_fw1_setup(struct sym_hcb *np, struct sym_fw *fw) |
296 | { | 296 | { |
297 | struct sym_fw1a_scr *scripta0; | 297 | struct sym_fw1a_scr *scripta0; |
298 | struct sym_fw1b_scr *scriptb0; | ||
299 | 298 | ||
300 | scripta0 = (struct sym_fw1a_scr *) np->scripta0; | 299 | scripta0 = (struct sym_fw1a_scr *) np->scripta0; |
301 | scriptb0 = (struct sym_fw1b_scr *) np->scriptb0; | ||
302 | 300 | ||
303 | /* | 301 | /* |
304 | * Fill variable parts in scripts. | 302 | * Fill variable parts in scripts. |
@@ -319,10 +317,8 @@ static void | |||
319 | sym_fw2_setup(struct sym_hcb *np, struct sym_fw *fw) | 317 | sym_fw2_setup(struct sym_hcb *np, struct sym_fw *fw) |
320 | { | 318 | { |
321 | struct sym_fw2a_scr *scripta0; | 319 | struct sym_fw2a_scr *scripta0; |
322 | struct sym_fw2b_scr *scriptb0; | ||
323 | 320 | ||
324 | scripta0 = (struct sym_fw2a_scr *) np->scripta0; | 321 | scripta0 = (struct sym_fw2a_scr *) np->scripta0; |
325 | scriptb0 = (struct sym_fw2b_scr *) np->scriptb0; | ||
326 | 322 | ||
327 | /* | 323 | /* |
328 | * Fill variable parts in scripts. | 324 | * Fill variable parts in scripts. |
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index 7320d5fe4cbc..5f10aa9bad9b 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c | |||
@@ -252,7 +252,7 @@ void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid) | |||
252 | cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status); | 252 | cam_status = sym_xerr_cam_status(DID_ERROR, cp->xerr_status); |
253 | } | 253 | } |
254 | scsi_set_resid(cmd, resid); | 254 | scsi_set_resid(cmd, resid); |
255 | cmd->result = (drv_status << 24) + (cam_status << 16) + scsi_status; | 255 | cmd->result = (drv_status << 24) | (cam_status << 16) | scsi_status; |
256 | } | 256 | } |
257 | 257 | ||
258 | static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd) | 258 | static int sym_scatter(struct sym_hcb *np, struct sym_ccb *cp, struct scsi_cmnd *cmd) |
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.h b/drivers/scsi/sym53c8xx_2/sym_glue.h index 805369521df8..e34801ae5d69 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.h +++ b/drivers/scsi/sym53c8xx_2/sym_glue.h | |||
@@ -256,7 +256,7 @@ sym_get_cam_status(struct scsi_cmnd *cmd) | |||
256 | static inline void sym_set_cam_result_ok(struct sym_ccb *cp, struct scsi_cmnd *cmd, int resid) | 256 | static inline void sym_set_cam_result_ok(struct sym_ccb *cp, struct scsi_cmnd *cmd, int resid) |
257 | { | 257 | { |
258 | scsi_set_resid(cmd, resid); | 258 | scsi_set_resid(cmd, resid); |
259 | cmd->result = (((DID_OK) << 16) + ((cp->ssss_status) & 0x7f)); | 259 | cmd->result = (DID_OK << 16) | (cp->ssss_status & 0x7f); |
260 | } | 260 | } |
261 | void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid); | 261 | void sym_set_cam_result_error(struct sym_hcb *np, struct sym_ccb *cp, int resid); |
262 | 262 | ||
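The two sym53c8xx hunks above swap '+' for '|' when assembling cmd->result from its byte fields; with correctly shifted fields the value is identical, but OR states the intent and cannot carry into a neighbouring byte if a field ever overflows. A standalone illustration with the same layout (the status values are arbitrary):

	#include <stdio.h>

	int main(void)
	{
		unsigned int drv_status = 0x08;		/* driver byte */
		unsigned int cam_status = 0x07;		/* host byte */
		unsigned int scsi_status = 0x02;	/* status byte */
		unsigned int result;

		result = (drv_status << 24) | (cam_status << 16) | scsi_status;
		printf("result = 0x%08x\n", result);	/* prints 0x08070002 */
		return 0;
	}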
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c index 378af306fda1..bd3f6e2d6834 100644 --- a/drivers/scsi/sym53c8xx_2/sym_hipd.c +++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c | |||
@@ -3855,7 +3855,7 @@ out_reject: | |||
3855 | 3855 | ||
3856 | int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp) | 3856 | int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp) |
3857 | { | 3857 | { |
3858 | int dp_sg, dp_sgmin, resid = 0; | 3858 | int dp_sg, resid = 0; |
3859 | int dp_ofs = 0; | 3859 | int dp_ofs = 0; |
3860 | 3860 | ||
3861 | /* | 3861 | /* |
@@ -3902,7 +3902,6 @@ int sym_compute_residual(struct sym_hcb *np, struct sym_ccb *cp) | |||
3902 | * We are now full comfortable in the computation | 3902 | * We are now full comfortable in the computation |
3903 | * of the data residual (2's complement). | 3903 | * of the data residual (2's complement). |
3904 | */ | 3904 | */ |
3905 | dp_sgmin = SYM_CONF_MAX_SG - cp->segments; | ||
3906 | resid = -cp->ext_ofs; | 3905 | resid = -cp->ext_ofs; |
3907 | for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) { | 3906 | for (dp_sg = cp->ext_sg; dp_sg < SYM_CONF_MAX_SG; ++dp_sg) { |
3908 | u_int tmp = scr_to_cpu(cp->phys.data[dp_sg].size); | 3907 | u_int tmp = scr_to_cpu(cp->phys.data[dp_sg].size); |
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index e27b4d4e6ae2..e09fe6ab3572 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig | |||
@@ -100,3 +100,12 @@ config SCSI_UFS_QCOM | |||
100 | 100 | ||
101 | Select this if you have UFS controller on QCOM chipset. | 101 | Select this if you have UFS controller on QCOM chipset. |
102 | If unsure, say N. | 102 | If unsure, say N. |
103 | |||
104 | config SCSI_UFS_HISI | ||
105 | tristate "Hisilicon specific hooks to UFS controller platform driver" | ||
106 | depends on (ARCH_HISI || COMPILE_TEST) && SCSI_UFSHCD_PLATFORM | ||
107 | ---help--- | ||
108 | This selects the Hisilicon-specific additions to the UFSHCD platform driver. | ||
109 | |||
110 | Select this if you have UFS controller on Hisilicon chipset. | ||
111 | If unsure, say N. | ||
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile index 918f5791202d..2c50f03d8c4a 100644 --- a/drivers/scsi/ufs/Makefile +++ b/drivers/scsi/ufs/Makefile | |||
@@ -7,3 +7,4 @@ obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o | |||
7 | ufshcd-core-objs := ufshcd.o ufs-sysfs.o | 7 | ufshcd-core-objs := ufshcd.o ufs-sysfs.o |
8 | obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o | 8 | obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o |
9 | obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o | 9 | obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o |
10 | obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o | ||
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c new file mode 100644 index 000000000000..46df707e6f2c --- /dev/null +++ b/drivers/scsi/ufs/ufs-hisi.c | |||
@@ -0,0 +1,619 @@ | |||
1 | /* | ||
2 | * HiSilicon Hixxxx UFS Driver | ||
3 | * | ||
4 | * Copyright (c) 2016-2017 Linaro Ltd. | ||
5 | * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd. | ||
6 | * | ||
7 | * Released under the GPLv2 only. | ||
8 | * SPDX-License-Identifier: GPL-2.0 | ||
9 | */ | ||
10 | |||
11 | #include <linux/time.h> | ||
12 | #include <linux/of.h> | ||
13 | #include <linux/of_address.h> | ||
14 | #include <linux/dma-mapping.h> | ||
15 | #include <linux/platform_device.h> | ||
16 | #include <linux/reset.h> | ||
17 | |||
18 | #include "ufshcd.h" | ||
19 | #include "ufshcd-pltfrm.h" | ||
20 | #include "unipro.h" | ||
21 | #include "ufs-hisi.h" | ||
22 | #include "ufshci.h" | ||
23 | |||
24 | static int ufs_hisi_check_hibern8(struct ufs_hba *hba) | ||
25 | { | ||
26 | int err = 0; | ||
27 | u32 tx_fsm_val_0 = 0; | ||
28 | u32 tx_fsm_val_1 = 0; | ||
29 | unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS); | ||
30 | |||
31 | do { | ||
32 | err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0), | ||
33 | &tx_fsm_val_0); | ||
34 | err |= ufshcd_dme_get(hba, | ||
35 | UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1); | ||
36 | if (err || (tx_fsm_val_0 == TX_FSM_HIBERN8 && | ||
37 | tx_fsm_val_1 == TX_FSM_HIBERN8)) | ||
38 | break; | ||
39 | |||
40 | /* sleep for max. 200us */ | ||
41 | usleep_range(100, 200); | ||
42 | } while (time_before(jiffies, timeout)); | ||
43 | |||
44 | /* | ||
45 | * we might have scheduled out for long during polling so | ||
46 | * check the state again. | ||
47 | */ | ||
48 | if (time_after(jiffies, timeout)) { | ||
49 | err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0), | ||
50 | &tx_fsm_val_0); | ||
51 | err |= ufshcd_dme_get(hba, | ||
52 | UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1); | ||
53 | } | ||
54 | |||
55 | if (err) { | ||
56 | dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n", | ||
57 | __func__, err); | ||
58 | } else if (tx_fsm_val_0 != TX_FSM_HIBERN8 || | ||
59 | tx_fsm_val_1 != TX_FSM_HIBERN8) { | ||
60 | err = -1; | ||
61 | dev_err(hba->dev, "%s: invalid TX_FSM_STATE, lane0 = %d, lane1 = %d\n", | ||
62 | __func__, tx_fsm_val_0, tx_fsm_val_1); | ||
63 | } | ||
64 | |||
65 | return err; | ||
66 | } | ||
67 | |||
68 | static void ufs_hi3660_clk_init(struct ufs_hba *hba) | ||
69 | { | ||
70 | struct ufs_hisi_host *host = ufshcd_get_variant(hba); | ||
71 | |||
72 | ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL); | ||
73 | if (ufs_sys_ctrl_readl(host, PHY_CLK_CTRL) & BIT_SYSCTRL_REF_CLOCK_EN) | ||
74 | mdelay(1); | ||
75 | /* use abb clk */ | ||
76 | ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_SRC_SEl, UFS_SYSCTRL); | ||
77 | ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_ISO_EN, PHY_ISO_EN); | ||
78 | /* open mphy ref clk */ | ||
79 | ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL); | ||
80 | } | ||
81 | |||
82 | static void ufs_hi3660_soc_init(struct ufs_hba *hba) | ||
83 | { | ||
84 | struct ufs_hisi_host *host = ufshcd_get_variant(hba); | ||
85 | u32 reg; | ||
86 | |||
87 | if (!IS_ERR(host->rst)) | ||
88 | reset_control_assert(host->rst); | ||
89 | |||
90 | /* HC_PSW powerup */ | ||
91 | ufs_sys_ctrl_set_bits(host, BIT_UFS_PSW_MTCMOS_EN, PSW_POWER_CTRL); | ||
92 | udelay(10); | ||
93 | /* notify PWR ready */ | ||
94 | ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PWR_READY, HC_LP_CTRL); | ||
95 | ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | 0, | ||
96 | UFS_DEVICE_RESET_CTRL); | ||
97 | |||
98 | reg = ufs_sys_ctrl_readl(host, PHY_CLK_CTRL); | ||
99 | reg = (reg & ~MASK_SYSCTRL_CFG_CLOCK_FREQ) | UFS_FREQ_CFG_CLK; | ||
100 | /* set cfg clk freq */ | ||
101 | ufs_sys_ctrl_writel(host, reg, PHY_CLK_CTRL); | ||
102 | /* set ref clk freq */ | ||
103 | ufs_sys_ctrl_clr_bits(host, MASK_SYSCTRL_REF_CLOCK_SEL, PHY_CLK_CTRL); | ||
104 | /* bypass ufs clk gate */ | ||
105 | ufs_sys_ctrl_set_bits(host, MASK_UFS_CLK_GATE_BYPASS, | ||
106 | CLOCK_GATE_BYPASS); | ||
107 | ufs_sys_ctrl_set_bits(host, MASK_UFS_SYSCRTL_BYPASS, UFS_SYSCTRL); | ||
108 | |||
109 | /* open psw clk */ | ||
110 | ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PSW_CLK_EN, PSW_CLK_CTRL); | ||
111 | /* disable ufshc iso */ | ||
112 | ufs_sys_ctrl_clr_bits(host, BIT_UFS_PSW_ISO_CTRL, PSW_POWER_CTRL); | ||
113 | /* disable phy iso */ | ||
114 | ufs_sys_ctrl_clr_bits(host, BIT_UFS_PHY_ISO_CTRL, PHY_ISO_EN); | ||
115 | /* notice iso disable */ | ||
116 | ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_LP_ISOL_EN, HC_LP_CTRL); | ||
117 | |||
118 | /* disable lp_reset_n */ | ||
119 | ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_LP_RESET_N, RESET_CTRL_EN); | ||
120 | mdelay(1); | ||
121 | |||
122 | ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | BIT_UFS_DEVICE_RESET, | ||
123 | UFS_DEVICE_RESET_CTRL); | ||
124 | |||
125 | msleep(20); | ||
126 | |||
127 | /* | ||
128 | * enable the fix of linereset recovery, | ||
129 | * and enable rx_reset/tx_rest beat | ||
130 | * enable ref_clk_en override(bit5) & | ||
131 | * override value = 1(bit4), with mask | ||
132 | */ | ||
133 | ufs_sys_ctrl_writel(host, 0x03300330, UFS_DEVICE_RESET_CTRL); | ||
134 | |||
135 | if (!IS_ERR(host->rst)) | ||
136 | reset_control_deassert(host->rst); | ||
137 | } | ||
138 | |||
139 | static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba) | ||
140 | { | ||
141 | int err; | ||
142 | uint32_t value; | ||
143 | uint32_t reg; | ||
144 | |||
145 | /* Unipro VS_mphy_disable */ | ||
146 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x1); | ||
147 | /* PA_HSSeries */ | ||
148 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x156A, 0x0), 0x2); | ||
149 | /* MPHY CBRATESEL */ | ||
150 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8114, 0x0), 0x1); | ||
151 | /* MPHY CBOVRCTRL2 */ | ||
152 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8121, 0x0), 0x2D); | ||
153 | /* MPHY CBOVRCTRL3 */ | ||
154 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8122, 0x0), 0x1); | ||
155 | /* Unipro VS_MphyCfgUpdt */ | ||
156 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1); | ||
157 | /* MPHY RXOVRCTRL4 rx0 */ | ||
158 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x4), 0x58); | ||
159 | /* MPHY RXOVRCTRL4 rx1 */ | ||
160 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x5), 0x58); | ||
161 | /* MPHY RXOVRCTRL5 rx0 */ | ||
162 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x4), 0xB); | ||
163 | /* MPHY RXOVRCTRL5 rx1 */ | ||
164 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x5), 0xB); | ||
165 | /* MPHY RXSQCONTROL rx0 */ | ||
166 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x4), 0x1); | ||
167 | /* MPHY RXSQCONTROL rx1 */ | ||
168 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x5), 0x1); | ||
169 | /* Unipro VS_MphyCfgUpdt */ | ||
170 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1); | ||
171 | |||
172 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8113, 0x0), 0x1); | ||
173 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1); | ||
174 | |||
175 | /* Tactive RX */ | ||
176 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x4), 0x7); | ||
177 | /* Tactive RX */ | ||
178 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x5), 0x7); | ||
179 | |||
180 | /* Gear3 Synclength */ | ||
181 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x4), 0x4F); | ||
182 | /* Gear3 Synclength */ | ||
183 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x5), 0x4F); | ||
184 | /* Gear2 Synclength */ | ||
185 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x4), 0x4F); | ||
186 | /* Gear2 Synclength */ | ||
187 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x5), 0x4F); | ||
188 | /* Gear1 Synclength */ | ||
189 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x4), 0x4F); | ||
190 | /* Gear1 Synclength */ | ||
191 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x5), 0x4F); | ||
192 | /* Thibernate Tx */ | ||
193 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x0), 0x5); | ||
194 | /* Thibernate Tx */ | ||
195 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x1), 0x5); | ||
196 | |||
197 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1); | ||
198 | /* Unipro VS_mphy_disable */ | ||
199 | ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), &value); | ||
200 | if (value != 0x1) | ||
201 | dev_info(hba->dev, | ||
202 | "Warring!!! Unipro VS_mphy_disable is 0x%x\n", value); | ||
203 | |||
204 | /* Unipro VS_mphy_disable */ | ||
205 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x0); | ||
206 | err = ufs_hisi_check_hibern8(hba); | ||
207 | if (err) | ||
208 | dev_err(hba->dev, "ufs_hisi_check_hibern8 error\n"); | ||
209 | |||
210 | ufshcd_writel(hba, UFS_HCLKDIV_NORMAL_VALUE, UFS_REG_HCLKDIV); | ||
211 | |||
212 | /* disable auto H8 */ | ||
213 | reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER); | ||
214 | reg = reg & (~UFS_AHIT_AH8ITV_MASK); | ||
215 | ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER); | ||
216 | |||
217 | /* Unipro PA_Local_TX_LCC_Enable */ | ||
218 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x155E, 0x0), 0x0); | ||
219 | /* close Unipro VS_Mk2ExtnSupport */ | ||
220 | ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), 0x0); | ||
221 | ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), &value); | ||
222 | if (value != 0) { | ||
223 | /* Ensure close success */ | ||
224 | dev_info(hba->dev, "WARN: close VS_Mk2ExtnSupport failed\n"); | ||
225 | } | ||
226 | |||
227 | return err; | ||
228 | } | ||
229 | |||
230 | static int ufs_hisi_link_startup_post_change(struct ufs_hba *hba) | ||
231 | { | ||
232 | struct ufs_hisi_host *host = ufshcd_get_variant(hba); | ||
233 | |||
234 | /* Unipro DL_AFC0CreditThreshold */ | ||
235 | ufshcd_dme_set(hba, UIC_ARG_MIB(0x2044), 0x0); | ||
236 | /* Unipro DL_TC0OutAckThreshold */ | ||
237 | ufshcd_dme_set(hba, UIC_ARG_MIB(0x2045), 0x0); | ||
238 | /* Unipro DL_TC0TXFCThreshold */ | ||
239 | ufshcd_dme_set(hba, UIC_ARG_MIB(0x2040), 0x9); | ||
240 | |||
241 | /* not bypass ufs clk gate */ | ||
242 | ufs_sys_ctrl_clr_bits(host, MASK_UFS_CLK_GATE_BYPASS, | ||
243 | CLOCK_GATE_BYPASS); | ||
244 | ufs_sys_ctrl_clr_bits(host, MASK_UFS_SYSCRTL_BYPASS, | ||
245 | UFS_SYSCTRL); | ||
246 | |||
247 | /* select received symbol cnt */ | ||
248 | ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09a), 0x80000000); | ||
249 | /* reset counter0 and enable */ | ||
250 | ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09c), 0x00000005); | ||
251 | |||
252 | return 0; | ||
253 | } | ||
254 | |||
255 | static int ufs_hi3660_link_startup_notify(struct ufs_hba *hba, | ||
256 | enum ufs_notify_change_status status) | ||
257 | { | ||
258 | int err = 0; | ||
259 | |||
260 | switch (status) { | ||
261 | case PRE_CHANGE: | ||
262 | err = ufs_hisi_link_startup_pre_change(hba); | ||
263 | break; | ||
264 | case POST_CHANGE: | ||
265 | err = ufs_hisi_link_startup_post_change(hba); | ||
266 | break; | ||
267 | default: | ||
268 | break; | ||
269 | } | ||
270 | |||
271 | return err; | ||
272 | } | ||
273 | |||
274 | struct ufs_hisi_dev_params { | ||
275 | u32 pwm_rx_gear; /* pwm rx gear to work in */ | ||
276 | u32 pwm_tx_gear; /* pwm tx gear to work in */ | ||
277 | u32 hs_rx_gear; /* hs rx gear to work in */ | ||
278 | u32 hs_tx_gear; /* hs tx gear to work in */ | ||
279 | u32 rx_lanes; /* number of rx lanes */ | ||
280 | u32 tx_lanes; /* number of tx lanes */ | ||
281 | u32 rx_pwr_pwm; /* rx pwm working pwr */ | ||
282 | u32 tx_pwr_pwm; /* tx pwm working pwr */ | ||
283 | u32 rx_pwr_hs; /* rx hs working pwr */ | ||
284 | u32 tx_pwr_hs; /* tx hs working pwr */ | ||
285 | u32 hs_rate; /* rate A/B to work in HS */ | ||
286 | u32 desired_working_mode; | ||
287 | }; | ||
288 | |||
289 | static int ufs_hisi_get_pwr_dev_param( | ||
290 | struct ufs_hisi_dev_params *hisi_param, | ||
291 | struct ufs_pa_layer_attr *dev_max, | ||
292 | struct ufs_pa_layer_attr *agreed_pwr) | ||
293 | { | ||
294 | int min_hisi_gear; | ||
295 | int min_dev_gear; | ||
296 | bool is_dev_sup_hs = false; | ||
297 | bool is_hisi_max_hs = false; | ||
298 | |||
299 | if (dev_max->pwr_rx == FASTAUTO_MODE || dev_max->pwr_rx == FAST_MODE) | ||
300 | is_dev_sup_hs = true; | ||
301 | |||
302 | if (hisi_param->desired_working_mode == FAST) { | ||
303 | is_hisi_max_hs = true; | ||
304 | min_hisi_gear = min_t(u32, hisi_param->hs_rx_gear, | ||
305 | hisi_param->hs_tx_gear); | ||
306 | } else { | ||
307 | min_hisi_gear = min_t(u32, hisi_param->pwm_rx_gear, | ||
308 | hisi_param->pwm_tx_gear); | ||
309 | } | ||
310 | |||
311 | /* | ||
312 | * device doesn't support HS but | ||
313 | * hisi_param->desired_working_mode is HS, | ||
314 | * thus device and hisi_param don't agree | ||
315 | */ | ||
316 | if (!is_dev_sup_hs && is_hisi_max_hs) { | ||
317 | pr_err("%s: device doesn't support HS\n", __func__); | ||
318 | return -ENOTSUPP; | ||
319 | } else if (is_dev_sup_hs && is_hisi_max_hs) { | ||
320 | /* | ||
321 | * since device supports HS, it supports FAST_MODE. | ||
322 | * since hisi_param->desired_working_mode is also HS | ||
323 | * then final decision (FAST/FASTAUTO) is done according | ||
324 | * to hisi_params as it is the restricting factor | ||
325 | */ | ||
326 | agreed_pwr->pwr_rx = agreed_pwr->pwr_tx = | ||
327 | hisi_param->rx_pwr_hs; | ||
328 | } else { | ||
329 | /* | ||
330 | * here hisi_param->desired_working_mode is PWM. | ||
331 | * it doesn't matter whether device supports HS or PWM, | ||
332 | * in both cases hisi_param->desired_working_mode will | ||
333 | * determine the mode | ||
334 | */ | ||
335 | agreed_pwr->pwr_rx = agreed_pwr->pwr_tx = | ||
336 | hisi_param->rx_pwr_pwm; | ||
337 | } | ||
338 | |||
339 | /* | ||
340 | * we would like tx to work in the minimum number of lanes | ||
341 | * between device capability and vendor preferences. | ||
342 | * the same decision will be made for rx | ||
343 | */ | ||
344 | agreed_pwr->lane_tx = | ||
345 | min_t(u32, dev_max->lane_tx, hisi_param->tx_lanes); | ||
346 | agreed_pwr->lane_rx = | ||
347 | min_t(u32, dev_max->lane_rx, hisi_param->rx_lanes); | ||
348 | |||
349 | /* device maximum gear is the minimum between device rx and tx gears */ | ||
350 | min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx); | ||
351 | |||
352 | /* | ||
353 | * if device capabilities and vendor pre-defined preferences are | ||
354 | * both HS or both PWM then set the minimum gear to be the chosen | ||
355 | * working gear. | ||
356 | * if one is PWM and one is HS then the one that is PWM gets to decide | ||
357 | * what the gear is, as it is the one that also decided previously what | ||
358 | * pwr the device will be configured to. | ||
359 | */ | ||
360 | if ((is_dev_sup_hs && is_hisi_max_hs) || | ||
361 | (!is_dev_sup_hs && !is_hisi_max_hs)) | ||
362 | agreed_pwr->gear_rx = agreed_pwr->gear_tx = | ||
363 | min_t(u32, min_dev_gear, min_hisi_gear); | ||
364 | else | ||
365 | agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_hisi_gear; | ||
366 | |||
367 | agreed_pwr->hs_rate = hisi_param->hs_rate; | ||
368 | |||
369 | pr_info("ufs final power mode: gear = %d, lane = %d, pwr = %d, rate = %d\n", | ||
370 | agreed_pwr->gear_rx, agreed_pwr->lane_rx, agreed_pwr->pwr_rx, | ||
371 | agreed_pwr->hs_rate); | ||
372 | return 0; | ||
373 | } | ||
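/*
 * Illustrative example (editorial, not part of the original patch):
 * with the UFS_HISI_LIMIT_* caps from ufs-hisi.h (HS-G3, 2 lanes,
 * FAST_MODE, rate B) and a device that also reports FAST_MODE at
 * HS-G3 on two lanes, the logic above agrees on FAST_MODE, gear 3,
 * 2 lanes, rate B. If the device does not support HS while the host
 * side asks for FAST, the function bails out with -ENOTSUPP instead.
 */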
374 | |||
375 | static void ufs_hisi_set_dev_cap(struct ufs_hisi_dev_params *hisi_param) | ||
376 | { | ||
377 | hisi_param->rx_lanes = UFS_HISI_LIMIT_NUM_LANES_RX; | ||
378 | hisi_param->tx_lanes = UFS_HISI_LIMIT_NUM_LANES_TX; | ||
379 | hisi_param->hs_rx_gear = UFS_HISI_LIMIT_HSGEAR_RX; | ||
380 | hisi_param->hs_tx_gear = UFS_HISI_LIMIT_HSGEAR_TX; | ||
381 | hisi_param->pwm_rx_gear = UFS_HISI_LIMIT_PWMGEAR_RX; | ||
382 | hisi_param->pwm_tx_gear = UFS_HISI_LIMIT_PWMGEAR_TX; | ||
383 | hisi_param->rx_pwr_pwm = UFS_HISI_LIMIT_RX_PWR_PWM; | ||
384 | hisi_param->tx_pwr_pwm = UFS_HISI_LIMIT_TX_PWR_PWM; | ||
385 | hisi_param->rx_pwr_hs = UFS_HISI_LIMIT_RX_PWR_HS; | ||
386 | hisi_param->tx_pwr_hs = UFS_HISI_LIMIT_TX_PWR_HS; | ||
387 | hisi_param->hs_rate = UFS_HISI_LIMIT_HS_RATE; | ||
388 | hisi_param->desired_working_mode = UFS_HISI_LIMIT_DESIRED_MODE; | ||
389 | } | ||
390 | |||
391 | static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba) | ||
392 | { | ||
393 | /* update */ | ||
394 | ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1); | ||
395 | /* PA_TxSkip */ | ||
396 | ufshcd_dme_set(hba, UIC_ARG_MIB(0x155c), 0x0); | ||
397 | /* PA_PWRModeUserData0 = 8191, default is 0 */ | ||
398 | ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b0), 8191); | ||
399 | /* PA_PWRModeUserData1 = 65535, default is 0 */ | ||
400 | ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b1), 65535); | ||
401 | /* PA_PWRModeUserData2 = 32767, default is 0 */ | ||
402 | ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), 32767); | ||
403 | /* DME_FC0ProtectionTimeOutVal = 8191, default is 0 */ | ||
404 | ufshcd_dme_set(hba, UIC_ARG_MIB(0xd041), 8191); | ||
405 | /* DME_TC0ReplayTimeOutVal = 65535, default is 0 */ | ||
406 | ufshcd_dme_set(hba, UIC_ARG_MIB(0xd042), 65535); | ||
407 | /* DME_AFC0ReqTimeOutVal = 32767, default is 0 */ | ||
408 | ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), 32767); | ||
409 | /* PA_PWRModeUserData3 = 8191, default is 0 */ | ||
410 | ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b3), 8191); | ||
411 | /* PA_PWRModeUserData4 = 65535, default is 0 */ | ||
412 | ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b4), 65535); | ||
413 | /* PA_PWRModeUserData5 = 32767, default is 0 */ | ||
414 | ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), 32767); | ||
415 | /* DME_FC1ProtectionTimeOutVal = 8191, default is 0 */ | ||
416 | ufshcd_dme_set(hba, UIC_ARG_MIB(0xd044), 8191); | ||
417 | /* DME_TC1ReplayTimeOutVal = 65535, default is 0 */ | ||
418 | ufshcd_dme_set(hba, UIC_ARG_MIB(0xd045), 65535); | ||
419 | /* DME_AFC1ReqTimeOutVal = 32767, default is 0 */ | ||
420 | ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), 32767); | ||
421 | } | ||
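/*
 * Editorial note: ufs_hi3660_pwr_change_notify() below invokes this
 * helper from the PRE_CHANGE step, so the PA_PWRModeUserData values
 * and the DME protection/replay/AFC timeouts are programmed before
 * the core layer actually switches the power mode.
 */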
422 | |||
423 | static int ufs_hi3660_pwr_change_notify(struct ufs_hba *hba, | ||
424 | enum ufs_notify_change_status status, | ||
425 | struct ufs_pa_layer_attr *dev_max_params, | ||
426 | struct ufs_pa_layer_attr *dev_req_params) | ||
427 | { | ||
428 | struct ufs_hisi_dev_params ufs_hisi_cap; | ||
429 | int ret = 0; | ||
430 | |||
431 | if (!dev_req_params) { | ||
432 | dev_err(hba->dev, | ||
433 | "%s: incoming dev_req_params is NULL\n", __func__); | ||
434 | ret = -EINVAL; | ||
435 | goto out; | ||
436 | } | ||
437 | |||
438 | switch (status) { | ||
439 | case PRE_CHANGE: | ||
440 | ufs_hisi_set_dev_cap(&ufs_hisi_cap); | ||
441 | ret = ufs_hisi_get_pwr_dev_param( | ||
442 | &ufs_hisi_cap, dev_max_params, dev_req_params); | ||
443 | if (ret) { | ||
444 | dev_err(hba->dev, | ||
445 | "%s: failed to determine capabilities\n", __func__); | ||
446 | goto out; | ||
447 | } | ||
448 | |||
449 | ufs_hisi_pwr_change_pre_change(hba); | ||
450 | break; | ||
451 | case POST_CHANGE: | ||
452 | break; | ||
453 | default: | ||
454 | ret = -EINVAL; | ||
455 | break; | ||
456 | } | ||
457 | out: | ||
458 | return ret; | ||
459 | } | ||
460 | |||
461 | static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) | ||
462 | { | ||
463 | struct ufs_hisi_host *host = ufshcd_get_variant(hba); | ||
464 | |||
465 | if (ufshcd_is_runtime_pm(pm_op)) | ||
466 | return 0; | ||
467 | |||
468 | if (host->in_suspend) { | ||
469 | WARN_ON(1); | ||
470 | return 0; | ||
471 | } | ||
472 | |||
473 | ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL); | ||
474 | udelay(10); | ||
475 | /* set ref_dig_clk override of PHY PCS to 0 */ | ||
476 | ufs_sys_ctrl_writel(host, 0x00100000, UFS_DEVICE_RESET_CTRL); | ||
477 | |||
478 | host->in_suspend = true; | ||
479 | |||
480 | return 0; | ||
481 | } | ||
482 | |||
483 | static int ufs_hisi_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) | ||
484 | { | ||
485 | struct ufs_hisi_host *host = ufshcd_get_variant(hba); | ||
486 | |||
487 | if (!host->in_suspend) | ||
488 | return 0; | ||
489 | |||
490 | /* set ref_dig_clk override of PHY PCS to 1 */ | ||
491 | ufs_sys_ctrl_writel(host, 0x00100010, UFS_DEVICE_RESET_CTRL); | ||
492 | udelay(10); | ||
493 | ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL); | ||
494 | |||
495 | host->in_suspend = false; | ||
496 | return 0; | ||
497 | } | ||
498 | |||
499 | static int ufs_hisi_get_resource(struct ufs_hisi_host *host) | ||
500 | { | ||
501 | struct resource *mem_res; | ||
502 | struct device *dev = host->hba->dev; | ||
503 | struct platform_device *pdev = to_platform_device(dev); | ||
504 | |||
505 | /* get resource of ufs sys ctrl */ | ||
506 | mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
507 | host->ufs_sys_ctrl = devm_ioremap_resource(dev, mem_res); | ||
508 | if (IS_ERR(host->ufs_sys_ctrl)) | ||
509 | return PTR_ERR(host->ufs_sys_ctrl); | ||
510 | |||
511 | return 0; | ||
512 | } | ||
513 | |||
514 | static void ufs_hisi_set_pm_lvl(struct ufs_hba *hba) | ||
515 | { | ||
516 | hba->rpm_lvl = UFS_PM_LVL_1; | ||
517 | hba->spm_lvl = UFS_PM_LVL_3; | ||
518 | } | ||
519 | |||
520 | /** | ||
521 | * ufs_hisi_init_common - set up HiSilicon UFS host private data | ||
522 | * @hba: host controller instance | ||
523 | */ | ||
524 | static int ufs_hisi_init_common(struct ufs_hba *hba) | ||
525 | { | ||
526 | int err = 0; | ||
527 | struct device *dev = hba->dev; | ||
528 | struct ufs_hisi_host *host; | ||
529 | |||
530 | host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); | ||
531 | if (!host) | ||
532 | return -ENOMEM; | ||
533 | |||
534 | host->hba = hba; | ||
535 | ufshcd_set_variant(hba, host); | ||
536 | |||
537 | host->rst = devm_reset_control_get(dev, "rst"); | ||
538 | |||
539 | ufs_hisi_set_pm_lvl(hba); | ||
540 | |||
541 | err = ufs_hisi_get_resource(host); | ||
542 | if (err) { | ||
543 | ufshcd_set_variant(hba, NULL); | ||
544 | return err; | ||
545 | } | ||
546 | |||
547 | return 0; | ||
548 | } | ||
549 | |||
550 | static int ufs_hi3660_init(struct ufs_hba *hba) | ||
551 | { | ||
552 | int ret = 0; | ||
553 | struct device *dev = hba->dev; | ||
554 | |||
555 | ret = ufs_hisi_init_common(hba); | ||
556 | if (ret) { | ||
557 | dev_err(dev, "%s: ufs common init failed\n", __func__); | ||
558 | return ret; | ||
559 | } | ||
560 | |||
561 | ufs_hi3660_clk_init(hba); | ||
562 | |||
563 | ufs_hi3660_soc_init(hba); | ||
564 | |||
565 | return 0; | ||
566 | } | ||
567 | |||
568 | static struct ufs_hba_variant_ops ufs_hba_hisi_vops = { | ||
569 | .name = "hi3660", | ||
570 | .init = ufs_hi3660_init, | ||
571 | .link_startup_notify = ufs_hi3660_link_startup_notify, | ||
572 | .pwr_change_notify = ufs_hi3660_pwr_change_notify, | ||
573 | .suspend = ufs_hisi_suspend, | ||
574 | .resume = ufs_hisi_resume, | ||
575 | }; | ||
576 | |||
577 | static int ufs_hisi_probe(struct platform_device *pdev) | ||
578 | { | ||
579 | return ufshcd_pltfrm_init(pdev, &ufs_hba_hisi_vops); | ||
580 | } | ||
581 | |||
582 | static int ufs_hisi_remove(struct platform_device *pdev) | ||
583 | { | ||
584 | struct ufs_hba *hba = platform_get_drvdata(pdev); | ||
585 | |||
586 | ufshcd_remove(hba); | ||
587 | return 0; | ||
588 | } | ||
589 | |||
590 | static const struct of_device_id ufs_hisi_of_match[] = { | ||
591 | { .compatible = "hisilicon,hi3660-ufs" }, | ||
592 | {}, | ||
593 | }; | ||
594 | |||
595 | MODULE_DEVICE_TABLE(of, ufs_hisi_of_match); | ||
596 | |||
597 | static const struct dev_pm_ops ufs_hisi_pm_ops = { | ||
598 | .suspend = ufshcd_pltfrm_suspend, | ||
599 | .resume = ufshcd_pltfrm_resume, | ||
600 | .runtime_suspend = ufshcd_pltfrm_runtime_suspend, | ||
601 | .runtime_resume = ufshcd_pltfrm_runtime_resume, | ||
602 | .runtime_idle = ufshcd_pltfrm_runtime_idle, | ||
603 | }; | ||
604 | |||
605 | static struct platform_driver ufs_hisi_pltform = { | ||
606 | .probe = ufs_hisi_probe, | ||
607 | .remove = ufs_hisi_remove, | ||
608 | .shutdown = ufshcd_pltfrm_shutdown, | ||
609 | .driver = { | ||
610 | .name = "ufshcd-hisi", | ||
611 | .pm = &ufs_hisi_pm_ops, | ||
612 | .of_match_table = of_match_ptr(ufs_hisi_of_match), | ||
613 | }, | ||
614 | }; | ||
615 | module_platform_driver(ufs_hisi_pltform); | ||
616 | |||
617 | MODULE_LICENSE("GPL"); | ||
618 | MODULE_ALIAS("platform:ufshcd-hisi"); | ||
619 | MODULE_DESCRIPTION("HiSilicon Hixxxx UFS Driver"); | ||
diff --git a/drivers/scsi/ufs/ufs-hisi.h b/drivers/scsi/ufs/ufs-hisi.h new file mode 100644 index 000000000000..3df9cd7acc29 --- /dev/null +++ b/drivers/scsi/ufs/ufs-hisi.h | |||
@@ -0,0 +1,115 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2017, HiSilicon. All rights reserved. | ||
3 | * | ||
4 | * Released under the GPLv2 only. | ||
5 | * SPDX-License-Identifier: GPL-2.0 | ||
6 | */ | ||
7 | |||
8 | #ifndef UFS_HISI_H_ | ||
9 | #define UFS_HISI_H_ | ||
10 | |||
11 | #define HBRN8_POLL_TOUT_MS 1000 | ||
12 | |||
13 | /* | ||
14 | * ufs sysctrl specific define | ||
15 | */ | ||
16 | #define PSW_POWER_CTRL (0x04) | ||
17 | #define PHY_ISO_EN (0x08) | ||
18 | #define HC_LP_CTRL (0x0C) | ||
19 | #define PHY_CLK_CTRL (0x10) | ||
20 | #define PSW_CLK_CTRL (0x14) | ||
21 | #define CLOCK_GATE_BYPASS (0x18) | ||
22 | #define RESET_CTRL_EN (0x1C) | ||
23 | #define UFS_SYSCTRL (0x5C) | ||
24 | #define UFS_DEVICE_RESET_CTRL (0x60) | ||
25 | |||
26 | #define BIT_UFS_PSW_ISO_CTRL (1 << 16) | ||
27 | #define BIT_UFS_PSW_MTCMOS_EN (1 << 0) | ||
28 | #define BIT_UFS_REFCLK_ISO_EN (1 << 16) | ||
29 | #define BIT_UFS_PHY_ISO_CTRL (1 << 0) | ||
30 | #define BIT_SYSCTRL_LP_ISOL_EN (1 << 16) | ||
31 | #define BIT_SYSCTRL_PWR_READY (1 << 8) | ||
32 | #define BIT_SYSCTRL_REF_CLOCK_EN (1 << 24) | ||
33 | #define MASK_SYSCTRL_REF_CLOCK_SEL (0x3 << 8) | ||
34 | #define MASK_SYSCTRL_CFG_CLOCK_FREQ (0xFF) | ||
35 | #define UFS_FREQ_CFG_CLK (0x39) | ||
36 | #define BIT_SYSCTRL_PSW_CLK_EN (1 << 4) | ||
37 | #define MASK_UFS_CLK_GATE_BYPASS (0x3F) | ||
38 | #define BIT_SYSCTRL_LP_RESET_N (1 << 0) | ||
39 | #define BIT_UFS_REFCLK_SRC_SEl (1 << 0) | ||
40 | #define MASK_UFS_SYSCRTL_BYPASS (0x3F << 16) | ||
41 | #define MASK_UFS_DEVICE_RESET (0x1 << 16) | ||
42 | #define BIT_UFS_DEVICE_RESET (0x1) | ||
43 | |||
44 | /* | ||
45 | * M-TX Configuration Attributes for Hixxxx | ||
46 | */ | ||
47 | #define MPHY_TX_FSM_STATE 0x41 | ||
48 | #define TX_FSM_HIBERN8 0x1 | ||
49 | |||
50 | /* | ||
51 | * Hixxxx UFS HC specific Registers | ||
52 | */ | ||
53 | enum { | ||
54 | UFS_REG_OCPTHRTL = 0xc0, | ||
55 | UFS_REG_OOCPR = 0xc4, | ||
56 | |||
57 | UFS_REG_CDACFG = 0xd0, | ||
58 | UFS_REG_CDATX1 = 0xd4, | ||
59 | UFS_REG_CDATX2 = 0xd8, | ||
60 | UFS_REG_CDARX1 = 0xdc, | ||
61 | UFS_REG_CDARX2 = 0xe0, | ||
62 | UFS_REG_CDASTA = 0xe4, | ||
63 | |||
64 | UFS_REG_LBMCFG = 0xf0, | ||
65 | UFS_REG_LBMSTA = 0xf4, | ||
66 | UFS_REG_UFSMODE = 0xf8, | ||
67 | |||
68 | UFS_REG_HCLKDIV = 0xfc, | ||
69 | }; | ||
70 | |||
71 | /* AHIT - Auto-Hibernate Idle Timer */ | ||
72 | #define UFS_AHIT_AH8ITV_MASK 0x3FF | ||
73 | |||
74 | /* REG UFS_REG_HCLKDIV definition */ | ||
75 | #define UFS_HCLKDIV_NORMAL_VALUE 0xE4 | ||
76 | |||
77 | /* vendor specific pre-defined parameters */ | ||
78 | #define SLOW 1 | ||
79 | #define FAST 2 | ||
80 | |||
81 | #define UFS_HISI_LIMIT_NUM_LANES_RX 2 | ||
82 | #define UFS_HISI_LIMIT_NUM_LANES_TX 2 | ||
83 | #define UFS_HISI_LIMIT_HSGEAR_RX UFS_HS_G3 | ||
84 | #define UFS_HISI_LIMIT_HSGEAR_TX UFS_HS_G3 | ||
85 | #define UFS_HISI_LIMIT_PWMGEAR_RX UFS_PWM_G4 | ||
86 | #define UFS_HISI_LIMIT_PWMGEAR_TX UFS_PWM_G4 | ||
87 | #define UFS_HISI_LIMIT_RX_PWR_PWM SLOW_MODE | ||
88 | #define UFS_HISI_LIMIT_TX_PWR_PWM SLOW_MODE | ||
89 | #define UFS_HISI_LIMIT_RX_PWR_HS FAST_MODE | ||
90 | #define UFS_HISI_LIMIT_TX_PWR_HS FAST_MODE | ||
91 | #define UFS_HISI_LIMIT_HS_RATE PA_HS_MODE_B | ||
92 | #define UFS_HISI_LIMIT_DESIRED_MODE FAST | ||
93 | |||
94 | struct ufs_hisi_host { | ||
95 | struct ufs_hba *hba; | ||
96 | void __iomem *ufs_sys_ctrl; | ||
97 | |||
98 | struct reset_control *rst; | ||
99 | |||
100 | uint64_t caps; | ||
101 | |||
102 | bool in_suspend; | ||
103 | }; | ||
104 | |||
105 | #define ufs_sys_ctrl_writel(host, val, reg) \ | ||
106 | writel((val), (host)->ufs_sys_ctrl + (reg)) | ||
107 | #define ufs_sys_ctrl_readl(host, reg) readl((host)->ufs_sys_ctrl + (reg)) | ||
108 | #define ufs_sys_ctrl_set_bits(host, mask, reg) \ | ||
109 | ufs_sys_ctrl_writel( \ | ||
110 | (host), ((mask) | (ufs_sys_ctrl_readl((host), (reg)))), (reg)) | ||
111 | #define ufs_sys_ctrl_clr_bits(host, mask, reg) \ | ||
112 | ufs_sys_ctrl_writel((host), \ | ||
113 | ((~(mask)) & (ufs_sys_ctrl_readl((host), (reg)))), \ | ||
114 | (reg)) | ||
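/*
 * Usage sketch (illustrative): the suspend/resume path in ufs-hisi.c
 * gates and re-enables the reference clock with these helpers, e.g.
 *
 *	ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
 *	...
 *	ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
 */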
115 | #endif /* UFS_HISI_H_ */ | ||
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index 221820a7c78b..75ee5906b966 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c | |||
@@ -50,19 +50,10 @@ static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host); | |||
50 | static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba, | 50 | static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba, |
51 | u32 clk_cycles); | 51 | u32 clk_cycles); |
52 | 52 | ||
53 | static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len, | ||
54 | char *prefix) | ||
55 | { | ||
56 | print_hex_dump(KERN_ERR, prefix, | ||
57 | len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE, | ||
58 | 16, 4, (void __force *)hba->mmio_base + offset, | ||
59 | len * 4, false); | ||
60 | } | ||
61 | |||
62 | static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len, | 53 | static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len, |
63 | char *prefix, void *priv) | 54 | const char *prefix, void *priv) |
64 | { | 55 | { |
65 | ufs_qcom_dump_regs(hba, offset, len, prefix); | 56 | ufshcd_dump_regs(hba, offset, len * 4, prefix); |
66 | } | 57 | } |
67 | 58 | ||
68 | static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes) | 59 | static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes) |
@@ -1431,7 +1422,7 @@ out: | |||
1431 | 1422 | ||
1432 | static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, | 1423 | static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, |
1433 | void *priv, void (*print_fn)(struct ufs_hba *hba, | 1424 | void *priv, void (*print_fn)(struct ufs_hba *hba, |
1434 | int offset, int num_regs, char *str, void *priv)) | 1425 | int offset, int num_regs, const char *str, void *priv)) |
1435 | { | 1426 | { |
1436 | u32 reg; | 1427 | u32 reg; |
1437 | struct ufs_qcom_host *host; | 1428 | struct ufs_qcom_host *host; |
@@ -1613,7 +1604,7 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host) | |||
1613 | 1604 | ||
1614 | static void ufs_qcom_testbus_read(struct ufs_hba *hba) | 1605 | static void ufs_qcom_testbus_read(struct ufs_hba *hba) |
1615 | { | 1606 | { |
1616 | ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS "); | 1607 | ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS "); |
1617 | } | 1608 | } |
1618 | 1609 | ||
1619 | static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba) | 1610 | static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba) |
@@ -1639,8 +1630,8 @@ static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba) | |||
1639 | 1630 | ||
1640 | static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) | 1631 | static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) |
1641 | { | 1632 | { |
1642 | ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16, | 1633 | ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4, |
1643 | "HCI Vendor Specific Registers "); | 1634 | "HCI Vendor Specific Registers "); |
1644 | 1635 | ||
1645 | /* sleep a bit intermittently as we are dumping too much data */ | 1636 | /* sleep a bit intermittently as we are dumping too much data */ |
1646 | ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper); | 1637 | ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper); |
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 397081d320b1..9d5d2ca7fc4f 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c | |||
@@ -99,8 +99,29 @@ | |||
99 | _ret; \ | 99 | _ret; \ |
100 | }) | 100 | }) |
101 | 101 | ||
102 | #define ufshcd_hex_dump(prefix_str, buf, len) \ | 102 | #define ufshcd_hex_dump(prefix_str, buf, len) do { \ |
103 | print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false) | 103 | size_t __len = (len); \ |
104 | print_hex_dump(KERN_ERR, prefix_str, \ | ||
105 | __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\ | ||
106 | 16, 4, buf, __len, false); \ | ||
107 | } while (0) | ||
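/*
 * Editorial note: dumps larger than 4 bytes keep the offset column
 * (DUMP_PREFIX_OFFSET); a single 32-bit register is printed without
 * it, matching what the removed ufs_qcom_dump_regs() helper did for
 * its one-register test bus dump.
 */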
108 | |||
109 | int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, | ||
110 | const char *prefix) | ||
111 | { | ||
112 | u8 *regs; | ||
113 | |||
114 | regs = kzalloc(len, GFP_KERNEL); | ||
115 | if (!regs) | ||
116 | return -ENOMEM; | ||
117 | |||
118 | memcpy_fromio(regs, hba->mmio_base + offset, len); | ||
119 | ufshcd_hex_dump(prefix, regs, len); | ||
120 | kfree(regs); | ||
121 | |||
122 | return 0; | ||
123 | } | ||
124 | EXPORT_SYMBOL_GPL(ufshcd_dump_regs); | ||
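/*
 * Editorial note: the len argument is a byte count, so callers that
 * think in 32-bit registers multiply by 4, e.g. the Qualcomm wrapper
 * above:
 *
 *	ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
 *			 "HCI Vendor Specific Registers ");
 */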
104 | 125 | ||
105 | enum { | 126 | enum { |
106 | UFSHCD_MAX_CHANNEL = 0, | 127 | UFSHCD_MAX_CHANNEL = 0, |
@@ -321,18 +342,19 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, | |||
321 | sector_t lba = -1; | 342 | sector_t lba = -1; |
322 | u8 opcode = 0; | 343 | u8 opcode = 0; |
323 | u32 intr, doorbell; | 344 | u32 intr, doorbell; |
324 | struct ufshcd_lrb *lrbp; | 345 | struct ufshcd_lrb *lrbp = &hba->lrb[tag]; |
325 | int transfer_len = -1; | 346 | int transfer_len = -1; |
326 | 347 | ||
327 | /* trace UPIU also */ | 348 | if (!trace_ufshcd_command_enabled()) { |
328 | ufshcd_add_cmd_upiu_trace(hba, tag, str); | 349 | /* trace UPIU W/O tracing command */ |
329 | 350 | if (lrbp->cmd) | |
330 | if (!trace_ufshcd_command_enabled()) | 351 | ufshcd_add_cmd_upiu_trace(hba, tag, str); |
331 | return; | 352 | return; |
332 | 353 | } | |
333 | lrbp = &hba->lrb[tag]; | ||
334 | 354 | ||
335 | if (lrbp->cmd) { /* data phase exists */ | 355 | if (lrbp->cmd) { /* data phase exists */ |
356 | /* trace UPIU also */ | ||
357 | ufshcd_add_cmd_upiu_trace(hba, tag, str); | ||
336 | opcode = (u8)(*lrbp->cmd->cmnd); | 358 | opcode = (u8)(*lrbp->cmd->cmnd); |
337 | if ((opcode == READ_10) || (opcode == WRITE_10)) { | 359 | if ((opcode == READ_10) || (opcode == WRITE_10)) { |
338 | /* | 360 | /* |
@@ -386,15 +408,7 @@ static void ufshcd_print_uic_err_hist(struct ufs_hba *hba, | |||
386 | 408 | ||
387 | static void ufshcd_print_host_regs(struct ufs_hba *hba) | 409 | static void ufshcd_print_host_regs(struct ufs_hba *hba) |
388 | { | 410 | { |
389 | /* | 411 | ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); |
390 | * hex_dump reads its data without the readl macro. This might | ||
391 | * cause inconsistency issues on some platform, as the printed | ||
392 | * values may be from cache and not the most recent value. | ||
393 | * To know whether you are looking at an un-cached version verify | ||
394 | * that IORESOURCE_MEM flag is on when xxx_get_resource() is invoked | ||
395 | * during platform/pci probe function. | ||
396 | */ | ||
397 | ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE); | ||
398 | dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n", | 412 | dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n", |
399 | hba->ufs_version, hba->capabilities); | 413 | hba->ufs_version, hba->capabilities); |
400 | dev_err(hba->dev, | 414 | dev_err(hba->dev, |
@@ -7290,7 +7304,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba, | |||
7290 | sdev_printk(KERN_WARNING, sdp, | 7304 | sdev_printk(KERN_WARNING, sdp, |
7291 | "START_STOP failed for power mode: %d, result %x\n", | 7305 | "START_STOP failed for power mode: %d, result %x\n", |
7292 | pwr_mode, ret); | 7306 | pwr_mode, ret); |
7293 | if (driver_byte(ret) & DRIVER_SENSE) | 7307 | if (driver_byte(ret) == DRIVER_SENSE) |
7294 | scsi_print_sense_hdr(sdp, NULL, &sshdr); | 7308 | scsi_print_sense_hdr(sdp, NULL, &sshdr); |
7295 | } | 7309 | } |
7296 | 7310 | ||
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index f51758f1e5cc..33fdd3f281ae 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h | |||
@@ -1043,4 +1043,7 @@ static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun) | |||
1043 | return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID; | 1043 | return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID; |
1044 | } | 1044 | } |
1045 | 1045 | ||
1046 | int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, | ||
1047 | const char *prefix); | ||
1048 | |||
1046 | #endif /* End of Header */ | 1049 | #endif /* End of Header */ |
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 0ebc4818e132..95d0a22b2ad6 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c | |||
@@ -1090,10 +1090,8 @@ static struct configfs_attribute *lio_target_tpg_attrs[] = { | |||
1090 | 1090 | ||
1091 | /* Start items for lio_target_tiqn_cit */ | 1091 | /* Start items for lio_target_tiqn_cit */ |
1092 | 1092 | ||
1093 | static struct se_portal_group *lio_target_tiqn_addtpg( | 1093 | static struct se_portal_group *lio_target_tiqn_addtpg(struct se_wwn *wwn, |
1094 | struct se_wwn *wwn, | 1094 | const char *name) |
1095 | struct config_group *group, | ||
1096 | const char *name) | ||
1097 | { | 1095 | { |
1098 | struct iscsi_portal_group *tpg; | 1096 | struct iscsi_portal_group *tpg; |
1099 | struct iscsi_tiqn *tiqn; | 1097 | struct iscsi_tiqn *tiqn; |
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 99501785cdc1..923b1a9fc3dc 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c | |||
@@ -369,7 +369,7 @@ static int iscsi_login_zero_tsih_s1( | |||
369 | return -ENOMEM; | 369 | return -ENOMEM; |
370 | } | 370 | } |
371 | 371 | ||
372 | sess->se_sess = transport_init_session(TARGET_PROT_NORMAL); | 372 | sess->se_sess = transport_alloc_session(TARGET_PROT_NORMAL); |
373 | if (IS_ERR(sess->se_sess)) { | 373 | if (IS_ERR(sess->se_sess)) { |
374 | iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, | 374 | iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, |
375 | ISCSI_LOGIN_STATUS_NO_RESOURCES); | 375 | ISCSI_LOGIN_STATUS_NO_RESOURCES); |
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index 4b34f71547c6..101d62105c93 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c | |||
@@ -636,8 +636,7 @@ int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication) | |||
636 | none = strstr(buf1, NONE); | 636 | none = strstr(buf1, NONE); |
637 | if (none) | 637 | if (none) |
638 | goto out; | 638 | goto out; |
639 | strncat(buf1, ",", strlen(",")); | 639 | strlcat(buf1, "," NONE, sizeof(buf1)); |
640 | strncat(buf1, NONE, strlen(NONE)); | ||
641 | if (iscsi_update_param_value(param, buf1) < 0) | 640 | if (iscsi_update_param_value(param, buf1) < 0) |
642 | return -EINVAL; | 641 | return -EINVAL; |
643 | } | 642 | } |
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 4435bf374d2d..49be1e41290c 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c | |||
@@ -17,7 +17,7 @@ | |||
17 | ******************************************************************************/ | 17 | ******************************************************************************/ |
18 | 18 | ||
19 | #include <linux/list.h> | 19 | #include <linux/list.h> |
20 | #include <linux/percpu_ida.h> | 20 | #include <linux/sched/signal.h> |
21 | #include <net/ipv6.h> /* ipv6_addr_equal() */ | 21 | #include <net/ipv6.h> /* ipv6_addr_equal() */ |
22 | #include <scsi/scsi_tcq.h> | 22 | #include <scsi/scsi_tcq.h> |
23 | #include <scsi/iscsi_proto.h> | 23 | #include <scsi/iscsi_proto.h> |
@@ -147,6 +147,30 @@ void iscsit_free_r2ts_from_list(struct iscsi_cmd *cmd) | |||
147 | spin_unlock_bh(&cmd->r2t_lock); | 147 | spin_unlock_bh(&cmd->r2t_lock); |
148 | } | 148 | } |
149 | 149 | ||
150 | static int iscsit_wait_for_tag(struct se_session *se_sess, int state, int *cpup) | ||
151 | { | ||
152 | int tag = -1; | ||
153 | DEFINE_WAIT(wait); | ||
154 | struct sbq_wait_state *ws; | ||
155 | |||
156 | if (state == TASK_RUNNING) | ||
157 | return tag; | ||
158 | |||
159 | ws = &se_sess->sess_tag_pool.ws[0]; | ||
160 | for (;;) { | ||
161 | prepare_to_wait_exclusive(&ws->wait, &wait, state); | ||
162 | if (signal_pending_state(state, current)) | ||
163 | break; | ||
164 | tag = sbitmap_queue_get(&se_sess->sess_tag_pool, cpup); | ||
165 | if (tag >= 0) | ||
166 | break; | ||
167 | schedule(); | ||
168 | } | ||
169 | |||
170 | finish_wait(&ws->wait, &wait); | ||
171 | return tag; | ||
172 | } | ||
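/*
 * Editorial note: iscsit_allocate_cmd() below first tries a lockless
 * sbitmap_queue_get(); only when no tag is immediately free does it
 * fall back to this helper, which returns at once for TASK_RUNNING
 * callers and otherwise sleeps until a tag is released again via
 * target_free_tag() in iscsit_release_cmd().
 */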
173 | |||
150 | /* | 174 | /* |
151 | * May be called from software interrupt (timer) context for allocating | 175 | * May be called from software interrupt (timer) context for allocating |
152 | * iSCSI NopINs. | 176 | * iSCSI NopINs. |
@@ -155,9 +179,11 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state) | |||
155 | { | 179 | { |
156 | struct iscsi_cmd *cmd; | 180 | struct iscsi_cmd *cmd; |
157 | struct se_session *se_sess = conn->sess->se_sess; | 181 | struct se_session *se_sess = conn->sess->se_sess; |
158 | int size, tag; | 182 | int size, tag, cpu; |
159 | 183 | ||
160 | tag = percpu_ida_alloc(&se_sess->sess_tag_pool, state); | 184 | tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu); |
185 | if (tag < 0) | ||
186 | tag = iscsit_wait_for_tag(se_sess, state, &cpu); | ||
161 | if (tag < 0) | 187 | if (tag < 0) |
162 | return NULL; | 188 | return NULL; |
163 | 189 | ||
@@ -166,6 +192,7 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state) | |||
166 | memset(cmd, 0, size); | 192 | memset(cmd, 0, size); |
167 | 193 | ||
168 | cmd->se_cmd.map_tag = tag; | 194 | cmd->se_cmd.map_tag = tag; |
195 | cmd->se_cmd.map_cpu = cpu; | ||
169 | cmd->conn = conn; | 196 | cmd->conn = conn; |
170 | cmd->data_direction = DMA_NONE; | 197 | cmd->data_direction = DMA_NONE; |
171 | INIT_LIST_HEAD(&cmd->i_conn_node); | 198 | INIT_LIST_HEAD(&cmd->i_conn_node); |
@@ -711,7 +738,7 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd) | |||
711 | kfree(cmd->iov_data); | 738 | kfree(cmd->iov_data); |
712 | kfree(cmd->text_in_ptr); | 739 | kfree(cmd->text_in_ptr); |
713 | 740 | ||
714 | percpu_ida_free(&sess->se_sess->sess_tag_pool, se_cmd->map_tag); | 741 | target_free_tag(sess->se_sess, se_cmd); |
715 | } | 742 | } |
716 | EXPORT_SYMBOL(iscsit_release_cmd); | 743 | EXPORT_SYMBOL(iscsit_release_cmd); |
717 | 744 | ||
@@ -1026,26 +1053,8 @@ void __iscsit_start_nopin_timer(struct iscsi_conn *conn) | |||
1026 | 1053 | ||
1027 | void iscsit_start_nopin_timer(struct iscsi_conn *conn) | 1054 | void iscsit_start_nopin_timer(struct iscsi_conn *conn) |
1028 | { | 1055 | { |
1029 | struct iscsi_session *sess = conn->sess; | ||
1030 | struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess); | ||
1031 | /* | ||
1032 | * NOPIN timeout is disabled.. | ||
1033 | */ | ||
1034 | if (!na->nopin_timeout) | ||
1035 | return; | ||
1036 | |||
1037 | spin_lock_bh(&conn->nopin_timer_lock); | 1056 | spin_lock_bh(&conn->nopin_timer_lock); |
1038 | if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) { | 1057 | __iscsit_start_nopin_timer(conn); |
1039 | spin_unlock_bh(&conn->nopin_timer_lock); | ||
1040 | return; | ||
1041 | } | ||
1042 | |||
1043 | conn->nopin_timer_flags &= ~ISCSI_TF_STOP; | ||
1044 | conn->nopin_timer_flags |= ISCSI_TF_RUNNING; | ||
1045 | mod_timer(&conn->nopin_timer, jiffies + na->nopin_timeout * HZ); | ||
1046 | |||
1047 | pr_debug("Started NOPIN Timer on CID: %d at %u second" | ||
1048 | " interval\n", conn->cid, na->nopin_timeout); | ||
1049 | spin_unlock_bh(&conn->nopin_timer_lock); | 1058 | spin_unlock_bh(&conn->nopin_timer_lock); |
1050 | } | 1059 | } |
1051 | 1060 | ||
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 60d5b918c4ac..bc8918f382e4 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c | |||
@@ -239,10 +239,7 @@ out: | |||
239 | return ret; | 239 | return ret; |
240 | 240 | ||
241 | release: | 241 | release: |
242 | if (se_cmd) | 242 | kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); |
243 | transport_generic_free_cmd(se_cmd, 0); | ||
244 | else | ||
245 | kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); | ||
246 | goto out; | 243 | goto out; |
247 | } | 244 | } |
248 | 245 | ||
@@ -768,7 +765,7 @@ static int tcm_loop_make_nexus( | |||
768 | if (!tl_nexus) | 765 | if (!tl_nexus) |
769 | return -ENOMEM; | 766 | return -ENOMEM; |
770 | 767 | ||
771 | tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0, | 768 | tl_nexus->se_sess = target_setup_session(&tl_tpg->tl_se_tpg, 0, 0, |
772 | TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS, | 769 | TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS, |
773 | name, tl_nexus, tcm_loop_alloc_sess_cb); | 770 | name, tl_nexus, tcm_loop_alloc_sess_cb); |
774 | if (IS_ERR(tl_nexus->se_sess)) { | 771 | if (IS_ERR(tl_nexus->se_sess)) { |
@@ -808,7 +805,7 @@ static int tcm_loop_drop_nexus( | |||
808 | /* | 805 | /* |
809 | * Release the SCSI I_T Nexus to the emulated Target Port | 806 | * Release the SCSI I_T Nexus to the emulated Target Port |
810 | */ | 807 | */ |
811 | transport_deregister_session(tl_nexus->se_sess); | 808 | target_remove_session(se_sess); |
812 | tpg->tl_nexus = NULL; | 809 | tpg->tl_nexus = NULL; |
813 | kfree(tl_nexus); | 810 | kfree(tl_nexus); |
814 | return 0; | 811 | return 0; |
@@ -983,10 +980,8 @@ static struct configfs_attribute *tcm_loop_tpg_attrs[] = { | |||
983 | 980 | ||
984 | /* Start items for tcm_loop_naa_cit */ | 981 | /* Start items for tcm_loop_naa_cit */ |
985 | 982 | ||
986 | static struct se_portal_group *tcm_loop_make_naa_tpg( | 983 | static struct se_portal_group *tcm_loop_make_naa_tpg(struct se_wwn *wwn, |
987 | struct se_wwn *wwn, | 984 | const char *name) |
988 | struct config_group *group, | ||
989 | const char *name) | ||
990 | { | 985 | { |
991 | struct tcm_loop_hba *tl_hba = container_of(wwn, | 986 | struct tcm_loop_hba *tl_hba = container_of(wwn, |
992 | struct tcm_loop_hba, tl_hba_wwn); | 987 | struct tcm_loop_hba, tl_hba_wwn); |
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index fb1003921d85..3d10189ecedc 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c | |||
@@ -209,7 +209,7 @@ static struct sbp_session *sbp_session_create( | |||
209 | INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work); | 209 | INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work); |
210 | sess->guid = guid; | 210 | sess->guid = guid; |
211 | 211 | ||
212 | sess->se_sess = target_alloc_session(&tpg->se_tpg, 128, | 212 | sess->se_sess = target_setup_session(&tpg->se_tpg, 128, |
213 | sizeof(struct sbp_target_request), | 213 | sizeof(struct sbp_target_request), |
214 | TARGET_PROT_NORMAL, guid_str, | 214 | TARGET_PROT_NORMAL, guid_str, |
215 | sess, NULL); | 215 | sess, NULL); |
@@ -235,8 +235,7 @@ static void sbp_session_release(struct sbp_session *sess, bool cancel_work) | |||
235 | if (cancel_work) | 235 | if (cancel_work) |
236 | cancel_delayed_work_sync(&sess->maint_work); | 236 | cancel_delayed_work_sync(&sess->maint_work); |
237 | 237 | ||
238 | transport_deregister_session_configfs(sess->se_sess); | 238 | target_remove_session(sess->se_sess); |
239 | transport_deregister_session(sess->se_sess); | ||
240 | 239 | ||
241 | if (sess->card) | 240 | if (sess->card) |
242 | fw_card_put(sess->card); | 241 | fw_card_put(sess->card); |
@@ -926,15 +925,16 @@ static struct sbp_target_request *sbp_mgt_get_req(struct sbp_session *sess, | |||
926 | { | 925 | { |
927 | struct se_session *se_sess = sess->se_sess; | 926 | struct se_session *se_sess = sess->se_sess; |
928 | struct sbp_target_request *req; | 927 | struct sbp_target_request *req; |
929 | int tag; | 928 | int tag, cpu; |
930 | 929 | ||
931 | tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); | 930 | tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu); |
932 | if (tag < 0) | 931 | if (tag < 0) |
933 | return ERR_PTR(-ENOMEM); | 932 | return ERR_PTR(-ENOMEM); |
934 | 933 | ||
935 | req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag]; | 934 | req = &((struct sbp_target_request *)se_sess->sess_cmd_map)[tag]; |
936 | memset(req, 0, sizeof(*req)); | 935 | memset(req, 0, sizeof(*req)); |
937 | req->se_cmd.map_tag = tag; | 936 | req->se_cmd.map_tag = tag; |
937 | req->se_cmd.map_cpu = cpu; | ||
938 | req->se_cmd.tag = next_orb; | 938 | req->se_cmd.tag = next_orb; |
939 | 939 | ||
940 | return req; | 940 | return req; |
@@ -1460,7 +1460,7 @@ static void sbp_free_request(struct sbp_target_request *req) | |||
1460 | kfree(req->pg_tbl); | 1460 | kfree(req->pg_tbl); |
1461 | kfree(req->cmd_buf); | 1461 | kfree(req->cmd_buf); |
1462 | 1462 | ||
1463 | percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); | 1463 | target_free_tag(se_sess, se_cmd); |
1464 | } | 1464 | } |
1465 | 1465 | ||
1466 | static void sbp_mgt_agent_process(struct work_struct *work) | 1466 | static void sbp_mgt_agent_process(struct work_struct *work) |
@@ -2005,10 +2005,8 @@ static void sbp_pre_unlink_lun( | |||
2005 | pr_err("unlink LUN: failed to update unit directory\n"); | 2005 | pr_err("unlink LUN: failed to update unit directory\n"); |
2006 | } | 2006 | } |
2007 | 2007 | ||
2008 | static struct se_portal_group *sbp_make_tpg( | 2008 | static struct se_portal_group *sbp_make_tpg(struct se_wwn *wwn, |
2009 | struct se_wwn *wwn, | 2009 | const char *name) |
2010 | struct config_group *group, | ||
2011 | const char *name) | ||
2012 | { | 2010 | { |
2013 | struct sbp_tport *tport = | 2011 | struct sbp_tport *tport = |
2014 | container_of(wwn, struct sbp_tport, tport_wwn); | 2012 | container_of(wwn, struct sbp_tport, tport_wwn); |
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 5ccef7d597fa..f6b1549f4142 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c | |||
@@ -263,8 +263,8 @@ static struct config_group *target_core_register_fabric( | |||
263 | &tf->tf_discovery_cit); | 263 | &tf->tf_discovery_cit); |
264 | configfs_add_default_group(&tf->tf_disc_group, &tf->tf_group); | 264 | configfs_add_default_group(&tf->tf_disc_group, &tf->tf_group); |
265 | 265 | ||
266 | pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:" | 266 | pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric: %s\n", |
267 | " %s\n", tf->tf_group.cg_item.ci_name); | 267 | config_item_name(&tf->tf_group.cg_item)); |
268 | return &tf->tf_group; | 268 | return &tf->tf_group; |
269 | } | 269 | } |
270 | 270 | ||
@@ -810,7 +810,7 @@ static ssize_t pi_prot_type_store(struct config_item *item, | |||
810 | dev->transport->name); | 810 | dev->transport->name); |
811 | return -ENOSYS; | 811 | return -ENOSYS; |
812 | } | 812 | } |
813 | if (!(dev->dev_flags & DF_CONFIGURED)) { | 813 | if (!target_dev_configured(dev)) { |
814 | pr_err("DIF protection requires device to be configured\n"); | 814 | pr_err("DIF protection requires device to be configured\n"); |
815 | return -ENODEV; | 815 | return -ENODEV; |
816 | } | 816 | } |
@@ -859,7 +859,7 @@ static ssize_t pi_prot_format_store(struct config_item *item, | |||
859 | dev->transport->name); | 859 | dev->transport->name); |
860 | return -ENOSYS; | 860 | return -ENOSYS; |
861 | } | 861 | } |
862 | if (!(dev->dev_flags & DF_CONFIGURED)) { | 862 | if (!target_dev_configured(dev)) { |
863 | pr_err("DIF protection format requires device to be configured\n"); | 863 | pr_err("DIF protection format requires device to be configured\n"); |
864 | return -ENODEV; | 864 | return -ENODEV; |
865 | } | 865 | } |
@@ -1948,7 +1948,7 @@ static ssize_t target_dev_enable_show(struct config_item *item, char *page) | |||
1948 | { | 1948 | { |
1949 | struct se_device *dev = to_device(item); | 1949 | struct se_device *dev = to_device(item); |
1950 | 1950 | ||
1951 | return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED)); | 1951 | return snprintf(page, PAGE_SIZE, "%d\n", target_dev_configured(dev)); |
1952 | } | 1952 | } |
1953 | 1953 | ||
1954 | static ssize_t target_dev_enable_store(struct config_item *item, | 1954 | static ssize_t target_dev_enable_store(struct config_item *item, |
@@ -2473,7 +2473,7 @@ static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item, | |||
2473 | " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); | 2473 | " tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); |
2474 | return -EINVAL; | 2474 | return -EINVAL; |
2475 | } | 2475 | } |
2476 | if (!(dev->dev_flags & DF_CONFIGURED)) { | 2476 | if (!target_dev_configured(dev)) { |
2477 | pr_err("Unable to set alua_access_state while device is" | 2477 | pr_err("Unable to set alua_access_state while device is" |
2478 | " not configured\n"); | 2478 | " not configured\n"); |
2479 | return -ENODEV; | 2479 | return -ENODEV; |
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index e27db4d45a9d..47b5ef153135 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c | |||
@@ -336,7 +336,6 @@ int core_enable_device_list_for_node( | |||
336 | return -ENOMEM; | 336 | return -ENOMEM; |
337 | } | 337 | } |
338 | 338 | ||
339 | atomic_set(&new->ua_count, 0); | ||
340 | spin_lock_init(&new->ua_lock); | 339 | spin_lock_init(&new->ua_lock); |
341 | INIT_LIST_HEAD(&new->ua_list); | 340 | INIT_LIST_HEAD(&new->ua_list); |
342 | INIT_LIST_HEAD(&new->lun_link); | 341 | INIT_LIST_HEAD(&new->lun_link); |
@@ -879,39 +878,21 @@ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb) | |||
879 | } | 878 | } |
880 | EXPORT_SYMBOL(target_to_linux_sector); | 879 | EXPORT_SYMBOL(target_to_linux_sector); |
881 | 880 | ||
882 | /** | ||
883 | * target_find_device - find a se_device by its dev_index | ||
884 | * @id: dev_index | ||
885 | * @do_depend: true if caller needs target_depend_item to be done | ||
886 | * | ||
887 | * If do_depend is true, the caller must do a target_undepend_item | ||
888 | * when finished using the device. | ||
889 | * | ||
890 | * If do_depend is false, the caller must be called in a configfs | ||
891 | * callback or during removal. | ||
892 | */ | ||
893 | struct se_device *target_find_device(int id, bool do_depend) | ||
894 | { | ||
895 | struct se_device *dev; | ||
896 | |||
897 | mutex_lock(&device_mutex); | ||
898 | dev = idr_find(&devices_idr, id); | ||
899 | if (dev && do_depend && target_depend_item(&dev->dev_group.cg_item)) | ||
900 | dev = NULL; | ||
901 | mutex_unlock(&device_mutex); | ||
902 | return dev; | ||
903 | } | ||
904 | EXPORT_SYMBOL(target_find_device); | ||
905 | |||
906 | struct devices_idr_iter { | 881 | struct devices_idr_iter { |
882 | struct config_item *prev_item; | ||
907 | int (*fn)(struct se_device *dev, void *data); | 883 | int (*fn)(struct se_device *dev, void *data); |
908 | void *data; | 884 | void *data; |
909 | }; | 885 | }; |
910 | 886 | ||
911 | static int target_devices_idr_iter(int id, void *p, void *data) | 887 | static int target_devices_idr_iter(int id, void *p, void *data) |
888 | __must_hold(&device_mutex) | ||
912 | { | 889 | { |
913 | struct devices_idr_iter *iter = data; | 890 | struct devices_idr_iter *iter = data; |
914 | struct se_device *dev = p; | 891 | struct se_device *dev = p; |
892 | int ret; | ||
893 | |||
894 | config_item_put(iter->prev_item); | ||
895 | iter->prev_item = NULL; | ||
915 | 896 | ||
916 | /* | 897 | /* |
917 | * We add the device early to the idr, so it can be used | 898 | * We add the device early to the idr, so it can be used |
@@ -919,10 +900,18 @@ static int target_devices_idr_iter(int id, void *p, void *data) | |||
919 | * to allow other callers to access partially setup devices, | 900 | * to allow other callers to access partially setup devices, |
920 | * so we skip them here. | 901 | * so we skip them here. |
921 | */ | 902 | */ |
922 | if (!(dev->dev_flags & DF_CONFIGURED)) | 903 | if (!target_dev_configured(dev)) |
904 | return 0; | ||
905 | |||
906 | iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item); | ||
907 | if (!iter->prev_item) | ||
923 | return 0; | 908 | return 0; |
909 | mutex_unlock(&device_mutex); | ||
910 | |||
911 | ret = iter->fn(dev, iter->data); | ||
924 | 912 | ||
925 | return iter->fn(dev, iter->data); | 913 | mutex_lock(&device_mutex); |
914 | return ret; | ||
926 | } | 915 | } |
927 | 916 | ||
928 | /** | 917 | /** |
@@ -936,15 +925,13 @@ static int target_devices_idr_iter(int id, void *p, void *data) | |||
936 | int target_for_each_device(int (*fn)(struct se_device *dev, void *data), | 925 | int target_for_each_device(int (*fn)(struct se_device *dev, void *data), |
937 | void *data) | 926 | void *data) |
938 | { | 927 | { |
939 | struct devices_idr_iter iter; | 928 | struct devices_idr_iter iter = { .fn = fn, .data = data }; |
940 | int ret; | 929 | int ret; |
941 | 930 | ||
942 | iter.fn = fn; | ||
943 | iter.data = data; | ||
944 | |||
945 | mutex_lock(&device_mutex); | 931 | mutex_lock(&device_mutex); |
946 | ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter); | 932 | ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter); |
947 | mutex_unlock(&device_mutex); | 933 | mutex_unlock(&device_mutex); |
934 | config_item_put(iter.prev_item); | ||
948 | return ret; | 935 | return ret; |
949 | } | 936 | } |
950 | 937 | ||
@@ -953,7 +940,7 @@ int target_configure_device(struct se_device *dev) | |||
953 | struct se_hba *hba = dev->se_hba; | 940 | struct se_hba *hba = dev->se_hba; |
954 | int ret, id; | 941 | int ret, id; |
955 | 942 | ||
956 | if (dev->dev_flags & DF_CONFIGURED) { | 943 | if (target_dev_configured(dev)) { |
957 | pr_err("se_dev->se_dev_ptr already set for storage" | 944 | pr_err("se_dev->se_dev_ptr already set for storage" |
958 | " object\n"); | 945 | " object\n"); |
959 | return -EEXIST; | 946 | return -EEXIST; |
@@ -1058,7 +1045,7 @@ void target_free_device(struct se_device *dev) | |||
1058 | 1045 | ||
1059 | WARN_ON(!list_empty(&dev->dev_sep_list)); | 1046 | WARN_ON(!list_empty(&dev->dev_sep_list)); |
1060 | 1047 | ||
1061 | if (dev->dev_flags & DF_CONFIGURED) { | 1048 | if (target_dev_configured(dev)) { |
1062 | destroy_workqueue(dev->tmr_wq); | 1049 | destroy_workqueue(dev->tmr_wq); |
1063 | 1050 | ||
1064 | dev->transport->destroy_device(dev); | 1051 | dev->transport->destroy_device(dev); |
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index e1416b007aa4..aa2f4f632ebe 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/configfs.h> | 34 | #include <linux/configfs.h> |
35 | 35 | ||
36 | #include <target/target_core_base.h> | 36 | #include <target/target_core_base.h> |
37 | #include <target/target_core_backend.h> | ||
37 | #include <target/target_core_fabric.h> | 38 | #include <target/target_core_fabric.h> |
38 | 39 | ||
39 | #include "target_core_internal.h" | 40 | #include "target_core_internal.h" |
@@ -642,7 +643,7 @@ static int target_fabric_port_link( | |||
642 | } | 643 | } |
643 | dev = container_of(to_config_group(se_dev_ci), struct se_device, dev_group); | 644 | dev = container_of(to_config_group(se_dev_ci), struct se_device, dev_group); |
644 | 645 | ||
645 | if (!(dev->dev_flags & DF_CONFIGURED)) { | 646 | if (!target_dev_configured(dev)) { |
646 | pr_err("se_device not configured yet, cannot port link\n"); | 647 | pr_err("se_device not configured yet, cannot port link\n"); |
647 | return -ENODEV; | 648 | return -ENODEV; |
648 | } | 649 | } |
@@ -841,7 +842,7 @@ static struct config_group *target_fabric_make_tpg( | |||
841 | return ERR_PTR(-ENOSYS); | 842 | return ERR_PTR(-ENOSYS); |
842 | } | 843 | } |
843 | 844 | ||
844 | se_tpg = tf->tf_ops->fabric_make_tpg(wwn, group, name); | 845 | se_tpg = tf->tf_ops->fabric_make_tpg(wwn, name); |
845 | if (!se_tpg || IS_ERR(se_tpg)) | 846 | if (!se_tpg || IS_ERR(se_tpg)) |
846 | return ERR_PTR(-EINVAL); | 847 | return ERR_PTR(-EINVAL); |
847 | 848 | ||
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index dead30b1d32c..0c6635587930 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h | |||
@@ -138,7 +138,7 @@ int init_se_kmem_caches(void); | |||
138 | void release_se_kmem_caches(void); | 138 | void release_se_kmem_caches(void); |
139 | u32 scsi_get_new_index(scsi_index_t); | 139 | u32 scsi_get_new_index(scsi_index_t); |
140 | void transport_subsystem_check_init(void); | 140 | void transport_subsystem_check_init(void); |
141 | int transport_cmd_finish_abort(struct se_cmd *, int); | 141 | int transport_cmd_finish_abort(struct se_cmd *); |
142 | unsigned char *transport_dump_cmd_direction(struct se_cmd *); | 142 | unsigned char *transport_dump_cmd_direction(struct se_cmd *); |
143 | void transport_dump_dev_state(struct se_device *, char *, int *); | 143 | void transport_dump_dev_state(struct se_device *, char *, int *); |
144 | void transport_dump_dev_info(struct se_device *, struct se_lun *, | 144 | void transport_dump_dev_info(struct se_device *, struct se_lun *, |
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index b054682e974f..ebac2b49b9c6 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c | |||
@@ -978,9 +978,10 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) | |||
978 | } | 978 | } |
979 | case COMPARE_AND_WRITE: | 979 | case COMPARE_AND_WRITE: |
980 | if (!dev->dev_attrib.emulate_caw) { | 980 | if (!dev->dev_attrib.emulate_caw) { |
981 | pr_err_ratelimited("se_device %s/%s (vpd_unit_serial %s) reject" | 981 | pr_err_ratelimited("se_device %s/%s (vpd_unit_serial %s) reject COMPARE_AND_WRITE\n", |
982 | " COMPARE_AND_WRITE\n", dev->se_hba->backend->ops->name, | 982 | dev->se_hba->backend->ops->name, |
983 | dev->dev_group.cg_item.ci_name, dev->t10_wwn.unit_serial); | 983 | config_item_name(&dev->dev_group.cg_item), |
984 | dev->t10_wwn.unit_serial); | ||
984 | return TCM_UNSUPPORTED_SCSI_OPCODE; | 985 | return TCM_UNSUPPORTED_SCSI_OPCODE; |
985 | } | 986 | } |
986 | sectors = cdb[13]; | 987 | sectors = cdb[13]; |
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 9c7bc1ca341a..6d1179a7f043 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c | |||
@@ -75,25 +75,6 @@ void core_tmr_release_req(struct se_tmr_req *tmr) | |||
75 | kfree(tmr); | 75 | kfree(tmr); |
76 | } | 76 | } |
77 | 77 | ||
78 | static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) | ||
79 | { | ||
80 | unsigned long flags; | ||
81 | bool remove = true, send_tas; | ||
82 | /* | ||
83 | * TASK ABORTED status (TAS) bit support | ||
84 | */ | ||
85 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
86 | send_tas = (cmd->transport_state & CMD_T_TAS); | ||
87 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
88 | |||
89 | if (send_tas) { | ||
90 | remove = false; | ||
91 | transport_send_task_abort(cmd); | ||
92 | } | ||
93 | |||
94 | return transport_cmd_finish_abort(cmd, remove); | ||
95 | } | ||
96 | |||
97 | static int target_check_cdb_and_preempt(struct list_head *list, | 78 | static int target_check_cdb_and_preempt(struct list_head *list, |
98 | struct se_cmd *cmd) | 79 | struct se_cmd *cmd) |
99 | { | 80 | { |
@@ -142,7 +123,7 @@ static bool __target_check_io_state(struct se_cmd *se_cmd, | |||
142 | return false; | 123 | return false; |
143 | } | 124 | } |
144 | } | 125 | } |
145 | if (sess->sess_tearing_down || se_cmd->cmd_wait_set) { | 126 | if (sess->sess_tearing_down) { |
146 | pr_debug("Attempted to abort io tag: %llu already shutdown," | 127 | pr_debug("Attempted to abort io tag: %llu already shutdown," |
147 | " skipping\n", se_cmd->tag); | 128 | " skipping\n", se_cmd->tag); |
148 | spin_unlock(&se_cmd->t_state_lock); | 129 | spin_unlock(&se_cmd->t_state_lock); |
@@ -187,13 +168,12 @@ void core_tmr_abort_task( | |||
187 | if (!__target_check_io_state(se_cmd, se_sess, 0)) | 168 | if (!__target_check_io_state(se_cmd, se_sess, 0)) |
188 | continue; | 169 | continue; |
189 | 170 | ||
190 | list_del_init(&se_cmd->se_cmd_list); | ||
191 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 171 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
192 | 172 | ||
193 | cancel_work_sync(&se_cmd->work); | 173 | cancel_work_sync(&se_cmd->work); |
194 | transport_wait_for_tasks(se_cmd); | 174 | transport_wait_for_tasks(se_cmd); |
195 | 175 | ||
196 | if (!transport_cmd_finish_abort(se_cmd, true)) | 176 | if (!transport_cmd_finish_abort(se_cmd)) |
197 | target_put_sess_cmd(se_cmd); | 177 | target_put_sess_cmd(se_cmd); |
198 | 178 | ||
199 | printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" | 179 | printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" |
@@ -259,7 +239,7 @@ static void core_tmr_drain_tmr_list( | |||
259 | spin_unlock(&sess->sess_cmd_lock); | 239 | spin_unlock(&sess->sess_cmd_lock); |
260 | continue; | 240 | continue; |
261 | } | 241 | } |
262 | if (sess->sess_tearing_down || cmd->cmd_wait_set) { | 242 | if (sess->sess_tearing_down) { |
263 | spin_unlock(&cmd->t_state_lock); | 243 | spin_unlock(&cmd->t_state_lock); |
264 | spin_unlock(&sess->sess_cmd_lock); | 244 | spin_unlock(&sess->sess_cmd_lock); |
265 | continue; | 245 | continue; |
@@ -291,7 +271,7 @@ static void core_tmr_drain_tmr_list( | |||
291 | cancel_work_sync(&cmd->work); | 271 | cancel_work_sync(&cmd->work); |
292 | transport_wait_for_tasks(cmd); | 272 | transport_wait_for_tasks(cmd); |
293 | 273 | ||
294 | if (!transport_cmd_finish_abort(cmd, 1)) | 274 | if (!transport_cmd_finish_abort(cmd)) |
295 | target_put_sess_cmd(cmd); | 275 | target_put_sess_cmd(cmd); |
296 | } | 276 | } |
297 | } | 277 | } |
@@ -380,7 +360,7 @@ static void core_tmr_drain_state_list( | |||
380 | cancel_work_sync(&cmd->work); | 360 | cancel_work_sync(&cmd->work); |
381 | transport_wait_for_tasks(cmd); | 361 | transport_wait_for_tasks(cmd); |
382 | 362 | ||
383 | if (!core_tmr_handle_tas_abort(cmd, tas)) | 363 | if (!transport_cmd_finish_abort(cmd)) |
384 | target_put_sess_cmd(cmd); | 364 | target_put_sess_cmd(cmd); |
385 | } | 365 | } |
386 | } | 366 | } |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index ee5081ba5313..86c0156e6c88 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -64,7 +64,7 @@ struct kmem_cache *t10_alua_lba_map_cache; | |||
64 | struct kmem_cache *t10_alua_lba_map_mem_cache; | 64 | struct kmem_cache *t10_alua_lba_map_mem_cache; |
65 | 65 | ||
66 | static void transport_complete_task_attr(struct se_cmd *cmd); | 66 | static void transport_complete_task_attr(struct se_cmd *cmd); |
67 | static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason); | 67 | static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason); |
68 | static void transport_handle_queue_full(struct se_cmd *cmd, | 68 | static void transport_handle_queue_full(struct se_cmd *cmd, |
69 | struct se_device *dev, int err, bool write_pending); | 69 | struct se_device *dev, int err, bool write_pending); |
70 | static void target_complete_ok_work(struct work_struct *work); | 70 | static void target_complete_ok_work(struct work_struct *work); |
@@ -224,7 +224,27 @@ void transport_subsystem_check_init(void) | |||
224 | sub_api_initialized = 1; | 224 | sub_api_initialized = 1; |
225 | } | 225 | } |
226 | 226 | ||
227 | struct se_session *transport_init_session(enum target_prot_op sup_prot_ops) | 227 | /** |
228 | * transport_init_session - initialize a session object | ||
229 | * @se_sess: Session object pointer. | ||
230 | * | ||
231 | * The caller must have zero-initialized @se_sess before calling this function. | ||
232 | */ | ||
233 | void transport_init_session(struct se_session *se_sess) | ||
234 | { | ||
235 | INIT_LIST_HEAD(&se_sess->sess_list); | ||
236 | INIT_LIST_HEAD(&se_sess->sess_acl_list); | ||
237 | INIT_LIST_HEAD(&se_sess->sess_cmd_list); | ||
238 | spin_lock_init(&se_sess->sess_cmd_lock); | ||
239 | init_waitqueue_head(&se_sess->cmd_list_wq); | ||
240 | } | ||
241 | EXPORT_SYMBOL(transport_init_session); | ||
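/*
 * Usage sketch (illustrative, not taken from this series): a fabric
 * driver that embeds the session in its own object would do something
 * like
 *
 *	memset(&nexus->se_sess, 0, sizeof(nexus->se_sess));
 *	transport_init_session(&nexus->se_sess);
 *
 * where "nexus" is a hypothetical driver-private structure.
 */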
242 | |||
243 | /** | ||
244 | * transport_alloc_session - allocate a session object and initialize it | ||
245 | * @sup_prot_ops: bitmask that defines which T10-PI modes are supported. | ||
246 | */ | ||
247 | struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops) | ||
228 | { | 248 | { |
229 | struct se_session *se_sess; | 249 | struct se_session *se_sess; |
230 | 250 | ||
@@ -234,17 +254,20 @@ struct se_session *transport_init_session(enum target_prot_op sup_prot_ops) | |||
234 | " se_sess_cache\n"); | 254 | " se_sess_cache\n"); |
235 | return ERR_PTR(-ENOMEM); | 255 | return ERR_PTR(-ENOMEM); |
236 | } | 256 | } |
237 | INIT_LIST_HEAD(&se_sess->sess_list); | 257 | transport_init_session(se_sess); |
238 | INIT_LIST_HEAD(&se_sess->sess_acl_list); | ||
239 | INIT_LIST_HEAD(&se_sess->sess_cmd_list); | ||
240 | INIT_LIST_HEAD(&se_sess->sess_wait_list); | ||
241 | spin_lock_init(&se_sess->sess_cmd_lock); | ||
242 | se_sess->sup_prot_ops = sup_prot_ops; | 258 | se_sess->sup_prot_ops = sup_prot_ops; |
243 | 259 | ||
244 | return se_sess; | 260 | return se_sess; |
245 | } | 261 | } |
246 | EXPORT_SYMBOL(transport_init_session); | 262 | EXPORT_SYMBOL(transport_alloc_session); |
247 | 263 | ||
264 | /** | ||
265 | * transport_alloc_session_tags - allocate target driver private data | ||
266 | * @se_sess: Session pointer. | ||
267 | * @tag_num: Maximum number of in-flight commands between initiator and target. | ||
268 | * @tag_size: Size in bytes of the private data a target driver associates with | ||
269 | * each command. | ||
270 | */ | ||
248 | int transport_alloc_session_tags(struct se_session *se_sess, | 271 | int transport_alloc_session_tags(struct se_session *se_sess, |
249 | unsigned int tag_num, unsigned int tag_size) | 272 | unsigned int tag_num, unsigned int tag_size) |
250 | { | 273 | { |
@@ -260,7 +283,8 @@ int transport_alloc_session_tags(struct se_session *se_sess, | |||
260 | } | 283 | } |
261 | } | 284 | } |
262 | 285 | ||
263 | rc = percpu_ida_init(&se_sess->sess_tag_pool, tag_num); | 286 | rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1, |
287 | false, GFP_KERNEL, NUMA_NO_NODE); | ||
264 | if (rc < 0) { | 288 | if (rc < 0) { |
265 | pr_err("Unable to init se_sess->sess_tag_pool," | 289 | pr_err("Unable to init se_sess->sess_tag_pool," |
266 | " tag_num: %u\n", tag_num); | 290 | " tag_num: %u\n", tag_num); |
@@ -273,9 +297,16 @@ int transport_alloc_session_tags(struct se_session *se_sess, | |||
273 | } | 297 | } |
274 | EXPORT_SYMBOL(transport_alloc_session_tags); | 298 | EXPORT_SYMBOL(transport_alloc_session_tags); |
275 | 299 | ||
276 | struct se_session *transport_init_session_tags(unsigned int tag_num, | 300 | /** |
277 | unsigned int tag_size, | 301 | * transport_init_session_tags - allocate a session and target driver private data |
278 | enum target_prot_op sup_prot_ops) | 302 | * @tag_num: Maximum number of in-flight commands between initiator and target. |
303 | * @tag_size: Size in bytes of the private data a target driver associates with | ||
304 | * each command. | ||
305 | * @sup_prot_ops: bitmask that defines which T10-PI modes are supported. | ||
306 | */ | ||
307 | static struct se_session * | ||
308 | transport_init_session_tags(unsigned int tag_num, unsigned int tag_size, | ||
309 | enum target_prot_op sup_prot_ops) | ||
279 | { | 310 | { |
280 | struct se_session *se_sess; | 311 | struct se_session *se_sess; |
281 | int rc; | 312 | int rc; |
@@ -291,7 +322,7 @@ struct se_session *transport_init_session_tags(unsigned int tag_num, | |||
291 | return ERR_PTR(-EINVAL); | 322 | return ERR_PTR(-EINVAL); |
292 | } | 323 | } |
293 | 324 | ||
294 | se_sess = transport_init_session(sup_prot_ops); | 325 | se_sess = transport_alloc_session(sup_prot_ops); |
295 | if (IS_ERR(se_sess)) | 326 | if (IS_ERR(se_sess)) |
296 | return se_sess; | 327 | return se_sess; |
297 | 328 | ||
@@ -303,7 +334,6 @@ struct se_session *transport_init_session_tags(unsigned int tag_num, | |||
303 | 334 | ||
304 | return se_sess; | 335 | return se_sess; |
305 | } | 336 | } |
306 | EXPORT_SYMBOL(transport_init_session_tags); | ||
307 | 337 | ||
308 | /* | 338 | /* |
309 | * Called with spin_lock_irqsave(&struct se_portal_group->session_lock called. | 339 | * Called with spin_lock_irqsave(&struct se_portal_group->session_lock called. |
@@ -316,6 +346,7 @@ void __transport_register_session( | |||
316 | { | 346 | { |
317 | const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo; | 347 | const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo; |
318 | unsigned char buf[PR_REG_ISID_LEN]; | 348 | unsigned char buf[PR_REG_ISID_LEN]; |
349 | unsigned long flags; | ||
319 | 350 | ||
320 | se_sess->se_tpg = se_tpg; | 351 | se_sess->se_tpg = se_tpg; |
321 | se_sess->fabric_sess_ptr = fabric_sess_ptr; | 352 | se_sess->fabric_sess_ptr = fabric_sess_ptr; |
@@ -352,7 +383,7 @@ void __transport_register_session( | |||
352 | se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); | 383 | se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); |
353 | } | 384 | } |
354 | 385 | ||
355 | spin_lock_irq(&se_nacl->nacl_sess_lock); | 386 | spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); |
356 | /* | 387 | /* |
357 | * The se_nacl->nacl_sess pointer will be set to the | 388 | * The se_nacl->nacl_sess pointer will be set to the |
358 | * last active I_T Nexus for each struct se_node_acl. | 389 | * last active I_T Nexus for each struct se_node_acl. |
@@ -361,7 +392,7 @@ void __transport_register_session( | |||
361 | 392 | ||
362 | list_add_tail(&se_sess->sess_acl_list, | 393 | list_add_tail(&se_sess->sess_acl_list, |
363 | &se_nacl->acl_sess_list); | 394 | &se_nacl->acl_sess_list); |
364 | spin_unlock_irq(&se_nacl->nacl_sess_lock); | 395 | spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); |
365 | } | 396 | } |
366 | list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); | 397 | list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); |
367 | 398 | ||
@@ -385,7 +416,7 @@ void transport_register_session( | |||
385 | EXPORT_SYMBOL(transport_register_session); | 416 | EXPORT_SYMBOL(transport_register_session); |
386 | 417 | ||
387 | struct se_session * | 418 | struct se_session * |
388 | target_alloc_session(struct se_portal_group *tpg, | 419 | target_setup_session(struct se_portal_group *tpg, |
389 | unsigned int tag_num, unsigned int tag_size, | 420 | unsigned int tag_num, unsigned int tag_size, |
390 | enum target_prot_op prot_op, | 421 | enum target_prot_op prot_op, |
391 | const char *initiatorname, void *private, | 422 | const char *initiatorname, void *private, |
@@ -401,7 +432,7 @@ target_alloc_session(struct se_portal_group *tpg, | |||
401 | if (tag_num != 0) | 432 | if (tag_num != 0) |
402 | sess = transport_init_session_tags(tag_num, tag_size, prot_op); | 433 | sess = transport_init_session_tags(tag_num, tag_size, prot_op); |
403 | else | 434 | else |
404 | sess = transport_init_session(prot_op); | 435 | sess = transport_alloc_session(prot_op); |
405 | 436 | ||
406 | if (IS_ERR(sess)) | 437 | if (IS_ERR(sess)) |
407 | return sess; | 438 | return sess; |
@@ -427,7 +458,7 @@ target_alloc_session(struct se_portal_group *tpg, | |||
427 | transport_register_session(tpg, sess->se_node_acl, sess, private); | 458 | transport_register_session(tpg, sess->se_node_acl, sess, private); |
428 | return sess; | 459 | return sess; |
429 | } | 460 | } |
430 | EXPORT_SYMBOL(target_alloc_session); | 461 | EXPORT_SYMBOL(target_setup_session); |
431 | 462 | ||
432 | ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page) | 463 | ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page) |
433 | { | 464 | { |
@@ -547,7 +578,7 @@ void transport_free_session(struct se_session *se_sess) | |||
547 | target_put_nacl(se_nacl); | 578 | target_put_nacl(se_nacl); |
548 | } | 579 | } |
549 | if (se_sess->sess_cmd_map) { | 580 | if (se_sess->sess_cmd_map) { |
550 | percpu_ida_destroy(&se_sess->sess_tag_pool); | 581 | sbitmap_queue_free(&se_sess->sess_tag_pool); |
551 | kvfree(se_sess->sess_cmd_map); | 582 | kvfree(se_sess->sess_cmd_map); |
552 | } | 583 | } |
553 | kmem_cache_free(se_sess_cache, se_sess); | 584 | kmem_cache_free(se_sess_cache, se_sess); |
@@ -585,6 +616,13 @@ void transport_deregister_session(struct se_session *se_sess) | |||
585 | } | 616 | } |
586 | EXPORT_SYMBOL(transport_deregister_session); | 617 | EXPORT_SYMBOL(transport_deregister_session); |
587 | 618 | ||
619 | void target_remove_session(struct se_session *se_sess) | ||
620 | { | ||
621 | transport_deregister_session_configfs(se_sess); | ||
622 | transport_deregister_session(se_sess); | ||
623 | } | ||
624 | EXPORT_SYMBOL(target_remove_session); | ||
625 | |||
588 | static void target_remove_from_state_list(struct se_cmd *cmd) | 626 | static void target_remove_from_state_list(struct se_cmd *cmd) |
589 | { | 627 | { |
590 | struct se_device *dev = cmd->se_dev; | 628 | struct se_device *dev = cmd->se_dev; |
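With the rename to target_setup_session() and the new target_remove_session() wrapper added above, a fabric driver's login and logout paths reduce to roughly the sketch below. The struct my_fabric_sess wrapper, my_sess_callback() and the tag sizing are placeholders, and the trailing callback argument is assumed to keep the signature of the former target_alloc_session() apart from the name.

#include <linux/err.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

struct my_fabric_sess {
	struct se_session *se_sess;
};

static int my_sess_callback(struct se_portal_group *se_tpg,
			    struct se_session *se_sess, void *private)
{
	struct my_fabric_sess *fsess = private;

	/* Stash the core session in the fabric driver's own object. */
	fsess->se_sess = se_sess;
	return 0;
}

static int my_fabric_login(struct my_fabric_sess *fsess,
			   struct se_portal_group *se_tpg,
			   const char *initiatorname)
{
	struct se_session *se_sess;

	/* 128 tags and a made-up per-command size, for illustration only. */
	se_sess = target_setup_session(se_tpg, 128, 512, TARGET_PROT_NORMAL,
				       initiatorname, fsess, my_sess_callback);
	if (IS_ERR(se_sess))
		return PTR_ERR(se_sess);
	return 0;
}

static void my_fabric_logout(struct my_fabric_sess *fsess)
{
	/* Undoes both the configfs registration and the core registration. */
	target_remove_session(fsess->se_sess);
}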
@@ -601,6 +639,13 @@ static void target_remove_from_state_list(struct se_cmd *cmd) | |||
601 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); | 639 | spin_unlock_irqrestore(&dev->execute_task_lock, flags); |
602 | } | 640 | } |
603 | 641 | ||
642 | /* | ||
643 | * This function is called by the target core after the target core has | ||
644 | * finished processing a SCSI command or SCSI TMF. Both the regular command | ||
645 | * processing code and the code for aborting commands can call this | ||
646 | * function. CMD_T_STOP is set if and only if another thread is waiting | ||
647 | * inside transport_wait_for_tasks() for t_transport_stop_comp. | ||
648 | */ | ||
604 | static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) | 649 | static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) |
605 | { | 650 | { |
606 | unsigned long flags; | 651 | unsigned long flags; |
@@ -650,23 +695,27 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) | |||
650 | percpu_ref_put(&lun->lun_ref); | 695 | percpu_ref_put(&lun->lun_ref); |
651 | } | 696 | } |
652 | 697 | ||
653 | int transport_cmd_finish_abort(struct se_cmd *cmd, int remove) | 698 | int transport_cmd_finish_abort(struct se_cmd *cmd) |
654 | { | 699 | { |
700 | bool send_tas = cmd->transport_state & CMD_T_TAS; | ||
655 | bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF); | 701 | bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF); |
656 | int ret = 0; | 702 | int ret = 0; |
657 | 703 | ||
704 | if (send_tas) | ||
705 | transport_send_task_abort(cmd); | ||
706 | |||
658 | if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) | 707 | if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) |
659 | transport_lun_remove_cmd(cmd); | 708 | transport_lun_remove_cmd(cmd); |
660 | /* | 709 | /* |
661 | * Allow the fabric driver to unmap any resources before | 710 | * Allow the fabric driver to unmap any resources before |
662 | * releasing the descriptor via TFO->release_cmd() | 711 | * releasing the descriptor via TFO->release_cmd() |
663 | */ | 712 | */ |
664 | if (remove) | 713 | if (!send_tas) |
665 | cmd->se_tfo->aborted_task(cmd); | 714 | cmd->se_tfo->aborted_task(cmd); |
666 | 715 | ||
667 | if (transport_cmd_check_stop_to_fabric(cmd)) | 716 | if (transport_cmd_check_stop_to_fabric(cmd)) |
668 | return 1; | 717 | return 1; |
669 | if (remove && ack_kref) | 718 | if (!send_tas && ack_kref) |
670 | ret = target_put_sess_cmd(cmd); | 719 | ret = target_put_sess_cmd(cmd); |
671 | 720 | ||
672 | return ret; | 721 | return ret; |
@@ -1267,7 +1316,7 @@ void transport_init_se_cmd( | |||
1267 | INIT_LIST_HEAD(&cmd->se_cmd_list); | 1316 | INIT_LIST_HEAD(&cmd->se_cmd_list); |
1268 | INIT_LIST_HEAD(&cmd->state_list); | 1317 | INIT_LIST_HEAD(&cmd->state_list); |
1269 | init_completion(&cmd->t_transport_stop_comp); | 1318 | init_completion(&cmd->t_transport_stop_comp); |
1270 | init_completion(&cmd->cmd_wait_comp); | 1319 | cmd->compl = NULL; |
1271 | spin_lock_init(&cmd->t_state_lock); | 1320 | spin_lock_init(&cmd->t_state_lock); |
1272 | INIT_WORK(&cmd->work, NULL); | 1321 | INIT_WORK(&cmd->work, NULL); |
1273 | kref_init(&cmd->cmd_kref); | 1322 | kref_init(&cmd->cmd_kref); |
@@ -2079,9 +2128,6 @@ static void transport_complete_qf(struct se_cmd *cmd) | |||
2079 | if (cmd->scsi_status) | 2128 | if (cmd->scsi_status) |
2080 | goto queue_status; | 2129 | goto queue_status; |
2081 | 2130 | ||
2082 | cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; | ||
2083 | cmd->scsi_status = SAM_STAT_CHECK_CONDITION; | ||
2084 | cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; | ||
2085 | translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); | 2131 | translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); |
2086 | goto queue_status; | 2132 | goto queue_status; |
2087 | } | 2133 | } |
@@ -2593,20 +2639,37 @@ static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas) | |||
2593 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 2639 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
2594 | } | 2640 | } |
2595 | 2641 | ||
2642 | /* | ||
2643 | * This function is called by frontend drivers after processing of a command | ||
2644 | * has finished. | ||
2645 | * | ||
2646 | * The protocol for ensuring that either the regular flow or the TMF | ||
2647 | * code drops one reference is as follows: | ||
2648 | * - Calling .queue_data_in(), .queue_status() or .queue_tm_rsp() will cause | ||

2649 | * the frontend driver to drop one reference, synchronously or asynchronously. | ||
2650 | * - During regular command processing the target core sets CMD_T_COMPLETE | ||
2651 | * before invoking one of the .queue_*() functions. | ||
2652 | * - The code that aborts commands skips commands and TMFs for which | ||
2653 | * CMD_T_COMPLETE has been set. | ||
2654 | * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for | ||
2655 | * commands that will be aborted. | ||
2656 | * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set | ||
2657 | * transport_generic_free_cmd() skips its call to target_put_sess_cmd(). | ||
2658 | * - For aborted commands for which CMD_T_TAS has been set .queue_status() will | ||
2659 | * be called and will drop a reference. | ||
2660 | * - For aborted commands for which CMD_T_TAS has not been set .aborted_task() | ||
2661 | * will be called. transport_cmd_finish_abort() will drop the final reference. | ||
2662 | */ | ||
2596 | int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) | 2663 | int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) |
2597 | { | 2664 | { |
2665 | DECLARE_COMPLETION_ONSTACK(compl); | ||
2598 | int ret = 0; | 2666 | int ret = 0; |
2599 | bool aborted = false, tas = false; | 2667 | bool aborted = false, tas = false; |
2600 | 2668 | ||
2601 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { | 2669 | if (wait_for_tasks) |
2602 | if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) | 2670 | target_wait_free_cmd(cmd, &aborted, &tas); |
2603 | target_wait_free_cmd(cmd, &aborted, &tas); | ||
2604 | 2671 | ||
2605 | if (!aborted || tas) | 2672 | if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) { |
2606 | ret = target_put_sess_cmd(cmd); | ||
2607 | } else { | ||
2608 | if (wait_for_tasks) | ||
2609 | target_wait_free_cmd(cmd, &aborted, &tas); | ||
2610 | /* | 2673 | /* |
2611 | * Handle WRITE failure case where transport_generic_new_cmd() | 2674 | * Handle WRITE failure case where transport_generic_new_cmd() |
2612 | * has already added se_cmd to state_list, but fabric has | 2675 | * has already added se_cmd to state_list, but fabric has |
@@ -2617,20 +2680,14 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) | |||
2617 | 2680 | ||
2618 | if (cmd->se_lun) | 2681 | if (cmd->se_lun) |
2619 | transport_lun_remove_cmd(cmd); | 2682 | transport_lun_remove_cmd(cmd); |
2620 | |||
2621 | if (!aborted || tas) | ||
2622 | ret = target_put_sess_cmd(cmd); | ||
2623 | } | 2683 | } |
2624 | /* | 2684 | if (aborted) |
2625 | * If the task has been internally aborted due to TMR ABORT_TASK | 2685 | cmd->compl = &compl; |
2626 | * or LUN_RESET, target_core_tmr.c is responsible for performing | 2686 | if (!aborted || tas) |
2627 | * the remaining calls to target_put_sess_cmd(), and not the | 2687 | ret = target_put_sess_cmd(cmd); |
2628 | * callers of this function. | ||
2629 | */ | ||
2630 | if (aborted) { | 2688 | if (aborted) { |
2631 | pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag); | 2689 | pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag); |
2632 | wait_for_completion(&cmd->cmd_wait_comp); | 2690 | wait_for_completion(&compl); |
2633 | cmd->se_tfo->release_cmd(cmd); | ||
2634 | ret = 1; | 2691 | ret = 1; |
2635 | } | 2692 | } |
2636 | return ret; | 2693 | return ret; |
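The rewritten transport_generic_free_cmd() above hands the abort path an on-stack completion through cmd->compl instead of the old embedded cmd_wait_comp. The underlying pattern, a waiter that parks an on-stack completion where the final kref release can find it, looks roughly like this generic sketch; struct widget and its release path are illustrative and not part of the target code.

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/kref.h>

struct widget {
	struct kref kref;
	struct completion *compl;	/* set only by a thread that will wait */
};

static void widget_release(struct kref *kref)
{
	struct widget *w = container_of(kref, struct widget, kref);
	struct completion *compl = w->compl;

	/* ... free the widget's resources here ... */
	if (compl)
		complete(compl);	/* wake the waiter after the last put */
}

static void widget_put_and_wait(struct widget *w)
{
	DECLARE_COMPLETION_ONSTACK(compl);

	w->compl = &compl;
	/* Drop our own reference; if someone else still holds one, wait. */
	if (!kref_put(&w->kref, widget_release))
		wait_for_completion(&compl);
}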
@@ -2691,30 +2748,21 @@ static void target_release_cmd_kref(struct kref *kref) | |||
2691 | { | 2748 | { |
2692 | struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); | 2749 | struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); |
2693 | struct se_session *se_sess = se_cmd->se_sess; | 2750 | struct se_session *se_sess = se_cmd->se_sess; |
2751 | struct completion *compl = se_cmd->compl; | ||
2694 | unsigned long flags; | 2752 | unsigned long flags; |
2695 | bool fabric_stop; | ||
2696 | 2753 | ||
2697 | if (se_sess) { | 2754 | if (se_sess) { |
2698 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); | 2755 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); |
2699 | |||
2700 | spin_lock(&se_cmd->t_state_lock); | ||
2701 | fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) && | ||
2702 | (se_cmd->transport_state & CMD_T_ABORTED); | ||
2703 | spin_unlock(&se_cmd->t_state_lock); | ||
2704 | |||
2705 | if (se_cmd->cmd_wait_set || fabric_stop) { | ||
2706 | list_del_init(&se_cmd->se_cmd_list); | ||
2707 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | ||
2708 | target_free_cmd_mem(se_cmd); | ||
2709 | complete(&se_cmd->cmd_wait_comp); | ||
2710 | return; | ||
2711 | } | ||
2712 | list_del_init(&se_cmd->se_cmd_list); | 2756 | list_del_init(&se_cmd->se_cmd_list); |
2757 | if (list_empty(&se_sess->sess_cmd_list)) | ||
2758 | wake_up(&se_sess->cmd_list_wq); | ||
2713 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 2759 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
2714 | } | 2760 | } |
2715 | 2761 | ||
2716 | target_free_cmd_mem(se_cmd); | 2762 | target_free_cmd_mem(se_cmd); |
2717 | se_cmd->se_tfo->release_cmd(se_cmd); | 2763 | se_cmd->se_tfo->release_cmd(se_cmd); |
2764 | if (compl) | ||
2765 | complete(compl); | ||
2718 | } | 2766 | } |
2719 | 2767 | ||
2720 | /** | 2768 | /** |
@@ -2833,78 +2881,41 @@ void target_show_cmd(const char *pfx, struct se_cmd *cmd) | |||
2833 | EXPORT_SYMBOL(target_show_cmd); | 2881 | EXPORT_SYMBOL(target_show_cmd); |
2834 | 2882 | ||
2835 | /** | 2883 | /** |
2836 | * target_sess_cmd_list_set_waiting - Flag all commands in | 2884 | * target_sess_cmd_list_set_waiting - Set sess_tearing_down so no new commands are queued. |
2837 | * sess_cmd_list to complete cmd_wait_comp. Set | ||
2838 | * sess_tearing_down so no more commands are queued. | ||
2839 | * @se_sess: session to flag | 2885 | * @se_sess: session to flag |
2840 | */ | 2886 | */ |
2841 | void target_sess_cmd_list_set_waiting(struct se_session *se_sess) | 2887 | void target_sess_cmd_list_set_waiting(struct se_session *se_sess) |
2842 | { | 2888 | { |
2843 | struct se_cmd *se_cmd, *tmp_cmd; | ||
2844 | unsigned long flags; | 2889 | unsigned long flags; |
2845 | int rc; | ||
2846 | 2890 | ||
2847 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); | 2891 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); |
2848 | if (se_sess->sess_tearing_down) { | ||
2849 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | ||
2850 | return; | ||
2851 | } | ||
2852 | se_sess->sess_tearing_down = 1; | 2892 | se_sess->sess_tearing_down = 1; |
2853 | list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); | ||
2854 | |||
2855 | list_for_each_entry_safe(se_cmd, tmp_cmd, | ||
2856 | &se_sess->sess_wait_list, se_cmd_list) { | ||
2857 | rc = kref_get_unless_zero(&se_cmd->cmd_kref); | ||
2858 | if (rc) { | ||
2859 | se_cmd->cmd_wait_set = 1; | ||
2860 | spin_lock(&se_cmd->t_state_lock); | ||
2861 | se_cmd->transport_state |= CMD_T_FABRIC_STOP; | ||
2862 | spin_unlock(&se_cmd->t_state_lock); | ||
2863 | } else | ||
2864 | list_del_init(&se_cmd->se_cmd_list); | ||
2865 | } | ||
2866 | |||
2867 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 2893 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
2868 | } | 2894 | } |
2869 | EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); | 2895 | EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); |
2870 | 2896 | ||
2871 | /** | 2897 | /** |
2872 | * target_wait_for_sess_cmds - Wait for outstanding descriptors | 2898 | * target_wait_for_sess_cmds - Wait for outstanding commands |
2873 | * @se_sess: session to wait for active I/O | 2899 | * @se_sess: session to wait for active I/O |
2874 | */ | 2900 | */ |
2875 | void target_wait_for_sess_cmds(struct se_session *se_sess) | 2901 | void target_wait_for_sess_cmds(struct se_session *se_sess) |
2876 | { | 2902 | { |
2877 | struct se_cmd *se_cmd, *tmp_cmd; | 2903 | struct se_cmd *cmd; |
2878 | unsigned long flags; | 2904 | int ret; |
2879 | bool tas; | ||
2880 | |||
2881 | list_for_each_entry_safe(se_cmd, tmp_cmd, | ||
2882 | &se_sess->sess_wait_list, se_cmd_list) { | ||
2883 | pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" | ||
2884 | " %d\n", se_cmd, se_cmd->t_state, | ||
2885 | se_cmd->se_tfo->get_cmd_state(se_cmd)); | ||
2886 | |||
2887 | spin_lock_irqsave(&se_cmd->t_state_lock, flags); | ||
2888 | tas = (se_cmd->transport_state & CMD_T_TAS); | ||
2889 | spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); | ||
2890 | |||
2891 | if (!target_put_sess_cmd(se_cmd)) { | ||
2892 | if (tas) | ||
2893 | target_put_sess_cmd(se_cmd); | ||
2894 | } | ||
2895 | |||
2896 | wait_for_completion(&se_cmd->cmd_wait_comp); | ||
2897 | pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" | ||
2898 | " fabric state: %d\n", se_cmd, se_cmd->t_state, | ||
2899 | se_cmd->se_tfo->get_cmd_state(se_cmd)); | ||
2900 | |||
2901 | se_cmd->se_tfo->release_cmd(se_cmd); | ||
2902 | } | ||
2903 | 2905 | ||
2904 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); | 2906 | WARN_ON_ONCE(!se_sess->sess_tearing_down); |
2905 | WARN_ON(!list_empty(&se_sess->sess_cmd_list)); | ||
2906 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | ||
2907 | 2907 | ||
2908 | spin_lock_irq(&se_sess->sess_cmd_lock); | ||
2909 | do { | ||
2910 | ret = wait_event_interruptible_lock_irq_timeout( | ||
2911 | se_sess->cmd_list_wq, | ||
2912 | list_empty(&se_sess->sess_cmd_list), | ||
2913 | se_sess->sess_cmd_lock, 180 * HZ); | ||
2914 | list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list) | ||
2915 | target_show_cmd("session shutdown: still waiting for ", | ||
2916 | cmd); | ||
2917 | } while (ret <= 0); | ||
2918 | spin_unlock_irq(&se_sess->sess_cmd_lock); | ||
2908 | } | 2919 | } |
2909 | EXPORT_SYMBOL(target_wait_for_sess_cmds); | 2920 | EXPORT_SYMBOL(target_wait_for_sess_cmds); |
2910 | 2921 | ||
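Taken together with the simplified target_sess_cmd_list_set_waiting() and the wait loop above, the session-shutdown sequence a fabric driver follows is still the same three calls; a minimal sketch, with my_fabric_close_session() as a hypothetical caller:

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

static void my_fabric_close_session(struct se_session *se_sess)
{
	/* Stop accepting new commands: sets sess_tearing_down under the lock. */
	target_sess_cmd_list_set_waiting(se_sess);

	/* Block until sess_cmd_list drains; stragglers are logged every 180s. */
	target_wait_for_sess_cmds(se_sess);

	/* Deregister from configfs and from the target core. */
	target_remove_session(se_sess);
}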
@@ -3166,12 +3177,23 @@ static const struct sense_info sense_info_table[] = { | |||
3166 | }, | 3177 | }, |
3167 | }; | 3178 | }; |
3168 | 3179 | ||
3169 | static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason) | 3180 | /** |
3181 | * translate_sense_reason - translate a sense reason into T10 key, asc and ascq | ||
3182 | * @cmd: SCSI command in which the resulting sense buffer or SCSI status will | ||
3183 | * be stored. | ||
3184 | * @reason: LIO sense reason code. If this argument has the value | ||
3185 | * TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If | ||
3186 | * dequeuing a unit attention fails due to multiple commands being processed | ||
3187 | * concurrently, set the command status to BUSY. | ||
3190 | */ | ||
3191 | static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason) | ||
3170 | { | 3192 | { |
3171 | const struct sense_info *si; | 3193 | const struct sense_info *si; |
3172 | u8 *buffer = cmd->sense_buffer; | 3194 | u8 *buffer = cmd->sense_buffer; |
3173 | int r = (__force int)reason; | 3195 | int r = (__force int)reason; |
3174 | u8 asc, ascq; | 3196 | u8 key, asc, ascq; |
3175 | bool desc_format = target_sense_desc_format(cmd->se_dev); | 3197 | bool desc_format = target_sense_desc_format(cmd->se_dev); |
3176 | 3198 | ||
3177 | if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key) | 3199 | if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key) |
@@ -3180,9 +3202,13 @@ static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason) | |||
3180 | si = &sense_info_table[(__force int) | 3202 | si = &sense_info_table[(__force int) |
3181 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE]; | 3203 | TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE]; |
3182 | 3204 | ||
3205 | key = si->key; | ||
3183 | if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) { | 3206 | if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) { |
3184 | core_scsi3_ua_for_check_condition(cmd, &asc, &ascq); | 3207 | if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc, |
3185 | WARN_ON_ONCE(asc == 0); | 3208 | &ascq)) { |
3209 | cmd->scsi_status = SAM_STAT_BUSY; | ||
3210 | return; | ||
3211 | } | ||
3186 | } else if (si->asc == 0) { | 3212 | } else if (si->asc == 0) { |
3187 | WARN_ON_ONCE(cmd->scsi_asc == 0); | 3213 | WARN_ON_ONCE(cmd->scsi_asc == 0); |
3188 | asc = cmd->scsi_asc; | 3214 | asc = cmd->scsi_asc; |
@@ -3192,13 +3218,14 @@ static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason) | |||
3192 | ascq = si->ascq; | 3218 | ascq = si->ascq; |
3193 | } | 3219 | } |
3194 | 3220 | ||
3195 | scsi_build_sense_buffer(desc_format, buffer, si->key, asc, ascq); | 3221 | cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; |
3222 | cmd->scsi_status = SAM_STAT_CHECK_CONDITION; | ||
3223 | cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; | ||
3224 | scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq); | ||
3196 | if (si->add_sector_info) | 3225 | if (si->add_sector_info) |
3197 | return scsi_set_sense_information(buffer, | 3226 | WARN_ON_ONCE(scsi_set_sense_information(buffer, |
3198 | cmd->scsi_sense_length, | 3227 | cmd->scsi_sense_length, |
3199 | cmd->bad_sector); | 3228 | cmd->bad_sector) < 0); |
3200 | |||
3201 | return 0; | ||
3202 | } | 3229 | } |
3203 | 3230 | ||
3204 | int | 3231 | int |
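Every key/asc/ascq triple produced by translate_sense_reason() above ends up in scsi_build_sense_buffer(). For reference, a self-contained sketch of that call for a unit-attention case; the 0x29/0x00 ("POWER ON, RESET, OR BUS DEVICE RESET OCCURRED") pair is chosen purely as an illustration, not taken from the patch.

#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

static void fill_example_ua_sense(u8 *buffer, bool desc_format)
{
	/*
	 * desc_format selects descriptor-format sense (true) or the
	 * traditional fixed format (false), mirroring what
	 * target_sense_desc_format() decides in the hunk above.
	 */
	scsi_build_sense_buffer(desc_format, buffer, UNIT_ATTENTION,
				0x29, 0x00);
}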
@@ -3215,16 +3242,8 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd, | |||
3215 | cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; | 3242 | cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; |
3216 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 3243 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
3217 | 3244 | ||
3218 | if (!from_transport) { | 3245 | if (!from_transport) |
3219 | int rc; | 3246 | translate_sense_reason(cmd, reason); |
3220 | |||
3221 | cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; | ||
3222 | cmd->scsi_status = SAM_STAT_CHECK_CONDITION; | ||
3223 | cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; | ||
3224 | rc = translate_sense_reason(cmd, reason); | ||
3225 | if (rc) | ||
3226 | return rc; | ||
3227 | } | ||
3228 | 3247 | ||
3229 | trace_target_cmd_complete(cmd); | 3248 | trace_target_cmd_complete(cmd); |
3230 | return cmd->se_tfo->queue_status(cmd); | 3249 | return cmd->se_tfo->queue_status(cmd); |
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c index be25eb807a5f..c8ac242ce888 100644 --- a/drivers/target/target_core_ua.c +++ b/drivers/target/target_core_ua.c | |||
@@ -55,7 +55,7 @@ target_scsi3_ua_check(struct se_cmd *cmd) | |||
55 | rcu_read_unlock(); | 55 | rcu_read_unlock(); |
56 | return 0; | 56 | return 0; |
57 | } | 57 | } |
58 | if (!atomic_read(&deve->ua_count)) { | 58 | if (list_empty_careful(&deve->ua_list)) { |
59 | rcu_read_unlock(); | 59 | rcu_read_unlock(); |
60 | return 0; | 60 | return 0; |
61 | } | 61 | } |
@@ -154,7 +154,6 @@ int core_scsi3_ua_allocate( | |||
154 | &deve->ua_list); | 154 | &deve->ua_list); |
155 | spin_unlock(&deve->ua_lock); | 155 | spin_unlock(&deve->ua_lock); |
156 | 156 | ||
157 | atomic_inc_mb(&deve->ua_count); | ||
158 | return 0; | 157 | return 0; |
159 | } | 158 | } |
160 | list_add_tail(&ua->ua_nacl_list, &deve->ua_list); | 159 | list_add_tail(&ua->ua_nacl_list, &deve->ua_list); |
@@ -164,7 +163,6 @@ int core_scsi3_ua_allocate( | |||
164 | " 0x%02x, ASCQ: 0x%02x\n", deve->mapped_lun, | 163 | " 0x%02x, ASCQ: 0x%02x\n", deve->mapped_lun, |
165 | asc, ascq); | 164 | asc, ascq); |
166 | 165 | ||
167 | atomic_inc_mb(&deve->ua_count); | ||
168 | return 0; | 166 | return 0; |
169 | } | 167 | } |
170 | 168 | ||
@@ -196,16 +194,17 @@ void core_scsi3_ua_release_all( | |||
196 | list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) { | 194 | list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) { |
197 | list_del(&ua->ua_nacl_list); | 195 | list_del(&ua->ua_nacl_list); |
198 | kmem_cache_free(se_ua_cache, ua); | 196 | kmem_cache_free(se_ua_cache, ua); |
199 | |||
200 | atomic_dec_mb(&deve->ua_count); | ||
201 | } | 197 | } |
202 | spin_unlock(&deve->ua_lock); | 198 | spin_unlock(&deve->ua_lock); |
203 | } | 199 | } |
204 | 200 | ||
205 | void core_scsi3_ua_for_check_condition( | 201 | /* |
206 | struct se_cmd *cmd, | 202 | * Dequeue a unit attention from the unit attention list. This function |
207 | u8 *asc, | 203 | * returns true if the dequeuing succeeded and if *@key, *@asc and *@ascq have |
208 | u8 *ascq) | 204 | * been set. |
205 | */ | ||
206 | bool core_scsi3_ua_for_check_condition(struct se_cmd *cmd, u8 *key, u8 *asc, | ||
207 | u8 *ascq) | ||
209 | { | 208 | { |
210 | struct se_device *dev = cmd->se_dev; | 209 | struct se_device *dev = cmd->se_dev; |
211 | struct se_dev_entry *deve; | 210 | struct se_dev_entry *deve; |
@@ -214,23 +213,23 @@ void core_scsi3_ua_for_check_condition( | |||
214 | struct se_ua *ua = NULL, *ua_p; | 213 | struct se_ua *ua = NULL, *ua_p; |
215 | int head = 1; | 214 | int head = 1; |
216 | 215 | ||
217 | if (!sess) | 216 | if (WARN_ON_ONCE(!sess)) |
218 | return; | 217 | return false; |
219 | 218 | ||
220 | nacl = sess->se_node_acl; | 219 | nacl = sess->se_node_acl; |
221 | if (!nacl) | 220 | if (WARN_ON_ONCE(!nacl)) |
222 | return; | 221 | return false; |
223 | 222 | ||
224 | rcu_read_lock(); | 223 | rcu_read_lock(); |
225 | deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun); | 224 | deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun); |
226 | if (!deve) { | 225 | if (!deve) { |
227 | rcu_read_unlock(); | 226 | rcu_read_unlock(); |
228 | return; | 227 | *key = ILLEGAL_REQUEST; |
229 | } | 228 | *asc = 0x25; /* LOGICAL UNIT NOT SUPPORTED */ |
230 | if (!atomic_read(&deve->ua_count)) { | 229 | *ascq = 0; |
231 | rcu_read_unlock(); | 230 | return true; |
232 | return; | ||
233 | } | 231 | } |
232 | *key = UNIT_ATTENTION; | ||
234 | /* | 233 | /* |
235 | * The highest priority Unit Attentions are placed at the head of the | 234 | * The highest priority Unit Attentions are placed at the head of the |
236 | * struct se_dev_entry->ua_list, and will be returned in CHECK_CONDITION + | 235 | * struct se_dev_entry->ua_list, and will be returned in CHECK_CONDITION + |
@@ -260,8 +259,6 @@ void core_scsi3_ua_for_check_condition( | |||
260 | } | 259 | } |
261 | list_del(&ua->ua_nacl_list); | 260 | list_del(&ua->ua_nacl_list); |
262 | kmem_cache_free(se_ua_cache, ua); | 261 | kmem_cache_free(se_ua_cache, ua); |
263 | |||
264 | atomic_dec_mb(&deve->ua_count); | ||
265 | } | 262 | } |
266 | spin_unlock(&deve->ua_lock); | 263 | spin_unlock(&deve->ua_lock); |
267 | rcu_read_unlock(); | 264 | rcu_read_unlock(); |
@@ -273,6 +270,8 @@ void core_scsi3_ua_for_check_condition( | |||
273 | (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" : | 270 | (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" : |
274 | "Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl, | 271 | "Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl, |
275 | cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq); | 272 | cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq); |
273 | |||
274 | return head == 0; | ||
276 | } | 275 | } |
277 | 276 | ||
278 | int core_scsi3_ua_clear_for_request_sense( | 277 | int core_scsi3_ua_clear_for_request_sense( |
@@ -299,7 +298,7 @@ int core_scsi3_ua_clear_for_request_sense( | |||
299 | rcu_read_unlock(); | 298 | rcu_read_unlock(); |
300 | return -EINVAL; | 299 | return -EINVAL; |
301 | } | 300 | } |
302 | if (!atomic_read(&deve->ua_count)) { | 301 | if (list_empty_careful(&deve->ua_list)) { |
303 | rcu_read_unlock(); | 302 | rcu_read_unlock(); |
304 | return -EPERM; | 303 | return -EPERM; |
305 | } | 304 | } |
@@ -322,8 +321,6 @@ int core_scsi3_ua_clear_for_request_sense( | |||
322 | } | 321 | } |
323 | list_del(&ua->ua_nacl_list); | 322 | list_del(&ua->ua_nacl_list); |
324 | kmem_cache_free(se_ua_cache, ua); | 323 | kmem_cache_free(se_ua_cache, ua); |
325 | |||
326 | atomic_dec_mb(&deve->ua_count); | ||
327 | } | 324 | } |
328 | spin_unlock(&deve->ua_lock); | 325 | spin_unlock(&deve->ua_lock); |
329 | rcu_read_unlock(); | 326 | rcu_read_unlock(); |
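The atomic ua_count is dropped above in favour of checking the list itself; list_empty_careful() is the variant that is safe for a lockless reader racing with a single concurrent add or delete done under deve->ua_lock. A tiny sketch of that pattern, with struct foo standing in for the dev entry:

#include <linux/list.h>
#include <linux/spinlock.h>

struct foo {
	spinlock_t lock;
	struct list_head items;
};

/* Writer side: additions and removals are serialized by foo->lock. */
static void foo_add(struct foo *f, struct list_head *item)
{
	spin_lock(&f->lock);
	list_add_tail(item, &f->items);
	spin_unlock(&f->lock);
}

/*
 * Reader side: a cheap emptiness test without taking the lock.
 * list_empty_careful() tolerates one concurrent add/del, which is why it
 * can replace the separate atomic counter removed in the hunks above.
 */
static bool foo_probably_empty(struct foo *f)
{
	return list_empty_careful(&f->items);
}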
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h index b0f4205a96cd..76487c9be090 100644 --- a/drivers/target/target_core_ua.h +++ b/drivers/target/target_core_ua.h | |||
@@ -37,7 +37,8 @@ extern sense_reason_t target_scsi3_ua_check(struct se_cmd *); | |||
37 | extern int core_scsi3_ua_allocate(struct se_dev_entry *, u8, u8); | 37 | extern int core_scsi3_ua_allocate(struct se_dev_entry *, u8, u8); |
38 | extern void target_ua_allocate_lun(struct se_node_acl *, u32, u8, u8); | 38 | extern void target_ua_allocate_lun(struct se_node_acl *, u32, u8, u8); |
39 | extern void core_scsi3_ua_release_all(struct se_dev_entry *); | 39 | extern void core_scsi3_ua_release_all(struct se_dev_entry *); |
40 | extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *); | 40 | extern bool core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *, |
41 | u8 *); | ||
41 | extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *, | 42 | extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *, |
42 | u8 *, u8 *); | 43 | u8 *, u8 *); |
43 | 44 | ||
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index d8dc3d22051f..9cd404acdb82 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c | |||
@@ -83,14 +83,10 @@ | |||
83 | #define DATA_BLOCK_SIZE PAGE_SIZE | 83 | #define DATA_BLOCK_SIZE PAGE_SIZE |
84 | #define DATA_BLOCK_SHIFT PAGE_SHIFT | 84 | #define DATA_BLOCK_SHIFT PAGE_SHIFT |
85 | #define DATA_BLOCK_BITS_DEF (256 * 1024) | 85 | #define DATA_BLOCK_BITS_DEF (256 * 1024) |
86 | #define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE) | ||
87 | 86 | ||
88 | #define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT)) | 87 | #define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT)) |
89 | #define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT)) | 88 | #define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT)) |
90 | 89 | ||
91 | /* The total size of the ring is 8M + 256K * PAGE_SIZE */ | ||
92 | #define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE) | ||
93 | |||
94 | /* | 90 | /* |
95 | * Default number of global data blocks(512K * PAGE_SIZE) | 91 | * Default number of global data blocks(512K * PAGE_SIZE) |
96 | * when the unmap thread will be started. | 92 | * when the unmap thread will be started. |
@@ -98,6 +94,7 @@ | |||
98 | #define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024) | 94 | #define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024) |
99 | 95 | ||
100 | static u8 tcmu_kern_cmd_reply_supported; | 96 | static u8 tcmu_kern_cmd_reply_supported; |
97 | static u8 tcmu_netlink_blocked; | ||
101 | 98 | ||
102 | static struct device *tcmu_root_device; | 99 | static struct device *tcmu_root_device; |
103 | 100 | ||
@@ -107,9 +104,16 @@ struct tcmu_hba { | |||
107 | 104 | ||
108 | #define TCMU_CONFIG_LEN 256 | 105 | #define TCMU_CONFIG_LEN 256 |
109 | 106 | ||
107 | static DEFINE_MUTEX(tcmu_nl_cmd_mutex); | ||
108 | static LIST_HEAD(tcmu_nl_cmd_list); | ||
109 | |||
110 | struct tcmu_dev; | ||
111 | |||
110 | struct tcmu_nl_cmd { | 112 | struct tcmu_nl_cmd { |
111 | /* wake up thread waiting for reply */ | 113 | /* wake up thread waiting for reply */ |
112 | struct completion complete; | 114 | struct completion complete; |
115 | struct list_head nl_list; | ||
116 | struct tcmu_dev *udev; | ||
113 | int cmd; | 117 | int cmd; |
114 | int status; | 118 | int status; |
115 | }; | 119 | }; |
@@ -133,7 +137,7 @@ struct tcmu_dev { | |||
133 | struct inode *inode; | 137 | struct inode *inode; |
134 | 138 | ||
135 | struct tcmu_mailbox *mb_addr; | 139 | struct tcmu_mailbox *mb_addr; |
136 | size_t dev_size; | 140 | uint64_t dev_size; |
137 | u32 cmdr_size; | 141 | u32 cmdr_size; |
138 | u32 cmdr_last_cleaned; | 142 | u32 cmdr_last_cleaned; |
139 | /* Offset of data area from start of mb */ | 143 | /* Offset of data area from start of mb */ |
@@ -161,10 +165,7 @@ struct tcmu_dev { | |||
161 | 165 | ||
162 | struct list_head timedout_entry; | 166 | struct list_head timedout_entry; |
163 | 167 | ||
164 | spinlock_t nl_cmd_lock; | ||
165 | struct tcmu_nl_cmd curr_nl_cmd; | 168 | struct tcmu_nl_cmd curr_nl_cmd; |
166 | /* wake up threads waiting on curr_nl_cmd */ | ||
167 | wait_queue_head_t nl_cmd_wq; | ||
168 | 169 | ||
169 | char dev_config[TCMU_CONFIG_LEN]; | 170 | char dev_config[TCMU_CONFIG_LEN]; |
170 | 171 | ||
@@ -255,6 +256,92 @@ MODULE_PARM_DESC(global_max_data_area_mb, | |||
255 | "Max MBs allowed to be allocated to all the tcmu device's " | 256 | "Max MBs allowed to be allocated to all the tcmu device's " |
256 | "data areas."); | 257 | "data areas."); |
257 | 258 | ||
259 | static int tcmu_get_block_netlink(char *buffer, | ||
260 | const struct kernel_param *kp) | ||
261 | { | ||
262 | return sprintf(buffer, "%s\n", tcmu_netlink_blocked ? | ||
263 | "blocked" : "unblocked"); | ||
264 | } | ||
265 | |||
266 | static int tcmu_set_block_netlink(const char *str, | ||
267 | const struct kernel_param *kp) | ||
268 | { | ||
269 | int ret; | ||
270 | u8 val; | ||
271 | |||
272 | ret = kstrtou8(str, 0, &val); | ||
273 | if (ret < 0) | ||
274 | return ret; | ||
275 | |||
276 | if (val > 1) { | ||
277 | pr_err("Invalid block netlink value %u\n", val); | ||
278 | return -EINVAL; | ||
279 | } | ||
280 | |||
281 | tcmu_netlink_blocked = val; | ||
282 | return 0; | ||
283 | } | ||
284 | |||
285 | static const struct kernel_param_ops tcmu_block_netlink_op = { | ||
286 | .set = tcmu_set_block_netlink, | ||
287 | .get = tcmu_get_block_netlink, | ||
288 | }; | ||
289 | |||
290 | module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO); | ||
291 | MODULE_PARM_DESC(block_netlink, "Block new netlink commands."); | ||
292 | |||
293 | static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd) | ||
294 | { | ||
295 | struct tcmu_dev *udev = nl_cmd->udev; | ||
296 | |||
297 | if (!tcmu_netlink_blocked) { | ||
298 | pr_err("Could not reset device's netlink interface. Netlink is not blocked.\n"); | ||
299 | return -EBUSY; | ||
300 | } | ||
301 | |||
302 | if (nl_cmd->cmd != TCMU_CMD_UNSPEC) { | ||
303 | pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name); | ||
304 | nl_cmd->status = -EINTR; | ||
305 | list_del(&nl_cmd->nl_list); | ||
306 | complete(&nl_cmd->complete); | ||
307 | } | ||
308 | return 0; | ||
309 | } | ||
310 | |||
311 | static int tcmu_set_reset_netlink(const char *str, | ||
312 | const struct kernel_param *kp) | ||
313 | { | ||
314 | struct tcmu_nl_cmd *nl_cmd, *tmp_cmd; | ||
315 | int ret; | ||
316 | u8 val; | ||
317 | |||
318 | ret = kstrtou8(str, 0, &val); | ||
319 | if (ret < 0) | ||
320 | return ret; | ||
321 | |||
322 | if (val != 1) { | ||
323 | pr_err("Invalid reset netlink value %u\n", val); | ||
324 | return -EINVAL; | ||
325 | } | ||
326 | |||
327 | mutex_lock(&tcmu_nl_cmd_mutex); | ||
328 | list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) { | ||
329 | ret = tcmu_fail_netlink_cmd(nl_cmd); | ||
330 | if (ret) | ||
331 | break; | ||
332 | } | ||
333 | mutex_unlock(&tcmu_nl_cmd_mutex); | ||
334 | |||
335 | return ret; | ||
336 | } | ||
337 | |||
338 | static const struct kernel_param_ops tcmu_reset_netlink_op = { | ||
339 | .set = tcmu_set_reset_netlink, | ||
340 | }; | ||
341 | |||
342 | module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR); | ||
343 | MODULE_PARM_DESC(reset_netlink, "Reset netlink commands."); | ||
344 | |||
258 | /* multicast group */ | 345 | /* multicast group */ |
259 | enum tcmu_multicast_groups { | 346 | enum tcmu_multicast_groups { |
260 | TCMU_MCGRP_CONFIG, | 347 | TCMU_MCGRP_CONFIG, |
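The new block_netlink and reset_netlink parameters above are wired up through module_param_cb() rather than plain module_param(), so every write is validated in a setter. A stripped-down sketch of the same pattern for a made-up flag parameter (example_flag and its handlers are hypothetical):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static u8 example_flag;

static int example_get(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%s\n", example_flag ? "on" : "off");
}

static int example_set(const char *str, const struct kernel_param *kp)
{
	u8 val;
	int ret;

	ret = kstrtou8(str, 0, &val);
	if (ret < 0)
		return ret;
	if (val > 1)
		return -EINVAL;	/* only 0 or 1 accepted */

	example_flag = val;
	return 0;
}

static const struct kernel_param_ops example_param_ops = {
	.set = example_set,
	.get = example_get,
};

module_param_cb(example_flag, &example_param_ops, NULL, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(example_flag, "Example validated module parameter.");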
@@ -274,48 +361,50 @@ static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = { | |||
274 | 361 | ||
275 | static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd) | 362 | static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd) |
276 | { | 363 | { |
277 | struct se_device *dev; | 364 | struct tcmu_dev *udev = NULL; |
278 | struct tcmu_dev *udev; | ||
279 | struct tcmu_nl_cmd *nl_cmd; | 365 | struct tcmu_nl_cmd *nl_cmd; |
280 | int dev_id, rc, ret = 0; | 366 | int dev_id, rc, ret = 0; |
281 | bool is_removed = (completed_cmd == TCMU_CMD_REMOVED_DEVICE); | ||
282 | 367 | ||
283 | if (!info->attrs[TCMU_ATTR_CMD_STATUS] || | 368 | if (!info->attrs[TCMU_ATTR_CMD_STATUS] || |
284 | !info->attrs[TCMU_ATTR_DEVICE_ID]) { | 369 | !info->attrs[TCMU_ATTR_DEVICE_ID]) { |
285 | printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n"); | 370 | printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n"); |
286 | return -EINVAL; | 371 | return -EINVAL; |
287 | } | 372 | } |
288 | 373 | ||
289 | dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]); | 374 | dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]); |
290 | rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]); | 375 | rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]); |
291 | 376 | ||
292 | dev = target_find_device(dev_id, !is_removed); | 377 | mutex_lock(&tcmu_nl_cmd_mutex); |
293 | if (!dev) { | 378 | list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) { |
294 | printk(KERN_ERR "tcmu nl cmd %u/%u completion could not find device with dev id %u.\n", | 379 | if (nl_cmd->udev->se_dev.dev_index == dev_id) { |
295 | completed_cmd, rc, dev_id); | 380 | udev = nl_cmd->udev; |
296 | return -ENODEV; | 381 | break; |
382 | } | ||
297 | } | 383 | } |
298 | udev = TCMU_DEV(dev); | ||
299 | 384 | ||
300 | spin_lock(&udev->nl_cmd_lock); | 385 | if (!udev) { |
301 | nl_cmd = &udev->curr_nl_cmd; | 386 | pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n", |
387 | completed_cmd, rc, dev_id); | ||
388 | ret = -ENODEV; | ||
389 | goto unlock; | ||
390 | } | ||
391 | list_del(&nl_cmd->nl_list); | ||
302 | 392 | ||
303 | pr_debug("genl cmd done got id %d curr %d done %d rc %d\n", dev_id, | 393 | pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n", |
304 | nl_cmd->cmd, completed_cmd, rc); | 394 | udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc, |
395 | nl_cmd->status); | ||
305 | 396 | ||
306 | if (nl_cmd->cmd != completed_cmd) { | 397 | if (nl_cmd->cmd != completed_cmd) { |
307 | printk(KERN_ERR "Mismatched commands (Expecting reply for %d. Current %d).\n", | 398 | pr_err("Mismatched commands on %s (Expecting reply for %d. Current %d).\n", |
308 | completed_cmd, nl_cmd->cmd); | 399 | udev->name, completed_cmd, nl_cmd->cmd); |
309 | ret = -EINVAL; | 400 | ret = -EINVAL; |
310 | } else { | 401 | goto unlock; |
311 | nl_cmd->status = rc; | ||
312 | } | 402 | } |
313 | 403 | ||
314 | spin_unlock(&udev->nl_cmd_lock); | 404 | nl_cmd->status = rc; |
315 | if (!is_removed) | 405 | complete(&nl_cmd->complete); |
316 | target_undepend_item(&dev->dev_group.cg_item); | 406 | unlock: |
317 | if (!ret) | 407 | mutex_unlock(&tcmu_nl_cmd_mutex); |
318 | complete(&nl_cmd->complete); | ||
319 | return ret; | 408 | return ret; |
320 | } | 409 | } |
321 | 410 | ||
@@ -982,7 +1071,6 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err) | |||
982 | &udev->cmd_timer); | 1071 | &udev->cmd_timer); |
983 | if (ret) { | 1072 | if (ret) { |
984 | tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); | 1073 | tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); |
985 | mutex_unlock(&udev->cmdr_lock); | ||
986 | 1074 | ||
987 | *scsi_err = TCM_OUT_OF_RESOURCES; | 1075 | *scsi_err = TCM_OUT_OF_RESOURCES; |
988 | return -1; | 1076 | return -1; |
@@ -1282,6 +1370,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) | |||
1282 | udev->max_blocks = DATA_BLOCK_BITS_DEF; | 1370 | udev->max_blocks = DATA_BLOCK_BITS_DEF; |
1283 | mutex_init(&udev->cmdr_lock); | 1371 | mutex_init(&udev->cmdr_lock); |
1284 | 1372 | ||
1373 | INIT_LIST_HEAD(&udev->node); | ||
1285 | INIT_LIST_HEAD(&udev->timedout_entry); | 1374 | INIT_LIST_HEAD(&udev->timedout_entry); |
1286 | INIT_LIST_HEAD(&udev->cmdr_queue); | 1375 | INIT_LIST_HEAD(&udev->cmdr_queue); |
1287 | idr_init(&udev->commands); | 1376 | idr_init(&udev->commands); |
@@ -1289,9 +1378,6 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) | |||
1289 | timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); | 1378 | timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); |
1290 | timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0); | 1379 | timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0); |
1291 | 1380 | ||
1292 | init_waitqueue_head(&udev->nl_cmd_wq); | ||
1293 | spin_lock_init(&udev->nl_cmd_lock); | ||
1294 | |||
1295 | INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL); | 1381 | INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL); |
1296 | 1382 | ||
1297 | return &udev->se_dev; | 1383 | return &udev->se_dev; |
@@ -1565,38 +1651,48 @@ static int tcmu_release(struct uio_info *info, struct inode *inode) | |||
1565 | return 0; | 1651 | return 0; |
1566 | } | 1652 | } |
1567 | 1653 | ||
1568 | static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd) | 1654 | static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd) |
1569 | { | 1655 | { |
1570 | struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; | 1656 | struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; |
1571 | 1657 | ||
1572 | if (!tcmu_kern_cmd_reply_supported) | 1658 | if (!tcmu_kern_cmd_reply_supported) |
1573 | return; | 1659 | return 0; |
1574 | 1660 | ||
1575 | if (udev->nl_reply_supported <= 0) | 1661 | if (udev->nl_reply_supported <= 0) |
1576 | return; | 1662 | return 0; |
1663 | |||
1664 | mutex_lock(&tcmu_nl_cmd_mutex); | ||
1577 | 1665 | ||
1578 | relock: | 1666 | if (tcmu_netlink_blocked) { |
1579 | spin_lock(&udev->nl_cmd_lock); | 1667 | mutex_unlock(&tcmu_nl_cmd_mutex); |
1668 | pr_warn("Failing nl cmd %d on %s. Interface is blocked.\n", cmd, | ||
1669 | udev->name); | ||
1670 | return -EAGAIN; | ||
1671 | } | ||
1580 | 1672 | ||
1581 | if (nl_cmd->cmd != TCMU_CMD_UNSPEC) { | 1673 | if (nl_cmd->cmd != TCMU_CMD_UNSPEC) { |
1582 | spin_unlock(&udev->nl_cmd_lock); | 1674 | mutex_unlock(&tcmu_nl_cmd_mutex); |
1583 | pr_debug("sleeping for open nl cmd\n"); | 1675 | pr_warn("netlink cmd %d already executing on %s\n", |
1584 | wait_event(udev->nl_cmd_wq, (nl_cmd->cmd == TCMU_CMD_UNSPEC)); | 1676 | nl_cmd->cmd, udev->name); |
1585 | goto relock; | 1677 | return -EBUSY; |
1586 | } | 1678 | } |
1587 | 1679 | ||
1588 | memset(nl_cmd, 0, sizeof(*nl_cmd)); | 1680 | memset(nl_cmd, 0, sizeof(*nl_cmd)); |
1589 | nl_cmd->cmd = cmd; | 1681 | nl_cmd->cmd = cmd; |
1682 | nl_cmd->udev = udev; | ||
1590 | init_completion(&nl_cmd->complete); | 1683 | init_completion(&nl_cmd->complete); |
1684 | INIT_LIST_HEAD(&nl_cmd->nl_list); | ||
1685 | |||
1686 | list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list); | ||
1591 | 1687 | ||
1592 | spin_unlock(&udev->nl_cmd_lock); | 1688 | mutex_unlock(&tcmu_nl_cmd_mutex); |
1689 | return 0; | ||
1593 | } | 1690 | } |
1594 | 1691 | ||
1595 | static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev) | 1692 | static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev) |
1596 | { | 1693 | { |
1597 | struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; | 1694 | struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; |
1598 | int ret; | 1695 | int ret; |
1599 | DEFINE_WAIT(__wait); | ||
1600 | 1696 | ||
1601 | if (!tcmu_kern_cmd_reply_supported) | 1697 | if (!tcmu_kern_cmd_reply_supported) |
1602 | return 0; | 1698 | return 0; |
@@ -1607,13 +1703,10 @@ static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev) | |||
1607 | pr_debug("sleeping for nl reply\n"); | 1703 | pr_debug("sleeping for nl reply\n"); |
1608 | wait_for_completion(&nl_cmd->complete); | 1704 | wait_for_completion(&nl_cmd->complete); |
1609 | 1705 | ||
1610 | spin_lock(&udev->nl_cmd_lock); | 1706 | mutex_lock(&tcmu_nl_cmd_mutex); |
1611 | nl_cmd->cmd = TCMU_CMD_UNSPEC; | 1707 | nl_cmd->cmd = TCMU_CMD_UNSPEC; |
1612 | ret = nl_cmd->status; | 1708 | ret = nl_cmd->status; |
1613 | nl_cmd->status = 0; | 1709 | mutex_unlock(&tcmu_nl_cmd_mutex); |
1614 | spin_unlock(&udev->nl_cmd_lock); | ||
1615 | |||
1616 | wake_up_all(&udev->nl_cmd_wq); | ||
1617 | 1710 | ||
1618 | return ret; | 1711 | return ret; |
1619 | } | 1712 | } |
@@ -1657,19 +1750,21 @@ free_skb: | |||
1657 | 1750 | ||
1658 | static int tcmu_netlink_event_send(struct tcmu_dev *udev, | 1751 | static int tcmu_netlink_event_send(struct tcmu_dev *udev, |
1659 | enum tcmu_genl_cmd cmd, | 1752 | enum tcmu_genl_cmd cmd, |
1660 | struct sk_buff **buf, void **hdr) | 1753 | struct sk_buff *skb, void *msg_header) |
1661 | { | 1754 | { |
1662 | int ret = 0; | 1755 | int ret; |
1663 | struct sk_buff *skb = *buf; | ||
1664 | void *msg_header = *hdr; | ||
1665 | 1756 | ||
1666 | genlmsg_end(skb, msg_header); | 1757 | genlmsg_end(skb, msg_header); |
1667 | 1758 | ||
1668 | tcmu_init_genl_cmd_reply(udev, cmd); | 1759 | ret = tcmu_init_genl_cmd_reply(udev, cmd); |
1760 | if (ret) { | ||
1761 | nlmsg_free(skb); | ||
1762 | return ret; | ||
1763 | } | ||
1669 | 1764 | ||
1670 | ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0, | 1765 | ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0, |
1671 | TCMU_MCGRP_CONFIG, GFP_KERNEL); | 1766 | TCMU_MCGRP_CONFIG, GFP_KERNEL); |
1672 | /* We don't care if no one is listening */ | 1767 | /* We don't care if no one is listening */ |
1673 | if (ret == -ESRCH) | 1768 | if (ret == -ESRCH) |
1674 | ret = 0; | 1769 | ret = 0; |
1675 | if (!ret) | 1770 | if (!ret) |
@@ -1687,9 +1782,8 @@ static int tcmu_send_dev_add_event(struct tcmu_dev *udev) | |||
1687 | &msg_header); | 1782 | &msg_header); |
1688 | if (ret < 0) | 1783 | if (ret < 0) |
1689 | return ret; | 1784 | return ret; |
1690 | return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, &skb, | 1785 | return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb, |
1691 | &msg_header); | 1786 | msg_header); |
1692 | |||
1693 | } | 1787 | } |
1694 | 1788 | ||
1695 | static int tcmu_send_dev_remove_event(struct tcmu_dev *udev) | 1789 | static int tcmu_send_dev_remove_event(struct tcmu_dev *udev) |
@@ -1703,7 +1797,7 @@ static int tcmu_send_dev_remove_event(struct tcmu_dev *udev) | |||
1703 | if (ret < 0) | 1797 | if (ret < 0) |
1704 | return ret; | 1798 | return ret; |
1705 | return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE, | 1799 | return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE, |
1706 | &skb, &msg_header); | 1800 | skb, msg_header); |
1707 | } | 1801 | } |
1708 | 1802 | ||
1709 | static int tcmu_update_uio_info(struct tcmu_dev *udev) | 1803 | static int tcmu_update_uio_info(struct tcmu_dev *udev) |
@@ -1745,9 +1839,11 @@ static int tcmu_configure_device(struct se_device *dev) | |||
1745 | 1839 | ||
1746 | info = &udev->uio_info; | 1840 | info = &udev->uio_info; |
1747 | 1841 | ||
1842 | mutex_lock(&udev->cmdr_lock); | ||
1748 | udev->data_bitmap = kcalloc(BITS_TO_LONGS(udev->max_blocks), | 1843 | udev->data_bitmap = kcalloc(BITS_TO_LONGS(udev->max_blocks), |
1749 | sizeof(unsigned long), | 1844 | sizeof(unsigned long), |
1750 | GFP_KERNEL); | 1845 | GFP_KERNEL); |
1846 | mutex_unlock(&udev->cmdr_lock); | ||
1751 | if (!udev->data_bitmap) { | 1847 | if (!udev->data_bitmap) { |
1752 | ret = -ENOMEM; | 1848 | ret = -ENOMEM; |
1753 | goto err_bitmap_alloc; | 1849 | goto err_bitmap_alloc; |
@@ -1842,11 +1938,6 @@ err_bitmap_alloc: | |||
1842 | return ret; | 1938 | return ret; |
1843 | } | 1939 | } |
1844 | 1940 | ||
1845 | static bool tcmu_dev_configured(struct tcmu_dev *udev) | ||
1846 | { | ||
1847 | return udev->uio_info.uio_dev ? true : false; | ||
1848 | } | ||
1849 | |||
1850 | static void tcmu_free_device(struct se_device *dev) | 1941 | static void tcmu_free_device(struct se_device *dev) |
1851 | { | 1942 | { |
1852 | struct tcmu_dev *udev = TCMU_DEV(dev); | 1943 | struct tcmu_dev *udev = TCMU_DEV(dev); |
@@ -1953,45 +2044,76 @@ enum { | |||
1953 | 2044 | ||
1954 | static match_table_t tokens = { | 2045 | static match_table_t tokens = { |
1955 | {Opt_dev_config, "dev_config=%s"}, | 2046 | {Opt_dev_config, "dev_config=%s"}, |
1956 | {Opt_dev_size, "dev_size=%u"}, | 2047 | {Opt_dev_size, "dev_size=%s"}, |
1957 | {Opt_hw_block_size, "hw_block_size=%u"}, | 2048 | {Opt_hw_block_size, "hw_block_size=%d"}, |
1958 | {Opt_hw_max_sectors, "hw_max_sectors=%u"}, | 2049 | {Opt_hw_max_sectors, "hw_max_sectors=%d"}, |
1959 | {Opt_nl_reply_supported, "nl_reply_supported=%d"}, | 2050 | {Opt_nl_reply_supported, "nl_reply_supported=%d"}, |
1960 | {Opt_max_data_area_mb, "max_data_area_mb=%u"}, | 2051 | {Opt_max_data_area_mb, "max_data_area_mb=%d"}, |
1961 | {Opt_err, NULL} | 2052 | {Opt_err, NULL} |
1962 | }; | 2053 | }; |
1963 | 2054 | ||
1964 | static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib) | 2055 | static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib) |
1965 | { | 2056 | { |
1966 | unsigned long tmp_ul; | 2057 | int val, ret; |
1967 | char *arg_p; | ||
1968 | int ret; | ||
1969 | 2058 | ||
1970 | arg_p = match_strdup(arg); | 2059 | ret = match_int(arg, &val); |
1971 | if (!arg_p) | ||
1972 | return -ENOMEM; | ||
1973 | |||
1974 | ret = kstrtoul(arg_p, 0, &tmp_ul); | ||
1975 | kfree(arg_p); | ||
1976 | if (ret < 0) { | 2060 | if (ret < 0) { |
1977 | pr_err("kstrtoul() failed for dev attrib\n"); | 2061 | pr_err("match_int() failed for dev attrib. Error %d.\n", |
2062 | ret); | ||
1978 | return ret; | 2063 | return ret; |
1979 | } | 2064 | } |
1980 | if (!tmp_ul) { | 2065 | |
1981 | pr_err("dev attrib must be nonzero\n"); | 2066 | if (val <= 0) { |
2067 | pr_err("Invalid dev attrib value %d. Must be greater than zero.\n", | ||
2068 | val); | ||
1982 | return -EINVAL; | 2069 | return -EINVAL; |
1983 | } | 2070 | } |
1984 | *dev_attrib = tmp_ul; | 2071 | *dev_attrib = val; |
1985 | return 0; | 2072 | return 0; |
1986 | } | 2073 | } |
1987 | 2074 | ||
2075 | static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg) | ||
2076 | { | ||
2077 | int val, ret; | ||
2078 | |||
2079 | ret = match_int(arg, &val); | ||
2080 | if (ret < 0) { | ||
2081 | pr_err("match_int() failed for max_data_area_mb=. Error %d.\n", | ||
2082 | ret); | ||
2083 | return ret; | ||
2084 | } | ||
2085 | |||
2086 | if (val <= 0) { | ||
2087 | pr_err("Invalid max_data_area %d.\n", val); | ||
2088 | return -EINVAL; | ||
2089 | } | ||
2090 | |||
2091 | mutex_lock(&udev->cmdr_lock); | ||
2092 | if (udev->data_bitmap) { | ||
2093 | pr_err("Cannot set max_data_area_mb after it has been enabled.\n"); | ||
2094 | ret = -EINVAL; | ||
2095 | goto unlock; | ||
2096 | } | ||
2097 | |||
2098 | udev->max_blocks = TCMU_MBS_TO_BLOCKS(val); | ||
2099 | if (udev->max_blocks > tcmu_global_max_blocks) { | ||
2100 | pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n", | ||
2101 | val, TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks)); | ||
2102 | udev->max_blocks = tcmu_global_max_blocks; | ||
2103 | } | ||
2104 | |||
2105 | unlock: | ||
2106 | mutex_unlock(&udev->cmdr_lock); | ||
2107 | return ret; | ||
2108 | } | ||
2109 | |||
1988 | static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, | 2110 | static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, |
1989 | const char *page, ssize_t count) | 2111 | const char *page, ssize_t count) |
1990 | { | 2112 | { |
1991 | struct tcmu_dev *udev = TCMU_DEV(dev); | 2113 | struct tcmu_dev *udev = TCMU_DEV(dev); |
1992 | char *orig, *ptr, *opts, *arg_p; | 2114 | char *orig, *ptr, *opts; |
1993 | substring_t args[MAX_OPT_ARGS]; | 2115 | substring_t args[MAX_OPT_ARGS]; |
1994 | int ret = 0, token, tmpval; | 2116 | int ret = 0, token; |
1995 | 2117 | ||
1996 | opts = kstrdup(page, GFP_KERNEL); | 2118 | opts = kstrdup(page, GFP_KERNEL); |
1997 | if (!opts) | 2119 | if (!opts) |
@@ -2014,15 +2136,10 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, | |||
2014 | pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config); | 2136 | pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config); |
2015 | break; | 2137 | break; |
2016 | case Opt_dev_size: | 2138 | case Opt_dev_size: |
2017 | arg_p = match_strdup(&args[0]); | 2139 | ret = match_u64(&args[0], &udev->dev_size); |
2018 | if (!arg_p) { | ||
2019 | ret = -ENOMEM; | ||
2020 | break; | ||
2021 | } | ||
2022 | ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size); | ||
2023 | kfree(arg_p); | ||
2024 | if (ret < 0) | 2140 | if (ret < 0) |
2025 | pr_err("kstrtoul() failed for dev_size=\n"); | 2141 | pr_err("match_u64() failed for dev_size=. Error %d.\n", |
2142 | ret); | ||
2026 | break; | 2143 | break; |
2027 | case Opt_hw_block_size: | 2144 | case Opt_hw_block_size: |
2028 | ret = tcmu_set_dev_attrib(&args[0], | 2145 | ret = tcmu_set_dev_attrib(&args[0], |
@@ -2033,48 +2150,13 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, | |||
2033 | &(dev->dev_attrib.hw_max_sectors)); | 2150 | &(dev->dev_attrib.hw_max_sectors)); |
2034 | break; | 2151 | break; |
2035 | case Opt_nl_reply_supported: | 2152 | case Opt_nl_reply_supported: |
2036 | arg_p = match_strdup(&args[0]); | 2153 | ret = match_int(&args[0], &udev->nl_reply_supported); |
2037 | if (!arg_p) { | ||
2038 | ret = -ENOMEM; | ||
2039 | break; | ||
2040 | } | ||
2041 | ret = kstrtoint(arg_p, 0, &udev->nl_reply_supported); | ||
2042 | kfree(arg_p); | ||
2043 | if (ret < 0) | 2154 | if (ret < 0) |
2044 | pr_err("kstrtoint() failed for nl_reply_supported=\n"); | 2155 | pr_err("match_int() failed for nl_reply_supported=. Error %d.\n", |
2156 | ret); | ||
2045 | break; | 2157 | break; |
2046 | case Opt_max_data_area_mb: | 2158 | case Opt_max_data_area_mb: |
2047 | if (dev->export_count) { | 2159 | ret = tcmu_set_max_blocks_param(udev, &args[0]); |
2048 | pr_err("Unable to set max_data_area_mb while exports exist\n"); | ||
2049 | ret = -EINVAL; | ||
2050 | break; | ||
2051 | } | ||
2052 | |||
2053 | arg_p = match_strdup(&args[0]); | ||
2054 | if (!arg_p) { | ||
2055 | ret = -ENOMEM; | ||
2056 | break; | ||
2057 | } | ||
2058 | ret = kstrtoint(arg_p, 0, &tmpval); | ||
2059 | kfree(arg_p); | ||
2060 | if (ret < 0) { | ||
2061 | pr_err("kstrtoint() failed for max_data_area_mb=\n"); | ||
2062 | break; | ||
2063 | } | ||
2064 | |||
2065 | if (tmpval <= 0) { | ||
2066 | pr_err("Invalid max_data_area %d\n", tmpval); | ||
2067 | ret = -EINVAL; | ||
2068 | break; | ||
2069 | } | ||
2070 | |||
2071 | udev->max_blocks = TCMU_MBS_TO_BLOCKS(tmpval); | ||
2072 | if (udev->max_blocks > tcmu_global_max_blocks) { | ||
2073 | pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n", | ||
2074 | tmpval, | ||
2075 | TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks)); | ||
2076 | udev->max_blocks = tcmu_global_max_blocks; | ||
2077 | } | ||
2078 | break; | 2160 | break; |
2079 | default: | 2161 | default: |
2080 | break; | 2162 | break; |
@@ -2095,7 +2177,7 @@ static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b) | |||
2095 | 2177 | ||
2096 | bl = sprintf(b + bl, "Config: %s ", | 2178 | bl = sprintf(b + bl, "Config: %s ", |
2097 | udev->dev_config[0] ? udev->dev_config : "NULL"); | 2179 | udev->dev_config[0] ? udev->dev_config : "NULL"); |
2098 | bl += sprintf(b + bl, "Size: %zu ", udev->dev_size); | 2180 | bl += sprintf(b + bl, "Size: %llu ", udev->dev_size); |
2099 | bl += sprintf(b + bl, "MaxDataAreaMB: %u\n", | 2181 | bl += sprintf(b + bl, "MaxDataAreaMB: %u\n", |
2100 | TCMU_BLOCKS_TO_MBS(udev->max_blocks)); | 2182 | TCMU_BLOCKS_TO_MBS(udev->max_blocks)); |
2101 | 2183 | ||
@@ -2222,7 +2304,7 @@ static int tcmu_send_dev_config_event(struct tcmu_dev *udev, | |||
2222 | return ret; | 2304 | return ret; |
2223 | } | 2305 | } |
2224 | return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, | 2306 | return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, |
2225 | &skb, &msg_header); | 2307 | skb, msg_header); |
2226 | } | 2308 | } |
2227 | 2309 | ||
2228 | 2310 | ||
@@ -2239,7 +2321,7 @@ static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page, | |||
2239 | return -EINVAL; | 2321 | return -EINVAL; |
2240 | 2322 | ||
2241 | /* Check if device has been configured before */ | 2323 | /* Check if device has been configured before */ |
2242 | if (tcmu_dev_configured(udev)) { | 2324 | if (target_dev_configured(&udev->se_dev)) { |
2243 | ret = tcmu_send_dev_config_event(udev, page); | 2325 | ret = tcmu_send_dev_config_event(udev, page); |
2244 | if (ret) { | 2326 | if (ret) { |
2245 | pr_err("Unable to reconfigure device\n"); | 2327 | pr_err("Unable to reconfigure device\n"); |
@@ -2264,7 +2346,7 @@ static ssize_t tcmu_dev_size_show(struct config_item *item, char *page) | |||
2264 | struct se_dev_attrib, da_group); | 2346 | struct se_dev_attrib, da_group); |
2265 | struct tcmu_dev *udev = TCMU_DEV(da->da_dev); | 2347 | struct tcmu_dev *udev = TCMU_DEV(da->da_dev); |
2266 | 2348 | ||
2267 | return snprintf(page, PAGE_SIZE, "%zu\n", udev->dev_size); | 2349 | return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size); |
2268 | } | 2350 | } |
2269 | 2351 | ||
2270 | static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size) | 2352 | static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size) |
@@ -2284,7 +2366,7 @@ static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size) | |||
2284 | return ret; | 2366 | return ret; |
2285 | } | 2367 | } |
2286 | return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, | 2368 | return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, |
2287 | &skb, &msg_header); | 2369 | skb, msg_header); |
2288 | } | 2370 | } |
2289 | 2371 | ||
2290 | static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page, | 2372 | static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page, |
@@ -2301,7 +2383,7 @@ static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page, | |||
2301 | return ret; | 2383 | return ret; |
2302 | 2384 | ||
2303 | /* Check if device has been configured before */ | 2385 | /* Check if device has been configured before */ |
2304 | if (tcmu_dev_configured(udev)) { | 2386 | if (target_dev_configured(&udev->se_dev)) { |
2305 | ret = tcmu_send_dev_size_event(udev, val); | 2387 | ret = tcmu_send_dev_size_event(udev, val); |
2306 | if (ret) { | 2388 | if (ret) { |
2307 | pr_err("Unable to reconfigure device\n"); | 2389 | pr_err("Unable to reconfigure device\n"); |
@@ -2366,7 +2448,7 @@ static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val) | |||
2366 | return ret; | 2448 | return ret; |
2367 | } | 2449 | } |
2368 | return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, | 2450 | return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, |
2369 | &skb, &msg_header); | 2451 | skb, msg_header); |
2370 | } | 2452 | } |
2371 | 2453 | ||
2372 | static ssize_t tcmu_emulate_write_cache_store(struct config_item *item, | 2454 | static ssize_t tcmu_emulate_write_cache_store(struct config_item *item, |
@@ -2383,7 +2465,7 @@ static ssize_t tcmu_emulate_write_cache_store(struct config_item *item, | |||
2383 | return ret; | 2465 | return ret; |
2384 | 2466 | ||
2385 | /* Check if device has been configured before */ | 2467 | /* Check if device has been configured before */ |
2386 | if (tcmu_dev_configured(udev)) { | 2468 | if (target_dev_configured(&udev->se_dev)) { |
2387 | ret = tcmu_send_emulate_write_cache(udev, val); | 2469 | ret = tcmu_send_emulate_write_cache(udev, val); |
2388 | if (ret) { | 2470 | if (ret) { |
2389 | pr_err("Unable to reconfigure device\n"); | 2471 | pr_err("Unable to reconfigure device\n"); |
@@ -2419,6 +2501,11 @@ static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page, | |||
2419 | u8 val; | 2501 | u8 val; |
2420 | int ret; | 2502 | int ret; |
2421 | 2503 | ||
2504 | if (!target_dev_configured(&udev->se_dev)) { | ||
2505 | pr_err("Device is not configured.\n"); | ||
2506 | return -EINVAL; | ||
2507 | } | ||
2508 | |||
2422 | ret = kstrtou8(page, 0, &val); | 2509 | ret = kstrtou8(page, 0, &val); |
2423 | if (ret < 0) | 2510 | if (ret < 0) |
2424 | return ret; | 2511 | return ret; |
@@ -2446,6 +2533,11 @@ static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page, | |||
2446 | u8 val; | 2533 | u8 val; |
2447 | int ret; | 2534 | int ret; |
2448 | 2535 | ||
2536 | if (!target_dev_configured(&udev->se_dev)) { | ||
2537 | pr_err("Device is not configured.\n"); | ||
2538 | return -EINVAL; | ||
2539 | } | ||
2540 | |||
2449 | ret = kstrtou8(page, 0, &val); | 2541 | ret = kstrtou8(page, 0, &val); |
2450 | if (ret < 0) | 2542 | if (ret < 0) |
2451 | return ret; | 2543 | return ret; |
@@ -2510,6 +2602,11 @@ static void find_free_blocks(void) | |||
2510 | list_for_each_entry(udev, &root_udev, node) { | 2602 | list_for_each_entry(udev, &root_udev, node) { |
2511 | mutex_lock(&udev->cmdr_lock); | 2603 | mutex_lock(&udev->cmdr_lock); |
2512 | 2604 | ||
2605 | if (!target_dev_configured(&udev->se_dev)) { | ||
2606 | mutex_unlock(&udev->cmdr_lock); | ||
2607 | continue; | ||
2608 | } | ||
2609 | |||
2513 | /* Try to complete the finished commands first */ | 2610 | /* Try to complete the finished commands first */ |
2514 | tcmu_handle_completions(udev); | 2611 | tcmu_handle_completions(udev); |
2515 | 2612 | ||
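The target_core_user.c hunks above replace the open-coded match_strdup() + kstrtoul()/kstrtoint() sequences with match_int()/match_u64() from lib/parser.c, which parse the substring_t in place and drop the temporary allocation. A minimal sketch of that parsing pattern, assuming a token table like the one shown above (the helper name and the two output parameters are illustrative, not part of the patch):

#include <linux/parser.h>

enum { Opt_dev_size, Opt_max_data_area_mb, Opt_err };

static const match_table_t tokens = {
        {Opt_dev_size,         "dev_size=%u"},
        {Opt_max_data_area_mb, "max_data_area_mb=%d"},
        {Opt_err,              NULL}
};

/* parse one comma-separated option from a configfs control string */
static int example_parse_one(char *ptr, u64 *dev_size, int *max_mb)
{
        substring_t args[MAX_OPT_ARGS];
        int token, val, ret = 0;

        token = match_token(ptr, tokens, args);
        switch (token) {
        case Opt_dev_size:
                /* match_u64() reads args[0] directly; no strdup/kstrtoul dance */
                ret = match_u64(&args[0], dev_size);
                break;
        case Opt_max_data_area_mb:
                ret = match_int(&args[0], &val);
                if (!ret && val <= 0)
                        ret = -EINVAL;
                else if (!ret)
                        *max_mb = val;
                break;
        default:
                break;
        }
        return ret;
}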
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index 9ee89e00cd77..2718a933c0c6 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c | |||
@@ -497,10 +497,7 @@ int target_xcopy_setup_pt(void) | |||
497 | INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list); | 497 | INIT_LIST_HEAD(&xcopy_pt_nacl.acl_list); |
498 | INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list); | 498 | INIT_LIST_HEAD(&xcopy_pt_nacl.acl_sess_list); |
499 | memset(&xcopy_pt_sess, 0, sizeof(struct se_session)); | 499 | memset(&xcopy_pt_sess, 0, sizeof(struct se_session)); |
500 | INIT_LIST_HEAD(&xcopy_pt_sess.sess_list); | 500 | transport_init_session(&xcopy_pt_sess); |
501 | INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list); | ||
502 | INIT_LIST_HEAD(&xcopy_pt_sess.sess_cmd_list); | ||
503 | spin_lock_init(&xcopy_pt_sess.sess_cmd_lock); | ||
504 | 501 | ||
505 | xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg; | 502 | xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg; |
506 | xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess; | 503 | xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess; |
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index ec372860106f..a183d4da7db2 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c | |||
@@ -28,7 +28,6 @@ | |||
28 | #include <linux/configfs.h> | 28 | #include <linux/configfs.h> |
29 | #include <linux/ctype.h> | 29 | #include <linux/ctype.h> |
30 | #include <linux/hash.h> | 30 | #include <linux/hash.h> |
31 | #include <linux/percpu_ida.h> | ||
32 | #include <asm/unaligned.h> | 31 | #include <asm/unaligned.h> |
33 | #include <scsi/scsi_tcq.h> | 32 | #include <scsi/scsi_tcq.h> |
34 | #include <scsi/libfc.h> | 33 | #include <scsi/libfc.h> |
@@ -92,7 +91,7 @@ static void ft_free_cmd(struct ft_cmd *cmd) | |||
92 | if (fr_seq(fp)) | 91 | if (fr_seq(fp)) |
93 | fc_seq_release(fr_seq(fp)); | 92 | fc_seq_release(fr_seq(fp)); |
94 | fc_frame_free(fp); | 93 | fc_frame_free(fp); |
95 | percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); | 94 | target_free_tag(sess->se_sess, &cmd->se_cmd); |
96 | ft_sess_put(sess); /* undo get from lookup at recv */ | 95 | ft_sess_put(sess); /* undo get from lookup at recv */ |
97 | } | 96 | } |
98 | 97 | ||
@@ -448,9 +447,9 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp) | |||
448 | struct ft_cmd *cmd; | 447 | struct ft_cmd *cmd; |
449 | struct fc_lport *lport = sess->tport->lport; | 448 | struct fc_lport *lport = sess->tport->lport; |
450 | struct se_session *se_sess = sess->se_sess; | 449 | struct se_session *se_sess = sess->se_sess; |
451 | int tag; | 450 | int tag, cpu; |
452 | 451 | ||
453 | tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); | 452 | tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu); |
454 | if (tag < 0) | 453 | if (tag < 0) |
455 | goto busy; | 454 | goto busy; |
456 | 455 | ||
@@ -458,10 +457,11 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp) | |||
458 | memset(cmd, 0, sizeof(struct ft_cmd)); | 457 | memset(cmd, 0, sizeof(struct ft_cmd)); |
459 | 458 | ||
460 | cmd->se_cmd.map_tag = tag; | 459 | cmd->se_cmd.map_tag = tag; |
460 | cmd->se_cmd.map_cpu = cpu; | ||
461 | cmd->sess = sess; | 461 | cmd->sess = sess; |
462 | cmd->seq = fc_seq_assign(lport, fp); | 462 | cmd->seq = fc_seq_assign(lport, fp); |
463 | if (!cmd->seq) { | 463 | if (!cmd->seq) { |
464 | percpu_ida_free(&se_sess->sess_tag_pool, tag); | 464 | target_free_tag(se_sess, &cmd->se_cmd); |
465 | goto busy; | 465 | goto busy; |
466 | } | 466 | } |
467 | cmd->req_frame = fp; /* hold frame during cmd */ | 467 | cmd->req_frame = fp; /* hold frame during cmd */ |
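Here and in the fabric drivers below, percpu_ida_alloc()/percpu_ida_free() on se_sess->sess_tag_pool give way to sbitmap_queue_get() plus the new target_free_tag() helper, with the returned CPU hint stashed in se_cmd->map_cpu. A hedged sketch of the per-command pattern (the wrapper functions are illustrative; the field usage follows the tfc_cmd.c hunk):

#include <linux/sbitmap.h>
#include <target/target_core_base.h>

/* illustrative only: allocate a command slot from the session tag pool */
static struct ft_cmd *example_cmd_alloc(struct se_session *se_sess)
{
        struct ft_cmd *cmd;
        int tag, cpu;

        tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
        if (tag < 0)
                return NULL;            /* pool exhausted; caller backs off */

        cmd = &((struct ft_cmd *)se_sess->sess_cmd_map)[tag];
        memset(cmd, 0, sizeof(*cmd));
        cmd->se_cmd.map_tag = tag;      /* remembered for target_free_tag() */
        cmd->se_cmd.map_cpu = cpu;      /* CPU hint for sbitmap_queue_clear() */
        return cmd;
}

static void example_cmd_release(struct se_session *se_sess, struct ft_cmd *cmd)
{
        /* wraps sbitmap_queue_clear(&sess_tag_pool, map_tag, map_cpu) */
        target_free_tag(se_sess, &cmd->se_cmd);
}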
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 42ee91123dca..e55c4d537592 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c | |||
@@ -223,10 +223,7 @@ static int ft_init_nodeacl(struct se_node_acl *nacl, const char *name) | |||
223 | /* | 223 | /* |
224 | * local_port port_group (tpg) ops. | 224 | * local_port port_group (tpg) ops. |
225 | */ | 225 | */ |
226 | static struct se_portal_group *ft_add_tpg( | 226 | static struct se_portal_group *ft_add_tpg(struct se_wwn *wwn, const char *name) |
227 | struct se_wwn *wwn, | ||
228 | struct config_group *group, | ||
229 | const char *name) | ||
230 | { | 227 | { |
231 | struct ft_lport_wwn *ft_wwn; | 228 | struct ft_lport_wwn *ft_wwn; |
232 | struct ft_tpg *tpg; | 229 | struct ft_tpg *tpg; |
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index c91979c1463d..6d4adf5ec26c 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c | |||
@@ -239,7 +239,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id, | |||
239 | sess->tport = tport; | 239 | sess->tport = tport; |
240 | sess->port_id = port_id; | 240 | sess->port_id = port_id; |
241 | 241 | ||
242 | sess->se_sess = target_alloc_session(se_tpg, TCM_FC_DEFAULT_TAGS, | 242 | sess->se_sess = target_setup_session(se_tpg, TCM_FC_DEFAULT_TAGS, |
243 | sizeof(struct ft_cmd), | 243 | sizeof(struct ft_cmd), |
244 | TARGET_PROT_NORMAL, &initiatorname[0], | 244 | TARGET_PROT_NORMAL, &initiatorname[0], |
245 | sess, ft_sess_alloc_cb); | 245 | sess, ft_sess_alloc_cb); |
@@ -287,7 +287,6 @@ static struct ft_sess *ft_sess_delete(struct ft_tport *tport, u32 port_id) | |||
287 | 287 | ||
288 | static void ft_close_sess(struct ft_sess *sess) | 288 | static void ft_close_sess(struct ft_sess *sess) |
289 | { | 289 | { |
290 | transport_deregister_session_configfs(sess->se_sess); | ||
291 | target_sess_cmd_list_set_waiting(sess->se_sess); | 290 | target_sess_cmd_list_set_waiting(sess->se_sess); |
292 | target_wait_for_sess_cmds(sess->se_sess); | 291 | target_wait_for_sess_cmds(sess->se_sess); |
293 | ft_sess_put(sess); | 292 | ft_sess_put(sess); |
@@ -448,7 +447,7 @@ static void ft_sess_free(struct kref *kref) | |||
448 | { | 447 | { |
449 | struct ft_sess *sess = container_of(kref, struct ft_sess, kref); | 448 | struct ft_sess *sess = container_of(kref, struct ft_sess, kref); |
450 | 449 | ||
451 | transport_deregister_session(sess->se_sess); | 450 | target_remove_session(sess->se_sess); |
452 | kfree_rcu(sess, rcu); | 451 | kfree_rcu(sess, rcu); |
453 | } | 452 | } |
454 | 453 | ||
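tfc_sess.c is one of several fabrics switched from target_alloc_session()/transport_deregister_session() to the renamed target_setup_session()/target_remove_session() pair, which also folds the old transport_deregister_session_configfs() step into teardown. A rough sketch of a nexus built on the new pair, under the assumption of a simple fabric; the tag count, descriptor size, and callback body are placeholders:

static int example_sess_cb(struct se_portal_group *se_tpg,
                           struct se_session *se_sess, void *priv)
{
        /* fabric-private wiring would go here, e.g. stash se_sess in priv */
        return 0;
}

static struct se_session *example_make_nexus(struct se_portal_group *se_tpg,
                                             void *priv, const char *name)
{
        /* 128 tags and the ft_cmd descriptor size are illustrative values */
        return target_setup_session(se_tpg, 128, sizeof(struct ft_cmd),
                                    TARGET_PROT_NORMAL, name, priv,
                                    example_sess_cb);
}

static void example_drop_nexus(struct se_session *se_sess)
{
        /* one call now covers what the old code did with
         * transport_deregister_session_configfs() +
         * transport_deregister_session()
         */
        target_remove_session(se_sess);
}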
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c index d78dbb73bde8..106988a6661a 100644 --- a/drivers/usb/gadget/function/f_tcm.c +++ b/drivers/usb/gadget/function/f_tcm.c | |||
@@ -1071,15 +1071,16 @@ static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu, | |||
1071 | { | 1071 | { |
1072 | struct se_session *se_sess = tv_nexus->tvn_se_sess; | 1072 | struct se_session *se_sess = tv_nexus->tvn_se_sess; |
1073 | struct usbg_cmd *cmd; | 1073 | struct usbg_cmd *cmd; |
1074 | int tag; | 1074 | int tag, cpu; |
1075 | 1075 | ||
1076 | tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); | 1076 | tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu); |
1077 | if (tag < 0) | 1077 | if (tag < 0) |
1078 | return ERR_PTR(-ENOMEM); | 1078 | return ERR_PTR(-ENOMEM); |
1079 | 1079 | ||
1080 | cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[tag]; | 1080 | cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[tag]; |
1081 | memset(cmd, 0, sizeof(*cmd)); | 1081 | memset(cmd, 0, sizeof(*cmd)); |
1082 | cmd->se_cmd.map_tag = tag; | 1082 | cmd->se_cmd.map_tag = tag; |
1083 | cmd->se_cmd.map_cpu = cpu; | ||
1083 | cmd->se_cmd.tag = cmd->tag = scsi_tag; | 1084 | cmd->se_cmd.tag = cmd->tag = scsi_tag; |
1084 | cmd->fu = fu; | 1085 | cmd->fu = fu; |
1085 | 1086 | ||
@@ -1288,7 +1289,7 @@ static void usbg_release_cmd(struct se_cmd *se_cmd) | |||
1288 | struct se_session *se_sess = se_cmd->se_sess; | 1289 | struct se_session *se_sess = se_cmd->se_sess; |
1289 | 1290 | ||
1290 | kfree(cmd->data_buf); | 1291 | kfree(cmd->data_buf); |
1291 | percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); | 1292 | target_free_tag(se_sess, se_cmd); |
1292 | } | 1293 | } |
1293 | 1294 | ||
1294 | static u32 usbg_sess_get_index(struct se_session *se_sess) | 1295 | static u32 usbg_sess_get_index(struct se_session *se_sess) |
@@ -1343,10 +1344,8 @@ static int usbg_init_nodeacl(struct se_node_acl *se_nacl, const char *name) | |||
1343 | return 0; | 1344 | return 0; |
1344 | } | 1345 | } |
1345 | 1346 | ||
1346 | static struct se_portal_group *usbg_make_tpg( | 1347 | static struct se_portal_group *usbg_make_tpg(struct se_wwn *wwn, |
1347 | struct se_wwn *wwn, | 1348 | const char *name) |
1348 | struct config_group *group, | ||
1349 | const char *name) | ||
1350 | { | 1349 | { |
1351 | struct usbg_tport *tport = container_of(wwn, struct usbg_tport, | 1350 | struct usbg_tport *tport = container_of(wwn, struct usbg_tport, |
1352 | tport_wwn); | 1351 | tport_wwn); |
@@ -1379,7 +1378,7 @@ static struct se_portal_group *usbg_make_tpg( | |||
1379 | goto unlock_dep; | 1378 | goto unlock_dep; |
1380 | } else { | 1379 | } else { |
1381 | ret = configfs_depend_item_unlocked( | 1380 | ret = configfs_depend_item_unlocked( |
1382 | group->cg_subsys, | 1381 | wwn->wwn_group.cg_subsys, |
1383 | &opts->func_inst.group.cg_item); | 1382 | &opts->func_inst.group.cg_item); |
1384 | if (ret) | 1383 | if (ret) |
1385 | goto unlock_dep; | 1384 | goto unlock_dep; |
@@ -1593,7 +1592,7 @@ static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name) | |||
1593 | goto out_unlock; | 1592 | goto out_unlock; |
1594 | } | 1593 | } |
1595 | 1594 | ||
1596 | tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg, | 1595 | tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, |
1597 | USB_G_DEFAULT_SESSION_TAGS, | 1596 | USB_G_DEFAULT_SESSION_TAGS, |
1598 | sizeof(struct usbg_cmd), | 1597 | sizeof(struct usbg_cmd), |
1599 | TARGET_PROT_NORMAL, name, | 1598 | TARGET_PROT_NORMAL, name, |
@@ -1639,7 +1638,7 @@ static int tcm_usbg_drop_nexus(struct usbg_tpg *tpg) | |||
1639 | /* | 1638 | /* |
1640 | * Release the SCSI I_T Nexus to the emulated vHost Target Port | 1639 | * Release the SCSI I_T Nexus to the emulated vHost Target Port |
1641 | */ | 1640 | */ |
1642 | transport_deregister_session(tv_nexus->tvn_se_sess); | 1641 | target_remove_session(se_sess); |
1643 | tpg->tpg_nexus = NULL; | 1642 | tpg->tpg_nexus = NULL; |
1644 | 1643 | ||
1645 | kfree(tv_nexus); | 1644 | kfree(tv_nexus); |
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 17fcd3b2e686..76f8d649147b 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
@@ -46,7 +46,6 @@ | |||
46 | #include <linux/virtio_scsi.h> | 46 | #include <linux/virtio_scsi.h> |
47 | #include <linux/llist.h> | 47 | #include <linux/llist.h> |
48 | #include <linux/bitmap.h> | 48 | #include <linux/bitmap.h> |
49 | #include <linux/percpu_ida.h> | ||
50 | 49 | ||
51 | #include "vhost.h" | 50 | #include "vhost.h" |
52 | 51 | ||
@@ -324,7 +323,7 @@ static void vhost_scsi_release_cmd(struct se_cmd *se_cmd) | |||
324 | } | 323 | } |
325 | 324 | ||
326 | vhost_scsi_put_inflight(tv_cmd->inflight); | 325 | vhost_scsi_put_inflight(tv_cmd->inflight); |
327 | percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); | 326 | target_free_tag(se_sess, se_cmd); |
328 | } | 327 | } |
329 | 328 | ||
330 | static u32 vhost_scsi_sess_get_index(struct se_session *se_sess) | 329 | static u32 vhost_scsi_sess_get_index(struct se_session *se_sess) |
@@ -567,7 +566,7 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg, | |||
567 | struct se_session *se_sess; | 566 | struct se_session *se_sess; |
568 | struct scatterlist *sg, *prot_sg; | 567 | struct scatterlist *sg, *prot_sg; |
569 | struct page **pages; | 568 | struct page **pages; |
570 | int tag; | 569 | int tag, cpu; |
571 | 570 | ||
572 | tv_nexus = tpg->tpg_nexus; | 571 | tv_nexus = tpg->tpg_nexus; |
573 | if (!tv_nexus) { | 572 | if (!tv_nexus) { |
@@ -576,7 +575,7 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg, | |||
576 | } | 575 | } |
577 | se_sess = tv_nexus->tvn_se_sess; | 576 | se_sess = tv_nexus->tvn_se_sess; |
578 | 577 | ||
579 | tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); | 578 | tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu); |
580 | if (tag < 0) { | 579 | if (tag < 0) { |
581 | pr_err("Unable to obtain tag for vhost_scsi_cmd\n"); | 580 | pr_err("Unable to obtain tag for vhost_scsi_cmd\n"); |
582 | return ERR_PTR(-ENOMEM); | 581 | return ERR_PTR(-ENOMEM); |
@@ -591,6 +590,7 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg, | |||
591 | cmd->tvc_prot_sgl = prot_sg; | 590 | cmd->tvc_prot_sgl = prot_sg; |
592 | cmd->tvc_upages = pages; | 591 | cmd->tvc_upages = pages; |
593 | cmd->tvc_se_cmd.map_tag = tag; | 592 | cmd->tvc_se_cmd.map_tag = tag; |
593 | cmd->tvc_se_cmd.map_cpu = cpu; | ||
594 | cmd->tvc_tag = scsi_tag; | 594 | cmd->tvc_tag = scsi_tag; |
595 | cmd->tvc_lun = lun; | 595 | cmd->tvc_lun = lun; |
596 | cmd->tvc_task_attr = task_attr; | 596 | cmd->tvc_task_attr = task_attr; |
@@ -1738,7 +1738,7 @@ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg, | |||
1738 | * struct se_node_acl for the vhost_scsi struct se_portal_group with | 1738 | * struct se_node_acl for the vhost_scsi struct se_portal_group with |
1739 | * the SCSI Initiator port name of the passed configfs group 'name'. | 1739 | * the SCSI Initiator port name of the passed configfs group 'name'. |
1740 | */ | 1740 | */ |
1741 | tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg, | 1741 | tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, |
1742 | VHOST_SCSI_DEFAULT_TAGS, | 1742 | VHOST_SCSI_DEFAULT_TAGS, |
1743 | sizeof(struct vhost_scsi_cmd), | 1743 | sizeof(struct vhost_scsi_cmd), |
1744 | TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS, | 1744 | TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS, |
@@ -1797,7 +1797,7 @@ static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg) | |||
1797 | /* | 1797 | /* |
1798 | * Release the SCSI I_T Nexus to the emulated vhost Target Port | 1798 | * Release the SCSI I_T Nexus to the emulated vhost Target Port |
1799 | */ | 1799 | */ |
1800 | transport_deregister_session(tv_nexus->tvn_se_sess); | 1800 | target_remove_session(se_sess); |
1801 | tpg->tpg_nexus = NULL; | 1801 | tpg->tpg_nexus = NULL; |
1802 | mutex_unlock(&tpg->tv_tpg_mutex); | 1802 | mutex_unlock(&tpg->tv_tpg_mutex); |
1803 | 1803 | ||
@@ -1912,9 +1912,7 @@ static struct configfs_attribute *vhost_scsi_tpg_attrs[] = { | |||
1912 | }; | 1912 | }; |
1913 | 1913 | ||
1914 | static struct se_portal_group * | 1914 | static struct se_portal_group * |
1915 | vhost_scsi_make_tpg(struct se_wwn *wwn, | 1915 | vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name) |
1916 | struct config_group *group, | ||
1917 | const char *name) | ||
1918 | { | 1916 | { |
1919 | struct vhost_scsi_tport *tport = container_of(wwn, | 1917 | struct vhost_scsi_tport *tport = container_of(wwn, |
1920 | struct vhost_scsi_tport, tport_wwn); | 1918 | struct vhost_scsi_tport, tport_wwn); |
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c index e2f3e8b0fba9..14a3d4cbc2a7 100644 --- a/drivers/xen/xen-scsiback.c +++ b/drivers/xen/xen-scsiback.c | |||
@@ -654,9 +654,9 @@ static struct vscsibk_pend *scsiback_get_pend_req(struct vscsiif_back_ring *ring | |||
654 | struct scsiback_nexus *nexus = tpg->tpg_nexus; | 654 | struct scsiback_nexus *nexus = tpg->tpg_nexus; |
655 | struct se_session *se_sess = nexus->tvn_se_sess; | 655 | struct se_session *se_sess = nexus->tvn_se_sess; |
656 | struct vscsibk_pend *req; | 656 | struct vscsibk_pend *req; |
657 | int tag, i; | 657 | int tag, cpu, i; |
658 | 658 | ||
659 | tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); | 659 | tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu); |
660 | if (tag < 0) { | 660 | if (tag < 0) { |
661 | pr_err("Unable to obtain tag for vscsiif_request\n"); | 661 | pr_err("Unable to obtain tag for vscsiif_request\n"); |
662 | return ERR_PTR(-ENOMEM); | 662 | return ERR_PTR(-ENOMEM); |
@@ -665,6 +665,7 @@ static struct vscsibk_pend *scsiback_get_pend_req(struct vscsiif_back_ring *ring | |||
665 | req = &((struct vscsibk_pend *)se_sess->sess_cmd_map)[tag]; | 665 | req = &((struct vscsibk_pend *)se_sess->sess_cmd_map)[tag]; |
666 | memset(req, 0, sizeof(*req)); | 666 | memset(req, 0, sizeof(*req)); |
667 | req->se_cmd.map_tag = tag; | 667 | req->se_cmd.map_tag = tag; |
668 | req->se_cmd.map_cpu = cpu; | ||
668 | 669 | ||
669 | for (i = 0; i < VSCSI_MAX_GRANTS; i++) | 670 | for (i = 0; i < VSCSI_MAX_GRANTS; i++) |
670 | req->grant_handles[i] = SCSIBACK_INVALID_HANDLE; | 671 | req->grant_handles[i] = SCSIBACK_INVALID_HANDLE; |
@@ -1387,9 +1388,7 @@ static int scsiback_check_stop_free(struct se_cmd *se_cmd) | |||
1387 | 1388 | ||
1388 | static void scsiback_release_cmd(struct se_cmd *se_cmd) | 1389 | static void scsiback_release_cmd(struct se_cmd *se_cmd) |
1389 | { | 1390 | { |
1390 | struct se_session *se_sess = se_cmd->se_sess; | 1391 | target_free_tag(se_cmd->se_sess, se_cmd); |
1391 | |||
1392 | percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); | ||
1393 | } | 1392 | } |
1394 | 1393 | ||
1395 | static u32 scsiback_sess_get_index(struct se_session *se_sess) | 1394 | static u32 scsiback_sess_get_index(struct se_session *se_sess) |
@@ -1532,7 +1531,7 @@ static int scsiback_make_nexus(struct scsiback_tpg *tpg, | |||
1532 | goto out_unlock; | 1531 | goto out_unlock; |
1533 | } | 1532 | } |
1534 | 1533 | ||
1535 | tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg, | 1534 | tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, |
1536 | VSCSI_DEFAULT_SESSION_TAGS, | 1535 | VSCSI_DEFAULT_SESSION_TAGS, |
1537 | sizeof(struct vscsibk_pend), | 1536 | sizeof(struct vscsibk_pend), |
1538 | TARGET_PROT_NORMAL, name, | 1537 | TARGET_PROT_NORMAL, name, |
@@ -1587,7 +1586,7 @@ static int scsiback_drop_nexus(struct scsiback_tpg *tpg) | |||
1587 | /* | 1586 | /* |
1588 | * Release the SCSI I_T Nexus to the emulated xen-pvscsi Target Port | 1587 | * Release the SCSI I_T Nexus to the emulated xen-pvscsi Target Port |
1589 | */ | 1588 | */ |
1590 | transport_deregister_session(tv_nexus->tvn_se_sess); | 1589 | target_remove_session(se_sess); |
1591 | tpg->tpg_nexus = NULL; | 1590 | tpg->tpg_nexus = NULL; |
1592 | mutex_unlock(&tpg->tv_tpg_mutex); | 1591 | mutex_unlock(&tpg->tv_tpg_mutex); |
1593 | 1592 | ||
@@ -1743,9 +1742,7 @@ static void scsiback_port_unlink(struct se_portal_group *se_tpg, | |||
1743 | } | 1742 | } |
1744 | 1743 | ||
1745 | static struct se_portal_group * | 1744 | static struct se_portal_group * |
1746 | scsiback_make_tpg(struct se_wwn *wwn, | 1745 | scsiback_make_tpg(struct se_wwn *wwn, const char *name) |
1747 | struct config_group *group, | ||
1748 | const char *name) | ||
1749 | { | 1746 | { |
1750 | struct scsiback_tport *tport = container_of(wwn, | 1747 | struct scsiback_tport *tport = container_of(wwn, |
1751 | struct scsiback_tport, tport_wwn); | 1748 | struct scsiback_tport, tport_wwn); |
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index 052e5ad9a4d2..0a7252aecfa5 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c | |||
@@ -410,6 +410,50 @@ int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr, | |||
410 | EXPORT_SYMBOL_GPL(sysfs_chmod_file); | 410 | EXPORT_SYMBOL_GPL(sysfs_chmod_file); |
411 | 411 | ||
412 | /** | 412 | /** |
413 | * sysfs_break_active_protection - break "active" protection | ||
414 | * @kobj: The kernel object @attr is associated with. | ||
415 | * @attr: The attribute to break the "active" protection for. | ||
416 | * | ||
417 | * With sysfs, just like kernfs, deletion of an attribute is postponed until | ||
418 | * all active .show() and .store() callbacks have finished unless this function | ||
419 | * is called. Hence this function is useful in methods that implement self | ||
420 | * deletion. | ||
421 | */ | ||
422 | struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj, | ||
423 | const struct attribute *attr) | ||
424 | { | ||
425 | struct kernfs_node *kn; | ||
426 | |||
427 | kobject_get(kobj); | ||
428 | kn = kernfs_find_and_get(kobj->sd, attr->name); | ||
429 | if (kn) | ||
430 | kernfs_break_active_protection(kn); | ||
431 | return kn; | ||
432 | } | ||
433 | EXPORT_SYMBOL_GPL(sysfs_break_active_protection); | ||
434 | |||
435 | /** | ||
436 | * sysfs_unbreak_active_protection - restore "active" protection | ||
437 | * @kn: Pointer returned by sysfs_break_active_protection(). | ||
438 | * | ||
439 | * Undo the effects of sysfs_break_active_protection(). Since this function | ||
440 | * calls kernfs_put() on the kernfs node that corresponds to the 'attr' | ||
441 | * argument passed to sysfs_break_active_protection() that attribute may have | ||
442 | * been removed between the sysfs_break_active_protection() and | ||
443 | * sysfs_unbreak_active_protection() calls, it is not safe to access @kn after | ||
444 | * this function has returned. | ||
445 | */ | ||
446 | void sysfs_unbreak_active_protection(struct kernfs_node *kn) | ||
447 | { | ||
448 | struct kobject *kobj = kn->parent->priv; | ||
449 | |||
450 | kernfs_unbreak_active_protection(kn); | ||
451 | kernfs_put(kn); | ||
452 | kobject_put(kobj); | ||
453 | } | ||
454 | EXPORT_SYMBOL_GPL(sysfs_unbreak_active_protection); | ||
455 | |||
456 | /** | ||
413 | * sysfs_remove_file_ns - remove an object attribute with a custom ns tag | 457 | * sysfs_remove_file_ns - remove an object attribute with a custom ns tag |
414 | * @kobj: object we're acting for | 458 | * @kobj: object we're acting for |
415 | * @attr: attribute descriptor | 459 | * @attr: attribute descriptor |
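The two helpers added to fs/sysfs/file.c exist so a .store() callback can remove its own attribute (or its whole kobject) without deadlocking on the kernfs "active" protection described in the kernel-doc above. A hedged sketch of a self-deleting store method using them; the removal call is a placeholder for whatever teardown the driver actually performs:

static ssize_t delete_store(struct kobject *kobj, struct kobj_attribute *attr,
                            const char *buf, size_t count)
{
        struct kernfs_node *kn;

        /* pin kobj and drop "active" protection so removing this very
         * attribute from inside its own store() cannot deadlock */
        kn = sysfs_break_active_protection(kobj, &attr->attr);

        example_remove_object(kobj);    /* placeholder: tears down kobj's sysfs tree */

        /* releases the kernfs node and the kobject reference taken above */
        if (kn)
                sysfs_unbreak_active_protection(kn);
        return count;
}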
diff --git a/include/linux/libata.h b/include/linux/libata.h index 32f247cb5e9e..bc4f87cbe7f4 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -1111,6 +1111,8 @@ extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports); | |||
1111 | extern struct ata_host *ata_host_alloc_pinfo(struct device *dev, | 1111 | extern struct ata_host *ata_host_alloc_pinfo(struct device *dev, |
1112 | const struct ata_port_info * const * ppi, int n_ports); | 1112 | const struct ata_port_info * const * ppi, int n_ports); |
1113 | extern int ata_slave_link_init(struct ata_port *ap); | 1113 | extern int ata_slave_link_init(struct ata_port *ap); |
1114 | extern void ata_host_get(struct ata_host *host); | ||
1115 | extern void ata_host_put(struct ata_host *host); | ||
1114 | extern int ata_host_start(struct ata_host *host); | 1116 | extern int ata_host_start(struct ata_host *host); |
1115 | extern int ata_host_register(struct ata_host *host, | 1117 | extern int ata_host_register(struct ata_host *host, |
1116 | struct scsi_host_template *sht); | 1118 | struct scsi_host_template *sht); |
diff --git a/include/linux/percpu_ida.h b/include/linux/percpu_ida.h deleted file mode 100644 index 07d78e4653bc..000000000000 --- a/include/linux/percpu_ida.h +++ /dev/null | |||
@@ -1,83 +0,0 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | #ifndef __PERCPU_IDA_H__ | ||
3 | #define __PERCPU_IDA_H__ | ||
4 | |||
5 | #include <linux/types.h> | ||
6 | #include <linux/bitops.h> | ||
7 | #include <linux/init.h> | ||
8 | #include <linux/sched.h> | ||
9 | #include <linux/spinlock_types.h> | ||
10 | #include <linux/wait.h> | ||
11 | #include <linux/cpumask.h> | ||
12 | |||
13 | struct percpu_ida_cpu; | ||
14 | |||
15 | struct percpu_ida { | ||
16 | /* | ||
17 | * number of tags available to be allocated, as passed to | ||
18 | * percpu_ida_init() | ||
19 | */ | ||
20 | unsigned nr_tags; | ||
21 | unsigned percpu_max_size; | ||
22 | unsigned percpu_batch_size; | ||
23 | |||
24 | struct percpu_ida_cpu __percpu *tag_cpu; | ||
25 | |||
26 | /* | ||
27 | * Bitmap of cpus that (may) have tags on their percpu freelists: | ||
28 | * steal_tags() uses this to decide when to steal tags, and which cpus | ||
29 | * to try stealing from. | ||
30 | * | ||
31 | * It's ok for a freelist to be empty when its bit is set - steal_tags() | ||
32 | * will just keep looking - but the bitmap _must_ be set whenever a | ||
33 | * percpu freelist does have tags. | ||
34 | */ | ||
35 | cpumask_t cpus_have_tags; | ||
36 | |||
37 | struct { | ||
38 | spinlock_t lock; | ||
39 | /* | ||
40 | * When we go to steal tags from another cpu (see steal_tags()), | ||
41 | * we want to pick a cpu at random. Cycling through them every | ||
42 | * time we steal is a bit easier and more or less equivalent: | ||
43 | */ | ||
44 | unsigned cpu_last_stolen; | ||
45 | |||
46 | /* For sleeping on allocation failure */ | ||
47 | wait_queue_head_t wait; | ||
48 | |||
49 | /* | ||
50 | * Global freelist - it's a stack where nr_free points to the | ||
51 | * top | ||
52 | */ | ||
53 | unsigned nr_free; | ||
54 | unsigned *freelist; | ||
55 | } ____cacheline_aligned_in_smp; | ||
56 | }; | ||
57 | |||
58 | /* | ||
59 | * Number of tags we move between the percpu freelist and the global freelist at | ||
60 | * a time | ||
61 | */ | ||
62 | #define IDA_DEFAULT_PCPU_BATCH_MOVE 32U | ||
63 | /* Max size of percpu freelist, */ | ||
64 | #define IDA_DEFAULT_PCPU_SIZE ((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2) | ||
65 | |||
66 | int percpu_ida_alloc(struct percpu_ida *pool, int state); | ||
67 | void percpu_ida_free(struct percpu_ida *pool, unsigned tag); | ||
68 | |||
69 | void percpu_ida_destroy(struct percpu_ida *pool); | ||
70 | int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags, | ||
71 | unsigned long max_size, unsigned long batch_size); | ||
72 | static inline int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags) | ||
73 | { | ||
74 | return __percpu_ida_init(pool, nr_tags, IDA_DEFAULT_PCPU_SIZE, | ||
75 | IDA_DEFAULT_PCPU_BATCH_MOVE); | ||
76 | } | ||
77 | |||
78 | typedef int (*percpu_ida_cb)(unsigned, void *); | ||
79 | int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn, | ||
80 | void *data); | ||
81 | |||
82 | unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu); | ||
83 | #endif /* __PERCPU_IDA_H__ */ | ||
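With its last users converted, the percpu_ida allocator can be deleted outright. The equivalent pool setup on top of sbitmap_queue, roughly what target core now does when allocating session tags, might look like the sketch below; the depth, GFP flags, and function names are illustrative:

#include <linux/sbitmap.h>
#include <linux/slab.h>

static struct sbitmap_queue example_tag_pool;

static int example_pool_init(unsigned int nr_tags)
{
        /* shift = -1 lets sbitmap pick the per-word granularity;
         * round_robin = false keeps percpu-ida-like cache locality */
        return sbitmap_queue_init_node(&example_tag_pool, nr_tags, -1, false,
                                       GFP_KERNEL, NUMA_NO_NODE);
}

static void example_pool_exit(void)
{
        sbitmap_queue_free(&example_tag_pool);
}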
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index e6539536dea9..804a50983ec5 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h | |||
@@ -23,6 +23,8 @@ | |||
23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | 25 | ||
26 | struct seq_file; | ||
27 | |||
26 | /** | 28 | /** |
27 | * struct sbitmap_word - Word in a &struct sbitmap. | 29 | * struct sbitmap_word - Word in a &struct sbitmap. |
28 | */ | 30 | */ |
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index b8bfdc173ec0..3c12198c0103 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h | |||
@@ -237,6 +237,9 @@ int __must_check sysfs_create_files(struct kobject *kobj, | |||
237 | const struct attribute **attr); | 237 | const struct attribute **attr); |
238 | int __must_check sysfs_chmod_file(struct kobject *kobj, | 238 | int __must_check sysfs_chmod_file(struct kobject *kobj, |
239 | const struct attribute *attr, umode_t mode); | 239 | const struct attribute *attr, umode_t mode); |
240 | struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj, | ||
241 | const struct attribute *attr); | ||
242 | void sysfs_unbreak_active_protection(struct kernfs_node *kn); | ||
240 | void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr, | 243 | void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr, |
241 | const void *ns); | 244 | const void *ns); |
242 | bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr); | 245 | bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr); |
@@ -350,6 +353,17 @@ static inline int sysfs_chmod_file(struct kobject *kobj, | |||
350 | return 0; | 353 | return 0; |
351 | } | 354 | } |
352 | 355 | ||
356 | static inline struct kernfs_node * | ||
357 | sysfs_break_active_protection(struct kobject *kobj, | ||
358 | const struct attribute *attr) | ||
359 | { | ||
360 | return NULL; | ||
361 | } | ||
362 | |||
363 | static inline void sysfs_unbreak_active_protection(struct kernfs_node *kn) | ||
364 | { | ||
365 | } | ||
366 | |||
353 | static inline void sysfs_remove_file_ns(struct kobject *kobj, | 367 | static inline void sysfs_remove_file_ns(struct kobject *kobj, |
354 | const struct attribute *attr, | 368 | const struct attribute *attr, |
355 | const void *ns) | 369 | const void *ns) |
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 225ab7783dfd..3de3b10da19a 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h | |||
@@ -161,7 +161,7 @@ struct sata_device { | |||
161 | u8 port_no; /* port number, if this is a PM (Port) */ | 161 | u8 port_no; /* port number, if this is a PM (Port) */ |
162 | 162 | ||
163 | struct ata_port *ap; | 163 | struct ata_port *ap; |
164 | struct ata_host ata_host; | 164 | struct ata_host *ata_host; |
165 | struct smp_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */ | 165 | struct smp_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */ |
166 | u8 fis[ATA_RESP_FIS_SIZE]; | 166 | u8 fis[ATA_RESP_FIS_SIZE]; |
167 | }; | 167 | }; |
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index 53b485fe9b67..5ea06d310a25 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h | |||
@@ -758,6 +758,7 @@ extern void scsi_scan_host(struct Scsi_Host *); | |||
758 | extern void scsi_rescan_device(struct device *); | 758 | extern void scsi_rescan_device(struct device *); |
759 | extern void scsi_remove_host(struct Scsi_Host *); | 759 | extern void scsi_remove_host(struct Scsi_Host *); |
760 | extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *); | 760 | extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *); |
761 | extern int scsi_host_busy(struct Scsi_Host *shost); | ||
761 | extern void scsi_host_put(struct Scsi_Host *t); | 762 | extern void scsi_host_put(struct Scsi_Host *t); |
762 | extern struct Scsi_Host *scsi_host_lookup(unsigned short); | 763 | extern struct Scsi_Host *scsi_host_lookup(unsigned short); |
763 | extern const char *scsi_host_state_name(enum scsi_host_state); | 764 | extern const char *scsi_host_state_name(enum scsi_host_state); |
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h index cf5f3fff1f1a..f2e6abea8490 100644 --- a/include/target/iscsi/iscsi_target_core.h +++ b/include/target/iscsi/iscsi_target_core.h | |||
@@ -4,6 +4,7 @@ | |||
4 | 4 | ||
5 | #include <linux/dma-direction.h> /* enum dma_data_direction */ | 5 | #include <linux/dma-direction.h> /* enum dma_data_direction */ |
6 | #include <linux/list.h> /* struct list_head */ | 6 | #include <linux/list.h> /* struct list_head */ |
7 | #include <linux/sched.h> | ||
7 | #include <linux/socket.h> /* struct sockaddr_storage */ | 8 | #include <linux/socket.h> /* struct sockaddr_storage */ |
8 | #include <linux/types.h> /* u8 */ | 9 | #include <linux/types.h> /* u8 */ |
9 | #include <scsi/iscsi_proto.h> /* itt_t */ | 10 | #include <scsi/iscsi_proto.h> /* itt_t */ |
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 34a15d59ed88..51b6f50eabee 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h | |||
@@ -106,13 +106,15 @@ bool target_lun_is_rdonly(struct se_cmd *); | |||
106 | sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd, | 106 | sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd, |
107 | sense_reason_t (*exec_cmd)(struct se_cmd *cmd)); | 107 | sense_reason_t (*exec_cmd)(struct se_cmd *cmd)); |
108 | 108 | ||
109 | struct se_device *target_find_device(int id, bool do_depend); | ||
110 | |||
111 | bool target_sense_desc_format(struct se_device *dev); | 109 | bool target_sense_desc_format(struct se_device *dev); |
112 | sector_t target_to_linux_sector(struct se_device *dev, sector_t lb); | 110 | sector_t target_to_linux_sector(struct se_device *dev, sector_t lb); |
113 | bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, | 111 | bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, |
114 | struct request_queue *q); | 112 | struct request_queue *q); |
115 | 113 | ||
114 | static inline bool target_dev_configured(struct se_device *se_dev) | ||
115 | { | ||
116 | return !!(se_dev->dev_flags & DF_CONFIGURED); | ||
117 | } | ||
116 | 118 | ||
117 | /* Only use get_unaligned_be24() if reading p - 1 is allowed. */ | 119 | /* Only use get_unaligned_be24() if reading p - 1 is allowed. */ |
118 | static inline uint32_t get_unaligned_be24(const uint8_t *const p) | 120 | static inline uint32_t get_unaligned_be24(const uint8_t *const p) |
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 922a39f45abc..7a4ee7852ca4 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | #include <linux/configfs.h> /* struct config_group */ | 5 | #include <linux/configfs.h> /* struct config_group */ |
6 | #include <linux/dma-direction.h> /* enum dma_data_direction */ | 6 | #include <linux/dma-direction.h> /* enum dma_data_direction */ |
7 | #include <linux/percpu_ida.h> /* struct percpu_ida */ | 7 | #include <linux/sbitmap.h> |
8 | #include <linux/percpu-refcount.h> | 8 | #include <linux/percpu-refcount.h> |
9 | #include <linux/semaphore.h> /* struct semaphore */ | 9 | #include <linux/semaphore.h> /* struct semaphore */ |
10 | #include <linux/completion.h> | 10 | #include <linux/completion.h> |
@@ -443,7 +443,6 @@ struct se_cmd { | |||
443 | u8 scsi_asc; | 443 | u8 scsi_asc; |
444 | u8 scsi_ascq; | 444 | u8 scsi_ascq; |
445 | u16 scsi_sense_length; | 445 | u16 scsi_sense_length; |
446 | unsigned cmd_wait_set:1; | ||
447 | unsigned unknown_data_length:1; | 446 | unsigned unknown_data_length:1; |
448 | bool state_active:1; | 447 | bool state_active:1; |
449 | u64 tag; /* SAM command identifier aka task tag */ | 448 | u64 tag; /* SAM command identifier aka task tag */ |
@@ -455,6 +454,7 @@ struct se_cmd { | |||
455 | int sam_task_attr; | 454 | int sam_task_attr; |
456 | /* Used for se_sess->sess_tag_pool */ | 455 | /* Used for se_sess->sess_tag_pool */ |
457 | unsigned int map_tag; | 456 | unsigned int map_tag; |
457 | int map_cpu; | ||
458 | /* Transport protocol dependent state, see transport_state_table */ | 458 | /* Transport protocol dependent state, see transport_state_table */ |
459 | enum transport_state_table t_state; | 459 | enum transport_state_table t_state; |
460 | /* See se_cmd_flags_table */ | 460 | /* See se_cmd_flags_table */ |
@@ -475,7 +475,7 @@ struct se_cmd { | |||
475 | struct se_session *se_sess; | 475 | struct se_session *se_sess; |
476 | struct se_tmr_req *se_tmr_req; | 476 | struct se_tmr_req *se_tmr_req; |
477 | struct list_head se_cmd_list; | 477 | struct list_head se_cmd_list; |
478 | struct completion cmd_wait_comp; | 478 | struct completion *compl; |
479 | const struct target_core_fabric_ops *se_tfo; | 479 | const struct target_core_fabric_ops *se_tfo; |
480 | sense_reason_t (*execute_cmd)(struct se_cmd *); | 480 | sense_reason_t (*execute_cmd)(struct se_cmd *); |
481 | sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *); | 481 | sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *); |
@@ -605,10 +605,10 @@ struct se_session { | |||
605 | struct list_head sess_list; | 605 | struct list_head sess_list; |
606 | struct list_head sess_acl_list; | 606 | struct list_head sess_acl_list; |
607 | struct list_head sess_cmd_list; | 607 | struct list_head sess_cmd_list; |
608 | struct list_head sess_wait_list; | ||
609 | spinlock_t sess_cmd_lock; | 608 | spinlock_t sess_cmd_lock; |
609 | wait_queue_head_t cmd_list_wq; | ||
610 | void *sess_cmd_map; | 610 | void *sess_cmd_map; |
611 | struct percpu_ida sess_tag_pool; | 611 | struct sbitmap_queue sess_tag_pool; |
612 | }; | 612 | }; |
613 | 613 | ||
614 | struct se_device; | 614 | struct se_device; |
@@ -638,7 +638,6 @@ struct se_dev_entry { | |||
638 | atomic_long_t total_cmds; | 638 | atomic_long_t total_cmds; |
639 | atomic_long_t read_bytes; | 639 | atomic_long_t read_bytes; |
640 | atomic_long_t write_bytes; | 640 | atomic_long_t write_bytes; |
641 | atomic_t ua_count; | ||
642 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ | 641 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ |
643 | struct kref pr_kref; | 642 | struct kref pr_kref; |
644 | struct completion pr_comp; | 643 | struct completion pr_comp; |
@@ -934,4 +933,9 @@ static inline void atomic_dec_mb(atomic_t *v) | |||
934 | smp_mb__after_atomic(); | 933 | smp_mb__after_atomic(); |
935 | } | 934 | } |
936 | 935 | ||
936 | static inline void target_free_tag(struct se_session *sess, struct se_cmd *cmd) | ||
937 | { | ||
938 | sbitmap_queue_clear(&sess->sess_tag_pool, cmd->map_tag, cmd->map_cpu); | ||
939 | } | ||
940 | |||
937 | #endif /* TARGET_CORE_BASE_H */ | 941 | #endif /* TARGET_CORE_BASE_H */ |
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index b297aa0d9651..f4147b398431 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h | |||
@@ -79,7 +79,7 @@ struct target_core_fabric_ops { | |||
79 | void (*fabric_drop_wwn)(struct se_wwn *); | 79 | void (*fabric_drop_wwn)(struct se_wwn *); |
80 | void (*add_wwn_groups)(struct se_wwn *); | 80 | void (*add_wwn_groups)(struct se_wwn *); |
81 | struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *, | 81 | struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *, |
82 | struct config_group *, const char *); | 82 | const char *); |
83 | void (*fabric_drop_tpg)(struct se_portal_group *); | 83 | void (*fabric_drop_tpg)(struct se_portal_group *); |
84 | int (*fabric_post_link)(struct se_portal_group *, | 84 | int (*fabric_post_link)(struct se_portal_group *, |
85 | struct se_lun *); | 85 | struct se_lun *); |
@@ -109,17 +109,17 @@ void target_unregister_template(const struct target_core_fabric_ops *fo); | |||
109 | int target_depend_item(struct config_item *item); | 109 | int target_depend_item(struct config_item *item); |
110 | void target_undepend_item(struct config_item *item); | 110 | void target_undepend_item(struct config_item *item); |
111 | 111 | ||
112 | struct se_session *target_alloc_session(struct se_portal_group *, | 112 | struct se_session *target_setup_session(struct se_portal_group *, |
113 | unsigned int, unsigned int, enum target_prot_op prot_op, | 113 | unsigned int, unsigned int, enum target_prot_op prot_op, |
114 | const char *, void *, | 114 | const char *, void *, |
115 | int (*callback)(struct se_portal_group *, | 115 | int (*callback)(struct se_portal_group *, |
116 | struct se_session *, void *)); | 116 | struct se_session *, void *)); |
117 | void target_remove_session(struct se_session *); | ||
117 | 118 | ||
118 | struct se_session *transport_init_session(enum target_prot_op); | 119 | void transport_init_session(struct se_session *); |
120 | struct se_session *transport_alloc_session(enum target_prot_op); | ||
119 | int transport_alloc_session_tags(struct se_session *, unsigned int, | 121 | int transport_alloc_session_tags(struct se_session *, unsigned int, |
120 | unsigned int); | 122 | unsigned int); |
121 | struct se_session *transport_init_session_tags(unsigned int, unsigned int, | ||
122 | enum target_prot_op); | ||
123 | void __transport_register_session(struct se_portal_group *, | 123 | void __transport_register_session(struct se_portal_group *, |
124 | struct se_node_acl *, struct se_session *, void *); | 124 | struct se_node_acl *, struct se_session *, void *); |
125 | void transport_register_session(struct se_portal_group *, | 125 | void transport_register_session(struct se_portal_group *, |
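The fabric_make_tpg() method loses its struct config_group argument; as the f_tcm hunk above shows, a fabric that still needs the configfs subsystem can reach it through wwn->wwn_group.cg_subsys. A minimal sketch of a fabric implementing the new two-argument callback, assuming the common "tpgt_<n>" group naming; struct and function names are placeholders:

struct example_tpg {
        struct se_portal_group se_tpg;
        unsigned long tpgt;
};

static struct se_portal_group *example_make_tpg(struct se_wwn *wwn,
                                                const char *name)
{
        struct example_tpg *tpg;
        unsigned long tpgt;
        int ret;

        /* configfs names the group "tpgt_<n>"; parse the index */
        if (strstr(name, "tpgt_") != name || kstrtoul(name + 5, 10, &tpgt))
                return ERR_PTR(-EINVAL);

        tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
        if (!tpg)
                return ERR_PTR(-ENOMEM);
        tpg->tpgt = tpgt;

        /* the config_group formerly passed as the second argument is still
         * reachable via wwn->wwn_group.cg_subsys if a configfs dependency
         * has to be taken, as f_tcm does above */
        ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);
        if (ret < 0) {
                kfree(tpg);
                return ERR_PTR(ret);
        }
        return &tpg->se_tpg;
}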
diff --git a/lib/Makefile b/lib/Makefile index ff3a397bbb12..d95bb2525101 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -37,7 +37,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \ | |||
37 | bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \ | 37 | bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \ |
38 | gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \ | 38 | gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \ |
39 | bsearch.o find_bit.o llist.o memweight.o kfifo.o \ | 39 | bsearch.o find_bit.o llist.o memweight.o kfifo.o \ |
40 | percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \ | 40 | percpu-refcount.o rhashtable.o reciprocal_div.o \ |
41 | once.o refcount.o usercopy.o errseq.o bucket_locks.o | 41 | once.o refcount.o usercopy.o errseq.o bucket_locks.o |
42 | obj-$(CONFIG_STRING_SELFTEST) += test_string.o | 42 | obj-$(CONFIG_STRING_SELFTEST) += test_string.o |
43 | obj-y += string_helpers.o | 43 | obj-y += string_helpers.o |
diff --git a/lib/klist.c b/lib/klist.c index 0507fa5d84c5..f6b547812fe3 100644 --- a/lib/klist.c +++ b/lib/klist.c | |||
@@ -336,8 +336,9 @@ struct klist_node *klist_prev(struct klist_iter *i) | |||
336 | void (*put)(struct klist_node *) = i->i_klist->put; | 336 | void (*put)(struct klist_node *) = i->i_klist->put; |
337 | struct klist_node *last = i->i_cur; | 337 | struct klist_node *last = i->i_cur; |
338 | struct klist_node *prev; | 338 | struct klist_node *prev; |
339 | unsigned long flags; | ||
339 | 340 | ||
340 | spin_lock(&i->i_klist->k_lock); | 341 | spin_lock_irqsave(&i->i_klist->k_lock, flags); |
341 | 342 | ||
342 | if (last) { | 343 | if (last) { |
343 | prev = to_klist_node(last->n_node.prev); | 344 | prev = to_klist_node(last->n_node.prev); |
@@ -356,7 +357,7 @@ struct klist_node *klist_prev(struct klist_iter *i) | |||
356 | prev = to_klist_node(prev->n_node.prev); | 357 | prev = to_klist_node(prev->n_node.prev); |
357 | } | 358 | } |
358 | 359 | ||
359 | spin_unlock(&i->i_klist->k_lock); | 360 | spin_unlock_irqrestore(&i->i_klist->k_lock, flags); |
360 | 361 | ||
361 | if (put && last) | 362 | if (put && last) |
362 | put(last); | 363 | put(last); |
@@ -377,8 +378,9 @@ struct klist_node *klist_next(struct klist_iter *i) | |||
377 | void (*put)(struct klist_node *) = i->i_klist->put; | 378 | void (*put)(struct klist_node *) = i->i_klist->put; |
378 | struct klist_node *last = i->i_cur; | 379 | struct klist_node *last = i->i_cur; |
379 | struct klist_node *next; | 380 | struct klist_node *next; |
381 | unsigned long flags; | ||
380 | 382 | ||
381 | spin_lock(&i->i_klist->k_lock); | 383 | spin_lock_irqsave(&i->i_klist->k_lock, flags); |
382 | 384 | ||
383 | if (last) { | 385 | if (last) { |
384 | next = to_klist_node(last->n_node.next); | 386 | next = to_klist_node(last->n_node.next); |
@@ -397,7 +399,7 @@ struct klist_node *klist_next(struct klist_iter *i) | |||
397 | next = to_klist_node(next->n_node.next); | 399 | next = to_klist_node(next->n_node.next); |
398 | } | 400 | } |
399 | 401 | ||
400 | spin_unlock(&i->i_klist->k_lock); | 402 | spin_unlock_irqrestore(&i->i_klist->k_lock, flags); |
401 | 403 | ||
402 | if (put && last) | 404 | if (put && last) |
403 | put(last); | 405 | put(last); |
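klist_prev()/klist_next() now take k_lock with spin_lock_irqsave(), so the iterator remains safe if the same klist is also touched from a context that runs with interrupts disabled. The iterator API itself is unchanged; a small sketch of typical usage, with the walker function being illustrative:

#include <linux/klist.h>

/* count the nodes on a klist; klist_next() handles the (now irq-safe)
 * locking and the per-node get/put internally */
static unsigned int example_count(struct klist *list)
{
        struct klist_iter iter;
        unsigned int n = 0;

        klist_iter_init(list, &iter);
        while (klist_next(&iter))
                n++;
        klist_iter_exit(&iter);
        return n;
}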
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c deleted file mode 100644 index beb14839b41a..000000000000 --- a/lib/percpu_ida.c +++ /dev/null | |||
@@ -1,370 +0,0 @@ | |||
1 | /* | ||
2 | * Percpu IDA library | ||
3 | * | ||
4 | * Copyright (C) 2013 Datera, Inc. Kent Overstreet | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License as | ||
8 | * published by the Free Software Foundation; either version 2, or (at | ||
9 | * your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * General Public License for more details. | ||
15 | */ | ||
16 | |||
17 | #include <linux/mm.h> | ||
18 | #include <linux/bitmap.h> | ||
19 | #include <linux/bitops.h> | ||
20 | #include <linux/bug.h> | ||
21 | #include <linux/err.h> | ||
22 | #include <linux/export.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/percpu.h> | ||
26 | #include <linux/sched/signal.h> | ||
27 | #include <linux/string.h> | ||
28 | #include <linux/spinlock.h> | ||
29 | #include <linux/percpu_ida.h> | ||
30 | |||
31 | struct percpu_ida_cpu { | ||
32 | /* | ||
33 | * Even though this is percpu, we need a lock for tag stealing by remote | ||
34 | * CPUs: | ||
35 | */ | ||
36 | spinlock_t lock; | ||
37 | |||
38 | /* nr_free/freelist form a stack of free IDs */ | ||
39 | unsigned nr_free; | ||
40 | unsigned freelist[]; | ||
41 | }; | ||
42 | |||
43 | static inline void move_tags(unsigned *dst, unsigned *dst_nr, | ||
44 | unsigned *src, unsigned *src_nr, | ||
45 | unsigned nr) | ||
46 | { | ||
47 | *src_nr -= nr; | ||
48 | memcpy(dst + *dst_nr, src + *src_nr, sizeof(unsigned) * nr); | ||
49 | *dst_nr += nr; | ||
50 | } | ||
51 | |||
52 | /* | ||
53 | * Try to steal tags from a remote cpu's percpu freelist. | ||
54 | * | ||
55 | * We first check how many percpu freelists have tags | ||
56 | * | ||
57 | * Then we iterate through the cpus until we find some tags - we don't attempt | ||
58 | * to find the "best" cpu to steal from, to keep cacheline bouncing to a | ||
59 | * minimum. | ||
60 | */ | ||
61 | static inline void steal_tags(struct percpu_ida *pool, | ||
62 | struct percpu_ida_cpu *tags) | ||
63 | { | ||
64 | unsigned cpus_have_tags, cpu = pool->cpu_last_stolen; | ||
65 | struct percpu_ida_cpu *remote; | ||
66 | |||
67 | for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags); | ||
68 | cpus_have_tags; cpus_have_tags--) { | ||
69 | cpu = cpumask_next(cpu, &pool->cpus_have_tags); | ||
70 | |||
71 | if (cpu >= nr_cpu_ids) { | ||
72 | cpu = cpumask_first(&pool->cpus_have_tags); | ||
73 | if (cpu >= nr_cpu_ids) | ||
74 | BUG(); | ||
75 | } | ||
76 | |||
77 | pool->cpu_last_stolen = cpu; | ||
78 | remote = per_cpu_ptr(pool->tag_cpu, cpu); | ||
79 | |||
80 | cpumask_clear_cpu(cpu, &pool->cpus_have_tags); | ||
81 | |||
82 | if (remote == tags) | ||
83 | continue; | ||
84 | |||
85 | spin_lock(&remote->lock); | ||
86 | |||
87 | if (remote->nr_free) { | ||
88 | memcpy(tags->freelist, | ||
89 | remote->freelist, | ||
90 | sizeof(unsigned) * remote->nr_free); | ||
91 | |||
92 | tags->nr_free = remote->nr_free; | ||
93 | remote->nr_free = 0; | ||
94 | } | ||
95 | |||
96 | spin_unlock(&remote->lock); | ||
97 | |||
98 | if (tags->nr_free) | ||
99 | break; | ||
100 | } | ||
101 | } | ||
102 | |||
103 | /* | ||
104 | * Pop up to IDA_PCPU_BATCH_MOVE IDs off the global freelist, and push them onto | ||
105 | * our percpu freelist: | ||
106 | */ | ||
107 | static inline void alloc_global_tags(struct percpu_ida *pool, | ||
108 | struct percpu_ida_cpu *tags) | ||
109 | { | ||
110 | move_tags(tags->freelist, &tags->nr_free, | ||
111 | pool->freelist, &pool->nr_free, | ||
112 | min(pool->nr_free, pool->percpu_batch_size)); | ||
113 | } | ||
114 | |||
115 | /** | ||
116 | * percpu_ida_alloc - allocate a tag | ||
117 | * @pool: pool to allocate from | ||
118 | * @state: task state for prepare_to_wait | ||
119 | * | ||
120 | * Returns a tag - an integer in the range [0..nr_tags) (passed to | ||
121 | * percpu_ida_init()), or -ENOSPC on allocation failure. | ||
122 | * | ||
123 | * Safe to be called from interrupt context (assuming it isn't passed | ||
124 | * TASK_UNINTERRUPTIBLE or TASK_INTERRUPTIBLE, of course). | ||
125 | * | ||
126 | * @state indicates whether to wait until a free id is available: with | ||
127 | * TASK_RUNNING the call never sleeps and may return -ENOSPC immediately; with | ||
128 | * TASK_UNINTERRUPTIBLE or TASK_INTERRUPTIBLE we may sleep however long it | ||
129 | * takes until another thread frees an id (same semantics as a mempool). | ||
130 | * Allocation will not fail with TASK_UNINTERRUPTIBLE; with TASK_INTERRUPTIBLE | ||
131 | * it may return -ERESTARTSYS if a signal is pending. | ||
132 | */ | ||
133 | int percpu_ida_alloc(struct percpu_ida *pool, int state) | ||
134 | { | ||
135 | DEFINE_WAIT(wait); | ||
136 | struct percpu_ida_cpu *tags; | ||
137 | unsigned long flags; | ||
138 | int tag = -ENOSPC; | ||
139 | |||
140 | tags = raw_cpu_ptr(pool->tag_cpu); | ||
141 | spin_lock_irqsave(&tags->lock, flags); | ||
142 | |||
143 | /* Fastpath */ | ||
144 | if (likely(tags->nr_free)) { | ||
145 | tag = tags->freelist[--tags->nr_free]; | ||
146 | spin_unlock_irqrestore(&tags->lock, flags); | ||
147 | return tag; | ||
148 | } | ||
149 | spin_unlock_irqrestore(&tags->lock, flags); | ||
150 | |||
151 | while (1) { | ||
152 | spin_lock_irqsave(&pool->lock, flags); | ||
153 | tags = this_cpu_ptr(pool->tag_cpu); | ||
154 | |||
155 | /* | ||
156 | * prepare_to_wait() must come before steal_tags(), in case | ||
157 | * percpu_ida_free() on another cpu flips a bit in | ||
158 | * cpus_have_tags. | ||
159 | * | ||
160 | * The global lock is held and irqs are disabled, so we don't need the percpu lock. | ||
161 | */ | ||
162 | if (state != TASK_RUNNING) | ||
163 | prepare_to_wait(&pool->wait, &wait, state); | ||
164 | |||
165 | if (!tags->nr_free) | ||
166 | alloc_global_tags(pool, tags); | ||
167 | if (!tags->nr_free) | ||
168 | steal_tags(pool, tags); | ||
169 | |||
170 | if (tags->nr_free) { | ||
171 | tag = tags->freelist[--tags->nr_free]; | ||
172 | if (tags->nr_free) | ||
173 | cpumask_set_cpu(smp_processor_id(), | ||
174 | &pool->cpus_have_tags); | ||
175 | } | ||
176 | |||
177 | spin_unlock_irqrestore(&pool->lock, flags); | ||
178 | |||
179 | if (tag >= 0 || state == TASK_RUNNING) | ||
180 | break; | ||
181 | |||
182 | if (signal_pending_state(state, current)) { | ||
183 | tag = -ERESTARTSYS; | ||
184 | break; | ||
185 | } | ||
186 | |||
187 | schedule(); | ||
188 | } | ||
189 | if (state != TASK_RUNNING) | ||
190 | finish_wait(&pool->wait, &wait); | ||
191 | |||
192 | return tag; | ||
193 | } | ||
194 | EXPORT_SYMBOL_GPL(percpu_ida_alloc); | ||
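/*
 * Illustrative usage sketch (editor's example, not part of this file): a
 * driver allocating a tag to index a preallocated request array, using the
 * @state semantics documented above. "my_pool", "my_requests" and MY_NR_TAGS
 * are hypothetical names.
 *
 *	static struct percpu_ida my_pool;
 *	static struct my_request my_requests[MY_NR_TAGS];
 *
 *	static struct my_request *get_request(bool can_sleep)
 *	{
 *		int tag = percpu_ida_alloc(&my_pool, can_sleep ?
 *					   TASK_UNINTERRUPTIBLE : TASK_RUNNING);
 *
 *		if (tag < 0)
 *			return NULL;	(only possible for TASK_RUNNING: -ENOSPC)
 *		return &my_requests[tag];
 *	}
 *
 *	static void put_request(struct my_request *req)
 *	{
 *		percpu_ida_free(&my_pool, req - my_requests);
 *	}
 */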
195 | |||
196 | /** | ||
197 | * percpu_ida_free - free a tag | ||
198 | * @pool: pool @tag was allocated from | ||
199 | * @tag: a tag previously allocated with percpu_ida_alloc() | ||
200 | * | ||
201 | * Safe to be called from interrupt context. | ||
202 | */ | ||
203 | void percpu_ida_free(struct percpu_ida *pool, unsigned tag) | ||
204 | { | ||
205 | struct percpu_ida_cpu *tags; | ||
206 | unsigned long flags; | ||
207 | unsigned nr_free; | ||
208 | |||
209 | BUG_ON(tag >= pool->nr_tags); | ||
210 | |||
211 | tags = raw_cpu_ptr(pool->tag_cpu); | ||
212 | |||
213 | spin_lock_irqsave(&tags->lock, flags); | ||
214 | tags->freelist[tags->nr_free++] = tag; | ||
215 | |||
216 | nr_free = tags->nr_free; | ||
217 | |||
218 | if (nr_free == 1) { | ||
219 | cpumask_set_cpu(smp_processor_id(), | ||
220 | &pool->cpus_have_tags); | ||
221 | wake_up(&pool->wait); | ||
222 | } | ||
223 | spin_unlock_irqrestore(&tags->lock, flags); | ||
224 | |||
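	/*
	 * If this cpu's freelist has grown to percpu_max_size, hand a batch
	 * of percpu_batch_size tags back to the global freelist so that
	 * other cpus (and waiters on the global pool) can use them.
	 */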
225 | if (nr_free == pool->percpu_max_size) { | ||
226 | spin_lock_irqsave(&pool->lock, flags); | ||
227 | spin_lock(&tags->lock); | ||
228 | |||
229 | if (tags->nr_free == pool->percpu_max_size) { | ||
230 | move_tags(pool->freelist, &pool->nr_free, | ||
231 | tags->freelist, &tags->nr_free, | ||
232 | pool->percpu_batch_size); | ||
233 | |||
234 | wake_up(&pool->wait); | ||
235 | } | ||
236 | spin_unlock(&tags->lock); | ||
237 | spin_unlock_irqrestore(&pool->lock, flags); | ||
238 | } | ||
239 | } | ||
240 | EXPORT_SYMBOL_GPL(percpu_ida_free); | ||
241 | |||
242 | /** | ||
243 | * percpu_ida_destroy - release a tag pool's resources | ||
244 | * @pool: pool to free | ||
245 | * | ||
246 | * Frees the resources allocated by percpu_ida_init(). | ||
247 | */ | ||
248 | void percpu_ida_destroy(struct percpu_ida *pool) | ||
249 | { | ||
250 | free_percpu(pool->tag_cpu); | ||
251 | free_pages((unsigned long) pool->freelist, | ||
252 | get_order(pool->nr_tags * sizeof(unsigned))); | ||
253 | } | ||
254 | EXPORT_SYMBOL_GPL(percpu_ida_destroy); | ||
255 | |||
256 | /** | ||
257 | * __percpu_ida_init - initialize a percpu tag pool | ||
258 | * @pool: pool to initialize | ||
259 | * @nr_tags: number of tags that will be available for allocation | ||
260 | * @max_size: maximum number of free tags cached on each cpu | ||
261 | * @batch_size: number of tags moved at once between a percpu freelist and the global freelist | ||
262 | * | ||
263 | * Initializes @pool so that it can be used to allocate tags - integers in the | ||
264 | * range [0, nr_tags), typically used by driver code to refer to a preallocated | ||
265 | * array of tag structures. Allocation is percpu, but sharding is limited by | ||
266 | * nr_tags - for best performance, the workload should not span more cpus than nr_tags / 128. | ||
267 | */ | ||
268 | int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags, | ||
269 | unsigned long max_size, unsigned long batch_size) | ||
270 | { | ||
271 | unsigned i, cpu, order; | ||
272 | |||
273 | memset(pool, 0, sizeof(*pool)); | ||
274 | |||
275 | init_waitqueue_head(&pool->wait); | ||
276 | spin_lock_init(&pool->lock); | ||
277 | pool->nr_tags = nr_tags; | ||
278 | pool->percpu_max_size = max_size; | ||
279 | pool->percpu_batch_size = batch_size; | ||
280 | |||
281 | /* Guard against overflow */ | ||
282 | if (nr_tags > (unsigned) INT_MAX + 1) { | ||
283 | pr_err("percpu_ida_init(): nr_tags too large\n"); | ||
284 | return -EINVAL; | ||
285 | } | ||
286 | |||
287 | order = get_order(nr_tags * sizeof(unsigned)); | ||
288 | pool->freelist = (void *) __get_free_pages(GFP_KERNEL, order); | ||
289 | if (!pool->freelist) | ||
290 | return -ENOMEM; | ||
291 | |||
292 | for (i = 0; i < nr_tags; i++) | ||
293 | pool->freelist[i] = i; | ||
294 | |||
295 | pool->nr_free = nr_tags; | ||
296 | |||
297 | pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) + | ||
298 | pool->percpu_max_size * sizeof(unsigned), | ||
299 | sizeof(unsigned)); | ||
300 | if (!pool->tag_cpu) | ||
301 | goto err; | ||
302 | |||
303 | for_each_possible_cpu(cpu) | ||
304 | spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock); | ||
305 | |||
306 | return 0; | ||
307 | err: | ||
308 | percpu_ida_destroy(pool); | ||
309 | return -ENOMEM; | ||
310 | } | ||
311 | EXPORT_SYMBOL_GPL(__percpu_ida_init); | ||
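/*
 * Illustrative init/teardown sketch (editor's example, not part of this
 * file), calling __percpu_ida_init() directly with explicit per-cpu cache
 * and batch sizes; "my_pool", MY_NR_TAGS, my_setup() and my_teardown() are
 * hypothetical names. Here up to 64 tags are cached per cpu and they are
 * moved 32 at a time between the percpu and global freelists.
 *
 *	static struct percpu_ida my_pool;
 *
 *	static int my_setup(void)
 *	{
 *		return __percpu_ida_init(&my_pool, MY_NR_TAGS, 64, 32);
 *	}
 *
 *	static void my_teardown(void)
 *	{
 *		percpu_ida_destroy(&my_pool);
 *	}
 */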
312 | |||
313 | /** | ||
314 | * percpu_ida_for_each_free - iterate free ids of a pool | ||
315 | * @pool: pool to iterate | ||
316 | * @fn: callback invoked for each free id | ||
317 | * @data: parameter for @fn | ||
318 | * | ||
319 | * Note: this does not guarantee to visit every free id exactly once. Some free | ||
320 | * ids might be missed, some might be visited more than once, and some might | ||
321 | * already have been reallocated by the time the callback runs. | ||
322 | */ | ||
323 | int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn, | ||
324 | void *data) | ||
325 | { | ||
326 | unsigned long flags; | ||
327 | struct percpu_ida_cpu *remote; | ||
328 | unsigned cpu, i, err = 0; | ||
329 | |||
330 | for_each_possible_cpu(cpu) { | ||
331 | remote = per_cpu_ptr(pool->tag_cpu, cpu); | ||
332 | spin_lock_irqsave(&remote->lock, flags); | ||
333 | for (i = 0; i < remote->nr_free; i++) { | ||
334 | err = fn(remote->freelist[i], data); | ||
335 | if (err) | ||
336 | break; | ||
337 | } | ||
338 | spin_unlock_irqrestore(&remote->lock, flags); | ||
339 | if (err) | ||
340 | goto out; | ||
341 | } | ||
342 | |||
343 | spin_lock_irqsave(&pool->lock, flags); | ||
344 | for (i = 0; i < pool->nr_free; i++) { | ||
345 | err = fn(pool->freelist[i], data); | ||
346 | if (err) | ||
347 | break; | ||
348 | } | ||
349 | spin_unlock_irqrestore(&pool->lock, flags); | ||
350 | out: | ||
351 | return err; | ||
352 | } | ||
353 | EXPORT_SYMBOL_GPL(percpu_ida_for_each_free); | ||
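/*
 * Illustrative callback sketch for percpu_ida_for_each_free() (editor's
 * example, not part of this file); "count_free" and "my_pool" are
 * hypothetical names.
 *
 *	static int count_free(unsigned id, void *data)
 *	{
 *		unsigned *nr = data;
 *
 *		(*nr)++;
 *		return 0;
 *	}
 *
 *	unsigned nr = 0;
 *
 *	percpu_ida_for_each_free(&my_pool, count_free, &nr);
 *
 * A non-zero return from the callback stops the iteration and is passed back
 * to the caller.
 */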
354 | |||
355 | /** | ||
356 | * percpu_ida_free_tags - return the number of free tags on a cpu or in the global pool | ||
357 | * @pool: pool to query | ||
358 | * @cpu: cpu to query, or nr_cpu_ids for the global freelist | ||
359 | * | ||
360 | * Note: this only returns a snapshot of the number of free tags. | ||
361 | */ | ||
362 | unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu) | ||
363 | { | ||
364 | struct percpu_ida_cpu *remote; | ||
365 | if (cpu == nr_cpu_ids) | ||
366 | return pool->nr_free; | ||
367 | remote = per_cpu_ptr(pool->tag_cpu, cpu); | ||
368 | return remote->nr_free; | ||
369 | } | ||
370 | EXPORT_SYMBOL_GPL(percpu_ida_free_tags); | ||
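/*
 * Illustrative sketch (editor's example, not part of this file): summing a
 * snapshot of all free tags, per-cpu plus global; "my_pool" is hypothetical.
 *
 *	unsigned cpu, total = percpu_ida_free_tags(&my_pool, nr_cpu_ids);
 *
 *	for_each_possible_cpu(cpu)
 *		total += percpu_ida_free_tags(&my_pool, cpu);
 */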