-rw-r--r--  Documentation/scsi/LICENSE.qla2xxx |   41
-rw-r--r--  Documentation/scsi/bfa.txt |   82
-rw-r--r--  Documentation/scsi/libsas.txt |   15
-rw-r--r--  drivers/ata/libata-core.c |   34
-rw-r--r--  drivers/ata/libata-eh.c |    1
-rw-r--r--  drivers/ata/libata-scsi.c |   13
-rw-r--r--  drivers/ata/libata.h |    2
-rw-r--r--  drivers/scsi/Kconfig |    8
-rw-r--r--  drivers/scsi/Makefile |    1
-rw-r--r--  drivers/scsi/aacraid/aachba.c |    4
-rw-r--r--  drivers/scsi/aacraid/aacraid.h |   27
-rw-r--r--  drivers/scsi/aacraid/comminit.c |   21
-rw-r--r--  drivers/scsi/aacraid/commsup.c |   26
-rw-r--r--  drivers/scsi/aacraid/linit.c |   28
-rw-r--r--  drivers/scsi/aacraid/rx.c |    1
-rw-r--r--  drivers/scsi/aacraid/sa.c |    1
-rw-r--r--  drivers/scsi/aacraid/src.c |  293
-rw-r--r--  drivers/scsi/aic94xx/aic94xx.h |    2
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_dev.c |   38
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c |    6
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_tmf.c |   11
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.c |    4
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc.h |    8
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c |   20
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_hwi.c |   12
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_init.c |    9
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c |   13
-rw-r--r--  drivers/scsi/fcoe/fcoe.c |  167
-rw-r--r--  drivers/scsi/fcoe/fcoe.h |    3
-rw-r--r--  drivers/scsi/fcoe/fcoe_transport.c |    4
-rw-r--r--  drivers/scsi/hpsa.c |  344
-rw-r--r--  drivers/scsi/hpsa.h |    3
-rw-r--r--  drivers/scsi/hpsa_cmd.h |    9
-rw-r--r--  drivers/scsi/ipr.c |   14
-rw-r--r--  drivers/scsi/ipr.h |    4
-rw-r--r--  drivers/scsi/isci/host.c |   17
-rw-r--r--  drivers/scsi/isci/host.h |   19
-rw-r--r--  drivers/scsi/isci/init.c |   24
-rw-r--r--  drivers/scsi/isci/phy.c |  171
-rw-r--r--  drivers/scsi/isci/phy.h |  155
-rw-r--r--  drivers/scsi/isci/port.c |  263
-rw-r--r--  drivers/scsi/isci/port.h |  114
-rw-r--r--  drivers/scsi/isci/registers.h |   27
-rw-r--r--  drivers/scsi/isci/remote_device.c |   82
-rw-r--r--  drivers/scsi/isci/remote_device.h |  212
-rw-r--r--  drivers/scsi/isci/remote_node_context.c |   19
-rw-r--r--  drivers/scsi/isci/remote_node_context.h |   97
-rw-r--r--  drivers/scsi/isci/request.c |  370
-rw-r--r--  drivers/scsi/isci/request.h |  228
-rw-r--r--  drivers/scsi/isci/scu_task_context.h |   55
-rw-r--r--  drivers/scsi/isci/task.c |  158
-rw-r--r--  drivers/scsi/isci/task.h |   40
-rw-r--r--  drivers/scsi/iscsi_tcp.c |   13
-rw-r--r--  drivers/scsi/libfc/fc_disc.c |    7
-rw-r--r--  drivers/scsi/libfc/fc_elsct.c |    3
-rw-r--r--  drivers/scsi/libfc/fc_exch.c |    7
-rw-r--r--  drivers/scsi/libfc/fc_lport.c |  227
-rw-r--r--  drivers/scsi/libiscsi.c |   28
-rw-r--r--  drivers/scsi/libiscsi_tcp.c |   18
-rw-r--r--  drivers/scsi/libsas/sas_ata.c |  828
-rw-r--r--  drivers/scsi/libsas/sas_discover.c |  246
-rw-r--r--  drivers/scsi/libsas/sas_event.c |   96
-rw-r--r--  drivers/scsi/libsas/sas_expander.c |  342
-rw-r--r--  drivers/scsi/libsas/sas_host_smp.c |   11
-rw-r--r--  drivers/scsi/libsas/sas_init.c |  214
-rw-r--r--  drivers/scsi/libsas/sas_internal.h |   97
-rw-r--r--  drivers/scsi/libsas/sas_phy.c |   12
-rw-r--r--  drivers/scsi/libsas/sas_port.c |   32
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c |  364
-rw-r--r--  drivers/scsi/lpfc/lpfc.h |   13
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c |   12
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c |   18
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h |    1
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c |    4
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c |   19
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c |    5
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c |   12
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h |    5
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h |   49
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c |  137
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c |   77
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 1054
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c |  240
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h |    1
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h |    3
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h |    2
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.c |    5
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c |    6
-rw-r--r--  drivers/scsi/mvsas/mv_init.c |    2
-rw-r--r--  drivers/scsi/mvsas/mv_sas.c |   11
-rw-r--r--  drivers/scsi/pm8001/pm8001_chips.h |    4
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.c |  434
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.h |    2
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c |    2
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.c |  127
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.h |    6
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c |  177
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c |  120
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c |  630
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h |   63
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h |  117
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c |    2
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h |   13
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h |   22
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c |   86
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c |  540
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h |   51
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c |  167
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c |  445
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c |  410
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c |    2
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c |   90
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.h |    3
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c |  435
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c |  148
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h |   45
-rw-r--r--  drivers/scsi/qla4xxx/ql4_fw.h |   24
-rw-r--r--  drivers/scsi/qla4xxx/ql4_glbl.h |    9
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c |    5
-rw-r--r--  drivers/scsi/qla4xxx/ql4_iocb.c |   92
-rw-r--r--  drivers/scsi/qla4xxx/ql4_isr.c |   78
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c |   23
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c |   17
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.h |    1
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c |  563
-rw-r--r--  drivers/scsi/qla4xxx/ql4_version.h |    2
-rw-r--r--  drivers/scsi/scsi.c |    6
-rw-r--r--  drivers/scsi/scsi_debug.c |    6
-rw-r--r--  drivers/scsi/scsi_error.c |   24
-rw-r--r--  drivers/scsi/scsi_lib.c |    5
-rw-r--r--  drivers/scsi/scsi_transport_fc.c |   30
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c |  268
-rw-r--r--  drivers/scsi/scsi_transport_sas.c |   60
-rw-r--r--  drivers/scsi/sd.c |   86
-rw-r--r--  drivers/scsi/sd.h |   35
-rw-r--r--  drivers/scsi/st.c |   11
-rw-r--r--  drivers/scsi/virtio_scsi.c |  594
-rw-r--r--  include/linux/libata.h |    2
-rw-r--r--  include/linux/virtio_ids.h |    1
-rw-r--r--  include/linux/virtio_scsi.h |  114
-rw-r--r--  include/scsi/fc/fc_ms.h |  213
-rw-r--r--  include/scsi/fc_encode.h |  363
-rw-r--r--  include/scsi/iscsi_if.h |   79
-rw-r--r--  include/scsi/libfc.h |   11
-rw-r--r--  include/scsi/libiscsi.h |    3
-rw-r--r--  include/scsi/libiscsi_tcp.h |    2
-rw-r--r--  include/scsi/libsas.h |   71
-rw-r--r--  include/scsi/sas.h |    4
-rw-r--r--  include/scsi/sas_ata.h |   44
-rw-r--r--  include/scsi/scsi_cmnd.h |   12
-rw-r--r--  include/scsi/scsi_driver.h |    1
-rw-r--r--  include/scsi/scsi_transport_fc.h |   32
-rw-r--r--  include/scsi/scsi_transport_iscsi.h |   22
-rw-r--r--  include/scsi/scsi_transport_sas.h |   12
154 files changed, 10252 insertions(+), 4355 deletions(-)
diff --git a/Documentation/scsi/LICENSE.qla2xxx b/Documentation/scsi/LICENSE.qla2xxx
index 19e7cd4bba6..ce0fdf349a8 100644
--- a/Documentation/scsi/LICENSE.qla2xxx
+++ b/Documentation/scsi/LICENSE.qla2xxx
@@ -1,48 +1,11 @@
 Copyright (c) 2003-2011 QLogic Corporation
-QLogic Linux/ESX Fibre Channel HBA Driver
+QLogic Linux FC-FCoE Driver
 
-This program includes a device driver for Linux 2.6/ESX that may be
-distributed with QLogic hardware specific firmware binary file.
+This program includes a device driver for Linux 3.x.
 You may modify and redistribute the device driver code under the
 GNU General Public License (a copy of which is attached hereto as
 Exhibit A) published by the Free Software Foundation (version 2).
 
-You may redistribute the hardware specific firmware binary file
-under the following terms:
-
-	1. Redistribution of source code (only if applicable),
-	   must retain the above copyright notice, this list of
-	   conditions and the following disclaimer.
-
-	2. Redistribution in binary form must reproduce the above
-	   copyright notice, this list of conditions and the
-	   following disclaimer in the documentation and/or other
-	   materials provided with the distribution.
-
-	3. The name of QLogic Corporation may not be used to
-	   endorse or promote products derived from this software
-	   without specific prior written permission
-
-REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE,
-THIS PROGRAM IS PROVIDED BY QLOGIC CORPORATION "AS IS'' AND ANY
-EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
-PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
-BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
-TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT
-CREATE OR GIVE GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR
-OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT,
-TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN
-ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN
-COMBINATION WITH THIS PROGRAM.
-
 
 EXHIBIT A
diff --git a/Documentation/scsi/bfa.txt b/Documentation/scsi/bfa.txt
new file mode 100644
index 00000000000..f2d6e9d1791
--- /dev/null
+++ b/Documentation/scsi/bfa.txt
@@ -0,0 +1,82 @@
+Linux driver for Brocade FC/FCOE adapters
+
+
+Supported Hardware
+------------------
+
+bfa 3.0.2.2 driver supports all Brocade FC/FCOE adapters. Below is a list of
+adapter models with corresponding PCIIDs.
+
+	PCIID			Model
+
+	1657:0013:1657:0014	425      4Gbps dual port FC HBA
+	1657:0013:1657:0014	825      8Gbps PCIe dual port FC HBA
+	1657:0013:103c:1742	HP 82B   8Gbps PCIe dual port FC HBA
+	1657:0013:103c:1744	HP 42B   4Gbps dual port FC HBA
+	1657:0017:1657:0014	415      4Gbps single port FC HBA
+	1657:0017:1657:0014	815      8Gbps single port FC HBA
+	1657:0017:103c:1741	HP 41B   4Gbps single port FC HBA
+	1657:0017:103c:1743	HP 81B   8Gbps single port FC HBA
+	1657:0021:103c:1779	804      8Gbps FC HBA for HP Bladesystem c-class
+
+	1657:0014:1657:0014	1010     10Gbps single port CNA - FCOE
+	1657:0014:1657:0014	1020     10Gbps dual port CNA - FCOE
+	1657:0014:1657:0014	1007     10Gbps dual port CNA - FCOE
+	1657:0014:1657:0014	1741     10Gbps dual port CNA - FCOE
+
+	1657:0022:1657:0024	1860     16Gbps FC HBA
+	1657:0022:1657:0022	1860     10Gbps CNA - FCOE
+
+
+Firmware download
+-----------------
+
+The latest firmware package for the 3.0.2.2 bfa driver can be found at:
+
+http://www.brocade.com/services-support/drivers-downloads/adapters/Linux.page
+
+and then click the respective util package link:
+
+	Version			Link
+
+	v3.0.0.0		Linux Adapter Firmware package for RHEL 6.2, SLES 11SP2
+
+
+Configuration & Management utility download
+-------------------------------------------
+
+The latest driver configuration & management utility for the 3.0.2.2 bfa driver
+can be found at:
+
+http://www.brocade.com/services-support/drivers-downloads/adapters/Linux.page
+
+and then click the respective util package link:
+
+	Version			Link
+
+	v3.0.2.0		Linux Adapter Firmware package for RHEL 6.2, SLES 11SP2
+
+
+Documentation
+-------------
+
+The latest Administration's Guide, Installation and Reference Manual,
+Troubleshooting Guide, and Release Notes for the corresponding out-of-box
+driver can be found at:
+
+http://www.brocade.com/services-support/drivers-downloads/adapters/Linux.page
+
+and use the following inbox and out-of-box driver version mapping to find
+the corresponding documentation:
+
+	Inbox Version		Out-of-box Version
+
+	v3.0.2.2		v3.0.0.0
+
+
+Support
+-------
+
+For general product and support info, go to the Brocade website at:
+
+http://www.brocade.com/services-support/index.page
diff --git a/Documentation/scsi/libsas.txt b/Documentation/scsi/libsas.txt
index aa54f54c4a5..3cc9c7843e1 100644
--- a/Documentation/scsi/libsas.txt
+++ b/Documentation/scsi/libsas.txt
@@ -398,21 +398,6 @@ struct sas_task {
 	task_done -- callback when the task has finished execution
 };
 
-When an external entity, entity other than the LLDD or the
-SAS Layer, wants to work with a struct domain_device, it
-_must_ call kobject_get() when getting a handle on the
-device and kobject_put() when it is done with the device.
-
-This does two things:
-	A) implements proper kfree() for the device;
-	B) increments/decrements the kref for all players:
-		domain_device
-		all domain_device's ... (if past an expander)
-		port
-		host adapter
-		pci device
-		and up the ladder, etc.
-
 DISCOVERY
 ---------
 
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c06e0ec1155..e0bda9ff89c 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5936,29 +5936,31 @@ void ata_host_init(struct ata_host *host, struct device *dev,
 	host->ops = ops;
 }
 
-int ata_port_probe(struct ata_port *ap)
-{
-	int rc = 0;
-
-	/* probe */
-	if (ap->ops->error_handler) {
-		struct ata_eh_info *ehi = &ap->link.eh_info;
-		unsigned long flags;
-
-		/* kick EH for boot probing */
-		spin_lock_irqsave(ap->lock, flags);
-
-		ehi->probe_mask |= ATA_ALL_DEVICES;
-		ehi->action |= ATA_EH_RESET;
-		ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
-
-		ap->pflags &= ~ATA_PFLAG_INITIALIZING;
-		ap->pflags |= ATA_PFLAG_LOADING;
-		ata_port_schedule_eh(ap);
-
-		spin_unlock_irqrestore(ap->lock, flags);
-
-		/* wait for EH to finish */
+void __ata_port_probe(struct ata_port *ap)
+{
+	struct ata_eh_info *ehi = &ap->link.eh_info;
+	unsigned long flags;
+
+	/* kick EH for boot probing */
+	spin_lock_irqsave(ap->lock, flags);
+
+	ehi->probe_mask |= ATA_ALL_DEVICES;
+	ehi->action |= ATA_EH_RESET;
+	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
+
+	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
+	ap->pflags |= ATA_PFLAG_LOADING;
+	ata_port_schedule_eh(ap);
+
+	spin_unlock_irqrestore(ap->lock, flags);
+}
+
+int ata_port_probe(struct ata_port *ap)
+{
+	int rc = 0;
+
+	if (ap->ops->error_handler) {
+		__ata_port_probe(ap);
 		ata_port_wait_eh(ap);
 	} else {
 		DPRINTK("ata%u: bus probe begin\n", ap->print_id);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index a9b28203800..c61316e9d2f 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -863,6 +863,7 @@ void ata_port_wait_eh(struct ata_port *ap)
 		goto retry;
 	}
 }
+EXPORT_SYMBOL_GPL(ata_port_wait_eh);
 
 static int ata_eh_nr_in_flight(struct ata_port *ap)
 {
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 508a60bfe5c..1ee00c8b5b0 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3838,6 +3838,19 @@ void ata_sas_port_stop(struct ata_port *ap)
 }
 EXPORT_SYMBOL_GPL(ata_sas_port_stop);
 
+int ata_sas_async_port_init(struct ata_port *ap)
+{
+	int rc = ap->ops->port_start(ap);
+
+	if (!rc) {
+		ap->print_id = ata_print_id++;
+		__ata_port_probe(ap);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ata_sas_async_port_init);
+
 /**
  *	ata_sas_port_init - Initialize a SATA device
  *	@ap: SATA port to initialize
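The two exports above are the pieces an LLDD needs for non-blocking port bring-up. A rough usage sketch follows; only ata_sas_async_port_init() and ata_port_wait_eh() (both exported by this series) are real, the surrounding caller is hypothetical:

	/* Hypothetical caller; only the two ata_* calls come from this series. */
	static int example_async_bringup(struct ata_port *ap)
	{
		int rc;

		/* start the port and kick EH probing without waiting */
		rc = ata_sas_async_port_init(ap);
		if (rc)
			return rc;

		/* ... do other discovery work in parallel ... */

		/* synchronize with probing only when the result is needed */
		ata_port_wait_eh(ap);
		return 0;
	}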
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 814486d35c4..2e26fcaf635 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -105,6 +105,7 @@ extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
 extern struct ata_port *ata_port_alloc(struct ata_host *host);
 extern const char *sata_spd_string(unsigned int spd);
 extern int ata_port_probe(struct ata_port *ap);
+extern void __ata_port_probe(struct ata_port *ap);
 
 /* libata-acpi.c */
 #ifdef CONFIG_ATA_ACPI
@@ -151,7 +152,6 @@ extern void ata_eh_acquire(struct ata_port *ap);
 extern void ata_eh_release(struct ata_port *ap);
 extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
 extern void ata_scsi_error(struct Scsi_Host *host);
-extern void ata_port_wait_eh(struct ata_port *ap);
 extern void ata_eh_fastdrain_timerfn(unsigned long arg);
 extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc);
 extern void ata_dev_disable(struct ata_device *dev);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 4e89103204d..a06e608789e 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1903,6 +1903,14 @@ config SCSI_BFA_FC
 	  To compile this driver as a module, choose M here. The module will
 	  be called bfa.
 
+config SCSI_VIRTIO
+	tristate "virtio-scsi support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && VIRTIO
+	help
+	  This is the virtual HBA driver for virtio.  If the kernel will
+	  be used in a virtual machine, say Y or M.
+
+
 endif # SCSI_LOWLEVEL
 
 source "drivers/scsi/pcmcia/Kconfig"
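The Kconfig entry above only wires up the build; the virtio_scsi.c body itself is not shown in this excerpt. As a hedged sketch of what a virtio HBA driver of this era registers, where the virtscsi_* handlers are placeholders and VIRTIO_ID_SCSI is assumed to be the ID this series adds to include/linux/virtio_ids.h:

	#include <linux/module.h>
	#include <linux/virtio.h>
	#include <linux/virtio_ids.h>

	static int virtscsi_probe(struct virtio_device *vdev);		/* placeholder */
	static void virtscsi_remove(struct virtio_device *vdev);	/* placeholder */

	static struct virtio_device_id id_table[] = {
		{ VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
		{ 0 },
	};

	static struct virtio_driver virtio_scsi_driver = {
		.driver.name	= "virtio_scsi",
		.driver.owner	= THIS_MODULE,
		.id_table	= id_table,
		.probe		= virtscsi_probe,
		.remove		= virtscsi_remove,
	};

	static int __init virtio_scsi_init(void)
	{
		return register_virtio_driver(&virtio_scsi_driver);
	}
	module_init(virtio_scsi_init);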
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index e4c1a69f8fa..ad24e065b1e 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -141,6 +141,7 @@ obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
 obj-$(CONFIG_SCSI_BNX2_ISCSI)	+= libiscsi.o bnx2i/
 obj-$(CONFIG_BE2ISCSI)		+= libiscsi.o be2iscsi/
 obj-$(CONFIG_SCSI_PMCRAID)	+= pmcraid.o
+obj-$(CONFIG_SCSI_VIRTIO)	+= virtio_scsi.o
 obj-$(CONFIG_VMWARE_PVSCSI)	+= vmw_pvscsi.o
 obj-$(CONFIG_HYPERV_STORAGE)	+= hv_storvsc.o
 
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 409f5805bdd..52551662d10 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -151,7 +151,11 @@ int aac_msi;
 int aac_commit = -1;
 int startup_timeout = 180;
 int aif_timeout = 120;
+int aac_sync_mode;  /* Only Sync. transfer - disabled */
 
+module_param(aac_sync_mode, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(aac_sync_mode, "Force sync. transfer mode"
+	" 0=off, 1=on");
 module_param(nondasd, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices."
 	" 0=off, 1=on");
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index ffb587817ef..3fcf62724fa 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
  *----------------------------------------------------------------------------*/
 
 #ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 28000
+# define AAC_DRIVER_BUILD 28900
 # define AAC_DRIVER_BRANCH "-ms"
 #endif
 #define MAXIMUM_NUM_CONTAINERS	32
@@ -756,8 +756,16 @@ struct src_mu_registers {
 
 struct src_registers {
 	struct src_mu_registers MUnit;	/* 00h - c7h */
-	__le32 reserved1[130790];	/* c8h - 7fc5fh */
-	struct src_inbound IndexRegs;	/* 7fc60h */
+	union {
+		struct {
+			__le32 reserved1[130790];	/* c8h - 7fc5fh */
+			struct src_inbound IndexRegs;	/* 7fc60h */
+		} tupelo;
+		struct {
+			__le32 reserved1[974];		/* c8h - fffh */
+			struct src_inbound IndexRegs;	/* 1000h */
+		} denali;
+	} u;
 };
 
 #define src_readb(AEP, CSR)		readb(&((AEP)->regs.src.bar0->CSR))
@@ -999,6 +1007,10 @@ struct aac_bus_info_response {
 #define AAC_OPT_NEW_COMM		cpu_to_le32(1<<17)
 #define AAC_OPT_NEW_COMM_64		cpu_to_le32(1<<18)
 #define AAC_OPT_NEW_COMM_TYPE1		cpu_to_le32(1<<28)
+#define AAC_OPT_NEW_COMM_TYPE2		cpu_to_le32(1<<29)
+#define AAC_OPT_NEW_COMM_TYPE3		cpu_to_le32(1<<30)
+#define AAC_OPT_NEW_COMM_TYPE4		cpu_to_le32(1<<31)
+
 
 struct aac_dev
 {
@@ -1076,6 +1088,8 @@ struct aac_dev
 # define AAC_MIN_FOOTPRINT_SIZE 8192
 # define AAC_MIN_SRC_BAR0_SIZE 0x400000
 # define AAC_MIN_SRC_BAR1_SIZE 0x800
+# define AAC_MIN_SRCV_BAR0_SIZE 0x100000
+# define AAC_MIN_SRCV_BAR1_SIZE 0x400
 #endif
 	union
 	{
@@ -1116,7 +1130,10 @@ struct aac_dev
 	u8			msi;
 	int			management_fib_count;
 	spinlock_t		manage_lock;
-
+	spinlock_t		sync_lock;
+	int			sync_mode;
+	struct fib		*sync_fib;
+	struct list_head	sync_fib_list;
 };
 
 #define aac_adapter_interrupt(dev) \
@@ -1163,6 +1180,7 @@ struct aac_dev
 
 #define FIB_CONTEXT_FLAG_TIMED_OUT		(0x00000001)
 #define FIB_CONTEXT_FLAG			(0x00000002)
+#define FIB_CONTEXT_FLAG_WAIT			(0x00000004)
 
 /*
  *	Define the command values
@@ -1970,6 +1988,7 @@ int aac_rkt_init(struct aac_dev *dev);
 int aac_nark_init(struct aac_dev *dev);
 int aac_sa_init(struct aac_dev *dev);
 int aac_src_init(struct aac_dev *dev);
+int aac_srcv_init(struct aac_dev *dev);
 int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify);
 unsigned int aac_response_normal(struct aac_queue * q);
 unsigned int aac_command_normal(struct aac_queue * q);
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 7ac8fdb5577..a35f54ebdce 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -325,12 +325,14 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 {
 	u32 status[5];
 	struct Scsi_Host * host = dev->scsi_host_ptr;
+	extern int aac_sync_mode;
 
 	/*
 	 *	Check the preferred comm settings, defaults from template.
 	 */
 	dev->management_fib_count = 0;
 	spin_lock_init(&dev->manage_lock);
+	spin_lock_init(&dev->sync_lock);
 	dev->max_fib_size = sizeof(struct hw_fib);
 	dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
 		- sizeof(struct aac_fibhdr)
@@ -344,13 +346,21 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 	    (status[0] == 0x00000001)) {
 		if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_64))
 			dev->raw_io_64 = 1;
-		if (dev->a_ops.adapter_comm) {
-			if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1)) {
-				dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
-				dev->raw_io_interface = 1;
-			} else if (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM)) {
-				dev->comm_interface = AAC_COMM_MESSAGE;
-				dev->raw_io_interface = 1;
+		dev->sync_mode = aac_sync_mode;
+		if (dev->a_ops.adapter_comm &&
+		    (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM))) {
+			dev->comm_interface = AAC_COMM_MESSAGE;
+			dev->raw_io_interface = 1;
+			if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1))) {
+				/* driver supports TYPE1 (Tupelo) */
+				dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
+			} else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE4)) ||
+				   (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE3)) ||
+				   (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE2))) {
+				/* driver doesn't support TYPE2 (Series7), TYPE3 and TYPE4 */
+				/* switch to sync. mode */
+				dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
+				dev->sync_mode = 1;
 			}
 		}
 		if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
@@ -455,6 +465,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 	}
 
 	INIT_LIST_HEAD(&dev->fib_list);
+	INIT_LIST_HEAD(&dev->sync_fib_list);
 
 	return dev;
 }
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index e5f2d7d9002..4b32ca44243 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -416,6 +416,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 	unsigned long flags = 0;
 	unsigned long qflags;
 	unsigned long mflags = 0;
+	unsigned long sflags = 0;
 
 
 	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
@@ -512,6 +513,31 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
 		spin_lock_irqsave(&fibptr->event_lock, flags);
 	}
 
+	if (dev->sync_mode) {
+		if (wait)
+			spin_unlock_irqrestore(&fibptr->event_lock, flags);
+		spin_lock_irqsave(&dev->sync_lock, sflags);
+		if (dev->sync_fib) {
+			list_add_tail(&fibptr->fiblink, &dev->sync_fib_list);
+			spin_unlock_irqrestore(&dev->sync_lock, sflags);
+		} else {
+			dev->sync_fib = fibptr;
+			spin_unlock_irqrestore(&dev->sync_lock, sflags);
+			aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
+				(u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0,
+				NULL, NULL, NULL, NULL, NULL);
+		}
+		if (wait) {
+			fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
+			if (down_interruptible(&fibptr->event_wait)) {
+				fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
+				return -EFAULT;
+			}
+			return 0;
+		}
+		return -EINPROGRESS;
+	}
+
 	if (aac_adapter_deliver(fibptr) != 0) {
 		printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
 		if (wait) {
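The sync-mode path above funnels every fib through one outstanding SEND_SYNCHRONOUS_FIB: if dev->sync_fib is busy the new fib is parked on sync_fib_list, otherwise it is issued immediately; the interrupt handler (in src.c below) completes the active fib and dispatches the next. A stripped-down sketch of that single-slot pattern, with illustrative names rather than aacraid types:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct sync_req { struct list_head link; };
	static void hw_issue(struct sync_req *r);	/* stand-in for aac_adapter_sync_cmd() */

	struct sync_slot {
		spinlock_t lock;
		struct list_head pending;
		struct sync_req *active;	/* NULL when the adapter is idle */
	};

	static void sync_submit(struct sync_slot *s, struct sync_req *r)
	{
		unsigned long flags;

		spin_lock_irqsave(&s->lock, flags);
		if (s->active) {
			/* adapter busy: queue it; the completion IRQ issues it later */
			list_add_tail(&r->link, &s->pending);
			spin_unlock_irqrestore(&s->lock, flags);
		} else {
			s->active = r;	/* claim the single slot */
			spin_unlock_irqrestore(&s->lock, flags);
			hw_issue(r);	/* issue outside the lock, as aac_fib_send() does */
		}
	}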
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 705e13e470a..0d279c445a3 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -56,7 +56,7 @@
 
 #include "aacraid.h"
 
-#define AAC_DRIVER_VERSION		"1.1-7"
+#define AAC_DRIVER_VERSION		"1.2-0"
 #ifndef AAC_DRIVER_BRANCH
 #define AAC_DRIVER_BRANCH		""
 #endif
@@ -162,7 +162,10 @@ static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
 	{ 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */
 	{ 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */
 	{ 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */
-	{ 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Catch All */
+	{ 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Series 6 (Tupelo) */
+	{ 0x9005, 0x028c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 63 }, /* Adaptec PMC Series 7 (Denali) */
+	{ 0x9005, 0x028d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 64 }, /* Adaptec PMC Series 8 */
+	{ 0x9005, 0x028f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 65 }, /* Adaptec PMC Series 9 */
 	{ 0,}
 };
 MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
@@ -238,7 +241,10 @@ static struct aac_driver_ident aac_drivers[] = {
 	{ aac_rx_init, "aacraid",  "ADAPTEC ", "RAID            ", 2 }, /* Adaptec Catch All */
 	{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec Rocket Catch All */
 	{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec NEMER/ARK Catch All */
-	{ aac_src_init, "aacraid", "ADAPTEC ", "RAID            ", 2 } /* Adaptec PMC Catch All */
+	{ aac_src_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec PMC Series 6 (Tupelo) */
+	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec PMC Series 7 (Denali) */
+	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec PMC Series 8 */
+	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2 } /* Adaptec PMC Series 9 */
 };
 
 /**
@@ -1102,6 +1108,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
 	int error = -ENODEV;
 	int unique_id = 0;
 	u64 dmamask;
+	extern int aac_sync_mode;
 
 	list_for_each_entry(aac, &aac_devices, entry) {
 		if (aac->id > unique_id)
@@ -1162,6 +1169,21 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
 	if ((*aac_drivers[index].init)(aac))
 		goto out_unmap;
 
+	if (aac->sync_mode) {
+		if (aac_sync_mode)
+			printk(KERN_INFO "%s%d: Sync. mode enforced "
+				"by driver parameter. This will cause "
+				"a significant performance decrease!\n",
+				aac->name,
+				aac->id);
+		else
+			printk(KERN_INFO "%s%d: Async. mode not supported "
+				"by current driver, sync. mode enforced."
+				"\nPlease update driver to get full performance.\n",
+				aac->name,
+				aac->id);
+	}
+
 	/*
 	 *	Start any kernel threads needed
 	 */
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index ce530f113fd..b029c7cc785 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -643,6 +643,7 @@ int _aac_rx_init(struct aac_dev *dev)
 	if (aac_init_adapter(dev) == NULL)
 		goto error_iounmap;
 	aac_adapter_comm(dev, dev->comm_interface);
+	dev->sync_mode = 0;	/* sync. mode not supported */
 	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
 	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
 			IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index e5d4457121e..beb533630d4 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -385,6 +385,7 @@ int aac_sa_init(struct aac_dev *dev)
 
 	if(aac_init_adapter(dev) == NULL)
 		goto error_irq;
+	dev->sync_mode = 0;	/* sync. mode not supported */
 	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
 			IRQF_SHARED|IRQF_DISABLED,
 			"aacraid", (void *)dev ) < 0) {
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 957595a7a45..2bee51506a9 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -96,6 +96,38 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
 			our_interrupt = 1;
 			/* handle AIF */
 			aac_intr_normal(dev, 0, 2, 0, NULL);
+		} else if (bellbits_shifted & OUTBOUNDDOORBELL_0) {
+			unsigned long sflags;
+			struct list_head *entry;
+			int send_it = 0;
+
+			if (dev->sync_fib) {
+				our_interrupt = 1;
+				if (dev->sync_fib->callback)
+					dev->sync_fib->callback(dev->sync_fib->callback_data,
+						dev->sync_fib);
+				spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
+				if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
+					dev->management_fib_count--;
+					up(&dev->sync_fib->event_wait);
+				}
+				spin_unlock_irqrestore(&dev->sync_fib->event_lock, sflags);
+				spin_lock_irqsave(&dev->sync_lock, sflags);
+				if (!list_empty(&dev->sync_fib_list)) {
+					entry = dev->sync_fib_list.next;
+					dev->sync_fib = list_entry(entry, struct fib, fiblink);
+					list_del(entry);
+					send_it = 1;
+				} else {
+					dev->sync_fib = NULL;
+				}
+				spin_unlock_irqrestore(&dev->sync_lock, sflags);
+				if (send_it) {
+					aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
+						(u32)dev->sync_fib->hw_fib_pa, 0, 0, 0, 0, 0,
+						NULL, NULL, NULL, NULL, NULL);
+				}
+			}
 		}
 	}
 
@@ -177,56 +209,63 @@ static int src_sync_cmd(struct aac_dev *dev, u32 command,
 	 */
 	src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT);
 
-	ok = 0;
-	start = jiffies;
-
-	/*
-	 *	Wait up to 30 seconds
-	 */
-	while (time_before(jiffies, start+30*HZ)) {
-		/* Delay 5 microseconds to let Mon960 get info. */
-		udelay(5);
-
-		/* Mon960 will set doorbell0 bit
-		 * when it has completed the command
-		 */
-		if ((src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT) & OUTBOUNDDOORBELL_0) {
-			/* Clear the doorbell */
-			src_writel(dev,
-				MUnit.ODR_C,
-				OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
-			ok = 1;
-			break;
-		}
-
-		/* Yield the processor in case we are slow */
-		msleep(1);
-	}
-	if (unlikely(ok != 1)) {
-		/* Restore interrupt mask even though we timed out */
-		aac_adapter_enable_int(dev);
-		return -ETIMEDOUT;
-	}
-
-	/* Pull the synch status from Mailbox 0 */
-	if (status)
-		*status = readl(&dev->IndexRegs->Mailbox[0]);
-	if (r1)
-		*r1 = readl(&dev->IndexRegs->Mailbox[1]);
-	if (r2)
-		*r2 = readl(&dev->IndexRegs->Mailbox[2]);
-	if (r3)
-		*r3 = readl(&dev->IndexRegs->Mailbox[3]);
-	if (r4)
-		*r4 = readl(&dev->IndexRegs->Mailbox[4]);
-
-	/* Clear the synch command doorbell */
-	src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
-
-	/* Restore interrupt mask */
+	if (!dev->sync_mode || command != SEND_SYNCHRONOUS_FIB) {
+		ok = 0;
+		start = jiffies;
+
+		/*
+		 *	Wait up to 5 minutes
+		 */
+		while (time_before(jiffies, start+300*HZ)) {
+			udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
+			/*
+			 *	Mon960 will set doorbell0 bit when it has completed the command.
+			 */
+			if ((src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT) & OUTBOUNDDOORBELL_0) {
+				/*
+				 *	Clear the doorbell.
+				 */
+				src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+				ok = 1;
+				break;
+			}
+			/*
+			 *	Yield the processor in case we are slow
+			 */
+			msleep(1);
+		}
+		if (unlikely(ok != 1)) {
+			/*
+			 *	Restore interrupt mask even though we timed out
+			 */
+			aac_adapter_enable_int(dev);
+			return -ETIMEDOUT;
+		}
+		/*
+		 *	Pull the synch status from Mailbox 0.
+		 */
+		if (status)
+			*status = readl(&dev->IndexRegs->Mailbox[0]);
+		if (r1)
+			*r1 = readl(&dev->IndexRegs->Mailbox[1]);
+		if (r2)
+			*r2 = readl(&dev->IndexRegs->Mailbox[2]);
+		if (r3)
+			*r3 = readl(&dev->IndexRegs->Mailbox[3]);
+		if (r4)
+			*r4 = readl(&dev->IndexRegs->Mailbox[4]);
+
+		/*
+		 *	Clear the synch command doorbell.
+		 */
+		src_writel(dev, MUnit.ODR_C, OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+	}
+
+	/*
+	 *	Restore interrupt mask
+	 */
 	aac_adapter_enable_int(dev);
 	return 0;
-
 }
 
 /**
232/** 271/**
@@ -386,9 +425,7 @@ static int aac_src_ioremap(struct aac_dev *dev, u32 size)
386{ 425{
387 if (!size) { 426 if (!size) {
388 iounmap(dev->regs.src.bar0); 427 iounmap(dev->regs.src.bar0);
389 dev->regs.src.bar0 = NULL; 428 dev->base = dev->regs.src.bar0 = NULL;
390 iounmap(dev->base);
391 dev->base = NULL;
392 return 0; 429 return 0;
393 } 430 }
394 dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2), 431 dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2),
@@ -404,7 +441,27 @@ static int aac_src_ioremap(struct aac_dev *dev, u32 size)
 		return -1;
 	}
 	dev->IndexRegs = &((struct src_registers __iomem *)
-		dev->base)->IndexRegs;
+		dev->base)->u.tupelo.IndexRegs;
+	return 0;
+}
+
+/**
+ *  aac_srcv_ioremap
+ *	@size: mapping resize request
+ *
+ */
+static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
+{
+	if (!size) {
+		iounmap(dev->regs.src.bar0);
+		dev->base = dev->regs.src.bar0 = NULL;
+		return 0;
+	}
+	dev->base = dev->regs.src.bar0 = ioremap(dev->scsi_host_ptr->base, size);
+	if (dev->base == NULL)
+		return -1;
+	dev->IndexRegs = &((struct src_registers __iomem *)
+		dev->base)->u.denali.IndexRegs;
 	return 0;
 }
 
@@ -419,7 +476,7 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled)
 	bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
 		0, 0, 0, 0, 0, 0, &var, &reset_mask, NULL, NULL, NULL);
 	if (bled || (var != 0x00000001))
-		bled = -EINVAL;
+		return -EINVAL;
 	if (dev->supplement_adapter_info.SupportedOptions2 &
 	    AAC_OPTION_DOORBELL_RESET) {
 		src_writel(dev, MUnit.IDR, reset_mask);
@@ -579,15 +636,149 @@ int aac_src_init(struct aac_dev *dev)
 	dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
 
 	aac_adapter_enable_int(dev);
-	/*
-	 *	Tell the adapter that all is configured, and it can
-	 * start accepting requests
-	 */
-	aac_src_start_adapter(dev);
-
+
+	if (!dev->sync_mode) {
+		/*
+		 * Tell the adapter that all is configured, and it can
+		 * start accepting requests
+		 */
+		aac_src_start_adapter(dev);
+	}
+	return 0;
+
+error_iounmap:
+
+	return -1;
+}
+
+/**
+ *  aac_srcv_init	-	initialize an SRCv card
+ *  @dev: device to configure
+ *
+ */
+
+int aac_srcv_init(struct aac_dev *dev)
+{
+	unsigned long start;
+	unsigned long status;
+	int restart = 0;
+	int instance = dev->id;
+	const char *name = dev->name;
+
+	dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
+	dev->a_ops.adapter_comm = aac_src_select_comm;
+
+	dev->base_size = AAC_MIN_SRCV_BAR0_SIZE;
+	if (aac_adapter_ioremap(dev, dev->base_size)) {
+		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
+		goto error_iounmap;
+	}
+
+	/* Failure to reset here is an option ... */
+	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
+	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
+	if ((aac_reset_devices || reset_devices) &&
+		!aac_src_restart_adapter(dev, 0))
+		++restart;
+	/*
+	 *	Check to see if the board panic'd while booting.
+	 */
+	status = src_readl(dev, MUnit.OMR);
+	if (status & KERNEL_PANIC) {
+		if (aac_src_restart_adapter(dev, aac_src_check_health(dev)))
+			goto error_iounmap;
+		++restart;
+	}
+	/*
+	 *	Check to see if the board failed any self tests.
+	 */
+	status = src_readl(dev, MUnit.OMR);
+	if (status & SELF_TEST_FAILED) {
+		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
+		goto error_iounmap;
+	}
+	/*
+	 *	Check to see if the monitor panic'd while booting.
+	 */
+	if (status & MONITOR_PANIC) {
+		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
+		goto error_iounmap;
+	}
+	start = jiffies;
+	/*
+	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
+	 */
+	while (!((status = src_readl(dev, MUnit.OMR)) & KERNEL_UP_AND_RUNNING)) {
+		if ((restart &&
+		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
+		  time_after(jiffies, start+HZ*startup_timeout)) {
+			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
+					dev->name, instance, status);
+			goto error_iounmap;
+		}
+		if (!restart &&
+		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
+		  time_after(jiffies, start + HZ *
+		  ((startup_timeout > 60)
+		  ? (startup_timeout - 60)
+		  : (startup_timeout / 2))))) {
+			if (likely(!aac_src_restart_adapter(dev, aac_src_check_health(dev))))
+				start = jiffies;
+			++restart;
+		}
+		msleep(1);
+	}
+	if (restart && aac_commit)
+		aac_commit = 1;
+	/*
+	 *	Fill in the common function dispatch table.
+	 */
+	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
+	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
+	dev->a_ops.adapter_notify = aac_src_notify_adapter;
+	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
+	dev->a_ops.adapter_check_health = aac_src_check_health;
+	dev->a_ops.adapter_restart = aac_src_restart_adapter;
+
+	/*
+	 *	First clear out all interrupts.  Then enable the one's that we
+	 *	can handle.
+	 */
+	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
+	aac_adapter_disable_int(dev);
+	src_writel(dev, MUnit.ODR_C, 0xffffffff);
+	aac_adapter_enable_int(dev);
+
+	if (aac_init_adapter(dev) == NULL)
+		goto error_iounmap;
+	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
+		goto error_iounmap;
+	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
+	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
+		IRQF_SHARED|IRQF_DISABLED, "aacraid", dev) < 0) {
+		if (dev->msi)
+			pci_disable_msi(dev->pdev);
+		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
+			name, instance);
+		goto error_iounmap;
+	}
+	dev->dbg_base = dev->scsi_host_ptr->base;
+	dev->dbg_base_mapped = dev->base;
+	dev->dbg_size = dev->base_size;
+
+	aac_adapter_enable_int(dev);
+
+	if (!dev->sync_mode) {
+		/*
+		 * Tell the adapter that all is configured, and it can
+		 * start accepting requests
+		 */
+		aac_src_start_adapter(dev);
+	}
 	return 0;
 
 error_iounmap:
 
 	return -1;
 }
+
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h
index 2863a9d2285..66cda669b41 100644
--- a/drivers/scsi/aic94xx/aic94xx.h
+++ b/drivers/scsi/aic94xx/aic94xx.h
@@ -80,6 +80,8 @@ void asd_invalidate_edb(struct asd_ascb *ascb, int edb_id);
 
 int  asd_execute_task(struct sas_task *, int num, gfp_t gfp_flags);
 
+void asd_set_dmamode(struct domain_device *dev);
+
 /* ---------- TMFs ---------- */
 int  asd_abort_task(struct sas_task *);
 int  asd_abort_task_set(struct domain_device *, u8 *lun);
diff --git a/drivers/scsi/aic94xx/aic94xx_dev.c b/drivers/scsi/aic94xx/aic94xx_dev.c
index 2e2ddec9c0b..64136c56e70 100644
--- a/drivers/scsi/aic94xx/aic94xx_dev.c
+++ b/drivers/scsi/aic94xx/aic94xx_dev.c
@@ -109,26 +109,37 @@ static int asd_init_sata_tag_ddb(struct domain_device *dev)
 	return 0;
 }
 
-static int asd_init_sata(struct domain_device *dev)
-{
-	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
-	int ddb = (int) (unsigned long) dev->lldd_dev;
-	u32 qdepth = 0;
-	int res = 0;
-
-	asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
-	if ((dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) &&
-	    dev->sata_dev.identify_device &&
-	    dev->sata_dev.identify_device[10] != 0) {
-		u16 w75 = le16_to_cpu(dev->sata_dev.identify_device[75]);
-		u16 w76 = le16_to_cpu(dev->sata_dev.identify_device[76]);
-
-		if (w76 & 0x100) /* NCQ? */
-			qdepth = (w75 & 0x1F) + 1;
-		asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK,
-					(1ULL<<qdepth)-1);
-		asd_ddbsite_write_byte(asd_ha, ddb, NUM_SATA_TAGS, qdepth);
-	}
+void asd_set_dmamode(struct domain_device *dev)
+{
+	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
+	struct ata_device *ata_dev = sas_to_ata_dev(dev);
+	int ddb = (int) (unsigned long) dev->lldd_dev;
+	u32 qdepth = 0;
+
+	if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) {
+		if (ata_id_has_ncq(ata_dev->id))
+			qdepth = ata_id_queue_depth(ata_dev->id);
+		asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK,
+					(1ULL<<qdepth)-1);
+		asd_ddbsite_write_byte(asd_ha, ddb, NUM_SATA_TAGS, qdepth);
+	}
+
+	if (qdepth > 0)
+		if (asd_init_sata_tag_ddb(dev) != 0) {
+			unsigned long flags;
+
+			spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
+			ata_dev->flags |= ATA_DFLAG_NCQ_OFF;
+			spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags);
+		}
+}
+
+static int asd_init_sata(struct domain_device *dev)
+{
+	struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
+	int ddb = (int) (unsigned long) dev->lldd_dev;
+
+	asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF);
 	if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM ||
 	    dev->dev_type == SATA_PM_PORT) {
 		struct dev_to_host_fis *fis = (struct dev_to_host_fis *)
@@ -136,9 +147,8 @@ static int asd_init_sata(struct domain_device *dev)
 		asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status);
 	}
 	asd_ddbsite_write_word(asd_ha, ddb, NCQ_DATA_SCB_PTR, 0xFFFF);
-	if (qdepth > 0)
-		res = asd_init_sata_tag_ddb(dev);
-	return res;
+
+	return 0;
 }
 
 static int asd_init_target_ddb(struct domain_device *dev)
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index d5ff142c93a..ff80552ead8 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -68,7 +68,6 @@ static struct scsi_host_template aic94xx_sht = {
 	.queuecommand		= sas_queuecommand,
 	.target_alloc		= sas_target_alloc,
 	.slave_configure	= sas_slave_configure,
-	.slave_destroy		= sas_slave_destroy,
 	.scan_finished		= asd_scan_finished,
 	.scan_start		= asd_scan_start,
 	.change_queue_depth	= sas_change_queue_depth,
@@ -82,7 +81,6 @@ static struct scsi_host_template aic94xx_sht = {
 	.use_clustering		= ENABLE_CLUSTERING,
 	.eh_device_reset_handler	= sas_eh_device_reset_handler,
 	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
-	.slave_alloc		= sas_slave_alloc,
 	.target_destroy		= sas_target_destroy,
 	.ioctl			= sas_ioctl,
 };
@@ -972,7 +970,7 @@ static int asd_scan_finished(struct Scsi_Host *shost, unsigned long time)
 	if (time < HZ)
 		return 0;
 	/* Wait for discovery to finish */
-	scsi_flush_work(shost);
+	sas_drain_work(SHOST_TO_SAS_HA(shost));
 	return 1;
 }
 
@@ -1010,6 +1008,8 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
 	.lldd_clear_nexus_ha	= asd_clear_nexus_ha,
 
 	.lldd_control_phy	= asd_control_phy,
+
+	.lldd_ata_set_dmamode	= asd_set_dmamode,
 };
 
 static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c
index 0add73bdf2a..cf9040933da 100644
--- a/drivers/scsi/aic94xx/aic94xx_tmf.c
+++ b/drivers/scsi/aic94xx/aic94xx_tmf.c
@@ -181,7 +181,7 @@ static int asd_clear_nexus_I_T(struct domain_device *dev,
 int asd_I_T_nexus_reset(struct domain_device *dev)
 {
 	int res, tmp_res, i;
-	struct sas_phy *phy = sas_find_local_phy(dev);
+	struct sas_phy *phy = sas_get_local_phy(dev);
 	/* Standard mandates link reset for ATA  (type 0) and
 	 * hard reset for SSP (type 1) */
 	int reset_type = (dev->dev_type == SATA_DEV ||
@@ -192,7 +192,7 @@ int asd_I_T_nexus_reset(struct domain_device *dev)
 	ASD_DPRINTK("sending %s reset to %s\n",
 		    reset_type ? "hard" : "soft", dev_name(&phy->dev));
 	res = sas_phy_reset(phy, reset_type);
-	if (res == TMF_RESP_FUNC_COMPLETE) {
+	if (res == TMF_RESP_FUNC_COMPLETE || res == -ENODEV) {
 		/* wait for the maximum settle time */
 		msleep(500);
 		/* clear all outstanding commands (keep nexus suspended) */
@@ -201,7 +201,7 @@ int asd_I_T_nexus_reset(struct domain_device *dev)
 	for (i = 0 ; i < 3; i++) {
 		tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME);
 		if (tmp_res == TC_RESUME)
-			return res;
+			goto out;
 		msleep(500);
 	}
 
@@ -211,7 +211,10 @@ int asd_I_T_nexus_reset(struct domain_device *dev)
 	dev_printk(KERN_ERR, &phy->dev,
 		   "Failed to resume nexus after reset 0x%x\n", tmp_res);
 
-	return TMF_RESP_FUNC_FAILED;
+	res = TMF_RESP_FUNC_FAILED;
+ out:
+	sas_put_local_phy(phy);
+	return res;
 }
 
 static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
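The asd_I_T_nexus_reset() conversion above follows the new libsas rule that sas_get_local_phy() returns a referenced phy which every exit path must release with sas_put_local_phy() (hence the 'goto out'). A minimal sketch of the discipline, with an illustrative caller name:

	/* Illustrative only; error handling trimmed to show the get/put pairing. */
	static int example_nexus_reset(struct domain_device *dev)
	{
		struct sas_phy *phy = sas_get_local_phy(dev);	/* takes a reference */
		int res;

		res = sas_phy_reset(phy, 1 /* hard reset */);
		/* any early return between get and put would leak the reference */
		sas_put_local_phy(phy);	/* dropped on every path */
		return res;
	}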
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 530de2b1200..8005c6c5a08 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3047,8 +3047,7 @@ bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
 	 * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
 	 * buffer of size bsg_data->payload_len
 	 */
-	bsg_fcpt = (struct bfa_bsg_fcpt_s *)
-		   kzalloc(bsg_data->payload_len, GFP_KERNEL);
+	bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL);
 	if (!bsg_fcpt)
 		goto out;
 
@@ -3060,6 +3059,7 @@ bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
 
 	drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
 	if (drv_fcxp == NULL) {
+		kfree(bsg_fcpt);
 		rc = -ENOMEM;
 		goto out;
 	}
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 049ea907e04..a4953ef9e53 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -62,7 +62,7 @@
62#include "bnx2fc_constants.h" 62#include "bnx2fc_constants.h"
63 63
64#define BNX2FC_NAME "bnx2fc" 64#define BNX2FC_NAME "bnx2fc"
65#define BNX2FC_VERSION "1.0.9" 65#define BNX2FC_VERSION "1.0.10"
66 66
67#define PFX "bnx2fc: " 67#define PFX "bnx2fc: "
68 68
@@ -114,6 +114,8 @@
 #define BNX2FC_HASH_TBL_CHUNK_SIZE	(16 * 1024)
 
 #define BNX2FC_MAX_SEQS			255
+#define BNX2FC_MAX_RETRY_CNT		3
+#define BNX2FC_MAX_RPORT_RETRY_CNT	255
 
 #define BNX2FC_READ			(1 << 1)
 #define BNX2FC_WRITE			(1 << 0)
@@ -121,8 +123,10 @@
 #define BNX2FC_MIN_XID		0
 #define BNX2FC_MAX_XID		\
 			(BNX2FC_MAX_OUTSTANDING_CMNDS + BNX2FC_ELSTM_XIDS - 1)
+#define FCOE_MAX_NUM_XIDS	0x2000
 #define FCOE_MIN_XID		(BNX2FC_MAX_XID + 1)
-#define FCOE_MAX_XID		(FCOE_MIN_XID + 4095)
+#define FCOE_MAX_XID		(FCOE_MIN_XID + FCOE_MAX_NUM_XIDS - 1)
+#define FCOE_XIDS_PER_CPU	(FCOE_MIN_XID + (512 * nr_cpu_ids) - 1)
 #define BNX2FC_MAX_LUN		0xFFFF
 #define BNX2FC_MAX_FCP_TGT	256
 #define BNX2FC_MAX_CMD_LEN	16
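Worked out, the new sizing gives FCOE_MAX_NUM_XIDS = 0x2000 (8192) exchange IDs above FCOE_MIN_XID, while FCOE_XIDS_PER_CPU caps the window at 512 XIDs per CPU: on a 2-CPU system that is FCOE_MIN_XID + (512 * 2) - 1, i.e. a 1024-XID window. bnx2fc_em_config() below picks FCOE_XIDS_PER_CPU when nr_cpu_ids <= 2 and the full FCOE_MAX_XID otherwise.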
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index a9af42e8363..abd72a01856 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
 
 #define DRV_MODULE_NAME		"bnx2fc"
 #define DRV_MODULE_VERSION	BNX2FC_VERSION
-#define DRV_MODULE_RELDATE	"Oct 21, 2011"
+#define DRV_MODULE_RELDATE	"Jan 22, 2011"
 
 
 static char version[] __devinitdata =
@@ -939,8 +939,14 @@ static int bnx2fc_libfc_config(struct fc_lport *lport)
939 939
940static int bnx2fc_em_config(struct fc_lport *lport) 940static int bnx2fc_em_config(struct fc_lport *lport)
941{ 941{
942 int max_xid;
943
944 if (nr_cpu_ids <= 2)
945 max_xid = FCOE_XIDS_PER_CPU;
946 else
947 max_xid = FCOE_MAX_XID;
942 if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID, 948 if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, FCOE_MIN_XID,
943 FCOE_MAX_XID, NULL)) { 949 max_xid, NULL)) {
944 printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n"); 950 printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n");
945 return -ENOMEM; 951 return -ENOMEM;
946 } 952 }
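
bnx2fc_em_config() above now scales the exchange manager by CPU count: with two or fewer CPUs it budgets 512 XIDs per CPU (FCOE_XIDS_PER_CPU) rather than reserving the full 0x2000-wide range. The selection logic, as a plain sketch:

/* Sketch of the ceiling choice in bnx2fc_em_config(); 512 XIDs per CPU
 * mirrors the FCOE_XIDS_PER_CPU macro added in the header above. */
static int pick_max_xid(int nr_cpu_ids, int fcoe_min_xid, int fcoe_max_xid)
{
	if (nr_cpu_ids <= 2)
		return fcoe_min_xid + 512 * nr_cpu_ids - 1;
	return fcoe_max_xid;
}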
@@ -952,8 +958,8 @@ static int bnx2fc_lport_config(struct fc_lport *lport)
952{ 958{
953 lport->link_up = 0; 959 lport->link_up = 0;
954 lport->qfull = 0; 960 lport->qfull = 0;
955 lport->max_retry_count = 3; 961 lport->max_retry_count = BNX2FC_MAX_RETRY_CNT;
956 lport->max_rport_retry_count = 3; 962 lport->max_rport_retry_count = BNX2FC_MAX_RPORT_RETRY_CNT;
957 lport->e_d_tov = 2 * 1000; 963 lport->e_d_tov = 2 * 1000;
958 lport->r_a_tov = 10 * 1000; 964 lport->r_a_tov = 10 * 1000;
959 965
@@ -1536,6 +1542,7 @@ static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
1536static int bnx2fc_destroy(struct net_device *netdev) 1542static int bnx2fc_destroy(struct net_device *netdev)
1537{ 1543{
1538 struct bnx2fc_interface *interface = NULL; 1544 struct bnx2fc_interface *interface = NULL;
1545 struct workqueue_struct *timer_work_queue;
1539 int rc = 0; 1546 int rc = 0;
1540 1547
1541 rtnl_lock(); 1548 rtnl_lock();
@@ -1548,9 +1555,9 @@ static int bnx2fc_destroy(struct net_device *netdev)
1548 goto netdev_err; 1555 goto netdev_err;
1549 } 1556 }
1550 1557
1551 1558 timer_work_queue = interface->timer_work_queue;
1552 destroy_workqueue(interface->timer_work_queue);
1553 __bnx2fc_destroy(interface); 1559 __bnx2fc_destroy(interface);
1560 destroy_workqueue(timer_work_queue);
1554 1561
1555netdev_err: 1562netdev_err:
1556 mutex_unlock(&bnx2fc_dev_lock); 1563 mutex_unlock(&bnx2fc_dev_lock);
@@ -2054,6 +2061,7 @@ if_create_err:
2054ifput_err: 2061ifput_err:
2055 bnx2fc_net_cleanup(interface); 2062 bnx2fc_net_cleanup(interface);
2056 bnx2fc_interface_put(interface); 2063 bnx2fc_interface_put(interface);
2064 goto mod_err;
2057netdev_err: 2065netdev_err:
2058 module_put(THIS_MODULE); 2066 module_put(THIS_MODULE);
2059mod_err: 2067mod_err:
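
The bnx2fc_destroy() hunk above is an ordering fix: __bnx2fc_destroy() releases the interface, so the embedded workqueue pointer has to be saved first and the workqueue destroyed afterwards, never read out of freed memory. The shape of the fix in a standalone sketch:

#include <stdlib.h>

struct queue { int unused; };
struct iface { struct queue *q; };

static void destroy_queue(struct queue *q) { free(q); }
static void destroy_iface(struct iface *i) { free(i); }

/* Grab the embedded pointer before freeing the container, then tear the
 * queue down; reading i->q after destroy_iface() would be a use-after-free. */
static void teardown(struct iface *i)
{
	struct queue *q = i->q;

	destroy_iface(i);
	destroy_queue(q);
}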
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 1ad0b822556..f9d6f412909 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1312,14 +1312,18 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
1312 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) | 1312 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) |
1313 /* EMC */ 1313 /* EMC */
1314 (1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN)); 1314 (1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
1315 if (error_mask1) 1315 if (error_mask1) {
1316 iscsi_init2.error_bit_map[0] = error_mask1; 1316 iscsi_init2.error_bit_map[0] = error_mask1;
1317 else 1317 mask64 &= (u32)(~mask64);
1318 mask64 |= error_mask1;
1319 } else
1318 iscsi_init2.error_bit_map[0] = (u32) mask64; 1320 iscsi_init2.error_bit_map[0] = (u32) mask64;
1319 1321
1320 if (error_mask2) 1322 if (error_mask2) {
1321 iscsi_init2.error_bit_map[1] = error_mask2; 1323 iscsi_init2.error_bit_map[1] = error_mask2;
1322 else 1324 mask64 &= 0xffffffff;
1325 mask64 |= ((u64)error_mask2 << 32);
1326 } else
1323 iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32); 1327 iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32);
1324 1328
1325 iscsi_error_mask = mask64; 1329 iscsi_error_mask = mask64;
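
With the braces added above, a module-supplied override replaces only its own 32-bit half of the 64-bit error map, and mask64 is patched to match so the final iscsi_error_mask reflects what was actually programmed. A userspace model of the fixed logic:

#include <stdint.h>

/* error_mask1 overrides bits 0..31, error_mask2 bits 32..63; an unset
 * parameter leaves the corresponding default half of mask64 in place. */
static uint64_t apply_masks(uint64_t mask64,
			    uint32_t error_mask1, uint32_t error_mask2)
{
	if (error_mask1) {
		mask64 ^= (uint32_t)mask64;	/* clear the low 32 bits */
		mask64 |= error_mask1;
	}
	if (error_mask2) {
		mask64 &= 0xffffffff;		/* clear the high 32 bits */
		mask64 |= (uint64_t)error_mask2 << 32;
	}
	return mask64;
}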
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 1a947f1b972..4927cca733d 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -49,11 +49,11 @@ module_param(en_tcp_dack, int, 0664);
49MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK"); 49MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK");
50 50
51unsigned int error_mask1 = 0x00; 51unsigned int error_mask1 = 0x00;
52module_param(error_mask1, int, 0664); 52module_param(error_mask1, uint, 0664);
53MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1"); 53MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1");
54 54
55unsigned int error_mask2 = 0x00; 55unsigned int error_mask2 = 0x00;
56module_param(error_mask2, int, 0664); 56module_param(error_mask2, uint, 0664);
57MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2"); 57MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2");
58 58
59unsigned int sq_size; 59unsigned int sq_size;
@@ -393,8 +393,9 @@ static void bnx2i_percpu_thread_create(unsigned int cpu)
393 393
394 p = &per_cpu(bnx2i_percpu, cpu); 394 p = &per_cpu(bnx2i_percpu, cpu);
395 395
396 thread = kthread_create(bnx2i_percpu_io_thread, (void *)p, 396 thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p,
397 "bnx2i_thread/%d", cpu); 397 cpu_to_node(cpu),
398 "bnx2i_thread/%d", cpu);
398 /* bind thread to the cpu */ 399 /* bind thread to the cpu */
399 if (likely(!IS_ERR(thread))) { 400 if (likely(!IS_ERR(thread))) {
400 kthread_bind(thread, cpu); 401 kthread_bind(thread, cpu);
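
kthread_create_on_node() lets the driver allocate each per-CPU I/O thread's task structure on the memory node that backs its CPU, instead of wherever the caller happens to run. A kernel-style sketch of the create/bind/wake sequence (illustrative, not a standalone program):

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/topology.h>
#include <linux/err.h>

static struct task_struct *start_percpu_worker(int (*fn)(void *),
					       void *data, unsigned int cpu)
{
	struct task_struct *t;

	/* allocate task memory on the node backing @cpu */
	t = kthread_create_on_node(fn, data, cpu_to_node(cpu),
				   "worker/%u", cpu);
	if (IS_ERR(t))
		return t;

	kthread_bind(t, cpu);	/* pin it before the first wakeup */
	wake_up_process(t);
	return t;
}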
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 89afd6d21d8..d9253db1d0e 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -2147,11 +2147,10 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
2147 enum iscsi_param param, char *buf, int buflen) 2147 enum iscsi_param param, char *buf, int buflen)
2148{ 2148{
2149 struct iscsi_conn *conn = cls_conn->dd_data; 2149 struct iscsi_conn *conn = cls_conn->dd_data;
2150 struct iscsi_session *session = conn->session;
2151 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 2150 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
2152 struct cxgbi_conn *cconn = tcp_conn->dd_data; 2151 struct cxgbi_conn *cconn = tcp_conn->dd_data;
2153 struct cxgbi_sock *csk = cconn->cep->csk; 2152 struct cxgbi_sock *csk = cconn->cep->csk;
2154 int value, err = 0; 2153 int err;
2155 2154
2156 log_debug(1 << CXGBI_DBG_ISCSI, 2155 log_debug(1 << CXGBI_DBG_ISCSI,
2157 "cls_conn 0x%p, param %d, buf(%d) %s.\n", 2156 "cls_conn 0x%p, param %d, buf(%d) %s.\n",
@@ -2173,15 +2172,7 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
2173 conn->datadgst_en, 0); 2172 conn->datadgst_en, 0);
2174 break; 2173 break;
2175 case ISCSI_PARAM_MAX_R2T: 2174 case ISCSI_PARAM_MAX_R2T:
2176 sscanf(buf, "%d", &value); 2175 return iscsi_tcp_set_max_r2t(conn, buf);
2177 if (value <= 0 || !is_power_of_2(value))
2178 return -EINVAL;
2179 if (session->max_r2t == value)
2180 break;
2181 iscsi_tcp_r2tpool_free(session);
2182 err = iscsi_set_param(cls_conn, param, buf, buflen);
2183 if (!err && iscsi_tcp_r2tpool_alloc(session))
2184 return -ENOMEM;
2185 case ISCSI_PARAM_MAX_RECV_DLENGTH: 2176 case ISCSI_PARAM_MAX_RECV_DLENGTH:
2186 err = iscsi_set_param(cls_conn, param, buf, buflen); 2177 err = iscsi_set_param(cls_conn, param, buf, buflen);
2187 if (!err) 2178 if (!err)
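
The libcxgbi hunk above hands MaxOutstandingR2T off to iscsi_tcp_set_max_r2t(); note the removed open-coded branch also had no break and fell through into the MAX_RECV_DLENGTH case. The core validation the helper centralizes is a positive power-of-two check, sketched here:

#include <stdbool.h>

/* value & (value - 1) clears the lowest set bit, so the result is zero
 * exactly when value is a power of two. */
static bool valid_max_r2t(int value)
{
	return value > 0 && (value & (value - 1)) == 0;
}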
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index cc75cbea936..ae7d15c44e2 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -168,6 +168,14 @@ static struct fc_function_template fcoe_nport_fc_functions = {
168 .show_host_supported_fc4s = 1, 168 .show_host_supported_fc4s = 1,
169 .show_host_active_fc4s = 1, 169 .show_host_active_fc4s = 1,
170 .show_host_maxframe_size = 1, 170 .show_host_maxframe_size = 1,
171 .show_host_serial_number = 1,
172 .show_host_manufacturer = 1,
173 .show_host_model = 1,
174 .show_host_model_description = 1,
175 .show_host_hardware_version = 1,
176 .show_host_driver_version = 1,
177 .show_host_firmware_version = 1,
178 .show_host_optionrom_version = 1,
171 179
172 .show_host_port_id = 1, 180 .show_host_port_id = 1,
173 .show_host_supported_speeds = 1, 181 .show_host_supported_speeds = 1,
@@ -208,6 +216,14 @@ static struct fc_function_template fcoe_vport_fc_functions = {
208 .show_host_supported_fc4s = 1, 216 .show_host_supported_fc4s = 1,
209 .show_host_active_fc4s = 1, 217 .show_host_active_fc4s = 1,
210 .show_host_maxframe_size = 1, 218 .show_host_maxframe_size = 1,
219 .show_host_serial_number = 1,
220 .show_host_manufacturer = 1,
221 .show_host_model = 1,
222 .show_host_model_description = 1,
223 .show_host_hardware_version = 1,
224 .show_host_driver_version = 1,
225 .show_host_firmware_version = 1,
226 .show_host_optionrom_version = 1,
211 227
212 .show_host_port_id = 1, 228 .show_host_port_id = 1,
213 .show_host_supported_speeds = 1, 229 .show_host_supported_speeds = 1,
@@ -364,11 +380,10 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
364 if (!fcoe) { 380 if (!fcoe) {
365 FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n"); 381 FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
366 fcoe = ERR_PTR(-ENOMEM); 382 fcoe = ERR_PTR(-ENOMEM);
367 goto out_nomod; 383 goto out_putmod;
368 } 384 }
369 385
370 dev_hold(netdev); 386 dev_hold(netdev);
371 kref_init(&fcoe->kref);
372 387
373 /* 388 /*
374 * Initialize FIP. 389 * Initialize FIP.
@@ -384,54 +399,18 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
384 kfree(fcoe); 399 kfree(fcoe);
385 dev_put(netdev); 400 dev_put(netdev);
386 fcoe = ERR_PTR(err); 401 fcoe = ERR_PTR(err);
387 goto out_nomod; 402 goto out_putmod;
388 } 403 }
389 404
390 goto out; 405 goto out;
391 406
392out_nomod: 407out_putmod:
393 module_put(THIS_MODULE); 408 module_put(THIS_MODULE);
394out: 409out:
395 return fcoe; 410 return fcoe;
396} 411}
397 412
398/** 413/**
399 * fcoe_interface_release() - fcoe_port kref release function
400 * @kref: Embedded reference count in an fcoe_interface struct
401 */
402static void fcoe_interface_release(struct kref *kref)
403{
404 struct fcoe_interface *fcoe;
405 struct net_device *netdev;
406
407 fcoe = container_of(kref, struct fcoe_interface, kref);
408 netdev = fcoe->netdev;
409 /* tear-down the FCoE controller */
410 fcoe_ctlr_destroy(&fcoe->ctlr);
411 kfree(fcoe);
412 dev_put(netdev);
413 module_put(THIS_MODULE);
414}
415
416/**
417 * fcoe_interface_get() - Get a reference to a FCoE interface
418 * @fcoe: The FCoE interface to be held
419 */
420static inline void fcoe_interface_get(struct fcoe_interface *fcoe)
421{
422 kref_get(&fcoe->kref);
423}
424
425/**
426 * fcoe_interface_put() - Put a reference to a FCoE interface
427 * @fcoe: The FCoE interface to be released
428 */
429static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
430{
431 kref_put(&fcoe->kref, fcoe_interface_release);
432}
433
434/**
435 * fcoe_interface_cleanup() - Clean up a FCoE interface 414 * fcoe_interface_cleanup() - Clean up a FCoE interface
436 * @fcoe: The FCoE interface to be cleaned up 415 * @fcoe: The FCoE interface to be cleaned up
437 * 416 *
@@ -478,7 +457,11 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
478 rtnl_unlock(); 457 rtnl_unlock();
479 458
480 /* Release the self-reference taken during fcoe_interface_create() */ 459 /* Release the self-reference taken during fcoe_interface_create() */
481 fcoe_interface_put(fcoe); 460 /* tear-down the FCoE controller */
461 fcoe_ctlr_destroy(fip);
462 kfree(fcoe);
463 dev_put(netdev);
464 module_put(THIS_MODULE);
482} 465}
483 466
484/** 467/**
@@ -734,6 +717,85 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
734 return 0; 717 return 0;
735} 718}
736 719
720
721/**
721 * fcoe_fdmi_info() - Get FDMI related info from net device for SW FCoE
723 * @lport: The local port that is associated with the net device
724 * @netdev: The associated net device
725 *
726 * Must be called after fcoe_shost_config() as it uses the local port mutex
727 *
728 */
729static void fcoe_fdmi_info(struct fc_lport *lport, struct net_device *netdev)
730{
731 struct fcoe_interface *fcoe;
732 struct fcoe_port *port;
733 struct net_device *realdev;
734 int rc;
735 struct netdev_fcoe_hbainfo fdmi;
736
737 port = lport_priv(lport);
738 fcoe = port->priv;
739 realdev = fcoe->realdev;
740
741 if (!realdev)
742 return;
743
744 /* No FDMI state machine for NPIV ports */
745 if (lport->vport)
746 return;
747
748 if (realdev->netdev_ops->ndo_fcoe_get_hbainfo) {
749 memset(&fdmi, 0, sizeof(fdmi));
750 rc = realdev->netdev_ops->ndo_fcoe_get_hbainfo(realdev,
751 &fdmi);
752 if (rc) {
753 printk(KERN_INFO "fcoe: Failed to retrieve FDMI "
754 "information from netdev.\n");
755 return;
756 }
757
758 snprintf(fc_host_serial_number(lport->host),
759 FC_SERIAL_NUMBER_SIZE,
760 "%s",
761 fdmi.serial_number);
762 snprintf(fc_host_manufacturer(lport->host),
763 FC_SERIAL_NUMBER_SIZE,
764 "%s",
765 fdmi.manufacturer);
766 snprintf(fc_host_model(lport->host),
767 FC_SYMBOLIC_NAME_SIZE,
768 "%s",
769 fdmi.model);
770 snprintf(fc_host_model_description(lport->host),
771 FC_SYMBOLIC_NAME_SIZE,
772 "%s",
773 fdmi.model_description);
774 snprintf(fc_host_hardware_version(lport->host),
775 FC_VERSION_STRING_SIZE,
776 "%s",
777 fdmi.hardware_version);
778 snprintf(fc_host_driver_version(lport->host),
779 FC_VERSION_STRING_SIZE,
780 "%s",
781 fdmi.driver_version);
782 snprintf(fc_host_optionrom_version(lport->host),
783 FC_VERSION_STRING_SIZE,
784 "%s",
785 fdmi.optionrom_version);
786 snprintf(fc_host_firmware_version(lport->host),
787 FC_VERSION_STRING_SIZE,
788 "%s",
789 fdmi.firmware_version);
790
791 /* Enable FDMI lport states */
792 lport->fdmi_enabled = 1;
793 } else {
794 lport->fdmi_enabled = 0;
795 printk(KERN_INFO "fcoe: No FDMI support.\n");
796 }
797}
798
737/** 799/**
738 * fcoe_oem_match() - The match routine for the offloaded exchange manager 800 * fcoe_oem_match() - The match routine for the offloaded exchange manager
739 * @fp: The I/O frame 801 * @fp: The I/O frame
@@ -881,9 +943,6 @@ static void fcoe_if_destroy(struct fc_lport *lport)
881 dev_uc_del(netdev, port->data_src_addr); 943 dev_uc_del(netdev, port->data_src_addr);
882 rtnl_unlock(); 944 rtnl_unlock();
883 945
884 /* Release reference held in fcoe_if_create() */
885 fcoe_interface_put(fcoe);
886
887 /* Free queued packets for the per-CPU receive threads */ 946 /* Free queued packets for the per-CPU receive threads */
888 fcoe_percpu_clean(lport); 947 fcoe_percpu_clean(lport);
889 948
@@ -1047,6 +1106,9 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
1047 goto out_lp_destroy; 1106 goto out_lp_destroy;
1048 } 1107 }
1049 1108
1109 /* Initialize FDMI information */
1110 fcoe_fdmi_info(lport, netdev);
1111
1050 /* 1112 /*
1051 * fcoe_em_alloc() and fcoe_hostlist_add() both 1113 * fcoe_em_alloc() and fcoe_hostlist_add() both
1052 * need to be atomic with respect to other changes to the 1114 * need to be atomic with respect to other changes to the
@@ -1070,7 +1132,6 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
1070 goto out_lp_destroy; 1132 goto out_lp_destroy;
1071 } 1133 }
1072 1134
1073 fcoe_interface_get(fcoe);
1074 return lport; 1135 return lport;
1075 1136
1076out_lp_destroy: 1137out_lp_destroy:
@@ -2009,20 +2070,13 @@ static void fcoe_destroy_work(struct work_struct *work)
2009{ 2070{
2010 struct fcoe_port *port; 2071 struct fcoe_port *port;
2011 struct fcoe_interface *fcoe; 2072 struct fcoe_interface *fcoe;
2012 int npiv = 0;
2013 2073
2014 port = container_of(work, struct fcoe_port, destroy_work); 2074 port = container_of(work, struct fcoe_port, destroy_work);
2015 mutex_lock(&fcoe_config_mutex); 2075 mutex_lock(&fcoe_config_mutex);
2016 2076
2017 /* set if this is an NPIV port */
2018 npiv = port->lport->vport ? 1 : 0;
2019
2020 fcoe = port->priv; 2077 fcoe = port->priv;
2021 fcoe_if_destroy(port->lport); 2078 fcoe_if_destroy(port->lport);
2022 2079 fcoe_interface_cleanup(fcoe);
2023 /* Do not tear down the fcoe interface for NPIV port */
2024 if (!npiv)
2025 fcoe_interface_cleanup(fcoe);
2026 2080
2027 mutex_unlock(&fcoe_config_mutex); 2081 mutex_unlock(&fcoe_config_mutex);
2028} 2082}
@@ -2593,12 +2647,15 @@ static int fcoe_vport_destroy(struct fc_vport *vport)
2593 struct Scsi_Host *shost = vport_to_shost(vport); 2647 struct Scsi_Host *shost = vport_to_shost(vport);
2594 struct fc_lport *n_port = shost_priv(shost); 2648 struct fc_lport *n_port = shost_priv(shost);
2595 struct fc_lport *vn_port = vport->dd_data; 2649 struct fc_lport *vn_port = vport->dd_data;
2596 struct fcoe_port *port = lport_priv(vn_port);
2597 2650
2598 mutex_lock(&n_port->lp_mutex); 2651 mutex_lock(&n_port->lp_mutex);
2599 list_del(&vn_port->list); 2652 list_del(&vn_port->list);
2600 mutex_unlock(&n_port->lp_mutex); 2653 mutex_unlock(&n_port->lp_mutex);
2601 queue_work(fcoe_wq, &port->destroy_work); 2654
2655 mutex_lock(&fcoe_config_mutex);
2656 fcoe_if_destroy(vn_port);
2657 mutex_unlock(&fcoe_config_mutex);
2658
2602 return 0; 2659 return 0;
2603} 2660}
2604 2661
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index bcc89e63949..3c2733a12aa 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -71,8 +71,6 @@ do { \
71 * @ctlr: The FCoE controller (for FIP) 71 * @ctlr: The FCoE controller (for FIP)
72 * @oem: The offload exchange manager for all local port 72 * @oem: The offload exchange manager for all local port
73 * instances associated with this port 73 * instances associated with this port
74 * @kref: The kernel reference
75 *
76 * This structure is 1:1 with a net device. 74 * This structure is 1:1 with a net device.
77 */ 75 */
78struct fcoe_interface { 76struct fcoe_interface {
@@ -83,7 +81,6 @@ struct fcoe_interface {
83 struct packet_type fip_packet_type; 81 struct packet_type fip_packet_type;
84 struct fcoe_ctlr ctlr; 82 struct fcoe_ctlr ctlr;
85 struct fc_exch_mgr *oem; 83 struct fc_exch_mgr *oem;
86 struct kref kref;
87}; 84};
88 85
89#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr) 86#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index 4d119a326d3..710e149d41b 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -619,8 +619,8 @@ static int libfcoe_device_notification(struct notifier_block *notifier,
619 619
620 switch (event) { 620 switch (event) {
621 case NETDEV_UNREGISTER: 621 case NETDEV_UNREGISTER:
622 printk(KERN_ERR "libfcoe_device_notification: NETDEV_UNREGISTER %s\n", 622 LIBFCOE_TRANSPORT_DBG("NETDEV_UNREGISTER %s\n",
623 netdev->name); 623 netdev->name);
624 fcoe_del_netdev_mapping(netdev); 624 fcoe_del_netdev_mapping(netdev);
625 break; 625 break;
626 } 626 }
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index b96962c3944..500e20dd56e 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -56,6 +56,7 @@
56/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */ 56/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
57#define HPSA_DRIVER_VERSION "2.0.2-1" 57#define HPSA_DRIVER_VERSION "2.0.2-1"
58#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" 58#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
59#define HPSA "hpsa"
59 60
60/* How long to wait (in milliseconds) for board to go into simple mode */ 61/* How long to wait (in milliseconds) for board to go into simple mode */
61#define MAX_CONFIG_WAIT 30000 62#define MAX_CONFIG_WAIT 30000
@@ -202,30 +203,31 @@ static int check_for_unit_attention(struct ctlr_info *h,
202 203
203 switch (c->err_info->SenseInfo[12]) { 204 switch (c->err_info->SenseInfo[12]) {
204 case STATE_CHANGED: 205 case STATE_CHANGED:
205 dev_warn(&h->pdev->dev, "hpsa%d: a state change " 206 dev_warn(&h->pdev->dev, HPSA "%d: a state change "
206 "detected, command retried\n", h->ctlr); 207 "detected, command retried\n", h->ctlr);
207 break; 208 break;
208 case LUN_FAILED: 209 case LUN_FAILED:
209 dev_warn(&h->pdev->dev, "hpsa%d: LUN failure " 210 dev_warn(&h->pdev->dev, HPSA "%d: LUN failure "
210 "detected, action required\n", h->ctlr); 211 "detected, action required\n", h->ctlr);
211 break; 212 break;
212 case REPORT_LUNS_CHANGED: 213 case REPORT_LUNS_CHANGED:
213 dev_warn(&h->pdev->dev, "hpsa%d: report LUN data " 214 dev_warn(&h->pdev->dev, HPSA "%d: report LUN data "
214 "changed, action required\n", h->ctlr); 215 "changed, action required\n", h->ctlr);
215 /* 216 /*
216 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012. 217 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
218 * target (array) devices.
217 */ 219 */
218 break; 220 break;
219 case POWER_OR_RESET: 221 case POWER_OR_RESET:
220 dev_warn(&h->pdev->dev, "hpsa%d: a power on " 222 dev_warn(&h->pdev->dev, HPSA "%d: a power on "
221 "or device reset detected\n", h->ctlr); 223 "or device reset detected\n", h->ctlr);
222 break; 224 break;
223 case UNIT_ATTENTION_CLEARED: 225 case UNIT_ATTENTION_CLEARED:
224 dev_warn(&h->pdev->dev, "hpsa%d: unit attention " 226 dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
225 "cleared by another initiator\n", h->ctlr); 227 "cleared by another initiator\n", h->ctlr);
226 break; 228 break;
227 default: 229 default:
228 dev_warn(&h->pdev->dev, "hpsa%d: unknown " 230 dev_warn(&h->pdev->dev, HPSA "%d: unknown "
229 "unit attention detected\n", h->ctlr); 231 "unit attention detected\n", h->ctlr);
230 break; 232 break;
231 } 233 }
@@ -296,11 +298,23 @@ static u32 unresettable_controller[] = {
296 0x40800E11, /* Smart Array 5i */ 298 0x40800E11, /* Smart Array 5i */
297 0x409C0E11, /* Smart Array 6400 */ 299 0x409C0E11, /* Smart Array 6400 */
298 0x409D0E11, /* Smart Array 6400 EM */ 300 0x409D0E11, /* Smart Array 6400 EM */
301 0x40700E11, /* Smart Array 5300 */
302 0x40820E11, /* Smart Array 532 */
303 0x40830E11, /* Smart Array 5312 */
304 0x409A0E11, /* Smart Array 641 */
305 0x409B0E11, /* Smart Array 642 */
306 0x40910E11, /* Smart Array 6i */
299}; 307};
300 308
301/* List of controllers which cannot even be soft reset */ 309/* List of controllers which cannot even be soft reset */
302static u32 soft_unresettable_controller[] = { 310static u32 soft_unresettable_controller[] = {
303 0x40800E11, /* Smart Array 5i */ 311 0x40800E11, /* Smart Array 5i */
312 0x40700E11, /* Smart Array 5300 */
313 0x40820E11, /* Smart Array 532 */
314 0x40830E11, /* Smart Array 5312 */
315 0x409A0E11, /* Smart Array 641 */
316 0x409B0E11, /* Smart Array 642 */
317 0x40910E11, /* Smart Array 6i */
304 /* Exclude 640x boards. These are two pci devices in one slot 318 /* Exclude 640x boards. These are two pci devices in one slot
305 * which share a battery backed cache module. One controls the 319 * which share a battery backed cache module. One controls the
306 * cache, the other accesses the cache through the one that controls 320 * cache, the other accesses the cache through the one that controls
@@ -475,8 +489,8 @@ static struct device_attribute *hpsa_shost_attrs[] = {
475 489
476static struct scsi_host_template hpsa_driver_template = { 490static struct scsi_host_template hpsa_driver_template = {
477 .module = THIS_MODULE, 491 .module = THIS_MODULE,
478 .name = "hpsa", 492 .name = HPSA,
479 .proc_name = "hpsa", 493 .proc_name = HPSA,
480 .queuecommand = hpsa_scsi_queue_command, 494 .queuecommand = hpsa_scsi_queue_command,
481 .scan_start = hpsa_scan_start, 495 .scan_start = hpsa_scan_start,
482 .scan_finished = hpsa_scan_finished, 496 .scan_finished = hpsa_scan_finished,
@@ -577,21 +591,19 @@ static int hpsa_find_target_lun(struct ctlr_info *h,
577 int i, found = 0; 591 int i, found = 0;
578 DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES); 592 DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
579 593
580 memset(&lun_taken[0], 0, HPSA_MAX_DEVICES >> 3); 594 bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
581 595
582 for (i = 0; i < h->ndevices; i++) { 596 for (i = 0; i < h->ndevices; i++) {
583 if (h->dev[i]->bus == bus && h->dev[i]->target != -1) 597 if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
584 set_bit(h->dev[i]->target, lun_taken); 598 __set_bit(h->dev[i]->target, lun_taken);
585 } 599 }
586 600
587 for (i = 0; i < HPSA_MAX_DEVICES; i++) { 601 i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
588 if (!test_bit(i, lun_taken)) { 602 if (i < HPSA_MAX_DEVICES) {
589 /* *bus = 1; */ 603 /* *bus = 1; */
590 *target = i; 604 *target = i;
591 *lun = 0; 605 *lun = 0;
592 found = 1; 606 found = 1;
593 break;
594 }
595 } 607 }
596 return !found; 608 return !found;
597} 609}
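
Besides being shorter, the bitmap helpers above are safer: bitmap_zero() clears the map's entire unsigned-long storage, whereas the old memset of HPSA_MAX_DEVICES >> 3 bytes need not cover the array's tail when the bit count is not a multiple of the word size, and find_first_zero_bit() replaces the open-coded scan. A userspace model of the zero/set/scan sequence:

#include <limits.h>
#include <stdio.h>
#include <string.h>

#define MAX_TARGETS 64
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define MAP_LONGS ((MAX_TARGETS + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void set_taken(unsigned long *map, unsigned int bit)
{
	map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

/* returns nbits when no zero bit exists, like find_first_zero_bit() */
static unsigned int first_free(const unsigned long *map, unsigned int nbits)
{
	unsigned int i;

	for (i = 0; i < nbits; i++)
		if (!(map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG))))
			break;
	return i;
}

int main(void)
{
	unsigned long taken[MAP_LONGS];

	memset(taken, 0, sizeof(taken));	/* bitmap_zero() equivalent */
	set_taken(taken, 0);
	set_taken(taken, 1);
	printf("first free target: %u\n", first_free(taken, MAX_TARGETS));
	return 0;
}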
@@ -675,6 +687,20 @@ lun_assigned:
675 return 0; 687 return 0;
676} 688}
677 689
690/* Update an entry in h->dev[] array. */
691static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
692 int entry, struct hpsa_scsi_dev_t *new_entry)
693{
694 /* assumes h->devlock is held */
695 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
696
697 /* Raid level changed. */
698 h->dev[entry]->raid_level = new_entry->raid_level;
699 dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
700 scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
701 new_entry->target, new_entry->lun);
702}
703
678/* Replace an entry from h->dev[] array. */ 704/* Replace an entry from h->dev[] array. */
679static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno, 705static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
680 int entry, struct hpsa_scsi_dev_t *new_entry, 706 int entry, struct hpsa_scsi_dev_t *new_entry,
@@ -781,10 +807,25 @@ static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
781 return 1; 807 return 1;
782} 808}
783 809
810static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
811 struct hpsa_scsi_dev_t *dev2)
812{
813 /* Device attributes that can change, but don't mean
814 * that the device is a different device, nor that the OS
815 * needs to be told anything about the change.
816 */
817 if (dev1->raid_level != dev2->raid_level)
818 return 1;
819 return 0;
820}
821
784/* Find needle in haystack. If exact match found, return DEVICE_SAME, 822/* Find needle in haystack. If exact match found, return DEVICE_SAME,
785 * and return needle location in *index. If scsi3addr matches, but not 823 * and return needle location in *index. If scsi3addr matches, but not
786 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle 824 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
787 * location in *index. If needle not found, return DEVICE_NOT_FOUND. 825 * location in *index.
826 * In the case of a minor device attribute change, such as RAID level, just
827 * return DEVICE_UPDATED, along with the updated device's location in index.
828 * If needle not found, return DEVICE_NOT_FOUND.
788 */ 829 */
789static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle, 830static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
790 struct hpsa_scsi_dev_t *haystack[], int haystack_size, 831 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
@@ -794,15 +835,19 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
794#define DEVICE_NOT_FOUND 0 835#define DEVICE_NOT_FOUND 0
795#define DEVICE_CHANGED 1 836#define DEVICE_CHANGED 1
796#define DEVICE_SAME 2 837#define DEVICE_SAME 2
838#define DEVICE_UPDATED 3
797 for (i = 0; i < haystack_size; i++) { 839 for (i = 0; i < haystack_size; i++) {
798 if (haystack[i] == NULL) /* previously removed. */ 840 if (haystack[i] == NULL) /* previously removed. */
799 continue; 841 continue;
800 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) { 842 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
801 *index = i; 843 *index = i;
802 if (device_is_the_same(needle, haystack[i])) 844 if (device_is_the_same(needle, haystack[i])) {
845 if (device_updated(needle, haystack[i]))
846 return DEVICE_UPDATED;
803 return DEVICE_SAME; 847 return DEVICE_SAME;
804 else 848 } else {
805 return DEVICE_CHANGED; 849 return DEVICE_CHANGED;
850 }
806 } 851 }
807 } 852 }
808 *index = -1; 853 *index = -1;
@@ -838,6 +883,8 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
838 * sd[] and remove them from h->dev[], and for any 883 * sd[] and remove them from h->dev[], and for any
839 * devices which have changed, remove the old device 884 * devices which have changed, remove the old device
840 * info and add the new device info. 885 * info and add the new device info.
886 * If minor device attributes change, just update
887 * the existing device structure.
841 */ 888 */
842 i = 0; 889 i = 0;
843 nremoved = 0; 890 nremoved = 0;
@@ -858,6 +905,8 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
858 * at the bottom of hpsa_update_scsi_devices() 905 * at the bottom of hpsa_update_scsi_devices()
859 */ 906 */
860 sd[entry] = NULL; 907 sd[entry] = NULL;
908 } else if (device_change == DEVICE_UPDATED) {
909 hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
861 } 910 }
862 i++; 911 i++;
863 } 912 }
@@ -1257,46 +1306,6 @@ static void complete_scsi_command(struct CommandList *cp)
1257 cmd_free(h, cp); 1306 cmd_free(h, cp);
1258} 1307}
1259 1308
1260static int hpsa_scsi_detect(struct ctlr_info *h)
1261{
1262 struct Scsi_Host *sh;
1263 int error;
1264
1265 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
1266 if (sh == NULL)
1267 goto fail;
1268
1269 sh->io_port = 0;
1270 sh->n_io_port = 0;
1271 sh->this_id = -1;
1272 sh->max_channel = 3;
1273 sh->max_cmd_len = MAX_COMMAND_SIZE;
1274 sh->max_lun = HPSA_MAX_LUN;
1275 sh->max_id = HPSA_MAX_LUN;
1276 sh->can_queue = h->nr_cmds;
1277 sh->cmd_per_lun = h->nr_cmds;
1278 sh->sg_tablesize = h->maxsgentries;
1279 h->scsi_host = sh;
1280 sh->hostdata[0] = (unsigned long) h;
1281 sh->irq = h->intr[h->intr_mode];
1282 sh->unique_id = sh->irq;
1283 error = scsi_add_host(sh, &h->pdev->dev);
1284 if (error)
1285 goto fail_host_put;
1286 scsi_scan_host(sh);
1287 return 0;
1288
1289 fail_host_put:
1290 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
1291 " failed for controller %d\n", h->ctlr);
1292 scsi_host_put(sh);
1293 return error;
1294 fail:
1295 dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
1296 " failed for controller %d\n", h->ctlr);
1297 return -ENOMEM;
1298}
1299
1300static void hpsa_pci_unmap(struct pci_dev *pdev, 1309static void hpsa_pci_unmap(struct pci_dev *pdev,
1301 struct CommandList *c, int sg_used, int data_direction) 1310 struct CommandList *c, int sg_used, int data_direction)
1302{ 1311{
@@ -1641,7 +1650,7 @@ bail_out:
1641 return 1; 1650 return 1;
1642} 1651}
1643 1652
1644static unsigned char *msa2xxx_model[] = { 1653static unsigned char *ext_target_model[] = {
1645 "MSA2012", 1654 "MSA2012",
1646 "MSA2024", 1655 "MSA2024",
1647 "MSA2312", 1656 "MSA2312",
@@ -1650,78 +1659,54 @@ static unsigned char *msa2xxx_model[] = {
1650 NULL, 1659 NULL,
1651}; 1660};
1652 1661
1653static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) 1662static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1654{ 1663{
1655 int i; 1664 int i;
1656 1665
1657 for (i = 0; msa2xxx_model[i]; i++) 1666 for (i = 0; ext_target_model[i]; i++)
1658 if (strncmp(device->model, msa2xxx_model[i], 1667 if (strncmp(device->model, ext_target_model[i],
1659 strlen(msa2xxx_model[i])) == 0) 1668 strlen(ext_target_model[i])) == 0)
1660 return 1; 1669 return 1;
1661 return 0; 1670 return 0;
1662} 1671}
1663 1672
1664/* Helper function to assign bus, target, lun mapping of devices. 1673/* Helper function to assign bus, target, lun mapping of devices.
1665 * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical 1674 * Puts non-external target logical volumes on bus 0, external target logical
1666 * volumes on bus 1, physical devices on bus 2, and the hba on bus 3. 1675 * volumes on bus 1, physical devices on bus 2, and the hba on bus 3.
1667 * Logical drive target and lun are assigned at this time, but 1676 * Logical drive target and lun are assigned at this time, but
1668 * physical device lun and target assignment are deferred (assigned 1677 * physical device lun and target assignment are deferred (assigned
1669 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.) 1678 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
1670 */ 1679 */
1671static void figure_bus_target_lun(struct ctlr_info *h, 1680static void figure_bus_target_lun(struct ctlr_info *h,
1672 u8 *lunaddrbytes, int *bus, int *target, int *lun, 1681 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
1673 struct hpsa_scsi_dev_t *device)
1674{ 1682{
1675 u32 lunid; 1683 u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
1676 1684
1677 if (is_logical_dev_addr_mode(lunaddrbytes)) { 1685 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
1678 /* logical device */ 1686 /* physical device, target and lun filled in later */
1679 if (unlikely(is_scsi_rev_5(h))) {
1680 /* p1210m, logical drives lun assignments
1681 * match SCSI REPORT LUNS data.
1682 */
1683 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
1684 *bus = 0;
1685 *target = 0;
1686 *lun = (lunid & 0x3fff) + 1;
1687 } else {
1688 /* not p1210m... */
1689 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
1690 if (is_msa2xxx(h, device)) {
1691 /* msa2xxx way, put logicals on bus 1
1692 * and match target/lun numbers box
1693 * reports.
1694 */
1695 *bus = 1;
1696 *target = (lunid >> 16) & 0x3fff;
1697 *lun = lunid & 0x00ff;
1698 } else {
1699 /* Traditional smart array way. */
1700 *bus = 0;
1701 *lun = 0;
1702 *target = lunid & 0x3fff;
1703 }
1704 }
1705 } else {
1706 /* physical device */
1707 if (is_hba_lunid(lunaddrbytes)) 1687 if (is_hba_lunid(lunaddrbytes))
1708 if (unlikely(is_scsi_rev_5(h))) { 1688 hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
1709 *bus = 0; /* put p1210m ctlr at 0,0,0 */
1710 *target = 0;
1711 *lun = 0;
1712 return;
1713 } else
1714 *bus = 3; /* traditional smartarray */
1715 else 1689 else
1716 *bus = 2; /* physical disk */ 1690 /* defer target, lun assignment for physical devices */
1717 *target = -1; 1691 hpsa_set_bus_target_lun(device, 2, -1, -1);
1718 *lun = -1; /* we will fill these in later. */ 1692 return;
1693 }
1694 /* It's a logical device */
1695 if (is_ext_target(h, device)) {
1696 /* external target way, put logicals on bus 1
1697 * and match target/lun numbers box
1698 * reports, other smart array, bus 0, target 0, match lunid
1699 */
1700 hpsa_set_bus_target_lun(device,
1701 1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
1702 return;
1719 } 1703 }
1704 hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
1720} 1705}
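
figure_bus_target_lun() above now derives the whole bus/target/lun triple from the reported 32-bit LUN ID in one place: external target boxes land on bus 1 with target and lun unpacked from the ID's fields, and every other logical volume goes on bus 0, target 0. A userspace model of the logical-volume rules, with the field masks copied from the function above:

#include <stdint.h>

struct btl { int bus, target, lun; };

static struct btl decode_logical(uint32_t lunid, int is_ext_target)
{
	struct btl a;

	if (is_ext_target) {
		a.bus = 1;			/* external target box */
		a.target = (lunid >> 16) & 0x3fff;
		a.lun = lunid & 0x00ff;
	} else {
		a.bus = 0;			/* traditional Smart Array */
		a.target = 0;
		a.lun = lunid & 0x3fff;
	}
	return a;
}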
1721 1706
1722/* 1707/*
1723 * If there is no lun 0 on a target, linux won't find any devices. 1708 * If there is no lun 0 on a target, linux won't find any devices.
1724 * For the MSA2xxx boxes, we have to manually detect the enclosure 1709 * For the external targets (arrays), we have to manually detect the enclosure
1725 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report 1710 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
1726 * it for some reason. *tmpdevice is the target we're adding, 1711 * it for some reason. *tmpdevice is the target we're adding,
1727 * this_device is a pointer into the current element of currentsd[] 1712 * this_device is a pointer into the current element of currentsd[]
@@ -1730,46 +1715,46 @@ static void figure_bus_target_lun(struct ctlr_info *h,
1730 * lun 0 assigned. 1715 * lun 0 assigned.
1731 * Returns 1 if an enclosure was added, 0 if not. 1716 * Returns 1 if an enclosure was added, 0 if not.
1732 */ 1717 */
1733static int add_msa2xxx_enclosure_device(struct ctlr_info *h, 1718static int add_ext_target_dev(struct ctlr_info *h,
1734 struct hpsa_scsi_dev_t *tmpdevice, 1719 struct hpsa_scsi_dev_t *tmpdevice,
1735 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes, 1720 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
1736 int bus, int target, int lun, unsigned long lunzerobits[], 1721 unsigned long lunzerobits[], int *n_ext_target_devs)
1737 int *nmsa2xxx_enclosures)
1738{ 1722{
1739 unsigned char scsi3addr[8]; 1723 unsigned char scsi3addr[8];
1740 1724
1741 if (test_bit(target, lunzerobits)) 1725 if (test_bit(tmpdevice->target, lunzerobits))
1742 return 0; /* There is already a lun 0 on this target. */ 1726 return 0; /* There is already a lun 0 on this target. */
1743 1727
1744 if (!is_logical_dev_addr_mode(lunaddrbytes)) 1728 if (!is_logical_dev_addr_mode(lunaddrbytes))
1745 return 0; /* It's the logical targets that may lack lun 0. */ 1729 return 0; /* It's the logical targets that may lack lun 0. */
1746 1730
1747 if (!is_msa2xxx(h, tmpdevice)) 1731 if (!is_ext_target(h, tmpdevice))
1748 return 0; /* It's only the MSA2xxx that have this problem. */ 1732 return 0; /* Only external target devices have this problem. */
1749 1733
1750 if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */ 1734 if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
1751 return 0; 1735 return 0;
1752 1736
1753 memset(scsi3addr, 0, 8); 1737 memset(scsi3addr, 0, 8);
1754 scsi3addr[3] = target; 1738 scsi3addr[3] = tmpdevice->target;
1755 if (is_hba_lunid(scsi3addr)) 1739 if (is_hba_lunid(scsi3addr))
1756 return 0; /* Don't add the RAID controller here. */ 1740 return 0; /* Don't add the RAID controller here. */
1757 1741
1758 if (is_scsi_rev_5(h)) 1742 if (is_scsi_rev_5(h))
1759 return 0; /* p1210m doesn't need to do this. */ 1743 return 0; /* p1210m doesn't need to do this. */
1760 1744
1761 if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) { 1745 if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
1762 dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX " 1746 dev_warn(&h->pdev->dev, "Maximum number of external "
1763 "enclosures exceeded. Check your hardware " 1747 "target devices exceeded. Check your hardware "
1764 "configuration."); 1748 "configuration.");
1765 return 0; 1749 return 0;
1766 } 1750 }
1767 1751
1768 if (hpsa_update_device_info(h, scsi3addr, this_device, NULL)) 1752 if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
1769 return 0; 1753 return 0;
1770 (*nmsa2xxx_enclosures)++; 1754 (*n_ext_target_devs)++;
1771 hpsa_set_bus_target_lun(this_device, bus, target, 0); 1755 hpsa_set_bus_target_lun(this_device,
1772 set_bit(target, lunzerobits); 1756 tmpdevice->bus, tmpdevice->target, 0);
1757 set_bit(tmpdevice->target, lunzerobits);
1773 return 1; 1758 return 1;
1774} 1759}
1775 1760
@@ -1863,10 +1848,9 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1863 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; 1848 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
1864 int ncurrent = 0; 1849 int ncurrent = 0;
1865 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8; 1850 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8;
1866 int i, nmsa2xxx_enclosures, ndevs_to_allocate; 1851 int i, n_ext_target_devs, ndevs_to_allocate;
1867 int bus, target, lun;
1868 int raid_ctlr_position; 1852 int raid_ctlr_position;
1869 DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR); 1853 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
1870 1854
1871 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL); 1855 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
1872 physdev_list = kzalloc(reportlunsize, GFP_KERNEL); 1856 physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
@@ -1883,11 +1867,11 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1883 logdev_list, &nlogicals)) 1867 logdev_list, &nlogicals))
1884 goto out; 1868 goto out;
1885 1869
1886 /* We might see up to 32 MSA2xxx enclosures, actually 8 of them 1870 /* We might see up to the maximum number of logical and physical disks
1887 * but each of them 4 times through different paths. The plus 1 1871 * plus external target devices, and a device for the local RAID
1888 * is for the RAID controller. 1872 * controller.
1889 */ 1873 */
1890 ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1; 1874 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
1891 1875
1892 /* Allocate the per device structures */ 1876 /* Allocate the per device structures */
1893 for (i = 0; i < ndevs_to_allocate; i++) { 1877 for (i = 0; i < ndevs_to_allocate; i++) {
@@ -1913,7 +1897,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1913 raid_ctlr_position = nphysicals + nlogicals; 1897 raid_ctlr_position = nphysicals + nlogicals;
1914 1898
1915 /* adjust our table of devices */ 1899 /* adjust our table of devices */
1916 nmsa2xxx_enclosures = 0; 1900 n_ext_target_devs = 0;
1917 for (i = 0; i < nphysicals + nlogicals + 1; i++) { 1901 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
1918 u8 *lunaddrbytes, is_OBDR = 0; 1902 u8 *lunaddrbytes, is_OBDR = 0;
1919 1903
@@ -1929,26 +1913,24 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
1929 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice, 1913 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
1930 &is_OBDR)) 1914 &is_OBDR))
1931 continue; /* skip it if we can't talk to it. */ 1915 continue; /* skip it if we can't talk to it. */
1932 figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun, 1916 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
1933 tmpdevice);
1934 this_device = currentsd[ncurrent]; 1917 this_device = currentsd[ncurrent];
1935 1918
1936 /* 1919 /*
1937 * For the msa2xxx boxes, we have to insert a LUN 0 which 1920 * For external target devices, we have to insert a LUN 0 which
1938 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there 1921 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
1939 * is nonetheless an enclosure device there. We have to 1922 * is nonetheless an enclosure device there. We have to
1940 * present that otherwise linux won't find anything if 1923 * present that otherwise linux won't find anything if
1941 * there is no lun 0. 1924 * there is no lun 0.
1942 */ 1925 */
1943 if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device, 1926 if (add_ext_target_dev(h, tmpdevice, this_device,
1944 lunaddrbytes, bus, target, lun, lunzerobits, 1927 lunaddrbytes, lunzerobits,
1945 &nmsa2xxx_enclosures)) { 1928 &n_ext_target_devs)) {
1946 ncurrent++; 1929 ncurrent++;
1947 this_device = currentsd[ncurrent]; 1930 this_device = currentsd[ncurrent];
1948 } 1931 }
1949 1932
1950 *this_device = *tmpdevice; 1933 *this_device = *tmpdevice;
1951 hpsa_set_bus_target_lun(this_device, bus, target, lun);
1952 1934
1953 switch (this_device->devtype) { 1935 switch (this_device->devtype) {
1954 case TYPE_ROM: 1936 case TYPE_ROM:
@@ -2228,13 +2210,42 @@ static void hpsa_unregister_scsi(struct ctlr_info *h)
2228 2210
2229static int hpsa_register_scsi(struct ctlr_info *h) 2211static int hpsa_register_scsi(struct ctlr_info *h)
2230{ 2212{
2231 int rc; 2213 struct Scsi_Host *sh;
2214 int error;
2232 2215
2233 rc = hpsa_scsi_detect(h); 2216 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
2234 if (rc != 0) 2217 if (sh == NULL)
2235 dev_err(&h->pdev->dev, "hpsa_register_scsi: failed" 2218 goto fail;
2236 " hpsa_scsi_detect(), rc is %d\n", rc); 2219
2237 return rc; 2220 sh->io_port = 0;
2221 sh->n_io_port = 0;
2222 sh->this_id = -1;
2223 sh->max_channel = 3;
2224 sh->max_cmd_len = MAX_COMMAND_SIZE;
2225 sh->max_lun = HPSA_MAX_LUN;
2226 sh->max_id = HPSA_MAX_LUN;
2227 sh->can_queue = h->nr_cmds;
2228 sh->cmd_per_lun = h->nr_cmds;
2229 sh->sg_tablesize = h->maxsgentries;
2230 h->scsi_host = sh;
2231 sh->hostdata[0] = (unsigned long) h;
2232 sh->irq = h->intr[h->intr_mode];
2233 sh->unique_id = sh->irq;
2234 error = scsi_add_host(sh, &h->pdev->dev);
2235 if (error)
2236 goto fail_host_put;
2237 scsi_scan_host(sh);
2238 return 0;
2239
2240 fail_host_put:
2241 dev_err(&h->pdev->dev, "%s: scsi_add_host"
2242 " failed for controller %d\n", __func__, h->ctlr);
2243 scsi_host_put(sh);
2244 return error;
2245 fail:
2246 dev_err(&h->pdev->dev, "%s: scsi_host_alloc"
2247 " failed for controller %d\n", __func__, h->ctlr);
2248 return -ENOMEM;
2238} 2249}
2239 2250
2240static int wait_for_device_to_become_ready(struct ctlr_info *h, 2251static int wait_for_device_to_become_ready(struct ctlr_info *h,
@@ -2700,16 +2711,16 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
2700 status = -EINVAL; 2711 status = -EINVAL;
2701 goto cleanup1; 2712 goto cleanup1;
2702 } 2713 }
2703 if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) { 2714 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
2704 status = -EINVAL; 2715 status = -EINVAL;
2705 goto cleanup1; 2716 goto cleanup1;
2706 } 2717 }
2707 buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL); 2718 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
2708 if (!buff) { 2719 if (!buff) {
2709 status = -ENOMEM; 2720 status = -ENOMEM;
2710 goto cleanup1; 2721 goto cleanup1;
2711 } 2722 }
2712 buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL); 2723 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
2713 if (!buff_size) { 2724 if (!buff_size) {
2714 status = -ENOMEM; 2725 status = -ENOMEM;
2715 goto cleanup1; 2726 goto cleanup1;
@@ -3354,7 +3365,7 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
3354static __devinit void init_driver_version(char *driver_version, int len) 3365static __devinit void init_driver_version(char *driver_version, int len)
3355{ 3366{
3356 memset(driver_version, 0, len); 3367 memset(driver_version, 0, len);
3357 strncpy(driver_version, "hpsa " HPSA_DRIVER_VERSION, len - 1); 3368 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
3358} 3369}
3359 3370
3360static __devinit int write_driver_ver_to_cfgtable( 3371static __devinit int write_driver_ver_to_cfgtable(
@@ -3935,7 +3946,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
3935 return err; 3946 return err;
3936 } 3947 }
3937 3948
3938 err = pci_request_regions(h->pdev, "hpsa"); 3949 err = pci_request_regions(h->pdev, HPSA);
3939 if (err) { 3950 if (err) {
3940 dev_err(&h->pdev->dev, 3951 dev_err(&h->pdev->dev,
3941 "cannot obtain PCI resources, aborting\n"); 3952 "cannot obtain PCI resources, aborting\n");
@@ -4253,7 +4264,7 @@ static void start_controller_lockup_detector(struct ctlr_info *h)
4253 spin_lock_init(&lockup_detector_lock); 4264 spin_lock_init(&lockup_detector_lock);
4254 hpsa_lockup_detector = 4265 hpsa_lockup_detector =
4255 kthread_run(detect_controller_lockup_thread, 4266 kthread_run(detect_controller_lockup_thread,
4256 NULL, "hpsa"); 4267 NULL, HPSA);
4257 } 4268 }
4258 if (!hpsa_lockup_detector) { 4269 if (!hpsa_lockup_detector) {
4259 dev_warn(&h->pdev->dev, 4270 dev_warn(&h->pdev->dev,
@@ -4325,7 +4336,7 @@ reinit_after_soft_reset:
4325 if (rc != 0) 4336 if (rc != 0)
4326 goto clean1; 4337 goto clean1;
4327 4338
4328 sprintf(h->devname, "hpsa%d", number_of_controllers); 4339 sprintf(h->devname, HPSA "%d", number_of_controllers);
4329 h->ctlr = number_of_controllers; 4340 h->ctlr = number_of_controllers;
4330 number_of_controllers++; 4341 number_of_controllers++;
4331 4342
@@ -4482,6 +4493,14 @@ static void hpsa_shutdown(struct pci_dev *pdev)
4482#endif /* CONFIG_PCI_MSI */ 4493#endif /* CONFIG_PCI_MSI */
4483} 4494}
4484 4495
4496static void __devexit hpsa_free_device_info(struct ctlr_info *h)
4497{
4498 int i;
4499
4500 for (i = 0; i < h->ndevices; i++)
4501 kfree(h->dev[i]);
4502}
4503
4485static void __devexit hpsa_remove_one(struct pci_dev *pdev) 4504static void __devexit hpsa_remove_one(struct pci_dev *pdev)
4486{ 4505{
4487 struct ctlr_info *h; 4506 struct ctlr_info *h;
@@ -4497,6 +4516,7 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev)
4497 iounmap(h->vaddr); 4516 iounmap(h->vaddr);
4498 iounmap(h->transtable); 4517 iounmap(h->transtable);
4499 iounmap(h->cfgtable); 4518 iounmap(h->cfgtable);
4519 hpsa_free_device_info(h);
4500 hpsa_free_sg_chain_blocks(h); 4520 hpsa_free_sg_chain_blocks(h);
4501 pci_free_consistent(h->pdev, 4521 pci_free_consistent(h->pdev,
4502 h->nr_cmds * sizeof(struct CommandList), 4522 h->nr_cmds * sizeof(struct CommandList),
@@ -4530,7 +4550,7 @@ static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
4530} 4550}
4531 4551
4532static struct pci_driver hpsa_pci_driver = { 4552static struct pci_driver hpsa_pci_driver = {
4533 .name = "hpsa", 4553 .name = HPSA,
4534 .probe = hpsa_init_one, 4554 .probe = hpsa_init_one,
4535 .remove = __devexit_p(hpsa_remove_one), 4555 .remove = __devexit_p(hpsa_remove_one),
4536 .id_table = hpsa_pci_device_id, /* id_table */ 4556 .id_table = hpsa_pci_device_id, /* id_table */
@@ -4592,15 +4612,15 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
4592 * Each SG entry requires 16 bytes. The eight registers are programmed 4612 * Each SG entry requires 16 bytes. The eight registers are programmed
4593 * with the number of 16-byte blocks a command of that size requires. 4613 * with the number of 16-byte blocks a command of that size requires.
4594 * The smallest command possible requires 5 such 16-byte blocks. 4614 * The smallest command possible requires 5 such 16-byte blocks.
4595 * The largest command possible requires MAXSGENTRIES + 4 16-byte 4615 * The largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
4596 * blocks. Note, this only extends to the SG entries contained 4616 * blocks. Note, this only extends to the SG entries contained
4597 * within the command block, and does not extend to chained blocks 4617 * within the command block, and does not extend to chained blocks
4598 * of SG elements. bft[] contains the eight values we write to 4618 * of SG elements. bft[] contains the eight values we write to
4599 * the registers. They are not evenly distributed, but have more 4619 * the registers. They are not evenly distributed, but have more
4600 * sizes for small commands, and fewer sizes for larger commands. 4620 * sizes for small commands, and fewer sizes for larger commands.
4601 */ 4621 */
4602 int bft[8] = {5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4}; 4622 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
4603 BUILD_BUG_ON(28 > MAXSGENTRIES + 4); 4623 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
4604 /* 5 = 1 s/g entry or 4k 4624 /* 5 = 1 s/g entry or 4k
4605 * 6 = 2 s/g entry or 8k 4625 * 6 = 2 s/g entry or 8k
4606 * 8 = 4 s/g entry or 16k 4626 * 8 = 4 s/g entry or 16k
@@ -4613,8 +4633,9 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
4613 memset(h->reply_pool, 0, h->reply_pool_size); 4633 memset(h->reply_pool, 0, h->reply_pool_size);
4614 h->reply_pool_head = h->reply_pool; 4634 h->reply_pool_head = h->reply_pool;
4615 4635
4616 bft[7] = h->max_sg_entries + 4; 4636 bft[7] = SG_ENTRIES_IN_CMD + 4;
4617 calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable); 4637 calc_bucket_map(bft, ARRAY_SIZE(bft),
4638 SG_ENTRIES_IN_CMD, h->blockFetchTable);
4618 for (i = 0; i < 8; i++) 4639 for (i = 0; i < 8; i++)
4619 writel(bft[i], &h->transtable->BlockFetch[i]); 4640 writel(bft[i], &h->transtable->BlockFetch[i]);
4620 4641
@@ -4652,14 +4673,13 @@ static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
4652 return; 4673 return;
4653 4674
4654 hpsa_get_max_perf_mode_cmds(h); 4675 hpsa_get_max_perf_mode_cmds(h);
4655 h->max_sg_entries = 32;
4656 /* Performant mode ring buffer and supporting data structures */ 4676 /* Performant mode ring buffer and supporting data structures */
4657 h->reply_pool_size = h->max_commands * sizeof(u64); 4677 h->reply_pool_size = h->max_commands * sizeof(u64);
4658 h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size, 4678 h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
4659 &(h->reply_pool_dhandle)); 4679 &(h->reply_pool_dhandle));
4660 4680
4661 /* Need a block fetch table for performant mode */ 4681 /* Need a block fetch table for performant mode */
4662 h->blockFetchTable = kmalloc(((h->max_sg_entries+1) * 4682 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
4663 sizeof(u32)), GFP_KERNEL); 4683 sizeof(u32)), GFP_KERNEL);
4664 4684
4665 if ((h->reply_pool == NULL) 4685 if ((h->reply_pool == NULL)
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 91edafb8c7e..7b28d54fa87 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -58,7 +58,6 @@ struct ctlr_info {
58 unsigned long paddr; 58 unsigned long paddr;
59 int nr_cmds; /* Number of commands allowed on this controller */ 59 int nr_cmds; /* Number of commands allowed on this controller */
60 struct CfgTable __iomem *cfgtable; 60 struct CfgTable __iomem *cfgtable;
61 int max_sg_entries;
62 int interrupts_enabled; 61 int interrupts_enabled;
63 int major; 62 int major;
64 int max_commands; 63 int max_commands;
@@ -317,7 +316,7 @@ static unsigned long SA5_completed(struct ctlr_info *h)
317 dev_dbg(&h->pdev->dev, "Read %lx back from board\n", 316 dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
318 register_value); 317 register_value);
319 else 318 else
320 dev_dbg(&h->pdev->dev, "hpsa: FIFO Empty read\n"); 319 dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
321#endif 320#endif
322 321
323 return register_value; 322 return register_value;
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 3fd4715935c..8049815d8c1 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -23,7 +23,7 @@
23 23
24/* general boundary definitions */ 24/* general boundary definitions */
25#define SENSEINFOBYTES 32 /* may vary between hbas */ 25#define SENSEINFOBYTES 32 /* may vary between hbas */
26#define MAXSGENTRIES 32 26#define SG_ENTRIES_IN_CMD 32 /* Max SG entries excluding chain blocks */
27#define HPSA_SG_CHAIN 0x80000000 27#define HPSA_SG_CHAIN 0x80000000
28#define MAXREPLYQS 256 28#define MAXREPLYQS 256
29 29
@@ -122,12 +122,11 @@ union u64bit {
122}; 122};
123 123
124/* FIXME this is a per controller value (barf!) */ 124/* FIXME this is a per controller value (barf!) */
125#define HPSA_MAX_TARGETS_PER_CTLR 16
126#define HPSA_MAX_LUN 1024 125#define HPSA_MAX_LUN 1024
127#define HPSA_MAX_PHYS_LUN 1024 126#define HPSA_MAX_PHYS_LUN 1024
128#define MAX_MSA2XXX_ENCLOSURES 32 127#define MAX_EXT_TARGETS 32
129#define HPSA_MAX_DEVICES (HPSA_MAX_PHYS_LUN + HPSA_MAX_LUN + \ 128#define HPSA_MAX_DEVICES (HPSA_MAX_PHYS_LUN + HPSA_MAX_LUN + \
130 MAX_MSA2XXX_ENCLOSURES + 1) /* + 1 is for the controller itself */ 129 MAX_EXT_TARGETS + 1) /* + 1 is for the controller itself */
131 130
132/* SCSI-3 Commands */ 131/* SCSI-3 Commands */
133#pragma pack(1) 132#pragma pack(1)
@@ -282,7 +281,7 @@ struct CommandList {
282 struct CommandListHeader Header; 281 struct CommandListHeader Header;
283 struct RequestBlock Request; 282 struct RequestBlock Request;
284 struct ErrDescriptor ErrDesc; 283 struct ErrDescriptor ErrDesc;
285 struct SGDescriptor SG[MAXSGENTRIES]; 284 struct SGDescriptor SG[SG_ENTRIES_IN_CMD];
286 /* information associated with the command */ 285 /* information associated with the command */
287 u32 busaddr; /* physical addr of this record */ 286 u32 busaddr; /* physical addr of this record */
288 struct ErrorInfo *err_info; /* pointer to the allocated mem */ 287 struct ErrorInfo *err_info; /* pointer to the allocated mem */
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index b538f0883fd..cdfe5a16de2 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -183,7 +183,7 @@ static const struct ipr_chip_t ipr_chip[] = {
183 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] }, 183 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
184 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] }, 184 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
185 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }, 185 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
186 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] } 186 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
187}; 187};
188 188
189static int ipr_max_bus_speeds [] = { 189static int ipr_max_bus_speeds [] = {
@@ -9191,15 +9191,15 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
9191 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 }, 9191 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
9192 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, 9192 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9193 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 }, 9193 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
9194 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, 9194 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9195 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 }, 9195 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
9196 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, 9196 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9197 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 }, 9197 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
9198 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, 9198 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9199 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 }, 9199 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
9200 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, 9200 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9201 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 0 }, 9201 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
9202 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, 9202 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9203 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 }, 9203 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
9204 { } 9204 { }
9205}; 9205};
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index b13f9cc1227..f94eaee2ff1 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -58,7 +58,7 @@
58#define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339 58#define PCI_DEVICE_ID_IBM_OBSIDIAN_E 0x0339
59 59
60#define PCI_DEVICE_ID_IBM_CROC_FPGA_E2 0x033D 60#define PCI_DEVICE_ID_IBM_CROC_FPGA_E2 0x033D
61#define PCI_DEVICE_ID_IBM_CROC_ASIC_E2 0x034A 61#define PCI_DEVICE_ID_IBM_CROCODILE 0x034A
62 62
63#define IPR_SUBS_DEV_ID_2780 0x0264 63#define IPR_SUBS_DEV_ID_2780 0x0264
64#define IPR_SUBS_DEV_ID_5702 0x0266 64#define IPR_SUBS_DEV_ID_5702 0x0266
@@ -92,7 +92,7 @@
92#define IPR_SUBS_DEV_ID_57B1 0x0355 92#define IPR_SUBS_DEV_ID_57B1 0x0355
93 93
94#define IPR_SUBS_DEV_ID_574D 0x0356 94#define IPR_SUBS_DEV_ID_574D 0x0356
95#define IPR_SUBS_DEV_ID_575D 0x035D 95#define IPR_SUBS_DEV_ID_57C8 0x035D
96 96
97#define IPR_NAME "ipr" 97#define IPR_NAME "ipr"
98 98
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 6ca9b26bb2f..d4bf9c12ecd 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -649,15 +649,13 @@ static void isci_host_start_complete(struct isci_host *ihost, enum sci_status co
649 649
650int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time) 650int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
651{ 651{
652 struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha; 652 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
653 struct isci_host *ihost = ha->lldd_ha;
653 654
654 if (test_bit(IHOST_START_PENDING, &ihost->flags)) 655 if (test_bit(IHOST_START_PENDING, &ihost->flags))
655 return 0; 656 return 0;
656 657
657 /* todo: use sas_flush_discovery once it is upstream */ 658 sas_drain_work(ha);
658 scsi_flush_work(shost);
659
660 scsi_flush_work(shost);
661 659
662 dev_dbg(&ihost->pdev->dev, 660 dev_dbg(&ihost->pdev->dev,
663 "%s: ihost->status = %d, time = %ld\n", 661 "%s: ihost->status = %d, time = %ld\n",
@@ -1490,6 +1488,15 @@ sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
1490static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm) 1488static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
1491{ 1489{
1492 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); 1490 struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
1491 u32 val;
1492
1493 /* enable clock gating for power control of the scu unit */
1494 val = readl(&ihost->smu_registers->clock_gating_control);
1495 val &= ~(SMU_CGUCR_GEN_BIT(REGCLK_ENABLE) |
1496 SMU_CGUCR_GEN_BIT(TXCLK_ENABLE) |
1497 SMU_CGUCR_GEN_BIT(XCLK_ENABLE));
1498 val |= SMU_CGUCR_GEN_BIT(IDLE_ENABLE);
1499 writel(val, &ihost->smu_registers->clock_gating_control);
1493 1500
1494 /* set the default interrupt coalescence number and timeout value. */ 1501 /* set the default interrupt coalescence number and timeout value. */
1495 sci_controller_set_interrupt_coalescence(ihost, 0, 0); 1502 sci_controller_set_interrupt_coalescence(ihost, 0, 0);
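
The new ready-state hook is a plain read-modify-write: clear the three per-clock enables, set IDLE_ENABLE, write back. A userspace sketch of the same sequence, using the CGUCR bit positions added in registers.h further down (IDLE=0, XCLK=1, TXCLK=2, REGCLK=3) and a plain uint32_t in place of the memory-mapped register:

    /* Sketch of the read-modify-write above; a local uint32_t stands
     * in for readl()/writel() on the clock_gating_control register.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define CGUCR_IDLE_ENABLE   (1u << 0)
    #define CGUCR_XCLK_ENABLE   (1u << 1)
    #define CGUCR_TXCLK_ENABLE  (1u << 2)
    #define CGUCR_REGCLK_ENABLE (1u << 3)

    int main(void)
    {
            uint32_t val = 0x0000000e; /* pretend all three clocks enabled */

            val &= ~(CGUCR_REGCLK_ENABLE | CGUCR_TXCLK_ENABLE |
                     CGUCR_XCLK_ENABLE);
            val |= CGUCR_IDLE_ENABLE;
            printf("CGUCR = 0x%08x\n", val); /* 0x00000001 */
            return 0;
    }
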
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 5477f0fa823..adbad69d106 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -187,6 +187,7 @@ struct isci_host {
187 int id; /* unique within a given pci device */ 187 int id; /* unique within a given pci device */
188 struct isci_phy phys[SCI_MAX_PHYS]; 188 struct isci_phy phys[SCI_MAX_PHYS];
189 struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */ 189 struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
190 struct asd_sas_port sas_ports[SCI_MAX_PORTS];
190 struct sas_ha_struct sas_ha; 191 struct sas_ha_struct sas_ha;
191 192
192 spinlock_t state_lock; 193 spinlock_t state_lock;
@@ -393,24 +394,6 @@ static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
393#define sci_controller_clear_invalid_phy(controller, phy) \ 394#define sci_controller_clear_invalid_phy(controller, phy) \
394 ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index)) 395 ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index))
395 396
396static inline struct device *sciphy_to_dev(struct isci_phy *iphy)
397{
398
399 if (!iphy || !iphy->isci_port || !iphy->isci_port->isci_host)
400 return NULL;
401
402 return &iphy->isci_port->isci_host->pdev->dev;
403}
404
405static inline struct device *sciport_to_dev(struct isci_port *iport)
406{
407
408 if (!iport || !iport->isci_host)
409 return NULL;
410
411 return &iport->isci_host->pdev->dev;
412}
413
414static inline struct device *scirdev_to_dev(struct isci_remote_device *idev) 397static inline struct device *scirdev_to_dev(struct isci_remote_device *idev)
415{ 398{
416 if (!idev || !idev->isci_port || !idev->isci_port->isci_host) 399 if (!idev || !idev->isci_port || !idev->isci_port->isci_host)
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 17c4c2c89c2..5137db5a5d8 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -60,6 +60,7 @@
60#include <linux/efi.h> 60#include <linux/efi.h>
61#include <asm/string.h> 61#include <asm/string.h>
62#include <scsi/scsi_host.h> 62#include <scsi/scsi_host.h>
63#include "host.h"
63#include "isci.h" 64#include "isci.h"
64#include "task.h" 65#include "task.h"
65#include "probe_roms.h" 66#include "probe_roms.h"
@@ -154,7 +155,6 @@ static struct scsi_host_template isci_sht = {
154 .queuecommand = sas_queuecommand, 155 .queuecommand = sas_queuecommand,
155 .target_alloc = sas_target_alloc, 156 .target_alloc = sas_target_alloc,
156 .slave_configure = sas_slave_configure, 157 .slave_configure = sas_slave_configure,
157 .slave_destroy = sas_slave_destroy,
158 .scan_finished = isci_host_scan_finished, 158 .scan_finished = isci_host_scan_finished,
159 .scan_start = isci_host_scan_start, 159 .scan_start = isci_host_scan_start,
160 .change_queue_depth = sas_change_queue_depth, 160 .change_queue_depth = sas_change_queue_depth,
@@ -166,9 +166,6 @@ static struct scsi_host_template isci_sht = {
166 .sg_tablesize = SG_ALL, 166 .sg_tablesize = SG_ALL,
167 .max_sectors = SCSI_DEFAULT_MAX_SECTORS, 167 .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
168 .use_clustering = ENABLE_CLUSTERING, 168 .use_clustering = ENABLE_CLUSTERING,
169 .eh_device_reset_handler = sas_eh_device_reset_handler,
170 .eh_bus_reset_handler = isci_bus_reset_handler,
171 .slave_alloc = sas_slave_alloc,
172 .target_destroy = sas_target_destroy, 169 .target_destroy = sas_target_destroy,
173 .ioctl = sas_ioctl, 170 .ioctl = sas_ioctl,
174 .shost_attrs = isci_host_attrs, 171 .shost_attrs = isci_host_attrs,
@@ -194,6 +191,9 @@ static struct sas_domain_function_template isci_transport_ops = {
194 .lldd_lu_reset = isci_task_lu_reset, 191 .lldd_lu_reset = isci_task_lu_reset,
195 .lldd_query_task = isci_task_query_task, 192 .lldd_query_task = isci_task_query_task,
196 193
194 /* ata recovery called from ata-eh */
195 .lldd_ata_check_ready = isci_ata_check_ready,
196
197 /* Port and Adapter management */ 197 /* Port and Adapter management */
198 .lldd_clear_nexus_port = isci_task_clear_nexus_port, 198 .lldd_clear_nexus_port = isci_task_clear_nexus_port,
199 .lldd_clear_nexus_ha = isci_task_clear_nexus_ha, 199 .lldd_clear_nexus_ha = isci_task_clear_nexus_ha,
@@ -242,18 +242,13 @@ static int isci_register_sas_ha(struct isci_host *isci_host)
242 if (!sas_ports) 242 if (!sas_ports)
243 return -ENOMEM; 243 return -ENOMEM;
244 244
245 /*----------------- Libsas Initialization Stuff----------------------
246 * Set various fields in the sas_ha struct:
247 */
248
249 sas_ha->sas_ha_name = DRV_NAME; 245 sas_ha->sas_ha_name = DRV_NAME;
250 sas_ha->lldd_module = THIS_MODULE; 246 sas_ha->lldd_module = THIS_MODULE;
251 sas_ha->sas_addr = &isci_host->phys[0].sas_addr[0]; 247 sas_ha->sas_addr = &isci_host->phys[0].sas_addr[0];
252 248
253 /* set the array of phy and port structs. */
254 for (i = 0; i < SCI_MAX_PHYS; i++) { 249 for (i = 0; i < SCI_MAX_PHYS; i++) {
255 sas_phys[i] = &isci_host->phys[i].sas_phy; 250 sas_phys[i] = &isci_host->phys[i].sas_phy;
256 sas_ports[i] = &isci_host->ports[i].sas_port; 251 sas_ports[i] = &isci_host->sas_ports[i];
257 } 252 }
258 253
259 sas_ha->sas_phy = sas_phys; 254 sas_ha->sas_phy = sas_phys;
@@ -528,6 +523,13 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
528 goto err_host_alloc; 523 goto err_host_alloc;
529 } 524 }
530 pci_info->hosts[i] = h; 525 pci_info->hosts[i] = h;
526
527 /* turn on DIF support */
528 scsi_host_set_prot(h->shost,
529 SHOST_DIF_TYPE1_PROTECTION |
530 SHOST_DIF_TYPE2_PROTECTION |
531 SHOST_DIF_TYPE3_PROTECTION);
532 scsi_host_set_guard(h->shost, SHOST_DIX_GUARD_CRC);
531 } 533 }
532 534
533 err = isci_setup_interrupts(pdev); 535 err = isci_setup_interrupts(pdev);
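
Probe now advertises T10 DIF types 1 through 3 plus a CRC guard before interrupts are wired up. A sketch of how the protection mask passed to scsi_host_set_prot() is composed; the SHOST_DIF_* values mirror include/scsi/scsi_host.h of this era (type N at bit N-1) and are restated here only for illustration:

    /* Sketch of the DIF protection-mask composition above. */
    #include <stdio.h>

    #define SHOST_DIF_TYPE1_PROTECTION (1 << 0)
    #define SHOST_DIF_TYPE2_PROTECTION (1 << 1)
    #define SHOST_DIF_TYPE3_PROTECTION (1 << 2)

    int main(void)
    {
            unsigned int prot = SHOST_DIF_TYPE1_PROTECTION |
                                SHOST_DIF_TYPE2_PROTECTION |
                                SHOST_DIF_TYPE3_PROTECTION;

            printf("prot mask = 0x%x\n", prot); /* 0x7: all three DIF types */
            return 0;
    }
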
@@ -551,9 +553,9 @@ static void __devexit isci_pci_remove(struct pci_dev *pdev)
551 int i; 553 int i;
552 554
553 for_each_isci_host(i, ihost, pdev) { 555 for_each_isci_host(i, ihost, pdev) {
556 wait_for_start(ihost);
554 isci_unregister(ihost); 557 isci_unregister(ihost);
555 isci_host_deinit(ihost); 558 isci_host_deinit(ihost);
556 sci_controller_disable_interrupts(ihost);
557 } 559 }
558} 560}
559 561
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index fe18acfd6eb..fab3586840b 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -59,6 +59,16 @@
59#include "scu_event_codes.h" 59#include "scu_event_codes.h"
60#include "probe_roms.h" 60#include "probe_roms.h"
61 61
62#undef C
63#define C(a) (#a)
64static const char *phy_state_name(enum sci_phy_states state)
65{
66 static const char * const strings[] = PHY_STATES;
67
68 return strings[state];
69}
70#undef C
71
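
phy_state_name() leans on an X-macro list: PHY_STATES (defined in phy.h below) is expanded once with C(a) as #a to build the string table, and once with C(a) as SCI_##a to build the enum, so the two can never drift apart. A self-contained sketch of the idiom with illustrative demo names:

    /* Standalone sketch of the C(a) X-macro idiom: one list (STATES)
     * expanded twice, once as enumerators and once as strings, so the
     * name table always matches the enum.
     */
    #include <stdio.h>

    #define STATES { C(STOPPED), C(STARTING), C(READY), }

    #define C(a) DEMO_##a
    enum demo_states STATES;
    #undef C

    #define C(a) #a
    static const char * const state_names[] = STATES;
    #undef C

    int main(void)
    {
            enum demo_states s = DEMO_STARTING;

            printf("state %d = %s\n", s, state_names[s]);
            return 0;
    }
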
62/* Maximum arbitration wait time in micro-seconds */ 72/* Maximum arbitration wait time in micro-seconds */
63#define SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME (700) 73#define SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME (700)
64 74
@@ -67,6 +77,19 @@ enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy)
67 return iphy->max_negotiated_speed; 77 return iphy->max_negotiated_speed;
68} 78}
69 79
80static struct isci_host *phy_to_host(struct isci_phy *iphy)
81{
82 struct isci_phy *table = iphy - iphy->phy_index;
83 struct isci_host *ihost = container_of(table, typeof(*ihost), phys[0]);
84
85 return ihost;
86}
87
88static struct device *sciphy_to_dev(struct isci_phy *iphy)
89{
90 return &phy_to_host(iphy)->pdev->dev;
91}
92
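
phy_to_host() replaces the removed iphy->isci_port back-pointer: step from &phys[i] back to &phys[0] via the stored index, then container_of() to the embedding host. A standalone sketch with a local container_of and illustrative demo_* names:

    /* Sketch of the back-pointer-free lookup above. */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct demo_phy {
            int phy_index;
    };

    struct demo_host {
            int id;
            struct demo_phy phys[4];
    };

    static struct demo_host *phy_to_host(struct demo_phy *p)
    {
            struct demo_phy *table = p - p->phy_index; /* back to &phys[0] */

            return container_of(table, struct demo_host, phys[0]);
    }

    int main(void)
    {
            struct demo_host h = { .id = 7 };
            int i;

            for (i = 0; i < 4; i++)
                    h.phys[i].phy_index = i;
            printf("host id via phys[2]: %d\n", phy_to_host(&h.phys[2])->id);
            return 0;
    }
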
70static enum sci_status 93static enum sci_status
71sci_phy_transport_layer_initialization(struct isci_phy *iphy, 94sci_phy_transport_layer_initialization(struct isci_phy *iphy,
72 struct scu_transport_layer_registers __iomem *reg) 95 struct scu_transport_layer_registers __iomem *reg)
@@ -446,8 +469,8 @@ enum sci_status sci_phy_start(struct isci_phy *iphy)
446 enum sci_phy_states state = iphy->sm.current_state_id; 469 enum sci_phy_states state = iphy->sm.current_state_id;
447 470
448 if (state != SCI_PHY_STOPPED) { 471 if (state != SCI_PHY_STOPPED) {
449 dev_dbg(sciphy_to_dev(iphy), 472 dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
450 "%s: in wrong state: %d\n", __func__, state); 473 __func__, phy_state_name(state));
451 return SCI_FAILURE_INVALID_STATE; 474 return SCI_FAILURE_INVALID_STATE;
452 } 475 }
453 476
@@ -472,8 +495,8 @@ enum sci_status sci_phy_stop(struct isci_phy *iphy)
472 case SCI_PHY_READY: 495 case SCI_PHY_READY:
473 break; 496 break;
474 default: 497 default:
475 dev_dbg(sciphy_to_dev(iphy), 498 dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
476 "%s: in wrong state: %d\n", __func__, state); 499 __func__, phy_state_name(state));
477 return SCI_FAILURE_INVALID_STATE; 500 return SCI_FAILURE_INVALID_STATE;
478 } 501 }
479 502
@@ -486,8 +509,8 @@ enum sci_status sci_phy_reset(struct isci_phy *iphy)
486 enum sci_phy_states state = iphy->sm.current_state_id; 509 enum sci_phy_states state = iphy->sm.current_state_id;
487 510
488 if (state != SCI_PHY_READY) { 511 if (state != SCI_PHY_READY) {
489 dev_dbg(sciphy_to_dev(iphy), 512 dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
490 "%s: in wrong state: %d\n", __func__, state); 513 __func__, phy_state_name(state));
491 return SCI_FAILURE_INVALID_STATE; 514 return SCI_FAILURE_INVALID_STATE;
492 } 515 }
493 516
@@ -536,8 +559,8 @@ enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy)
536 return SCI_SUCCESS; 559 return SCI_SUCCESS;
537 } 560 }
538 default: 561 default:
539 dev_dbg(sciphy_to_dev(iphy), 562 dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
540 "%s: in wrong state: %d\n", __func__, state); 563 __func__, phy_state_name(state));
541 return SCI_FAILURE_INVALID_STATE; 564 return SCI_FAILURE_INVALID_STATE;
542 } 565 }
543} 566}
@@ -591,6 +614,60 @@ static void sci_phy_complete_link_training(struct isci_phy *iphy,
591 sci_change_state(&iphy->sm, next_state); 614 sci_change_state(&iphy->sm, next_state);
592} 615}
593 616
617static const char *phy_event_name(u32 event_code)
618{
619 switch (scu_get_event_code(event_code)) {
620 case SCU_EVENT_PORT_SELECTOR_DETECTED:
621 return "port selector";
622 case SCU_EVENT_SENT_PORT_SELECTION:
623 return "port selection";
624 case SCU_EVENT_HARD_RESET_TRANSMITTED:
625 return "tx hard reset";
626 case SCU_EVENT_HARD_RESET_RECEIVED:
627 return "rx hard reset";
628 case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
629 return "identify timeout";
630 case SCU_EVENT_LINK_FAILURE:
631 return "link fail";
632 case SCU_EVENT_SATA_SPINUP_HOLD:
633 return "sata spinup hold";
634 case SCU_EVENT_SAS_15_SSC:
635 case SCU_EVENT_SAS_15:
636 return "sas 1.5";
637 case SCU_EVENT_SAS_30_SSC:
638 case SCU_EVENT_SAS_30:
639 return "sas 3.0";
640 case SCU_EVENT_SAS_60_SSC:
641 case SCU_EVENT_SAS_60:
642 return "sas 6.0";
643 case SCU_EVENT_SATA_15_SSC:
644 case SCU_EVENT_SATA_15:
645 return "sata 1.5";
646 case SCU_EVENT_SATA_30_SSC:
647 case SCU_EVENT_SATA_30:
648 return "sata 3.0";
649 case SCU_EVENT_SATA_60_SSC:
650 case SCU_EVENT_SATA_60:
651 return "sata 6.0";
652 case SCU_EVENT_SAS_PHY_DETECTED:
653 return "sas detect";
654 case SCU_EVENT_SATA_PHY_DETECTED:
655 return "sata detect";
656 default:
657 return "unknown";
658 }
659}
660
661#define phy_event_dbg(iphy, state, code) \
662 dev_dbg(sciphy_to_dev(iphy), "phy-%d:%d: %s event: %s (%x)\n", \
663 phy_to_host(iphy)->id, iphy->phy_index, \
664 phy_state_name(state), phy_event_name(code), code)
665
666#define phy_event_warn(iphy, state, code) \
667 dev_warn(sciphy_to_dev(iphy), "phy-%d:%d: %s event: %s (%x)\n", \
668 phy_to_host(iphy)->id, iphy->phy_index, \
669 phy_state_name(state), phy_event_name(code), code)
670
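
phy_event_dbg()/phy_event_warn() collapse the old multi-line dev_dbg/dev_warn call sites into one-liners that always stamp the decoded state and event names. A rough userspace analogue of the shape, with fprintf standing in for dev_dbg and toy decoders in place of phy_state_name()/phy_event_name():

    /* Illustrative sketch of the logging-macro pattern above. */
    #include <stdio.h>

    static const char *state_name(int s) { return s ? "ready" : "stopped"; }
    static const char *event_name(int e) { return e ? "link fail" : "sas detect"; }

    #define phy_event_dbg(host, phy, state, code) \
            fprintf(stderr, "phy-%d:%d: %s event: %s (%x)\n", \
                    (host), (phy), state_name(state), event_name(code), \
                    (unsigned int)(code))

    int main(void)
    {
            phy_event_dbg(0, 3, 1, 1);
            return 0;
    }
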
594enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) 671enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
595{ 672{
596 enum sci_phy_states state = iphy->sm.current_state_id; 673 enum sci_phy_states state = iphy->sm.current_state_id;
@@ -607,11 +684,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
607 iphy->is_in_link_training = true; 684 iphy->is_in_link_training = true;
608 break; 685 break;
609 default: 686 default:
610 dev_dbg(sciphy_to_dev(iphy), 687 phy_event_dbg(iphy, state, event_code);
611 "%s: PHY starting substate machine received "
612 "unexpected event_code %x\n",
613 __func__,
614 event_code);
615 return SCI_FAILURE; 688 return SCI_FAILURE;
616 } 689 }
617 return SCI_SUCCESS; 690 return SCI_SUCCESS;
@@ -648,11 +721,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
648 sci_change_state(&iphy->sm, SCI_PHY_STARTING); 721 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
649 break; 722 break;
650 default: 723 default:
651 dev_warn(sciphy_to_dev(iphy), 724 phy_event_warn(iphy, state, event_code);
652 "%s: PHY starting substate machine received "
653 "unexpected event_code %x\n",
654 __func__, event_code);
655
656 return SCI_FAILURE; 725 return SCI_FAILURE;
657 break; 726 break;
658 } 727 }
@@ -677,10 +746,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
677 sci_change_state(&iphy->sm, SCI_PHY_STARTING); 746 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
678 break; 747 break;
679 default: 748 default:
680 dev_warn(sciphy_to_dev(iphy), 749 phy_event_warn(iphy, state, event_code);
681 "%s: PHY starting substate machine received "
682 "unexpected event_code %x\n",
683 __func__, event_code);
684 return SCI_FAILURE; 750 return SCI_FAILURE;
685 } 751 }
686 return SCI_SUCCESS; 752 return SCI_SUCCESS;
@@ -691,11 +757,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
691 sci_change_state(&iphy->sm, SCI_PHY_STARTING); 757 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
692 break; 758 break;
693 default: 759 default:
694 dev_warn(sciphy_to_dev(iphy), 760 phy_event_warn(iphy, state, event_code);
695 "%s: PHY starting substate machine received unexpected "
696 "event_code %x\n",
697 __func__,
698 event_code);
699 return SCI_FAILURE; 761 return SCI_FAILURE;
700 } 762 }
701 return SCI_SUCCESS; 763 return SCI_SUCCESS;
@@ -719,11 +781,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
719 break; 781 break;
720 782
721 default: 783 default:
722 dev_warn(sciphy_to_dev(iphy), 784 phy_event_warn(iphy, state, event_code);
723 "%s: PHY starting substate machine received "
724 "unexpected event_code %x\n",
725 __func__, event_code);
726
727 return SCI_FAILURE; 785 return SCI_FAILURE;
728 } 786 }
729 return SCI_SUCCESS; 787 return SCI_SUCCESS;
@@ -751,12 +809,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
751 sci_phy_start_sas_link_training(iphy); 809 sci_phy_start_sas_link_training(iphy);
752 break; 810 break;
753 default: 811 default:
754 dev_warn(sciphy_to_dev(iphy), 812 phy_event_warn(iphy, state, event_code);
755 "%s: PHY starting substate machine received "
756 "unexpected event_code %x\n",
757 __func__,
758 event_code);
759
760 return SCI_FAILURE; 813 return SCI_FAILURE;
761 } 814 }
762 return SCI_SUCCESS; 815 return SCI_SUCCESS;
@@ -793,11 +846,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
793 sci_phy_start_sas_link_training(iphy); 846 sci_phy_start_sas_link_training(iphy);
794 break; 847 break;
795 default: 848 default:
796 dev_warn(sciphy_to_dev(iphy), 849 phy_event_warn(iphy, state, event_code);
797 "%s: PHY starting substate machine received "
798 "unexpected event_code %x\n",
799 __func__, event_code);
800
801 return SCI_FAILURE; 850 return SCI_FAILURE;
802 } 851 }
803 852
@@ -815,12 +864,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
815 break; 864 break;
816 865
817 default: 866 default:
818 dev_warn(sciphy_to_dev(iphy), 867 phy_event_warn(iphy, state, event_code);
819 "%s: PHY starting substate machine received "
820 "unexpected event_code %x\n",
821 __func__,
822 event_code);
823
824 return SCI_FAILURE; 868 return SCI_FAILURE;
825 } 869 }
826 return SCI_SUCCESS; 870 return SCI_SUCCESS;
@@ -838,10 +882,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
838 iphy->bcn_received_while_port_unassigned = true; 882 iphy->bcn_received_while_port_unassigned = true;
839 break; 883 break;
840 default: 884 default:
841 dev_warn(sciphy_to_dev(iphy), 885 phy_event_warn(iphy, state, event_code);
842 "%sP SCIC PHY 0x%p ready state machine received "
843 "unexpected event_code %x\n",
844 __func__, iphy, event_code);
845 return SCI_FAILURE_INVALID_STATE; 886 return SCI_FAILURE_INVALID_STATE;
846 } 887 }
847 return SCI_SUCCESS; 888 return SCI_SUCCESS;
@@ -852,18 +893,14 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
852 sci_change_state(&iphy->sm, SCI_PHY_STARTING); 893 sci_change_state(&iphy->sm, SCI_PHY_STARTING);
853 break; 894 break;
854 default: 895 default:
855 dev_warn(sciphy_to_dev(iphy), 896 phy_event_warn(iphy, state, event_code);
856 "%s: SCIC PHY 0x%p resetting state machine received "
857 "unexpected event_code %x\n",
858 __func__, iphy, event_code);
859
860 return SCI_FAILURE_INVALID_STATE; 897 return SCI_FAILURE_INVALID_STATE;
861 break; 898 break;
862 } 899 }
863 return SCI_SUCCESS; 900 return SCI_SUCCESS;
864 default: 901 default:
865 dev_dbg(sciphy_to_dev(iphy), 902 dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
866 "%s: in wrong state: %d\n", __func__, state); 903 __func__, phy_state_name(state));
867 return SCI_FAILURE_INVALID_STATE; 904 return SCI_FAILURE_INVALID_STATE;
868 } 905 }
869} 906}
@@ -956,8 +993,8 @@ enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index)
956 return result; 993 return result;
957 } 994 }
958 default: 995 default:
959 dev_dbg(sciphy_to_dev(iphy), 996 dev_dbg(sciphy_to_dev(iphy), "%s: in wrong state: %s\n",
960 "%s: in wrong state: %d\n", __func__, state); 997 __func__, phy_state_name(state));
961 return SCI_FAILURE_INVALID_STATE; 998 return SCI_FAILURE_INVALID_STATE;
962 } 999 }
963 1000
@@ -1299,7 +1336,6 @@ void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index)
1299 sas_addr = cpu_to_be64(sci_sas_addr); 1336 sas_addr = cpu_to_be64(sci_sas_addr);
1300 memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr)); 1337 memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr));
1301 1338
1302 iphy->isci_port = NULL;
1303 iphy->sas_phy.enabled = 0; 1339 iphy->sas_phy.enabled = 0;
1304 iphy->sas_phy.id = index; 1340 iphy->sas_phy.id = index;
1305 iphy->sas_phy.sas_addr = &iphy->sas_addr[0]; 1341 iphy->sas_phy.sas_addr = &iphy->sas_addr[0];
@@ -1333,13 +1369,13 @@ int isci_phy_control(struct asd_sas_phy *sas_phy,
1333{ 1369{
1334 int ret = 0; 1370 int ret = 0;
1335 struct isci_phy *iphy = sas_phy->lldd_phy; 1371 struct isci_phy *iphy = sas_phy->lldd_phy;
1336 struct isci_port *iport = iphy->isci_port; 1372 struct asd_sas_port *port = sas_phy->port;
1337 struct isci_host *ihost = sas_phy->ha->lldd_ha; 1373 struct isci_host *ihost = sas_phy->ha->lldd_ha;
1338 unsigned long flags; 1374 unsigned long flags;
1339 1375
1340 dev_dbg(&ihost->pdev->dev, 1376 dev_dbg(&ihost->pdev->dev,
1341 "%s: phy %p; func %d; buf %p; isci phy %p, port %p\n", 1377 "%s: phy %p; func %d; buf %p; isci phy %p, port %p\n",
1342 __func__, sas_phy, func, buf, iphy, iport); 1378 __func__, sas_phy, func, buf, iphy, port);
1343 1379
1344 switch (func) { 1380 switch (func) {
1345 case PHY_FUNC_DISABLE: 1381 case PHY_FUNC_DISABLE:
@@ -1356,11 +1392,10 @@ int isci_phy_control(struct asd_sas_phy *sas_phy,
1356 break; 1392 break;
1357 1393
1358 case PHY_FUNC_HARD_RESET: 1394 case PHY_FUNC_HARD_RESET:
1359 if (!iport) 1395 if (!port)
1360 return -ENODEV; 1396 return -ENODEV;
1361 1397
1362 /* Perform the port reset. */ 1398 ret = isci_port_perform_hard_reset(ihost, port->lldd_port, iphy);
1363 ret = isci_port_perform_hard_reset(ihost, iport, iphy);
1364 1399
1365 break; 1400 break;
1366 case PHY_FUNC_GET_EVENTS: { 1401 case PHY_FUNC_GET_EVENTS: {
diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h
index 67699c8e321..0e45833ba06 100644
--- a/drivers/scsi/isci/phy.h
+++ b/drivers/scsi/isci/phy.h
@@ -103,7 +103,6 @@ struct isci_phy {
103 struct scu_transport_layer_registers __iomem *transport_layer_registers; 103 struct scu_transport_layer_registers __iomem *transport_layer_registers;
104 struct scu_link_layer_registers __iomem *link_layer_registers; 104 struct scu_link_layer_registers __iomem *link_layer_registers;
105 struct asd_sas_phy sas_phy; 105 struct asd_sas_phy sas_phy;
106 struct isci_port *isci_port;
107 u8 sas_addr[SAS_ADDR_SIZE]; 106 u8 sas_addr[SAS_ADDR_SIZE];
108 union { 107 union {
109 struct sas_identify_frame iaf; 108 struct sas_identify_frame iaf;
@@ -344,101 +343,65 @@ enum sci_phy_counter_id {
344 SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR 343 SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR
345}; 344};
346 345
347enum sci_phy_states { 346/**
348 /** 347 * enum sci_phy_states - phy state machine states
349 * Simply the initial state for the base domain state machine. 348 * @SCI_PHY_INITIAL: Simply the initial state for the base domain state
350 */ 349 * machine.
351 SCI_PHY_INITIAL, 350 * @SCI_PHY_STOPPED: phy has successfully been stopped. In this state
352 351 * no new IO operations are permitted on this phy.
353 /** 352 * @SCI_PHY_STARTING: the phy is in the process of becoming ready. In
354 * This state indicates that the phy has successfully been stopped. 353 * this state no new IO operations are permitted on
355 * In this state no new IO operations are permitted on this phy. 354 * this phy.
356 * This state is entered from the INITIAL state. 355 * @SCI_PHY_SUB_INITIAL: Initial state
357 * This state is entered from the STARTING state. 356 * @SCI_PHY_SUB_AWAIT_OSSP_EN: Wait state for the hardware OSSP event
358 * This state is entered from the READY state. 357 * type notification
359 * This state is entered from the RESETTING state. 358 * @SCI_PHY_SUB_AWAIT_SAS_SPEED_EN: Wait state for the PHY speed
360 */ 359 * notification
361 SCI_PHY_STOPPED, 360 * @SCI_PHY_SUB_AWAIT_IAF_UF: Wait state for the IAF Unsolicited frame
362 361 * notification
363 /** 362 * @SCI_PHY_SUB_AWAIT_SAS_POWER: Wait state for the request to consume
364 * This state indicates that the phy is in the process of becomming 363 * power
365 * ready. In this state no new IO operations are permitted on this phy. 364 * @SCI_PHY_SUB_AWAIT_SATA_POWER: Wait state for request to consume
366 * This state is entered from the STOPPED state. 365 * power
367 * This state is entered from the READY state. 366 * @SCI_PHY_SUB_AWAIT_SATA_PHY_EN: Wait state for the SATA PHY
368 * This state is entered from the RESETTING state. 367 * notification
369 */ 368 * @SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: Wait for the SATA PHY speed
370 SCI_PHY_STARTING, 369 * notification
371 370 * @SCI_PHY_SUB_AWAIT_SIG_FIS_UF: Wait state for the SIGNATURE FIS
372 /** 371 * unsolicited frame notification
373 * Initial state 372 * @SCI_PHY_SUB_FINAL: Exit state for this state machine
374 */ 373 * @SCI_PHY_READY: phy is now ready. Thus, the user is able to perform
375 SCI_PHY_SUB_INITIAL, 374 * IO operations utilizing this phy as long as it is
376 375 * currently part of a valid port. This state is
377 /** 376 * entered from the STARTING state.
378 * Wait state for the hardware OSSP event type notification 377 * @SCI_PHY_RESETTING: phy is in the process of being reset. In this
379 */ 378 * state no new IO operations are permitted on this
380 SCI_PHY_SUB_AWAIT_OSSP_EN, 379 * phy. This state is entered from the READY state.
381 380 * @SCI_PHY_FINAL: Simply the final state for the base phy state
382 /** 381 * machine.
383 * Wait state for the PHY speed notification 382 */
384 */ 383#define PHY_STATES {\
385 SCI_PHY_SUB_AWAIT_SAS_SPEED_EN, 384 C(PHY_INITIAL),\
386 385 C(PHY_STOPPED),\
387 /** 386 C(PHY_STARTING),\
388 * Wait state for the IAF Unsolicited frame notification 387 C(PHY_SUB_INITIAL),\
389 */ 388 C(PHY_SUB_AWAIT_OSSP_EN),\
390 SCI_PHY_SUB_AWAIT_IAF_UF, 389 C(PHY_SUB_AWAIT_SAS_SPEED_EN),\
391 390 C(PHY_SUB_AWAIT_IAF_UF),\
392 /** 391 C(PHY_SUB_AWAIT_SAS_POWER),\
393 * Wait state for the request to consume power 392 C(PHY_SUB_AWAIT_SATA_POWER),\
394 */ 393 C(PHY_SUB_AWAIT_SATA_PHY_EN),\
395 SCI_PHY_SUB_AWAIT_SAS_POWER, 394 C(PHY_SUB_AWAIT_SATA_SPEED_EN),\
396 395 C(PHY_SUB_AWAIT_SIG_FIS_UF),\
397 /** 396 C(PHY_SUB_FINAL),\
398 * Wait state for request to consume power 397 C(PHY_READY),\
399 */ 398 C(PHY_RESETTING),\
400 SCI_PHY_SUB_AWAIT_SATA_POWER, 399 C(PHY_FINAL),\
401 400 }
402 /** 401#undef C
403 * Wait state for the SATA PHY notification 402#define C(a) SCI_##a
404 */ 403enum sci_phy_states PHY_STATES;
405 SCI_PHY_SUB_AWAIT_SATA_PHY_EN, 404#undef C
406
407 /**
408 * Wait for the SATA PHY speed notification
409 */
410 SCI_PHY_SUB_AWAIT_SATA_SPEED_EN,
411
412 /**
413 * Wait state for the SIGNATURE FIS unsolicited frame notification
414 */
415 SCI_PHY_SUB_AWAIT_SIG_FIS_UF,
416
417 /**
418 * Exit state for this state machine
419 */
420 SCI_PHY_SUB_FINAL,
421
422 /**
423 * This state indicates the the phy is now ready. Thus, the user
424 * is able to perform IO operations utilizing this phy as long as it
425 * is currently part of a valid port.
426 * This state is entered from the STARTING state.
427 */
428 SCI_PHY_READY,
429
430 /**
431 * This state indicates that the phy is in the process of being reset.
432 * In this state no new IO operations are permitted on this phy.
433 * This state is entered from the READY state.
434 */
435 SCI_PHY_RESETTING,
436
437 /**
438 * Simply the final state for the base phy state machine.
439 */
440 SCI_PHY_FINAL,
441};
442 405
443void sci_phy_construct( 406void sci_phy_construct(
444 struct isci_phy *iphy, 407 struct isci_phy *iphy,
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index 7c6ac58a5c4..5fada73b71f 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -60,18 +60,29 @@
60#define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000) 60#define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000)
61#define SCU_DUMMY_INDEX (0xFFFF) 61#define SCU_DUMMY_INDEX (0xFFFF)
62 62
63static void isci_port_change_state(struct isci_port *iport, enum isci_status status) 63#undef C
64#define C(a) (#a)
65const char *port_state_name(enum sci_port_states state)
64{ 66{
65 unsigned long flags; 67 static const char * const strings[] = PORT_STATES;
68
69 return strings[state];
70}
71#undef C
72
73static struct device *sciport_to_dev(struct isci_port *iport)
74{
75 int i = iport->physical_port_index;
76 struct isci_port *table;
77 struct isci_host *ihost;
78
79 if (i == SCIC_SDS_DUMMY_PORT)
80 i = SCI_MAX_PORTS+1;
66 81
67 dev_dbg(&iport->isci_host->pdev->dev, 82 table = iport - i;
68 "%s: iport = %p, state = 0x%x\n", 83 ihost = container_of(table, typeof(*ihost), ports[0]);
69 __func__, iport, status);
70 84
71 /* XXX pointless lock */ 85 return &ihost->pdev->dev;
72 spin_lock_irqsave(&iport->state_lock, flags);
73 iport->status = status;
74 spin_unlock_irqrestore(&iport->state_lock, flags);
75} 86}
76 87
77static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto) 88static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
@@ -165,18 +176,12 @@ static void isci_port_link_up(struct isci_host *isci_host,
165 struct sci_port_properties properties; 176 struct sci_port_properties properties;
166 unsigned long success = true; 177 unsigned long success = true;
167 178
168 BUG_ON(iphy->isci_port != NULL);
169
170 iphy->isci_port = iport;
171
172 dev_dbg(&isci_host->pdev->dev, 179 dev_dbg(&isci_host->pdev->dev,
173 "%s: isci_port = %p\n", 180 "%s: isci_port = %p\n",
174 __func__, iport); 181 __func__, iport);
175 182
176 spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags); 183 spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
177 184
178 isci_port_change_state(iphy->isci_port, isci_starting);
179
180 sci_port_get_properties(iport, &properties); 185 sci_port_get_properties(iport, &properties);
181 186
182 if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) { 187 if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) {
@@ -258,7 +263,6 @@ static void isci_port_link_down(struct isci_host *isci_host,
258 __func__, isci_device); 263 __func__, isci_device);
259 set_bit(IDEV_GONE, &isci_device->flags); 264 set_bit(IDEV_GONE, &isci_device->flags);
260 } 265 }
261 isci_port_change_state(isci_port, isci_stopping);
262 } 266 }
263 } 267 }
264 268
@@ -269,52 +273,10 @@ static void isci_port_link_down(struct isci_host *isci_host,
269 isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy, 273 isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy,
270 PHYE_LOSS_OF_SIGNAL); 274 PHYE_LOSS_OF_SIGNAL);
271 275
272 isci_phy->isci_port = NULL;
273
274 dev_dbg(&isci_host->pdev->dev, 276 dev_dbg(&isci_host->pdev->dev,
275 "%s: isci_port = %p - Done\n", __func__, isci_port); 277 "%s: isci_port = %p - Done\n", __func__, isci_port);
276} 278}
277 279
278
279/**
280 * isci_port_ready() - This function is called by the sci core when a link
281 * becomes ready.
282 * @isci_host: This parameter specifies the isci host object.
283 * @port: This parameter specifies the sci port with the active link.
284 *
285 */
286static void isci_port_ready(struct isci_host *isci_host, struct isci_port *isci_port)
287{
288 dev_dbg(&isci_host->pdev->dev,
289 "%s: isci_port = %p\n", __func__, isci_port);
290
291 complete_all(&isci_port->start_complete);
292 isci_port_change_state(isci_port, isci_ready);
293 return;
294}
295
296/**
297 * isci_port_not_ready() - This function is called by the sci core when a link
298 * is not ready. All remote devices on this link will be removed if they are
299 * in the stopping state.
300 * @isci_host: This parameter specifies the isci host object.
301 * @port: This parameter specifies the sci port with the active link.
302 *
303 */
304static void isci_port_not_ready(struct isci_host *isci_host, struct isci_port *isci_port)
305{
306 dev_dbg(&isci_host->pdev->dev,
307 "%s: isci_port = %p\n", __func__, isci_port);
308}
309
310static void isci_port_stop_complete(struct isci_host *ihost,
311 struct isci_port *iport,
312 enum sci_status completion_status)
313{
314 dev_dbg(&ihost->pdev->dev, "Port stop complete\n");
315}
316
317
318static bool is_port_ready_state(enum sci_port_states state) 280static bool is_port_ready_state(enum sci_port_states state)
319{ 281{
320 switch (state) { 282 switch (state) {
@@ -353,7 +315,9 @@ static void port_state_machine_change(struct isci_port *iport,
353static void isci_port_hard_reset_complete(struct isci_port *isci_port, 315static void isci_port_hard_reset_complete(struct isci_port *isci_port,
354 enum sci_status completion_status) 316 enum sci_status completion_status)
355{ 317{
356 dev_dbg(&isci_port->isci_host->pdev->dev, 318 struct isci_host *ihost = isci_port->owning_controller;
319
320 dev_dbg(&ihost->pdev->dev,
357 "%s: isci_port = %p, completion_status=%x\n", 321 "%s: isci_port = %p, completion_status=%x\n",
358 __func__, isci_port, completion_status); 322 __func__, isci_port, completion_status);
359 323
@@ -364,23 +328,24 @@ static void isci_port_hard_reset_complete(struct isci_port *isci_port,
364 328
365 /* The reset failed. The port state is now SCI_PORT_FAILED. */ 329 /* The reset failed. The port state is now SCI_PORT_FAILED. */
366 if (isci_port->active_phy_mask == 0) { 330 if (isci_port->active_phy_mask == 0) {
331 int phy_idx = isci_port->last_active_phy;
332 struct isci_phy *iphy = &ihost->phys[phy_idx];
367 333
368 /* Generate the link down now to the host, since it 334 /* Generate the link down now to the host, since it
369 * was intercepted by the hard reset state machine when 335 * was intercepted by the hard reset state machine when
370 * it really happened. 336 * it really happened.
371 */ 337 */
372 isci_port_link_down(isci_port->isci_host, 338 isci_port_link_down(ihost, iphy, isci_port);
373 &isci_port->isci_host->phys[
374 isci_port->last_active_phy],
375 isci_port);
376 } 339 }
377 /* Advance the port state so that link state changes will be 340 /* Advance the port state so that link state changes will be
378 * noticed. 341 * noticed.
379 */ 342 */
380 port_state_machine_change(isci_port, SCI_PORT_SUB_WAITING); 343 port_state_machine_change(isci_port, SCI_PORT_SUB_WAITING);
381 344
382 } 345 }
383 complete_all(&isci_port->hard_reset_complete); 346 clear_bit(IPORT_RESET_PENDING, &isci_port->state);
347 wake_up(&ihost->eventq);
348
384} 349}
385 350
386/* This method will return a true value if the specified phy can be assigned to 351/* This method will return a true value if the specified phy can be assigned to
@@ -835,10 +800,9 @@ static void port_timeout(unsigned long data)
835 __func__, 800 __func__,
836 iport); 801 iport);
837 } else if (current_state == SCI_PORT_STOPPING) { 802 } else if (current_state == SCI_PORT_STOPPING) {
838 /* if the port is still stopping then the stop has not completed */ 803 dev_dbg(sciport_to_dev(iport),
839 isci_port_stop_complete(iport->owning_controller, 804 "%s: port%d: stop complete timeout\n",
840 iport, 805 __func__, iport->physical_port_index);
841 SCI_FAILURE_TIMEOUT);
842 } else { 806 } else {
843 /* The port is in the ready state and we have a timer 807 /* The port is in the ready state and we have a timer
844 * reporting a timeout this should not happen. 808 * reporting a timeout this should not happen.
@@ -1003,7 +967,8 @@ static void sci_port_ready_substate_operational_enter(struct sci_base_state_mach
1003 struct isci_port *iport = container_of(sm, typeof(*iport), sm); 967 struct isci_port *iport = container_of(sm, typeof(*iport), sm);
1004 struct isci_host *ihost = iport->owning_controller; 968 struct isci_host *ihost = iport->owning_controller;
1005 969
1006 isci_port_ready(ihost, iport); 970 dev_dbg(&ihost->pdev->dev, "%s: port%d ready\n",
971 __func__, iport->physical_port_index);
1007 972
1008 for (index = 0; index < SCI_MAX_PHYS; index++) { 973 for (index = 0; index < SCI_MAX_PHYS; index++) {
1009 if (iport->phy_table[index]) { 974 if (iport->phy_table[index]) {
@@ -1069,7 +1034,8 @@ static void sci_port_ready_substate_operational_exit(struct sci_base_state_machi
1069 */ 1034 */
1070 sci_port_abort_dummy_request(iport); 1035 sci_port_abort_dummy_request(iport);
1071 1036
1072 isci_port_not_ready(ihost, iport); 1037 dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
1038 __func__, iport->physical_port_index);
1073 1039
1074 if (iport->ready_exit) 1040 if (iport->ready_exit)
1075 sci_port_invalidate_dummy_remote_node(iport); 1041 sci_port_invalidate_dummy_remote_node(iport);
@@ -1081,7 +1047,8 @@ static void sci_port_ready_substate_configuring_enter(struct sci_base_state_mach
1081 struct isci_host *ihost = iport->owning_controller; 1047 struct isci_host *ihost = iport->owning_controller;
1082 1048
1083 if (iport->active_phy_mask == 0) { 1049 if (iport->active_phy_mask == 0) {
1084 isci_port_not_ready(ihost, iport); 1050 dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
1051 __func__, iport->physical_port_index);
1085 1052
1086 port_state_machine_change(iport, SCI_PORT_SUB_WAITING); 1053 port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
1087 } else 1054 } else
@@ -1097,8 +1064,8 @@ enum sci_status sci_port_start(struct isci_port *iport)
1097 1064
1098 state = iport->sm.current_state_id; 1065 state = iport->sm.current_state_id;
1099 if (state != SCI_PORT_STOPPED) { 1066 if (state != SCI_PORT_STOPPED) {
1100 dev_warn(sciport_to_dev(iport), 1067 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1101 "%s: in wrong state: %d\n", __func__, state); 1068 __func__, port_state_name(state));
1102 return SCI_FAILURE_INVALID_STATE; 1069 return SCI_FAILURE_INVALID_STATE;
1103 } 1070 }
1104 1071
@@ -1172,8 +1139,8 @@ enum sci_status sci_port_stop(struct isci_port *iport)
1172 SCI_PORT_STOPPING); 1139 SCI_PORT_STOPPING);
1173 return SCI_SUCCESS; 1140 return SCI_SUCCESS;
1174 default: 1141 default:
1175 dev_warn(sciport_to_dev(iport), 1142 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1176 "%s: in wrong state: %d\n", __func__, state); 1143 __func__, port_state_name(state));
1177 return SCI_FAILURE_INVALID_STATE; 1144 return SCI_FAILURE_INVALID_STATE;
1178 } 1145 }
1179} 1146}
@@ -1187,8 +1154,8 @@ static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
1187 1154
1188 state = iport->sm.current_state_id; 1155 state = iport->sm.current_state_id;
1189 if (state != SCI_PORT_SUB_OPERATIONAL) { 1156 if (state != SCI_PORT_SUB_OPERATIONAL) {
1190 dev_warn(sciport_to_dev(iport), 1157 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1191 "%s: in wrong state: %d\n", __func__, state); 1158 __func__, port_state_name(state));
1192 return SCI_FAILURE_INVALID_STATE; 1159 return SCI_FAILURE_INVALID_STATE;
1193 } 1160 }
1194 1161
@@ -1282,8 +1249,8 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
1282 SCI_PORT_SUB_CONFIGURING); 1249 SCI_PORT_SUB_CONFIGURING);
1283 return SCI_SUCCESS; 1250 return SCI_SUCCESS;
1284 default: 1251 default:
1285 dev_warn(sciport_to_dev(iport), 1252 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1286 "%s: in wrong state: %d\n", __func__, state); 1253 __func__, port_state_name(state));
1287 return SCI_FAILURE_INVALID_STATE; 1254 return SCI_FAILURE_INVALID_STATE;
1288 } 1255 }
1289} 1256}
@@ -1332,8 +1299,8 @@ enum sci_status sci_port_remove_phy(struct isci_port *iport,
1332 SCI_PORT_SUB_CONFIGURING); 1299 SCI_PORT_SUB_CONFIGURING);
1333 return SCI_SUCCESS; 1300 return SCI_SUCCESS;
1334 default: 1301 default:
1335 dev_warn(sciport_to_dev(iport), 1302 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1336 "%s: in wrong state: %d\n", __func__, state); 1303 __func__, port_state_name(state));
1337 return SCI_FAILURE_INVALID_STATE; 1304 return SCI_FAILURE_INVALID_STATE;
1338 } 1305 }
1339} 1306}
@@ -1375,8 +1342,8 @@ enum sci_status sci_port_link_up(struct isci_port *iport,
1375 sci_port_general_link_up_handler(iport, iphy, PF_RESUME); 1342 sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
1376 return SCI_SUCCESS; 1343 return SCI_SUCCESS;
1377 default: 1344 default:
1378 dev_warn(sciport_to_dev(iport), 1345 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1379 "%s: in wrong state: %d\n", __func__, state); 1346 __func__, port_state_name(state));
1380 return SCI_FAILURE_INVALID_STATE; 1347 return SCI_FAILURE_INVALID_STATE;
1381 } 1348 }
1382} 1349}
@@ -1405,8 +1372,8 @@ enum sci_status sci_port_link_down(struct isci_port *iport,
1405 sci_port_deactivate_phy(iport, iphy, false); 1372 sci_port_deactivate_phy(iport, iphy, false);
1406 return SCI_SUCCESS; 1373 return SCI_SUCCESS;
1407 default: 1374 default:
1408 dev_warn(sciport_to_dev(iport), 1375 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1409 "%s: in wrong state: %d\n", __func__, state); 1376 __func__, port_state_name(state));
1410 return SCI_FAILURE_INVALID_STATE; 1377 return SCI_FAILURE_INVALID_STATE;
1411 } 1378 }
1412} 1379}
@@ -1425,8 +1392,8 @@ enum sci_status sci_port_start_io(struct isci_port *iport,
1425 iport->started_request_count++; 1392 iport->started_request_count++;
1426 return SCI_SUCCESS; 1393 return SCI_SUCCESS;
1427 default: 1394 default:
1428 dev_warn(sciport_to_dev(iport), 1395 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1429 "%s: in wrong state: %d\n", __func__, state); 1396 __func__, port_state_name(state));
1430 return SCI_FAILURE_INVALID_STATE; 1397 return SCI_FAILURE_INVALID_STATE;
1431 } 1398 }
1432} 1399}
@@ -1440,8 +1407,8 @@ enum sci_status sci_port_complete_io(struct isci_port *iport,
1440 state = iport->sm.current_state_id; 1407 state = iport->sm.current_state_id;
1441 switch (state) { 1408 switch (state) {
1442 case SCI_PORT_STOPPED: 1409 case SCI_PORT_STOPPED:
1443 dev_warn(sciport_to_dev(iport), 1410 dev_warn(sciport_to_dev(iport), "%s: in wrong state: %s\n",
1444 "%s: in wrong state: %d\n", __func__, state); 1411 __func__, port_state_name(state));
1445 return SCI_FAILURE_INVALID_STATE; 1412 return SCI_FAILURE_INVALID_STATE;
1446 case SCI_PORT_STOPPING: 1413 case SCI_PORT_STOPPING:
1447 sci_port_decrement_request_count(iport); 1414 sci_port_decrement_request_count(iport);
@@ -1547,7 +1514,8 @@ static void sci_port_ready_state_enter(struct sci_base_state_machine *sm)
1547 if (prev_state == SCI_PORT_RESETTING) 1514 if (prev_state == SCI_PORT_RESETTING)
1548 isci_port_hard_reset_complete(iport, SCI_SUCCESS); 1515 isci_port_hard_reset_complete(iport, SCI_SUCCESS);
1549 else 1516 else
1550 isci_port_not_ready(ihost, iport); 1517 dev_dbg(&ihost->pdev->dev, "%s: port%d !ready\n",
1518 __func__, iport->physical_port_index);
1551 1519
1552 /* Post and suspend the dummy remote node context for this port. */ 1520 /* Post and suspend the dummy remote node context for this port. */
1553 sci_port_post_dummy_remote_node(iport); 1521 sci_port_post_dummy_remote_node(iport);
@@ -1644,22 +1612,7 @@ void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index)
1644{ 1612{
1645 INIT_LIST_HEAD(&iport->remote_dev_list); 1613 INIT_LIST_HEAD(&iport->remote_dev_list);
1646 INIT_LIST_HEAD(&iport->domain_dev_list); 1614 INIT_LIST_HEAD(&iport->domain_dev_list);
1647 spin_lock_init(&iport->state_lock);
1648 init_completion(&iport->start_complete);
1649 iport->isci_host = ihost; 1615 iport->isci_host = ihost;
1650 isci_port_change_state(iport, isci_freed);
1651}
1652
1653/**
1654 * isci_port_get_state() - This function gets the status of the port object.
1655 * @isci_port: This parameter points to the isci_port object
1656 *
1657 * status of the object as a isci_status enum.
1658 */
1659enum isci_status isci_port_get_state(
1660 struct isci_port *isci_port)
1661{
1662 return isci_port->status;
1663} 1616}
1664 1617
1665void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy) 1618void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
@@ -1670,6 +1623,11 @@ void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy
1670 isci_port_bc_change_received(ihost, iport, iphy); 1623 isci_port_bc_change_received(ihost, iport, iphy);
1671} 1624}
1672 1625
1626static void wait_port_reset(struct isci_host *ihost, struct isci_port *iport)
1627{
1628 wait_event(ihost->eventq, !test_bit(IPORT_RESET_PENDING, &iport->state));
1629}
1630
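
The per-reset completion is gone; callers now set IPORT_RESET_PENDING, sleep on the host-wide eventq with wait_event(), and the completion path clears the bit and calls wake_up(). A pthread sketch of the same flag-plus-waitqueue shape (compile with -lpthread); the names are illustrative only:

    /* Userspace analogue of wait_event()/wake_up() over a pending flag. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t eventq = PTHREAD_COND_INITIALIZER;
    static int reset_pending = 1;

    static void *reset_done(void *arg)
    {
            pthread_mutex_lock(&lock);
            reset_pending = 0;               /* clear_bit(IPORT_RESET_PENDING) */
            pthread_cond_broadcast(&eventq); /* wake_up(&ihost->eventq) */
            pthread_mutex_unlock(&lock);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, reset_done, NULL);
            pthread_mutex_lock(&lock);
            while (reset_pending)            /* wait_event(eventq, !pending) */
                    pthread_cond_wait(&eventq, &lock);
            pthread_mutex_unlock(&lock);
            pthread_join(t, NULL);
            printf("port reset complete\n");
            return 0;
    }
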
1673int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport, 1631int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
1674 struct isci_phy *iphy) 1632 struct isci_phy *iphy)
1675{ 1633{
@@ -1680,9 +1638,8 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
1680 dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n", 1638 dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
1681 __func__, iport); 1639 __func__, iport);
1682 1640
1683 init_completion(&iport->hard_reset_complete);
1684
1685 spin_lock_irqsave(&ihost->scic_lock, flags); 1641 spin_lock_irqsave(&ihost->scic_lock, flags);
1642 set_bit(IPORT_RESET_PENDING, &iport->state);
1686 1643
1687 #define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT 1644 #define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT
1688 status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT); 1645 status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);
@@ -1690,7 +1647,7 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
1690 spin_unlock_irqrestore(&ihost->scic_lock, flags); 1647 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1691 1648
1692 if (status == SCI_SUCCESS) { 1649 if (status == SCI_SUCCESS) {
1693 wait_for_completion(&iport->hard_reset_complete); 1650 wait_port_reset(ihost, iport);
1694 1651
1695 dev_dbg(&ihost->pdev->dev, 1652 dev_dbg(&ihost->pdev->dev,
1696 "%s: iport = %p; hard reset completion\n", 1653 "%s: iport = %p; hard reset completion\n",
@@ -1704,6 +1661,8 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
1704 __func__, iport, iport->hard_reset_status); 1661 __func__, iport, iport->hard_reset_status);
1705 } 1662 }
1706 } else { 1663 } else {
1664 clear_bit(IPORT_RESET_PENDING, &iport->state);
1665 wake_up(&ihost->eventq);
1707 ret = TMF_RESP_FUNC_FAILED; 1666 ret = TMF_RESP_FUNC_FAILED;
1708 1667
1709 dev_err(&ihost->pdev->dev, 1668 dev_err(&ihost->pdev->dev,
@@ -1726,24 +1685,80 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor
1726 return ret; 1685 return ret;
1727} 1686}
1728 1687
1729/** 1688int isci_ata_check_ready(struct domain_device *dev)
1730 * isci_port_deformed() - This function is called by libsas when a port becomes 1689{
1731 * inactive. 1690 struct isci_port *iport = dev->port->lldd_port;
1732 * @phy: This parameter specifies the libsas phy with the inactive port. 1691 struct isci_host *ihost = dev_to_ihost(dev);
1733 * 1692 struct isci_remote_device *idev;
1734 */ 1693 unsigned long flags;
1694 int rc = 0;
1695
1696 spin_lock_irqsave(&ihost->scic_lock, flags);
1697 idev = isci_lookup_device(dev);
1698 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1699
1700 if (!idev)
1701 goto out;
1702
1703 if (test_bit(IPORT_RESET_PENDING, &iport->state))
1704 goto out;
1705
1706 rc = !!iport->active_phy_mask;
1707 out:
1708 isci_put_device(idev);
1709
1710 return rc;
1711}
1712
1735void isci_port_deformed(struct asd_sas_phy *phy) 1713void isci_port_deformed(struct asd_sas_phy *phy)
1736{ 1714{
1737 pr_debug("%s: sas_phy = %p\n", __func__, phy); 1715 struct isci_host *ihost = phy->ha->lldd_ha;
1716 struct isci_port *iport = phy->port->lldd_port;
1717 unsigned long flags;
1718 int i;
1719
1720 /* we got a port notification on a port that was subsequently
1721 * torn down and libsas is just now catching up
1722 */
1723 if (!iport)
1724 return;
1725
1726 spin_lock_irqsave(&ihost->scic_lock, flags);
1727 for (i = 0; i < SCI_MAX_PHYS; i++) {
1728 if (iport->active_phy_mask & 1 << i)
1729 break;
1730 }
1731 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1732
1733 if (i >= SCI_MAX_PHYS)
1734 dev_dbg(&ihost->pdev->dev, "%s: port: %ld\n",
1735 __func__, (long) (iport - &ihost->ports[0]));
1738} 1736}
1739 1737
1740/**
1741 * isci_port_formed() - This function is called by libsas when a port becomes
1742 * active.
1743 * @phy: This parameter specifies the libsas phy with the active port.
1744 *
1745 */
1746void isci_port_formed(struct asd_sas_phy *phy) 1738void isci_port_formed(struct asd_sas_phy *phy)
1747{ 1739{
1748 pr_debug("%s: sas_phy = %p, sas_port = %p\n", __func__, phy, phy->port); 1740 struct isci_host *ihost = phy->ha->lldd_ha;
1741 struct isci_phy *iphy = to_iphy(phy);
1742 struct asd_sas_port *port = phy->port;
1743 struct isci_port *iport;
1744 unsigned long flags;
1745 int i;
1746
1747 /* initial ports are formed as the driver is still initializing,
1748 * wait for that process to complete
1749 */
1750 wait_for_start(ihost);
1751
1752 spin_lock_irqsave(&ihost->scic_lock, flags);
1753 for (i = 0; i < SCI_MAX_PORTS; i++) {
1754 iport = &ihost->ports[i];
1755 if (iport->active_phy_mask & 1 << iphy->phy_index)
1756 break;
1757 }
1758 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1759
1760 if (i >= SCI_MAX_PORTS)
1761 iport = NULL;
1762
1763 port->lldd_port = iport;
1749} 1764}
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
index 08116090eb7..6b56240c205 100644
--- a/drivers/scsi/isci/port.h
+++ b/drivers/scsi/isci/port.h
@@ -95,14 +95,11 @@ enum isci_status {
95 * @timer: timeout start/stop operations 95 * @timer: timeout start/stop operations
96 */ 96 */
97struct isci_port { 97struct isci_port {
98 enum isci_status status;
99 struct isci_host *isci_host; 98 struct isci_host *isci_host;
100 struct asd_sas_port sas_port;
101 struct list_head remote_dev_list; 99 struct list_head remote_dev_list;
102 spinlock_t state_lock;
103 struct list_head domain_dev_list; 100 struct list_head domain_dev_list;
104 struct completion start_complete; 101 #define IPORT_RESET_PENDING 0
105 struct completion hard_reset_complete; 102 unsigned long state;
106 enum sci_status hard_reset_status; 103 enum sci_status hard_reset_status;
107 struct sci_base_state_machine sm; 104 struct sci_base_state_machine sm;
108 bool ready_exit; 105 bool ready_exit;
@@ -147,70 +144,47 @@ struct sci_port_properties {
147}; 144};
148 145
149/** 146/**
150 * enum sci_port_states - This enumeration depicts all the states for the 147 * enum sci_port_states - port state machine states
151 * common port state machine. 148 * @SCI_PORT_STOPPED: port has successfully been stopped. In this state
152 * 149 * no new IO operations are permitted. This state is
153 * 150 * entered from the STOPPING state.
151 * @SCI_PORT_STOPPING: port is in the process of stopping. In this
152 * state no new IO operations are permitted, but
153 * existing IO operations are allowed to complete.
154 * This state is entered from the READY state.
155 * @SCI_PORT_READY: port is now ready. Thus, the user is able to
156 * perform IO operations on this port. This state is
157 * entered from the STARTING state.
158 * @SCI_PORT_SUB_WAITING: port is started and ready but has no active
159 * phys.
160 * @SCI_PORT_SUB_OPERATIONAL: port is started and ready and there is at
161 * least one phy operational.
162 * @SCI_PORT_SUB_CONFIGURING: port is started and there was an
163 * add/remove phy event. This state is only
164 * used in Automatic Port Configuration Mode
165 * (APC)
166 * @SCI_PORT_RESETTING: port is in the process of performing a hard
167 * reset. Thus, the user is unable to perform IO
168 * operations on this port. This state is entered
169 * from the READY state.
170 * @SCI_PORT_FAILED: port has failed a reset request. This state is
171 * entered when a port reset request times out. This
172 * state is entered from the RESETTING state.
154 */ 173 */
155enum sci_port_states { 174#define PORT_STATES {\
156 /** 175 C(PORT_STOPPED),\
157 * This state indicates that the port has successfully been stopped. 176 C(PORT_STOPPING),\
158 * In this state no new IO operations are permitted. 177 C(PORT_READY),\
159 * This state is entered from the STOPPING state. 178 C(PORT_SUB_WAITING),\
160 */ 179 C(PORT_SUB_OPERATIONAL),\
161 SCI_PORT_STOPPED, 180 C(PORT_SUB_CONFIGURING),\
162 181 C(PORT_RESETTING),\
163 /** 182 C(PORT_FAILED),\
164 * This state indicates that the port is in the process of stopping. 183 }
165 * In this state no new IO operations are permitted, but existing IO 184#undef C
166 * operations are allowed to complete. 185#define C(a) SCI_##a
167 * This state is entered from the READY state. 186enum sci_port_states PORT_STATES;
168 */ 187#undef C
169 SCI_PORT_STOPPING,
170
171 /**
172 * This state indicates the port is now ready. Thus, the user is
173 * able to perform IO operations on this port.
174 * This state is entered from the STARTING state.
175 */
176 SCI_PORT_READY,
177
178 /**
179 * The substate where the port is started and ready but has no
180 * active phys.
181 */
182 SCI_PORT_SUB_WAITING,
183
184 /**
185 * The substate where the port is started and ready and there is
186 * at least one phy operational.
187 */
188 SCI_PORT_SUB_OPERATIONAL,
189
190 /**
191 * The substate where the port is started and there was an
192 * add/remove phy event. This state is only used in Automatic
193 * Port Configuration Mode (APC)
194 */
195 SCI_PORT_SUB_CONFIGURING,
196
197 /**
198 * This state indicates the port is in the process of performing a hard
199 * reset. Thus, the user is unable to perform IO operations on this
200 * port.
201 * This state is entered from the READY state.
202 */
203 SCI_PORT_RESETTING,
204
205 /**
206 * This state indicates the port has failed a reset request. This state
207 * is entered when a port reset request times out.
208 * This state is entered from the RESETTING state.
209 */
210 SCI_PORT_FAILED,
211
212
213};
214 188
215static inline void sci_port_decrement_request_count(struct isci_port *iport) 189static inline void sci_port_decrement_request_count(struct isci_port *iport)
216{ 190{
@@ -296,9 +270,6 @@ void sci_port_get_attached_sas_address(
296 struct isci_port *iport, 270 struct isci_port *iport,
297 struct sci_sas_address *sas_address); 271 struct sci_sas_address *sas_address);
298 272
299enum isci_status isci_port_get_state(
300 struct isci_port *isci_port);
301
302void isci_port_formed(struct asd_sas_phy *); 273void isci_port_formed(struct asd_sas_phy *);
303void isci_port_deformed(struct asd_sas_phy *); 274void isci_port_deformed(struct asd_sas_phy *);
304 275
@@ -309,4 +280,5 @@ void isci_port_init(
309 280
310int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport, 281int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
311 struct isci_phy *iphy); 282 struct isci_phy *iphy);
283int isci_ata_check_ready(struct domain_device *dev);
312#endif /* !defined(_ISCI_PORT_H_) */ 284#endif /* !defined(_ISCI_PORT_H_) */
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
index eaa541afc75..7eb0ccd45fe 100644
--- a/drivers/scsi/isci/registers.h
+++ b/drivers/scsi/isci/registers.h
@@ -370,6 +370,27 @@ struct scu_iit_entry {
370 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT \ 370 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT \
371 ) 371 )
372 372
373/* ***************************************************************************** */
374#define SMU_CLOCK_GATING_CONTROL_IDLE_ENABLE_SHIFT (0)
375#define SMU_CLOCK_GATING_CONTROL_IDLE_ENABLE_MASK (0x00000001)
376#define SMU_CLOCK_GATING_CONTROL_XCLK_ENABLE_SHIFT (1)
377#define SMU_CLOCK_GATING_CONTROL_XCLK_ENABLE_MASK (0x00000002)
378#define SMU_CLOCK_GATING_CONTROL_TXCLK_ENABLE_SHIFT (2)
379#define SMU_CLOCK_GATING_CONTROL_TXCLK_ENABLE_MASK (0x00000004)
380#define SMU_CLOCK_GATING_CONTROL_REGCLK_ENABLE_SHIFT (3)
381#define SMU_CLOCK_GATING_CONTROL_REGCLK_ENABLE_MASK (0x00000008)
382#define SMU_CLOCK_GATING_CONTROL_IDLE_TIMEOUT_SHIFT (16)
383#define SMU_CLOCK_GATING_CONTROL_IDLE_TIMEOUT_MASK (0x000F0000)
384#define SMU_CLOCK_GATING_CONTROL_FORCE_IDLE_SHIFT (31)
385#define SMU_CLOCK_GATING_CONTROL_FORCE_IDLE_MASK (0x80000000)
386#define SMU_CLOCK_GATING_CONTROL_RESERVED_MASK (0x7FF0FFF0)
387
388#define SMU_CGUCR_GEN_VAL(name, value) \
389 SCU_GEN_VALUE(SMU_CLOCK_GATING_CONTROL_##name, value)
390
391#define SMU_CGUCR_GEN_BIT(name) \
392 SCU_GEN_BIT(SMU_CLOCK_GATING_CONTROL_##name)
393
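
The new CGUCR accessors lean on SCU_GEN_VALUE()/SCU_GEN_BIT(), the shift-and-mask helpers used throughout these headers. A sketch of how a clock gating control value would be composed, assuming the helpers' conventional definitions (shown here for completeness, not quoted from the patch):

#define SCU_GEN_VALUE(name, value) \
	(((value) << name ## _SHIFT) & name ## _MASK)
#define SCU_GEN_BIT(name) \
	SCU_GEN_VALUE(name, 1)

/* enable idle clock gating with an illustrative idle timeout of 0xA */
u32 cgucr = SMU_CGUCR_GEN_BIT(IDLE_ENABLE) |
	    SMU_CGUCR_GEN_VAL(IDLE_TIMEOUT, 0xA);
/* expands to (1 << 0) | ((0xA << 16) & 0x000F0000) */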
373/* -------------------------------------------------------------------------- */ 394/* -------------------------------------------------------------------------- */
374 395
375#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_SHIFT (0) 396#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_SHIFT (0)
@@ -992,8 +1013,10 @@ struct smu_registers {
992 u32 mmr_address_window; 1013 u32 mmr_address_window;
993/* 0x00A4 SMDW */ 1014/* 0x00A4 SMDW */
994 u32 mmr_data_window; 1015 u32 mmr_data_window;
995 u32 reserved_A8; 1016/* 0x00A8 CGUCR */
996 u32 reserved_AC; 1017 u32 clock_gating_control;
1018/* 0x00AC CGUPC */
1019 u32 clock_gating_performance;
997/* A whole bunch of reserved space */ 1020/* A whole bunch of reserved space */
998 u32 reserved_Bx[4]; 1021 u32 reserved_Bx[4];
999 u32 reserved_Cx[4]; 1022 u32 reserved_Cx[4];
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index dd74b6ceeb8..8f501b0a81d 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -62,6 +62,16 @@
62#include "scu_event_codes.h" 62#include "scu_event_codes.h"
63#include "task.h" 63#include "task.h"
64 64
65#undef C
66#define C(a) (#a)
67const char *dev_state_name(enum sci_remote_device_states state)
68{
69 static const char * const strings[] = REMOTE_DEV_STATES;
70
71 return strings[state];
72}
73#undef C
74
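
dev_state_name() indexes the generated table directly with the enum value, which is safe for every state the state machine can legally hold. A hedged variant with an explicit range check, in case a corrupted value ever reaches a log path (a sketch, not part of this patch):

#undef C
#define C(a) (#a)
static const char *dev_state_name_checked(enum sci_remote_device_states state)
{
	static const char * const strings[] = REMOTE_DEV_STATES;

	if (state >= ARRAY_SIZE(strings))
		return "UNKNOWN";
	return strings[state];
}
#undef C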
65/** 75/**
66 * isci_remote_device_not_ready() - This function is called by the ihost when 76 * isci_remote_device_not_ready() - This function is called by the ihost when
67 * the remote device is not ready. We mark the isci device as ready (not 77 * the remote device is not ready. We mark the isci device as ready (not
@@ -167,8 +177,8 @@ enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
167 case SCI_DEV_FAILED: 177 case SCI_DEV_FAILED:
168 case SCI_DEV_FINAL: 178 case SCI_DEV_FINAL:
169 default: 179 default:
170 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", 180 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
171 __func__, state); 181 __func__, dev_state_name(state));
172 return SCI_FAILURE_INVALID_STATE; 182 return SCI_FAILURE_INVALID_STATE;
173 case SCI_DEV_STOPPED: 183 case SCI_DEV_STOPPED:
174 return SCI_SUCCESS; 184 return SCI_SUCCESS;
@@ -226,8 +236,8 @@ enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
226 case SCI_DEV_RESETTING: 236 case SCI_DEV_RESETTING:
227 case SCI_DEV_FINAL: 237 case SCI_DEV_FINAL:
228 default: 238 default:
229 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", 239 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
230 __func__, state); 240 __func__, dev_state_name(state));
231 return SCI_FAILURE_INVALID_STATE; 241 return SCI_FAILURE_INVALID_STATE;
232 case SCI_DEV_READY: 242 case SCI_DEV_READY:
233 case SCI_STP_DEV_IDLE: 243 case SCI_STP_DEV_IDLE:
@@ -246,8 +256,8 @@ enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev
246 enum sci_remote_device_states state = sm->current_state_id; 256 enum sci_remote_device_states state = sm->current_state_id;
247 257
248 if (state != SCI_DEV_RESETTING) { 258 if (state != SCI_DEV_RESETTING) {
249 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", 259 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
250 __func__, state); 260 __func__, dev_state_name(state));
251 return SCI_FAILURE_INVALID_STATE; 261 return SCI_FAILURE_INVALID_STATE;
252 } 262 }
253 263
@@ -262,8 +272,8 @@ enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
262 enum sci_remote_device_states state = sm->current_state_id; 272 enum sci_remote_device_states state = sm->current_state_id;
263 273
264 if (state != SCI_STP_DEV_CMD) { 274 if (state != SCI_STP_DEV_CMD) {
265 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", 275 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
266 __func__, state); 276 __func__, dev_state_name(state));
267 return SCI_FAILURE_INVALID_STATE; 277 return SCI_FAILURE_INVALID_STATE;
268 } 278 }
269 279
@@ -287,8 +297,8 @@ enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
287 case SCI_SMP_DEV_IDLE: 297 case SCI_SMP_DEV_IDLE:
288 case SCI_DEV_FINAL: 298 case SCI_DEV_FINAL:
289 default: 299 default:
290 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", 300 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
291 __func__, state); 301 __func__, dev_state_name(state));
292 /* Return the frame back to the controller */ 302 /* Return the frame back to the controller */
293 sci_controller_release_frame(ihost, frame_index); 303 sci_controller_release_frame(ihost, frame_index);
294 return SCI_FAILURE_INVALID_STATE; 304 return SCI_FAILURE_INVALID_STATE;
@@ -502,8 +512,8 @@ enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
502 case SCI_DEV_RESETTING: 512 case SCI_DEV_RESETTING:
503 case SCI_DEV_FINAL: 513 case SCI_DEV_FINAL:
504 default: 514 default:
505 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", 515 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
506 __func__, state); 516 __func__, dev_state_name(state));
507 return SCI_FAILURE_INVALID_STATE; 517 return SCI_FAILURE_INVALID_STATE;
508 case SCI_DEV_READY: 518 case SCI_DEV_READY:
509 /* attempt to start an io request for this device object. The remote 519 /* attempt to start an io request for this device object. The remote
@@ -637,8 +647,8 @@ enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
637 case SCI_DEV_FAILED: 647 case SCI_DEV_FAILED:
638 case SCI_DEV_FINAL: 648 case SCI_DEV_FINAL:
639 default: 649 default:
640 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", 650 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
641 __func__, state); 651 __func__, dev_state_name(state));
642 return SCI_FAILURE_INVALID_STATE; 652 return SCI_FAILURE_INVALID_STATE;
643 case SCI_DEV_READY: 653 case SCI_DEV_READY:
644 case SCI_STP_DEV_AWAIT_RESET: 654 case SCI_STP_DEV_AWAIT_RESET:
@@ -721,8 +731,8 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
721 case SCI_DEV_RESETTING: 731 case SCI_DEV_RESETTING:
722 case SCI_DEV_FINAL: 732 case SCI_DEV_FINAL:
723 default: 733 default:
724 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", 734 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
725 __func__, state); 735 __func__, dev_state_name(state));
726 return SCI_FAILURE_INVALID_STATE; 736 return SCI_FAILURE_INVALID_STATE;
727 case SCI_STP_DEV_IDLE: 737 case SCI_STP_DEV_IDLE:
728 case SCI_STP_DEV_CMD: 738 case SCI_STP_DEV_CMD:
@@ -853,8 +863,8 @@ static enum sci_status sci_remote_device_destruct(struct isci_remote_device *ide
853 struct isci_host *ihost; 863 struct isci_host *ihost;
854 864
855 if (state != SCI_DEV_STOPPED) { 865 if (state != SCI_DEV_STOPPED) {
856 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", 866 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
857 __func__, state); 867 __func__, dev_state_name(state));
858 return SCI_FAILURE_INVALID_STATE; 868 return SCI_FAILURE_INVALID_STATE;
859 } 869 }
860 870
@@ -1204,8 +1214,8 @@ static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
1204 enum sci_status status; 1214 enum sci_status status;
1205 1215
1206 if (state != SCI_DEV_STOPPED) { 1216 if (state != SCI_DEV_STOPPED) {
1207 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n", 1217 dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n",
1208 __func__, state); 1218 __func__, dev_state_name(state));
1209 return SCI_FAILURE_INVALID_STATE; 1219 return SCI_FAILURE_INVALID_STATE;
1210 } 1220 }
1211 1221
@@ -1308,7 +1318,6 @@ void isci_remote_device_release(struct kref *kref)
1308 clear_bit(IDEV_STOP_PENDING, &idev->flags); 1318 clear_bit(IDEV_STOP_PENDING, &idev->flags);
1309 clear_bit(IDEV_IO_READY, &idev->flags); 1319 clear_bit(IDEV_IO_READY, &idev->flags);
1310 clear_bit(IDEV_GONE, &idev->flags); 1320 clear_bit(IDEV_GONE, &idev->flags);
1311 clear_bit(IDEV_EH, &idev->flags);
1312 smp_mb__before_clear_bit(); 1321 smp_mb__before_clear_bit();
1313 clear_bit(IDEV_ALLOCATED, &idev->flags); 1322 clear_bit(IDEV_ALLOCATED, &idev->flags);
1314 wake_up(&ihost->eventq); 1323 wake_up(&ihost->eventq);
@@ -1381,34 +1390,17 @@ void isci_remote_device_gone(struct domain_device *dev)
1381 * 1390 *
1382 * status, zero indicates success. 1391 * status, zero indicates success.
1383 */ 1392 */
1384int isci_remote_device_found(struct domain_device *domain_dev) 1393int isci_remote_device_found(struct domain_device *dev)
1385{ 1394{
1386 struct isci_host *isci_host = dev_to_ihost(domain_dev); 1395 struct isci_host *isci_host = dev_to_ihost(dev);
1387 struct isci_port *isci_port; 1396 struct isci_port *isci_port = dev->port->lldd_port;
1388 struct isci_phy *isci_phy;
1389 struct asd_sas_port *sas_port;
1390 struct asd_sas_phy *sas_phy;
1391 struct isci_remote_device *isci_device; 1397 struct isci_remote_device *isci_device;
1392 enum sci_status status; 1398 enum sci_status status;
1393 1399
1394 dev_dbg(&isci_host->pdev->dev, 1400 dev_dbg(&isci_host->pdev->dev,
1395 "%s: domain_device = %p\n", __func__, domain_dev); 1401 "%s: domain_device = %p\n", __func__, dev);
1396
1397 wait_for_start(isci_host);
1398
1399 sas_port = domain_dev->port;
1400 sas_phy = list_first_entry(&sas_port->phy_list, struct asd_sas_phy,
1401 port_phy_el);
1402 isci_phy = to_iphy(sas_phy);
1403 isci_port = isci_phy->isci_port;
1404
1405 /* we are being called for a device on this port,
1406 * so it has to come up eventually
1407 */
1408 wait_for_completion(&isci_port->start_complete);
1409 1402
1410 if ((isci_stopping == isci_port_get_state(isci_port)) || 1403 if (!isci_port)
1411 (isci_stopped == isci_port_get_state(isci_port)))
1412 return -ENODEV; 1404 return -ENODEV;
1413 1405
1414 isci_device = isci_remote_device_alloc(isci_host, isci_port); 1406 isci_device = isci_remote_device_alloc(isci_host, isci_port);
@@ -1419,7 +1411,7 @@ int isci_remote_device_found(struct domain_device *domain_dev)
1419 INIT_LIST_HEAD(&isci_device->node); 1411 INIT_LIST_HEAD(&isci_device->node);
1420 1412
1421 spin_lock_irq(&isci_host->scic_lock); 1413 spin_lock_irq(&isci_host->scic_lock);
1422 isci_device->domain_dev = domain_dev; 1414 isci_device->domain_dev = dev;
1423 isci_device->isci_port = isci_port; 1415 isci_device->isci_port = isci_port;
1424 list_add_tail(&isci_device->node, &isci_port->remote_dev_list); 1416 list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
1425 1417
@@ -1432,7 +1424,7 @@ int isci_remote_device_found(struct domain_device *domain_dev)
1432 1424
1433 if (status == SCI_SUCCESS) { 1425 if (status == SCI_SUCCESS) {
1434 /* device came up, advertise it to the world */ 1426 /* device came up, advertise it to the world */
1435 domain_dev->lldd_dev = isci_device; 1427 dev->lldd_dev = isci_device;
1436 } else 1428 } else
1437 isci_put_device(isci_device); 1429 isci_put_device(isci_device);
1438 spin_unlock_irq(&isci_host->scic_lock); 1430 spin_unlock_irq(&isci_host->scic_lock);
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 483ee50152f..58637ee08f5 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -82,10 +82,9 @@ struct isci_remote_device {
82 #define IDEV_START_PENDING 0 82 #define IDEV_START_PENDING 0
83 #define IDEV_STOP_PENDING 1 83 #define IDEV_STOP_PENDING 1
84 #define IDEV_ALLOCATED 2 84 #define IDEV_ALLOCATED 2
85 #define IDEV_EH 3 85 #define IDEV_GONE 3
86 #define IDEV_GONE 4 86 #define IDEV_IO_READY 4
87 #define IDEV_IO_READY 5 87 #define IDEV_IO_NCQERROR 5
88 #define IDEV_IO_NCQERROR 6
89 unsigned long flags; 88 unsigned long flags;
90 struct kref kref; 89 struct kref kref;
91 struct isci_port *isci_port; 90 struct isci_port *isci_port;
@@ -180,122 +179,101 @@ enum sci_status sci_remote_device_reset_complete(
180/** 179/**
181 * enum sci_remote_device_states - This enumeration depicts all the states 180 * enum sci_remote_device_states - This enumeration depicts all the states
182 * for the common remote device state machine. 181 * for the common remote device state machine.
182 * @SCI_DEV_INITIAL: Simply the initial state for the base remote device
183 * state machine.
183 * 184 *
185 * @SCI_DEV_STOPPED: This state indicates that the remote device has
186 * successfully been stopped. In this state no new IO operations are
187 * permitted. This state is entered from the INITIAL state. This state
188 * is entered from the STOPPING state.
184 * 189 *
190 * @SCI_DEV_STARTING: This state indicates that the remote device is in
191 * the process of becoming ready (i.e. starting). In this state no new
192 * IO operations are permitted. This state is entered from the STOPPED
193 * state.
194 *
195 * @SCI_DEV_READY: This state indicates the remote device is now ready.
196 * Thus, the user is able to perform IO operations on the remote device.
197 * This state is entered from the STARTING state.
198 *
199 * @SCI_STP_DEV_IDLE: This is the idle substate for the STP remote
200 * device. When there are no active IOs for the device it is in this
201 * state.
202 *
203 * @SCI_STP_DEV_CMD: This is the command state for the STP remote
204 * device. This state is entered when the device is processing a
205 * non-NCQ command. The device object will fail any new start IO
206 * requests until this command is complete.
207 *
208 * @SCI_STP_DEV_NCQ: This is the NCQ state for the STP remote device.
209 * This state is entered when the device is processing an NCQ request.
210 * It will remain in this state so long as there is one or more NCQ
211 * requests being processed.
212 *
213 * @SCI_STP_DEV_NCQ_ERROR: This is the NCQ error state for the STP
214 * remote device. This state is entered when an SDB error FIS is
215 * received by the device object while in the NCQ state. The device
216 * object will only accept a READ LOG command while in this state.
217 *
218 * @SCI_STP_DEV_ATAPI_ERROR: This is the ATAPI error state for the STP
219 * ATAPI remote device. This state is entered when the ATAPI device sends
220 * an error status FIS without data while the device object is in CMD
221 * state. A suspension event is expected in this state. The device
222 * object will resume right away.
223 *
224 * @SCI_STP_DEV_AWAIT_RESET: This READY substate indicates that the
225 * device is waiting for a RESET task in order to recover from a
226 * hardware-specific error.
227 *
228 * @SCI_SMP_DEV_IDLE: This is the ready operational substate for the
229 * remote device. This is the normal operational state for a remote
230 * device.
231 *
232 * @SCI_SMP_DEV_CMD: This is the suspended state for the remote device.
233 * This is the state that the device is placed in when a RNC suspend is
234 * received by the SCU hardware.
235 *
236 * @SCI_DEV_STOPPING: This state indicates that the remote device is in
237 * the process of stopping. In this state no new IO operations are
238 * permitted, but existing IO operations are allowed to complete. This
239 * state is entered from the READY state. This state is entered from
240 * the FAILED state.
241 *
242 * @SCI_DEV_FAILED: This state indicates that the remote device has
243 * failed. In this state no new IO operations are permitted. This
244 * state is entered from the INITIALIZING state. This state is entered
245 * from the READY state.
246 *
247 * @SCI_DEV_RESETTING: This state indicates the device is being reset.
248 * In this state no new IO operations are permitted. This state is
249 * entered from the READY state.
250 *
251 * @SCI_DEV_FINAL: Simply the final state for the base remote device
252 * state machine.
185 */ 253 */
186enum sci_remote_device_states { 254#define REMOTE_DEV_STATES {\
187 /** 255 C(DEV_INITIAL),\
188 * Simply the initial state for the base remote device state machine. 256 C(DEV_STOPPED),\
189 */ 257 C(DEV_STARTING),\
190 SCI_DEV_INITIAL, 258 C(DEV_READY),\
191 259 C(STP_DEV_IDLE),\
192 /** 260 C(STP_DEV_CMD),\
193 * This state indicates that the remote device has successfully been 261 C(STP_DEV_NCQ),\
194 * stopped. In this state no new IO operations are permitted. 262 C(STP_DEV_NCQ_ERROR),\
195 * This state is entered from the INITIAL state. 263 C(STP_DEV_ATAPI_ERROR),\
196 * This state is entered from the STOPPING state. 264 C(STP_DEV_AWAIT_RESET),\
197 */ 265 C(SMP_DEV_IDLE),\
198 SCI_DEV_STOPPED, 266 C(SMP_DEV_CMD),\
199 267 C(DEV_STOPPING),\
200 /** 268 C(DEV_FAILED),\
201 * This state indicates the the remote device is in the process of 269 C(DEV_RESETTING),\
202 * becoming ready (i.e. starting). In this state no new IO operations 270 C(DEV_FINAL),\
203 * are permitted. 271 }
204 * This state is entered from the STOPPED state. 272#undef C
205 */ 273#define C(a) SCI_##a
206 SCI_DEV_STARTING, 274enum sci_remote_device_states REMOTE_DEV_STATES;
207 275#undef C
208 /** 276const char *dev_state_name(enum sci_remote_device_states state);
209 * This state indicates the remote device is now ready. Thus, the user
210 * is able to perform IO operations on the remote device.
211 * This state is entered from the STARTING state.
212 */
213 SCI_DEV_READY,
214
215 /**
216 * This is the idle substate for the stp remote device. When there are no
217 * active IO for the device it is is in this state.
218 */
219 SCI_STP_DEV_IDLE,
220
221 /**
222 * This is the command state for for the STP remote device. This state is
223 * entered when the device is processing a non-NCQ command. The device object
224 * will fail any new start IO requests until this command is complete.
225 */
226 SCI_STP_DEV_CMD,
227
228 /**
229 * This is the NCQ state for the STP remote device. This state is entered
230 * when the device is processing an NCQ reuqest. It will remain in this state
231 * so long as there is one or more NCQ requests being processed.
232 */
233 SCI_STP_DEV_NCQ,
234
235 /**
236 * This is the NCQ error state for the STP remote device. This state is
237 * entered when an SDB error FIS is received by the device object while in the
238 * NCQ state. The device object will only accept a READ LOG command while in
239 * this state.
240 */
241 SCI_STP_DEV_NCQ_ERROR,
242
243 /**
244 * This is the ATAPI error state for the STP ATAPI remote device.
245 * This state is entered when ATAPI device sends error status FIS
246 * without data while the device object is in CMD state.
247 * A suspension event is expected in this state.
248 * The device object will resume right away.
249 */
250 SCI_STP_DEV_ATAPI_ERROR,
251
252 /**
253 * This is the READY substate indicates the device is waiting for the RESET task
254 * coming to be recovered from certain hardware specific error.
255 */
256 SCI_STP_DEV_AWAIT_RESET,
257
258 /**
259 * This is the ready operational substate for the remote device. This is the
260 * normal operational state for a remote device.
261 */
262 SCI_SMP_DEV_IDLE,
263
264 /**
265 * This is the suspended state for the remote device. This is the state that
266 * the device is placed in when a RNC suspend is received by the SCU hardware.
267 */
268 SCI_SMP_DEV_CMD,
269
270 /**
271 * This state indicates that the remote device is in the process of
272 * stopping. In this state no new IO operations are permitted, but
273 * existing IO operations are allowed to complete.
274 * This state is entered from the READY state.
275 * This state is entered from the FAILED state.
276 */
277 SCI_DEV_STOPPING,
278
279 /**
280 * This state indicates that the remote device has failed.
281 * In this state no new IO operations are permitted.
282 * This state is entered from the INITIALIZING state.
283 * This state is entered from the READY state.
284 */
285 SCI_DEV_FAILED,
286
287 /**
288 * This state indicates the device is being reset.
289 * In this state no new IO operations are permitted.
290 * This state is entered from the READY state.
291 */
292 SCI_DEV_RESETTING,
293
294 /**
295 * Simply the final state for the base remote device state machine.
296 */
297 SCI_DEV_FINAL,
298};
299 277
300static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc) 278static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc)
301{ 279{
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index 748e8339d1e..3a9463481f3 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -60,18 +60,15 @@
60#include "scu_event_codes.h" 60#include "scu_event_codes.h"
61#include "scu_task_context.h" 61#include "scu_task_context.h"
62 62
63#undef C
64#define C(a) (#a)
65const char *rnc_state_name(enum scis_sds_remote_node_context_states state)
66{
67 static const char * const strings[] = RNC_STATES;
63 68
64/** 69 return strings[state];
65 * 70}
66 * @sci_rnc: The RNC for which the is posted request is being made. 71#undef C
67 *
68 * This method will return true if the RNC is not in the initial state. In all
69 * other states the RNC is considered active and this will return true. The
70 * destroy request of the state machine drives the RNC back to the initial
71 * state. If the state machine changes then this routine will also have to be
72 * changed. bool true if the state machine is not in the initial state false if
73 * the state machine is in the initial state
74 */
75 72
76/** 73/**
77 * 74 *
diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h
index 41580ad1252..a241e0f4c86 100644
--- a/drivers/scsi/isci/remote_node_context.h
+++ b/drivers/scsi/isci/remote_node_context.h
@@ -85,61 +85,50 @@ struct sci_remote_node_context;
85typedef void (*scics_sds_remote_node_context_callback)(void *); 85typedef void (*scics_sds_remote_node_context_callback)(void *);
86 86
87/** 87/**
88 * This is the enumeration of the remote node context states. 88 * enum scis_sds_remote_node_context_states
89 * @SCI_RNC_INITIAL: initial state for a remote node context. On a resume
90 * request the remote node context will transition to the posting state.
91 *
92 * @SCI_RNC_POSTING: transition state that posts the RNi to the hardware. Once
93 * the RNC is posted the remote node context will be made ready.
94 *
95 * @SCI_RNC_INVALIDATING: transition state that will post an RNC invalidate to
96 * the hardware. Once the invalidate is complete the remote node context will
97 * transition to the posting state.
98 *
99 * @SCI_RNC_RESUMING: transition state that will post an RNC resume to the
100 * hardware. Once the event notification of resume complete is received the
101 * remote node context will transition to the ready state.
102 *
103 * @SCI_RNC_READY: state that the remote node context must be in to accept io
104 * request operations.
105 *
106 * @SCI_RNC_TX_SUSPENDED: state that the remote node context transitions to when
107 * it gets a TX suspend notification from the hardware.
108 *
109 * @SCI_RNC_TX_RX_SUSPENDED: state that the remote node context transitions to
110 * when it gets a TX RX suspend notification from the hardware.
111 *
112 * @SCI_RNC_AWAIT_SUSPENSION: wait state for the remote node context that waits
113 * for a suspend notification from the hardware. This state is entered when
114 * either there is a request to suspend the remote node context or when there is
115 * a TC completion where the remote node will be suspended by the hardware.
89 */ 116 */
90enum scis_sds_remote_node_context_states { 117#define RNC_STATES {\
91 /** 118 C(RNC_INITIAL),\
92 * This state is the initial state for a remote node context. On a resume 119 C(RNC_POSTING),\
93 * request the remote node context will transition to the posting state. 120 C(RNC_INVALIDATING),\
94 */ 121 C(RNC_RESUMING),\
95 SCI_RNC_INITIAL, 122 C(RNC_READY),\
96 123 C(RNC_TX_SUSPENDED),\
97 /** 124 C(RNC_TX_RX_SUSPENDED),\
98 * This is a transition state that posts the RNi to the hardware. Once the RNC 125 C(RNC_AWAIT_SUSPENSION),\
99 * is posted the remote node context will be made ready. 126 }
100 */ 127#undef C
101 SCI_RNC_POSTING, 128#define C(a) SCI_##a
102 129enum scis_sds_remote_node_context_states RNC_STATES;
103 /** 130#undef C
104 * This is a transition state that will post an RNC invalidate to the 131const char *rnc_state_name(enum scis_sds_remote_node_context_states state);
105 * hardware. Once the invalidate is complete the remote node context will
106 * transition to the posting state.
107 */
108 SCI_RNC_INVALIDATING,
109
110 /**
111 * This is a transition state that will post an RNC resume to the hardare.
112 * Once the event notification of resume complete is received the remote node
113 * context will transition to the ready state.
114 */
115 SCI_RNC_RESUMING,
116
117 /**
118 * This is the state that the remote node context must be in to accept io
119 * request operations.
120 */
121 SCI_RNC_READY,
122
123 /**
124 * This is the state that the remote node context transitions to when it gets
125 * a TX suspend notification from the hardware.
126 */
127 SCI_RNC_TX_SUSPENDED,
128
129 /**
130 * This is the state that the remote node context transitions to when it gets
131 * a TX RX suspend notification from the hardware.
132 */
133 SCI_RNC_TX_RX_SUSPENDED,
134
135 /**
136 * This state is a wait state for the remote node context that waits for a
137 * suspend notification from the hardware. This state is entered when either
138 * there is a request to supend the remote node context or when there is a TC
139 * completion where the remote node will be suspended by the hardware.
140 */
141 SCI_RNC_AWAIT_SUSPENSION
142};
143 132
144/** 133/**
145 * 134 *
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index ee0dc05c626..2def1e3960f 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -53,6 +53,7 @@
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */ 54 */
55 55
56#include <scsi/scsi_cmnd.h>
56#include "isci.h" 57#include "isci.h"
57#include "task.h" 58#include "task.h"
58#include "request.h" 59#include "request.h"
@@ -60,6 +61,16 @@
60#include "scu_event_codes.h" 61#include "scu_event_codes.h"
61#include "sas.h" 62#include "sas.h"
62 63
64#undef C
65#define C(a) (#a)
66const char *req_state_name(enum sci_base_request_states state)
67{
68 static const char * const strings[] = REQUEST_STATES;
69
70 return strings[state];
71}
72#undef C
73
63static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq, 74static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
64 int idx) 75 int idx)
65{ 76{
@@ -264,6 +275,141 @@ static void scu_ssp_reqeust_construct_task_context(
264 task_context->response_iu_lower = lower_32_bits(dma_addr); 275 task_context->response_iu_lower = lower_32_bits(dma_addr);
265} 276}
266 277
278static u8 scu_bg_blk_size(struct scsi_device *sdp)
279{
280 switch (sdp->sector_size) {
281 case 512:
282 return 0;
283 case 1024:
284 return 1;
285 case 4096:
286 return 3;
287 default:
288 return 0xff;
289 }
290}
291
292static u32 scu_dif_bytes(u32 len, u32 sector_size)
293{
294 return (len >> ilog2(sector_size)) * 8;
295}
296
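
scu_dif_bytes() above converts a payload length into the number of extra protection bytes the engine transfers: one 8-byte DIF tuple per logical sector (scu_bg_blk_size() separately encodes the sector size itself as 0, 1, or 3 for 512, 1024, or 4096 bytes). A worked example with illustrative values:

/*
 * 4 KiB of data on a 512-byte-sector device spans eight sectors:
 *
 *	scu_dif_bytes(4096, 512) == (4096 >> ilog2(512)) * 8
 *	                         == (4096 >> 9) * 8 == 64
 *
 * so the insert/strip helpers below grow transfer_length_bytes by 64.
 */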
297static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op)
298{
299 struct scu_task_context *tc = ireq->tc;
300 struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
301 u8 blk_sz = scu_bg_blk_size(scmd->device);
302
303 tc->block_guard_enable = 1;
304 tc->blk_prot_en = 1;
305 tc->blk_sz = blk_sz;
306 /* DIF write insert */
307 tc->blk_prot_func = 0x2;
308
309 tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
310 scmd->device->sector_size);
311
312 /* always init to 0, used by hw */
313 tc->interm_crc_val = 0;
314
315 tc->init_crc_seed = 0;
316 tc->app_tag_verify = 0;
317 tc->app_tag_gen = 0;
318 tc->ref_tag_seed_verify = 0;
319
320 /* always init to same as bg_blk_sz */
321 tc->UD_bytes_immed_val = scmd->device->sector_size;
322
323 tc->reserved_DC_0 = 0;
324
325 /* always init to 8 */
326 tc->DIF_bytes_immed_val = 8;
327
328 tc->reserved_DC_1 = 0;
329 tc->bgc_blk_sz = scmd->device->sector_size;
330 tc->reserved_E0_0 = 0;
331 tc->app_tag_gen_mask = 0;
332
333 /* set up block guard control */
334 tc->bgctl = 0;
335
336 /* DIF write insert */
337 tc->bgctl_f.op = 0x2;
338
339 tc->app_tag_verify_mask = 0;
340
341 /* must init to 0 for hw */
342 tc->blk_guard_err = 0;
343
344 tc->reserved_E8_0 = 0;
345
346 if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
347 tc->ref_tag_seed_gen = scsi_get_lba(scmd) & 0xffffffff;
348 else if (type & SCSI_PROT_DIF_TYPE3)
349 tc->ref_tag_seed_gen = 0;
350}
351
352static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
353{
354 struct scu_task_context *tc = ireq->tc;
355 struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
356 u8 blk_sz = scu_bg_blk_size(scmd->device);
357
358 tc->block_guard_enable = 1;
359 tc->blk_prot_en = 1;
360 tc->blk_sz = blk_sz;
361 /* DIF read strip */
362 tc->blk_prot_func = 0x1;
363
364 tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
365 scmd->device->sector_size);
366
367 /* always init to 0, used by hw */
368 tc->interm_crc_val = 0;
369
370 tc->init_crc_seed = 0;
371 tc->app_tag_verify = 0;
372 tc->app_tag_gen = 0;
373
374 if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
375 tc->ref_tag_seed_verify = scsi_get_lba(scmd) & 0xffffffff;
376 else if (type & SCSI_PROT_DIF_TYPE3)
377 tc->ref_tag_seed_verify = 0;
378
379 /* always init to same as bg_blk_sz */
380 tc->UD_bytes_immed_val = scmd->device->sector_size;
381
382 tc->reserved_DC_0 = 0;
383
384 /* always init to 8 */
385 tc->DIF_bytes_immed_val = 8;
386
387 tc->reserved_DC_1 = 0;
388 tc->bgc_blk_sz = scmd->device->sector_size;
389 tc->reserved_E0_0 = 0;
390 tc->app_tag_gen_mask = 0;
391
392 /* set up block guard control */
393 tc->bgctl = 0;
394
395 /* DIF read strip */
396 tc->bgctl_f.crc_verify = 1;
397 tc->bgctl_f.op = 0x1;
398 if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) {
399 tc->bgctl_f.ref_tag_chk = 1;
400 tc->bgctl_f.app_f_detect = 1;
401 } else if (type & SCSI_PROT_DIF_TYPE3)
402 tc->bgctl_f.app_ref_f_detect = 1;
403
404 tc->app_tag_verify_mask = 0;
405
406 /* must init to 0 for hw */
407 tc->blk_guard_err = 0;
408
409 tc->reserved_E8_0 = 0;
410 tc->ref_tag_seed_gen = 0;
411}
412
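
The two DIF helpers are mirror images of each other; a condensed comparison of the values they program (summarizing the assignments above):

/*
 *	WRITE_INSERT (scu_ssp_ireq_dif_insert):
 *		blk_prot_func = 0x2, bgctl_f.op = 0x2;
 *		ref_tag_seed_gen seeded from scsi_get_lba() for DIF
 *		types 1 and 2, zero for type 3.
 *
 *	READ_STRIP (scu_ssp_ireq_dif_strip):
 *		blk_prot_func = 0x1, bgctl_f.op = 0x1;
 *		crc_verify enabled, plus ref/app tag checks per DIF type;
 *		ref_tag_seed_verify seeded instead of ref_tag_seed_gen.
 */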
267/** 413/**
268 * This method will fill in the SCU Task Context for an SSP IO request. 414 * This method will fill in the SCU Task Context for an SSP IO request.
269 * @sci_req: 415 * @sci_req:
@@ -274,6 +420,10 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
274 u32 len) 420 u32 len)
275{ 421{
276 struct scu_task_context *task_context = ireq->tc; 422 struct scu_task_context *task_context = ireq->tc;
423 struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr;
424 struct scsi_cmnd *scmd = sas_task->uldd_task;
425 u8 prot_type = scsi_get_prot_type(scmd);
426 u8 prot_op = scsi_get_prot_op(scmd);
277 427
278 scu_ssp_reqeust_construct_task_context(ireq, task_context); 428 scu_ssp_reqeust_construct_task_context(ireq, task_context);
279 429
@@ -296,6 +446,13 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
296 446
297 if (task_context->transfer_length_bytes > 0) 447 if (task_context->transfer_length_bytes > 0)
298 sci_request_build_sgl(ireq); 448 sci_request_build_sgl(ireq);
449
450 if (prot_type != SCSI_PROT_DIF_TYPE0) {
451 if (prot_op == SCSI_PROT_READ_STRIP)
452 scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op);
453 else if (prot_op == SCSI_PROT_WRITE_INSERT)
454 scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op);
455 }
299} 456}
300 457
301/** 458/**
@@ -519,18 +676,12 @@ sci_io_request_construct_sata(struct isci_request *ireq,
519 if (test_bit(IREQ_TMF, &ireq->flags)) { 676 if (test_bit(IREQ_TMF, &ireq->flags)) {
520 struct isci_tmf *tmf = isci_request_access_tmf(ireq); 677 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
521 678
522 if (tmf->tmf_code == isci_tmf_sata_srst_high || 679 dev_err(&ireq->owning_controller->pdev->dev,
523 tmf->tmf_code == isci_tmf_sata_srst_low) { 680 "%s: Request 0x%p received un-handled SAT "
524 scu_stp_raw_request_construct_task_context(ireq); 681 "management protocol 0x%x.\n",
525 return SCI_SUCCESS; 682 __func__, ireq, tmf->tmf_code);
526 } else {
527 dev_err(&ireq->owning_controller->pdev->dev,
528 "%s: Request 0x%p received un-handled SAT "
529 "management protocol 0x%x.\n",
530 __func__, ireq, tmf->tmf_code);
531 683
532 return SCI_FAILURE; 684 return SCI_FAILURE;
533 }
534 } 685 }
535 686
536 if (!sas_protocol_ata(task->task_proto)) { 687 if (!sas_protocol_ata(task->task_proto)) {
@@ -627,34 +778,6 @@ static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *
627 return status; 778 return status;
628} 779}
629 780
630enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
631{
632 enum sci_status status = SCI_SUCCESS;
633
634 /* check for management protocols */
635 if (test_bit(IREQ_TMF, &ireq->flags)) {
636 struct isci_tmf *tmf = isci_request_access_tmf(ireq);
637
638 if (tmf->tmf_code == isci_tmf_sata_srst_high ||
639 tmf->tmf_code == isci_tmf_sata_srst_low) {
640 scu_stp_raw_request_construct_task_context(ireq);
641 } else {
642 dev_err(&ireq->owning_controller->pdev->dev,
643 "%s: Request 0x%p received un-handled SAT "
644 "Protocol 0x%x.\n",
645 __func__, ireq, tmf->tmf_code);
646
647 return SCI_FAILURE;
648 }
649 }
650
651 if (status != SCI_SUCCESS)
652 return status;
653 sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
654
655 return status;
656}
657
658/** 781/**
659 * sci_req_tx_bytes - bytes transferred when reply underruns request 782 * sci_req_tx_bytes - bytes transferred when reply underruns request
660 * @ireq: request that was terminated early 783 * @ireq: request that was terminated early
@@ -756,9 +879,6 @@ sci_io_request_terminate(struct isci_request *ireq)
756 case SCI_REQ_STP_PIO_WAIT_FRAME: 879 case SCI_REQ_STP_PIO_WAIT_FRAME:
757 case SCI_REQ_STP_PIO_DATA_IN: 880 case SCI_REQ_STP_PIO_DATA_IN:
758 case SCI_REQ_STP_PIO_DATA_OUT: 881 case SCI_REQ_STP_PIO_DATA_OUT:
759 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
760 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
761 case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
762 case SCI_REQ_ATAPI_WAIT_H2D: 882 case SCI_REQ_ATAPI_WAIT_H2D:
763 case SCI_REQ_ATAPI_WAIT_PIO_SETUP: 883 case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
764 case SCI_REQ_ATAPI_WAIT_D2H: 884 case SCI_REQ_ATAPI_WAIT_D2H:
@@ -800,7 +920,8 @@ enum sci_status sci_request_complete(struct isci_request *ireq)
800 920
801 state = ireq->sm.current_state_id; 921 state = ireq->sm.current_state_id;
802 if (WARN_ONCE(state != SCI_REQ_COMPLETED, 922 if (WARN_ONCE(state != SCI_REQ_COMPLETED,
803 "isci: request completion from wrong state (%d)\n", state)) 923 "isci: request completion from wrong state (%s)\n",
924 req_state_name(state)))
804 return SCI_FAILURE_INVALID_STATE; 925 return SCI_FAILURE_INVALID_STATE;
805 926
806 if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) 927 if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
@@ -821,8 +942,8 @@ enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
821 state = ireq->sm.current_state_id; 942 state = ireq->sm.current_state_id;
822 943
823 if (state != SCI_REQ_STP_PIO_DATA_IN) { 944 if (state != SCI_REQ_STP_PIO_DATA_IN) {
824 dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %d\n", 945 dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %s\n",
825 __func__, event_code, state); 946 __func__, event_code, req_state_name(state));
826 947
827 return SCI_FAILURE_INVALID_STATE; 948 return SCI_FAILURE_INVALID_STATE;
828 } 949 }
@@ -1938,59 +2059,6 @@ sci_io_request_frame_handler(struct isci_request *ireq,
1938 return status; 2059 return status;
1939 } 2060 }
1940 2061
1941 case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
1942 struct dev_to_host_fis *frame_header;
1943 u32 *frame_buffer;
1944
1945 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1946 frame_index,
1947 (void **)&frame_header);
1948 if (status != SCI_SUCCESS) {
1949 dev_err(&ihost->pdev->dev,
1950 "%s: SCIC IO Request 0x%p could not get frame "
1951 "header for frame index %d, status %x\n",
1952 __func__,
1953 stp_req,
1954 frame_index,
1955 status);
1956 return status;
1957 }
1958
1959 switch (frame_header->fis_type) {
1960 case FIS_REGD2H:
1961 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1962 frame_index,
1963 (void **)&frame_buffer);
1964
1965 sci_controller_copy_sata_response(&ireq->stp.rsp,
1966 frame_header,
1967 frame_buffer);
1968
1969 /* The command has completed with error */
1970 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1971 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
1972 break;
1973
1974 default:
1975 dev_warn(&ihost->pdev->dev,
1976 "%s: IO Request:0x%p Frame Id:%d protocol "
1977 "violation occurred\n",
1978 __func__,
1979 stp_req,
1980 frame_index);
1981
1982 ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
1983 ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
1984 break;
1985 }
1986
1987 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1988
1989 /* Frame has been decoded return it to the controller */
1990 sci_controller_release_frame(ihost, frame_index);
1991
1992 return status;
1993 }
1994 case SCI_REQ_ATAPI_WAIT_PIO_SETUP: { 2062 case SCI_REQ_ATAPI_WAIT_PIO_SETUP: {
1995 struct sas_task *task = isci_request_access_task(ireq); 2063 struct sas_task *task = isci_request_access_task(ireq);
1996 2064
@@ -2088,57 +2156,6 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
2088 return status; 2156 return status;
2089} 2157}
2090 2158
2091static enum sci_status
2092stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq,
2093 u32 completion_code)
2094{
2095 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2096 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2097 ireq->scu_status = SCU_TASK_DONE_GOOD;
2098 ireq->sci_status = SCI_SUCCESS;
2099 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
2100 break;
2101
2102 default:
2103 /*
2104 * All other completion status cause the IO to be complete.
2105 * If a NAK was received, then it is up to the user to retry
2106 * the request.
2107 */
2108 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
2109 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
2110 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2111 break;
2112 }
2113
2114 return SCI_SUCCESS;
2115}
2116
2117static enum sci_status
2118stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
2119 u32 completion_code)
2120{
2121 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2122 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2123 ireq->scu_status = SCU_TASK_DONE_GOOD;
2124 ireq->sci_status = SCI_SUCCESS;
2125 sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
2126 break;
2127
2128 default:
2129 /* All other completion status cause the IO to be complete. If
2130 * a NAK was received, then it is up to the user to retry the
2131 * request.
2132 */
2133 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
2134 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
2135 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2136 break;
2137 }
2138
2139 return SCI_SUCCESS;
2140}
2141
2142static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code, 2159static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
2143 enum sci_base_request_states next) 2160 enum sci_base_request_states next)
2144{ 2161{
@@ -2284,14 +2301,6 @@ sci_io_request_tc_completion(struct isci_request *ireq,
2284 case SCI_REQ_STP_PIO_DATA_OUT: 2301 case SCI_REQ_STP_PIO_DATA_OUT:
2285 return pio_data_out_tx_done_tc_event(ireq, completion_code); 2302 return pio_data_out_tx_done_tc_event(ireq, completion_code);
2286 2303
2287 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
2288 return stp_request_soft_reset_await_h2d_asserted_tc_event(ireq,
2289 completion_code);
2290
2291 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
2292 return stp_request_soft_reset_await_h2d_diagnostic_tc_event(ireq,
2293 completion_code);
2294
2295 case SCI_REQ_ABORTING: 2304 case SCI_REQ_ABORTING:
2296 return request_aborting_state_tc_event(ireq, 2305 return request_aborting_state_tc_event(ireq,
2297 completion_code); 2306 completion_code);
@@ -2308,12 +2317,8 @@ sci_io_request_tc_completion(struct isci_request *ireq,
2308 return atapi_data_tc_completion_handler(ireq, completion_code); 2317 return atapi_data_tc_completion_handler(ireq, completion_code);
2309 2318
2310 default: 2319 default:
2311 dev_warn(&ihost->pdev->dev, 2320 dev_warn(&ihost->pdev->dev, "%s: %x in wrong state %s\n",
2312 "%s: SCIC IO Request given task completion " 2321 __func__, completion_code, req_state_name(state));
2313 "notification %x while in wrong state %d\n",
2314 __func__,
2315 completion_code,
2316 state);
2317 return SCI_FAILURE_INVALID_STATE; 2322 return SCI_FAILURE_INVALID_STATE;
2318 } 2323 }
2319} 2324}
@@ -3065,10 +3070,6 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
3065 */ 3070 */
3066 if (!task && dev->dev_type == SAS_END_DEV) { 3071 if (!task && dev->dev_type == SAS_END_DEV) {
3067 state = SCI_REQ_TASK_WAIT_TC_COMP; 3072 state = SCI_REQ_TASK_WAIT_TC_COMP;
3068 } else if (!task &&
3069 (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
3070 isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
3071 state = SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED;
3072 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { 3073 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
3073 state = SCI_REQ_SMP_WAIT_RESP; 3074 state = SCI_REQ_SMP_WAIT_RESP;
3074 } else if (task && sas_protocol_ata(task->task_proto) && 3075 } else if (task && sas_protocol_ata(task->task_proto) &&
@@ -3125,31 +3126,6 @@ static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_ba
3125 ireq->target_device->working_request = ireq; 3126 ireq->target_device->working_request = ireq;
3126} 3127}
3127 3128
3128static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
3129{
3130 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3131
3132 ireq->target_device->working_request = ireq;
3133}
3134
3135static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
3136{
3137 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
3138 struct scu_task_context *tc = ireq->tc;
3139 struct host_to_dev_fis *h2d_fis;
3140 enum sci_status status;
3141
3142 /* Clear the SRST bit */
3143 h2d_fis = &ireq->stp.cmd;
3144 h2d_fis->control = 0;
3145
3146 /* Clear the TC control bit */
3147 tc->control_frame = 0;
3148
3149 status = sci_controller_continue_io(ireq);
3150 WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
3151}
3152
3153static const struct sci_base_state sci_request_state_table[] = { 3129static const struct sci_base_state sci_request_state_table[] = {
3154 [SCI_REQ_INIT] = { }, 3130 [SCI_REQ_INIT] = { },
3155 [SCI_REQ_CONSTRUCTED] = { }, 3131 [SCI_REQ_CONSTRUCTED] = { },
@@ -3168,13 +3144,6 @@ static const struct sci_base_state sci_request_state_table[] = {
3168 [SCI_REQ_STP_PIO_DATA_OUT] = { }, 3144 [SCI_REQ_STP_PIO_DATA_OUT] = { },
3169 [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { }, 3145 [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
3170 [SCI_REQ_STP_UDMA_WAIT_D2H] = { }, 3146 [SCI_REQ_STP_UDMA_WAIT_D2H] = { },
3171 [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
3172 .enter_state = sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
3173 },
3174 [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
3175 .enter_state = sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
3176 },
3177 [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
3178 [SCI_REQ_TASK_WAIT_TC_COMP] = { }, 3147 [SCI_REQ_TASK_WAIT_TC_COMP] = { },
3179 [SCI_REQ_TASK_WAIT_TC_RESP] = { }, 3148 [SCI_REQ_TASK_WAIT_TC_RESP] = { },
3180 [SCI_REQ_SMP_WAIT_RESP] = { }, 3149 [SCI_REQ_SMP_WAIT_RESP] = { },
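
The table entries above feed the driver's table-driven state machines: each state may supply optional enter/exit hooks that run on transition, which is why dropping the three soft-reset states is just a matter of deleting their rows. A minimal sketch of the dispatch, assuming the conventional sci_base_state layout (the field names here are assumptions for illustration):

struct sci_base_state {
	void (*enter_state)(struct sci_base_state_machine *sm);
	void (*exit_state)(struct sci_base_state_machine *sm);
};

static void change_state_sketch(struct sci_base_state_machine *sm, u32 next)
{
	const struct sci_base_state *table = sm->state_table;

	if (table[sm->current_state_id].exit_state)
		table[sm->current_state_id].exit_state(sm);
	sm->previous_state_id = sm->current_state_id;
	sm->current_state_id = next;
	if (table[next].enter_state)
		table[next].enter_state(sm);
}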
@@ -3649,8 +3618,7 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide
3649 /* Cause this task to be scheduled in the SCSI error 3618 /* Cause this task to be scheduled in the SCSI error
3650 * handler thread. 3619 * handler thread.
3651 */ 3620 */
3652 isci_execpath_callback(ihost, task, 3621 sas_task_abort(task);
3653 sas_task_abort);
3654 3622
3655 /* Change the status, since we are holding 3623 /* Change the status, since we are holding
3656 * the I/O until it is managed by the SCSI 3624 * the I/O until it is managed by the SCSI
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
index be38933dd6d..057f2378452 100644
--- a/drivers/scsi/isci/request.h
+++ b/drivers/scsi/isci/request.h
@@ -182,138 +182,103 @@ static inline struct isci_request *to_ireq(struct isci_stp_request *stp_req)
182} 182}
183 183
184/** 184/**
185 * enum sci_base_request_states - This enumeration depicts all the states for 185 * enum sci_base_request_states - request state machine states
186 * the common request state machine.
187 * 186 *
187 * @SCI_REQ_INIT: Simply the initial state for the base request state machine.
188 * 188 *
189 * @SCI_REQ_CONSTRUCTED: This state indicates that the request has been
190 * constructed. This state is entered from the INITIAL state.
191 *
192 * @SCI_REQ_STARTED: This state indicates that the request has been started.
193 * This state is entered from the CONSTRUCTED state.
194 *
195 * @SCI_REQ_STP_UDMA_WAIT_TC_COMP:
196 * @SCI_REQ_STP_UDMA_WAIT_D2H:
197 * @SCI_REQ_STP_NON_DATA_WAIT_H2D:
198 * @SCI_REQ_STP_NON_DATA_WAIT_D2H:
199 *
200 * @SCI_REQ_STP_PIO_WAIT_H2D: While in this state the IO request object is
201 * waiting for the TC completion notification for the H2D Register FIS
202 *
203 * @SCI_REQ_STP_PIO_WAIT_FRAME: While in this state the IO request object is
204 * waiting for either a PIO Setup FIS or a D2H register FIS. The type of frame
205 * received is based on the result of the prior frame and line conditions.
206 *
207 * @SCI_REQ_STP_PIO_DATA_IN: While in this state the IO request object is
208 * waiting for a DATA frame from the device.
209 *
210 * @SCI_REQ_STP_PIO_DATA_OUT: While in this state the IO request object is
211 * waiting to transmit the next data frame to the device.
212 *
213 * @SCI_REQ_ATAPI_WAIT_H2D: While in this state the IO request object is
214 * waiting for the TC completion notification for the H2D Register FIS
215 *
216 * @SCI_REQ_ATAPI_WAIT_PIO_SETUP: While in this state the IO request object is
217 * waiting for a PIO Setup FIS.
218 *
219 * @SCI_REQ_ATAPI_WAIT_D2H: The non-data IO transitions to this state
220 * after receiving a TC completion. While in this state the IO request
221 * object is waiting for the D2H status frame as a UF.
222 *
223 * @SCI_REQ_ATAPI_WAIT_TC_COMP: When transmitting raw frames hardware reports
224 * task context completion after every frame submission, so in the
225 * non-accelerated case we need to expect the completion for the "cdb" frame.
226 *
227 * @SCI_REQ_TASK_WAIT_TC_COMP: The AWAIT_TC_COMPLETION sub-state indicates that
228 * the started raw task management request is waiting for the transmission of
229 * the initial frame (i.e. command, task, etc.).
230 *
231 * @SCI_REQ_TASK_WAIT_TC_RESP: This sub-state indicates that the started task
232 * management request is waiting for the reception of an unsolicited frame
233 * (i.e. response IU).
234 *
235 * @SCI_REQ_SMP_WAIT_RESP: This sub-state indicates that the started SMP
236 * request is waiting for the reception of an unsolicited frame
237 * (i.e. response IU).
238 *
239 * @SCI_REQ_SMP_WAIT_TC_COMP: The AWAIT_TC_COMPLETION sub-state indicates that
240 * the started SMP request is waiting for the transmission of the initial frame
241 * (i.e. command, task, etc.).
242 *
243 * @SCI_REQ_COMPLETED: This state indicates that the request has completed.
244 * This state is entered from the STARTED state. This state is entered from the
245 * ABORTING state.
246 *
247 * @SCI_REQ_ABORTING: This state indicates that the request is in the process
248 * of being terminated/aborted. This state is entered from the CONSTRUCTED
249 * state. This state is entered from the STARTED state.
250 *
251 * @SCI_REQ_FINAL: Simply the final state for the base request state machine.
189 */ 252 */
190enum sci_base_request_states { 253#define REQUEST_STATES {\
191 /* 254 C(REQ_INIT),\
192 * Simply the initial state for the base request state machine. 255 C(REQ_CONSTRUCTED),\
193 */ 256 C(REQ_STARTED),\
194 SCI_REQ_INIT, 257 C(REQ_STP_UDMA_WAIT_TC_COMP),\
195 258 C(REQ_STP_UDMA_WAIT_D2H),\
196 /* 259 C(REQ_STP_NON_DATA_WAIT_H2D),\
197 * This state indicates that the request has been constructed. 260 C(REQ_STP_NON_DATA_WAIT_D2H),\
198 * This state is entered from the INITIAL state. 261 C(REQ_STP_PIO_WAIT_H2D),\
199 */ 262 C(REQ_STP_PIO_WAIT_FRAME),\
200 SCI_REQ_CONSTRUCTED, 263 C(REQ_STP_PIO_DATA_IN),\
201 264 C(REQ_STP_PIO_DATA_OUT),\
202 /* 265 C(REQ_ATAPI_WAIT_H2D),\
203 * This state indicates that the request has been started. This state 266 C(REQ_ATAPI_WAIT_PIO_SETUP),\
204 * is entered from the CONSTRUCTED state. 267 C(REQ_ATAPI_WAIT_D2H),\
205 */ 268 C(REQ_ATAPI_WAIT_TC_COMP),\
206 SCI_REQ_STARTED, 269 C(REQ_TASK_WAIT_TC_COMP),\
207 270 C(REQ_TASK_WAIT_TC_RESP),\
208 SCI_REQ_STP_UDMA_WAIT_TC_COMP, 271 C(REQ_SMP_WAIT_RESP),\
209 SCI_REQ_STP_UDMA_WAIT_D2H, 272 C(REQ_SMP_WAIT_TC_COMP),\
210 273 C(REQ_COMPLETED),\
211 SCI_REQ_STP_NON_DATA_WAIT_H2D, 274 C(REQ_ABORTING),\
212 SCI_REQ_STP_NON_DATA_WAIT_D2H, 275 C(REQ_FINAL),\
213 276 }
214 SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED, 277#undef C
215 SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG, 278#define C(a) SCI_##a
216 SCI_REQ_STP_SOFT_RESET_WAIT_D2H, 279enum sci_base_request_states REQUEST_STATES;
217 280#undef C
218 /* 281const char *req_state_name(enum sci_base_request_states state);
219 * While in this state the IO request object is waiting for the TC
220 * completion notification for the H2D Register FIS
221 */
222 SCI_REQ_STP_PIO_WAIT_H2D,
223
224 /*
225 * While in this state the IO request object is waiting for either a
226 * PIO Setup FIS or a D2H register FIS. The type of frame received is
227 * based on the result of the prior frame and line conditions.
228 */
229 SCI_REQ_STP_PIO_WAIT_FRAME,
230
231 /*
232 * While in this state the IO request object is waiting for a DATA
233 * frame from the device.
234 */
235 SCI_REQ_STP_PIO_DATA_IN,
236
237 /*
238 * While in this state the IO request object is waiting to transmit
239 * the next data frame to the device.
240 */
241 SCI_REQ_STP_PIO_DATA_OUT,
242
243 /*
244 * While in this state the IO request object is waiting for the TC
245 * completion notification for the H2D Register FIS
246 */
247 SCI_REQ_ATAPI_WAIT_H2D,
248
249 /*
250 * While in this state the IO request object is waiting for either a
251 * PIO Setup.
252 */
253 SCI_REQ_ATAPI_WAIT_PIO_SETUP,
254
255 /*
256 * The non-data IO transit to this state in this state after receiving
257 * TC completion. While in this state IO request object is waiting for
258 * D2H status frame as UF.
259 */
260 SCI_REQ_ATAPI_WAIT_D2H,
261
262 /*
263 * When transmitting raw frames hardware reports task context completion
264 * after every frame submission, so in the non-accelerated case we need
265 * to expect the completion for the "cdb" frame.
266 */
267 SCI_REQ_ATAPI_WAIT_TC_COMP,
268
269 /*
270 * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
271 * task management request is waiting for the transmission of the
272 * initial frame (i.e. command, task, etc.).
273 */
274 SCI_REQ_TASK_WAIT_TC_COMP,
275
276 /*
277 * This sub-state indicates that the started task management request
278 * is waiting for the reception of an unsolicited frame
279 * (i.e. response IU).
280 */
281 SCI_REQ_TASK_WAIT_TC_RESP,
282
283 /*
284 * This sub-state indicates that the started task management request
285 * is waiting for the reception of an unsolicited frame
286 * (i.e. response IU).
287 */
288 SCI_REQ_SMP_WAIT_RESP,
289
290 /*
291 * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP
292 * request is waiting for the transmission of the initial frame
293 * (i.e. command, task, etc.).
294 */
295 SCI_REQ_SMP_WAIT_TC_COMP,
296
297 /*
298 * This state indicates that the request has completed.
299 * This state is entered from the STARTED state. This state is entered
300 * from the ABORTING state.
301 */
302 SCI_REQ_COMPLETED,
303
304 /*
305 * This state indicates that the request is in the process of being
306 * terminated/aborted.
307 * This state is entered from the CONSTRUCTED state.
308 * This state is entered from the STARTED state.
309 */
310 SCI_REQ_ABORTING,
311
312 /*
313 * Simply the final state for the base request state machine.
314 */
315 SCI_REQ_FINAL,
316};
317 282
318enum sci_status sci_request_start(struct isci_request *ireq); 283enum sci_status sci_request_start(struct isci_request *ireq);
319enum sci_status sci_io_request_terminate(struct isci_request *ireq); 284enum sci_status sci_io_request_terminate(struct isci_request *ireq);
@@ -446,10 +411,7 @@ sci_task_request_construct(struct isci_host *ihost,
446 struct isci_remote_device *idev, 411 struct isci_remote_device *idev,
447 u16 io_tag, 412 u16 io_tag,
448 struct isci_request *ireq); 413 struct isci_request *ireq);
449enum sci_status 414enum sci_status sci_task_request_construct_ssp(struct isci_request *ireq);
450sci_task_request_construct_ssp(struct isci_request *ireq);
451enum sci_status
452sci_task_request_construct_sata(struct isci_request *ireq);
453void sci_smp_request_copy_response(struct isci_request *ireq); 415void sci_smp_request_copy_response(struct isci_request *ireq);
454 416
455static inline int isci_task_is_ncq_recovery(struct sas_task *task) 417static inline int isci_task_is_ncq_recovery(struct sas_task *task)
diff --git a/drivers/scsi/isci/scu_task_context.h b/drivers/scsi/isci/scu_task_context.h
index 7df87d92328..869a979eb5b 100644
--- a/drivers/scsi/isci/scu_task_context.h
+++ b/drivers/scsi/isci/scu_task_context.h
@@ -866,9 +866,9 @@ struct scu_task_context {
866 struct transport_snapshot snapshot; /* read only set to 0 */ 866 struct transport_snapshot snapshot; /* read only set to 0 */
867 867
868 /* OFFSET 0x5C */ 868 /* OFFSET 0x5C */
869 u32 block_protection_enable:1; 869 u32 blk_prot_en:1;
870 u32 block_size:2; 870 u32 blk_sz:2;
871 u32 block_protection_function:2; 871 u32 blk_prot_func:2;
872 u32 reserved_5C_0:9; 872 u32 reserved_5C_0:9;
873 u32 active_sgl_element:2; /* read only set to 0 */ 873 u32 active_sgl_element:2; /* read only set to 0 */
874 u32 sgl_exhausted:1; /* read only set to 0 */ 874 u32 sgl_exhausted:1; /* read only set to 0 */
@@ -896,33 +896,56 @@ struct scu_task_context {
896 u32 reserved_C4_CC[3]; 896 u32 reserved_C4_CC[3];
897 897
898 /* OFFSET 0xD0 */ 898 /* OFFSET 0xD0 */
899 u32 intermediate_crc_value:16; 899 u32 interm_crc_val:16;
900 u32 initial_crc_seed:16; 900 u32 init_crc_seed:16;
901 901
902 /* OFFSET 0xD4 */ 902 /* OFFSET 0xD4 */
903 u32 application_tag_for_verify:16; 903 u32 app_tag_verify:16;
904 u32 application_tag_for_generate:16; 904 u32 app_tag_gen:16;
905 905
906 /* OFFSET 0xD8 */ 906 /* OFFSET 0xD8 */
907 u32 reference_tag_seed_for_verify_function; 907 u32 ref_tag_seed_verify;
908 908
909 /* OFFSET 0xDC */ 909 /* OFFSET 0xDC */
910 u32 reserved_DC; 910 u32 UD_bytes_immed_val:13;
911 u32 reserved_DC_0:3;
912 u32 DIF_bytes_immed_val:4;
913 u32 reserved_DC_1:12;
911 914
912 /* OFFSET 0xE0 */ 915 /* OFFSET 0xE0 */
913 u32 reserved_E0_0:16; 916 u32 bgc_blk_sz:13;
914 u32 application_tag_mask_for_generate:16; 917 u32 reserved_E0_0:3;
918 u32 app_tag_gen_mask:16;
915 919
916 /* OFFSET 0xE4 */ 920 /* OFFSET 0xE4 */
917 u32 block_protection_control:16; 921 union {
918 u32 application_tag_mask_for_verify:16; 922 u16 bgctl;
923 struct {
924 u16 crc_verify:1;
925 u16 app_tag_chk:1;
926 u16 ref_tag_chk:1;
927 u16 op:2;
928 u16 legacy:1;
929 u16 invert_crc_seed:1;
930 u16 ref_tag_gen:1;
931 u16 fixed_ref_tag:1;
932 u16 invert_crc:1;
933 u16 app_ref_f_detect:1;
934 u16 uninit_dif_check_err:1;
935 u16 uninit_dif_bypass:1;
936 u16 app_f_detect:1;
937 u16 reserved_0:2;
938 } bgctl_f;
939 };
940
941 u16 app_tag_verify_mask;
919 942
920 /* OFFSET 0xE8 */ 943 /* OFFSET 0xE8 */
921 u32 block_protection_error:8; 944 u32 blk_guard_err:8;
922 u32 reserved_E8_0:24; 945 u32 reserved_E8_0:24;
923 946
924 /* OFFSET 0xEC */ 947 /* OFFSET 0xEC */
925 u32 reference_tag_seed_for_verify; 948 u32 ref_tag_seed_gen;
926 949
927 /* OFFSET 0xF0 */ 950 /* OFFSET 0xF0 */
928 u32 intermediate_crc_valid_snapshot:16; 951 u32 intermediate_crc_valid_snapshot:16;
@@ -937,6 +960,6 @@ struct scu_task_context {
937 /* OFFSET 0xFC */ 960 /* OFFSET 0xFC */
938 u32 reference_tag_seed_for_generate_function_snapshot; 961 u32 reference_tag_seed_for_generate_function_snapshot;
939 962
940}; 963} __packed;
941 964
942#endif /* _SCU_TASK_CONTEXT_H_ */ 965#endif /* _SCU_TASK_CONTEXT_H_ */
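The rewritten offset 0xE4 exposes the block-guard control word both as a raw
u16 (bgctl) and as named bits (bgctl_f) through a union, and the whole struct
becomes __packed so the compiler cannot pad the hardware layout. A minimal
standalone sketch of that pattern (bitfield ordering is compiler- and
ABI-dependent, which is exactly why the real hardware-facing struct needs a
fixed layout):

#include <stdint.h>
#include <stdio.h>

/* Standalone sketch of the bgctl union; only a few of the diff's fields
 * are shown, and bit order here is whatever this compiler picks. */
union bgctl {
        uint16_t raw;
        struct {
                uint16_t crc_verify:1;
                uint16_t app_tag_chk:1;
                uint16_t ref_tag_chk:1;
                uint16_t op:2;
                uint16_t rest:11;
        } f;
};

int main(void)
{
        union bgctl bg = { .raw = 0 };

        bg.f.crc_verify = 1;    /* set individual protection checks... */
        bg.f.ref_tag_chk = 1;
        /* ...or read/write the whole control word at once */
        printf("raw = 0x%04x\n", (unsigned)bg.raw);
        return 0;
}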
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index f5a3f7d2bda..374254ede9d 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -96,8 +96,7 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
96 __func__, task, response, status); 96 __func__, task, response, status);
97 97
98 task->lldd_task = NULL; 98 task->lldd_task = NULL;
99 99 task->task_done(task);
100 isci_execpath_callback(ihost, task, task->task_done);
101 break; 100 break;
102 101
103 case isci_perform_aborted_io_completion: 102 case isci_perform_aborted_io_completion:
@@ -117,8 +116,7 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
117 "%s: Error - task = %p, response=%d, " 116 "%s: Error - task = %p, response=%d, "
118 "status=%d\n", 117 "status=%d\n",
119 __func__, task, response, status); 118 __func__, task, response, status);
120 119 sas_task_abort(task);
121 isci_execpath_callback(ihost, task, sas_task_abort);
122 break; 120 break;
123 121
124 default: 122 default:
@@ -249,46 +247,6 @@ int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
249 return 0; 247 return 0;
250} 248}
251 249
252static enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq)
253{
254 struct isci_tmf *isci_tmf;
255 enum sci_status status;
256
257 if (!test_bit(IREQ_TMF, &ireq->flags))
258 return SCI_FAILURE;
259
260 isci_tmf = isci_request_access_tmf(ireq);
261
262 switch (isci_tmf->tmf_code) {
263
264 case isci_tmf_sata_srst_high:
265 case isci_tmf_sata_srst_low: {
266 struct host_to_dev_fis *fis = &ireq->stp.cmd;
267
268 memset(fis, 0, sizeof(*fis));
269
270 fis->fis_type = 0x27;
271 fis->flags &= ~0x80;
272 fis->flags &= 0xF0;
273 if (isci_tmf->tmf_code == isci_tmf_sata_srst_high)
274 fis->control |= ATA_SRST;
275 else
276 fis->control &= ~ATA_SRST;
277 break;
278 }
279 /* other management commands go here... */
280 default:
281 return SCI_FAILURE;
282 }
283
284 /* core builds the protocol specific request
285 * based on the h2d fis.
286 */
287 status = sci_task_request_construct_sata(ireq);
288
289 return status;
290}
291
292static struct isci_request *isci_task_request_build(struct isci_host *ihost, 250static struct isci_request *isci_task_request_build(struct isci_host *ihost,
293 struct isci_remote_device *idev, 251 struct isci_remote_device *idev,
294 u16 tag, struct isci_tmf *isci_tmf) 252 u16 tag, struct isci_tmf *isci_tmf)
@@ -328,13 +286,6 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost,
328 return NULL; 286 return NULL;
329 } 287 }
330 288
331 if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
332 isci_tmf->proto = SAS_PROTOCOL_SATA;
333 status = isci_sata_management_task_request_build(ireq);
334
335 if (status != SCI_SUCCESS)
336 return NULL;
337 }
338 return ireq; 289 return ireq;
339} 290}
340 291
@@ -873,53 +824,20 @@ static int isci_task_send_lu_reset_sas(
873 return ret; 824 return ret;
874} 825}
875 826
876static int isci_task_send_lu_reset_sata(struct isci_host *ihost, 827int isci_task_lu_reset(struct domain_device *dev, u8 *lun)
877 struct isci_remote_device *idev, u8 *lun)
878{
879 int ret = TMF_RESP_FUNC_FAILED;
880 struct isci_tmf tmf;
881
882 /* Send the soft reset to the target */
883 #define ISCI_SRST_TIMEOUT_MS 25000 /* 25 second timeout. */
884 isci_task_build_tmf(&tmf, isci_tmf_sata_srst_high, NULL, NULL);
885
886 ret = isci_task_execute_tmf(ihost, idev, &tmf, ISCI_SRST_TIMEOUT_MS);
887
888 if (ret != TMF_RESP_FUNC_COMPLETE) {
889 dev_dbg(&ihost->pdev->dev,
890 "%s: Assert SRST failed (%p) = %x",
891 __func__, idev, ret);
892
893 /* Return the failure so that the LUN reset is escalated
894 * to a target reset.
895 */
896 }
897 return ret;
898}
899
900/**
901 * isci_task_lu_reset() - This function is one of the SAS Domain Template
902 * functions. This is one of the Task Management functions called by libsas,
903 * to reset the given lun. Note the assumption that while this call is
904 * executing, no I/O will be sent by the host to the device.
905 * @lun: This parameter specifies the lun to be reset.
906 *
907 * status, zero indicates success.
908 */
909int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
910{ 828{
911 struct isci_host *isci_host = dev_to_ihost(domain_device); 829 struct isci_host *isci_host = dev_to_ihost(dev);
912 struct isci_remote_device *isci_device; 830 struct isci_remote_device *isci_device;
913 unsigned long flags; 831 unsigned long flags;
914 int ret; 832 int ret;
915 833
916 spin_lock_irqsave(&isci_host->scic_lock, flags); 834 spin_lock_irqsave(&isci_host->scic_lock, flags);
917 isci_device = isci_lookup_device(domain_device); 835 isci_device = isci_lookup_device(dev);
918 spin_unlock_irqrestore(&isci_host->scic_lock, flags); 836 spin_unlock_irqrestore(&isci_host->scic_lock, flags);
919 837
920 dev_dbg(&isci_host->pdev->dev, 838 dev_dbg(&isci_host->pdev->dev,
921 "%s: domain_device=%p, isci_host=%p; isci_device=%p\n", 839 "%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
922 __func__, domain_device, isci_host, isci_device); 840 __func__, dev, isci_host, isci_device);
923 841
924 if (!isci_device) { 842 if (!isci_device) {
925 /* If the device is gone, stop the escalations. */ 843 /* If the device is gone, stop the escalations. */
@@ -928,11 +846,11 @@ int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
928 ret = TMF_RESP_FUNC_COMPLETE; 846 ret = TMF_RESP_FUNC_COMPLETE;
929 goto out; 847 goto out;
930 } 848 }
931 set_bit(IDEV_EH, &isci_device->flags);
932 849
933 /* Send the task management part of the reset. */ 850 /* Send the task management part of the reset. */
934 if (sas_protocol_ata(domain_device->tproto)) { 851 if (dev_is_sata(dev)) {
935 ret = isci_task_send_lu_reset_sata(isci_host, isci_device, lun); 852 sas_ata_schedule_reset(dev);
853 ret = TMF_RESP_FUNC_COMPLETE;
936 } else 854 } else
937 ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun); 855 ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
938 856
@@ -1062,9 +980,6 @@ int isci_task_abort_task(struct sas_task *task)
1062 "%s: dev = %p, task = %p, old_request == %p\n", 980 "%s: dev = %p, task = %p, old_request == %p\n",
1063 __func__, isci_device, task, old_request); 981 __func__, isci_device, task, old_request);
1064 982
1065 if (isci_device)
1066 set_bit(IDEV_EH, &isci_device->flags);
1067
1068 /* Device reset conditions signalled in task_state_flags are the 983 /* Device reset conditions signalled in task_state_flags are the
1069 * responsibility of libsas to observe at the start of the error 984
1070 * handler thread. 985 * handler thread.
@@ -1332,29 +1247,35 @@ isci_task_request_complete(struct isci_host *ihost,
1332} 1247}
1333 1248
1334static int isci_reset_device(struct isci_host *ihost, 1249static int isci_reset_device(struct isci_host *ihost,
1250 struct domain_device *dev,
1335 struct isci_remote_device *idev) 1251 struct isci_remote_device *idev)
1336{ 1252{
1337 struct sas_phy *phy = sas_find_local_phy(idev->domain_dev);
1338 enum sci_status status;
1339 unsigned long flags;
1340 int rc; 1253 int rc;
1254 unsigned long flags;
1255 enum sci_status status;
1256 struct sas_phy *phy = sas_get_local_phy(dev);
1257 struct isci_port *iport = dev->port->lldd_port;
1341 1258
1342 dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev); 1259 dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
1343 1260
1344 spin_lock_irqsave(&ihost->scic_lock, flags); 1261 spin_lock_irqsave(&ihost->scic_lock, flags);
1345 status = sci_remote_device_reset(idev); 1262 status = sci_remote_device_reset(idev);
1346 if (status != SCI_SUCCESS) { 1263 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1347 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1348 1264
1265 if (status != SCI_SUCCESS) {
1349 dev_dbg(&ihost->pdev->dev, 1266 dev_dbg(&ihost->pdev->dev,
1350 "%s: sci_remote_device_reset(%p) returned %d!\n", 1267 "%s: sci_remote_device_reset(%p) returned %d!\n",
1351 __func__, idev, status); 1268 __func__, idev, status);
1352 1269 rc = TMF_RESP_FUNC_FAILED;
1353 return TMF_RESP_FUNC_FAILED; 1270 goto out;
1354 } 1271 }
1355 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1356 1272
1357 rc = sas_phy_reset(phy, true); 1273 if (scsi_is_sas_phy_local(phy)) {
1274 struct isci_phy *iphy = &ihost->phys[phy->number];
1275
1276 rc = isci_port_perform_hard_reset(ihost, iport, iphy);
1277 } else
1278 rc = sas_phy_reset(phy, !dev_is_sata(dev));
1358 1279
1359 /* Terminate in-progress I/O now. */ 1280 /* Terminate in-progress I/O now. */
1360 isci_remote_device_nuke_requests(ihost, idev); 1281 isci_remote_device_nuke_requests(ihost, idev);
@@ -1371,7 +1292,8 @@ static int isci_reset_device(struct isci_host *ihost,
1371 } 1292 }
1372 1293
1373 dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev); 1294 dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
1374 1295 out:
1296 sas_put_local_phy(phy);
1375 return rc; 1297 return rc;
1376} 1298}
1377 1299
@@ -1386,35 +1308,15 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev)
1386 idev = isci_lookup_device(dev); 1308 idev = isci_lookup_device(dev);
1387 spin_unlock_irqrestore(&ihost->scic_lock, flags); 1309 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1388 1310
1389 if (!idev || !test_bit(IDEV_EH, &idev->flags)) {
1390 ret = TMF_RESP_FUNC_COMPLETE;
1391 goto out;
1392 }
1393
1394 ret = isci_reset_device(ihost, idev);
1395 out:
1396 isci_put_device(idev);
1397 return ret;
1398}
1399
1400int isci_bus_reset_handler(struct scsi_cmnd *cmd)
1401{
1402 struct domain_device *dev = sdev_to_domain_dev(cmd->device);
1403 struct isci_host *ihost = dev_to_ihost(dev);
1404 struct isci_remote_device *idev;
1405 unsigned long flags;
1406 int ret;
1407
1408 spin_lock_irqsave(&ihost->scic_lock, flags);
1409 idev = isci_lookup_device(dev);
1410 spin_unlock_irqrestore(&ihost->scic_lock, flags);
1411
1412 if (!idev) { 1311 if (!idev) {
1312 /* XXX: need to cleanup any ireqs targeting this
1313 * domain_device
1314 */
1413 ret = TMF_RESP_FUNC_COMPLETE; 1315 ret = TMF_RESP_FUNC_COMPLETE;
1414 goto out; 1316 goto out;
1415 } 1317 }
1416 1318
1417 ret = isci_reset_device(ihost, idev); 1319 ret = isci_reset_device(ihost, dev, idev);
1418 out: 1320 out:
1419 isci_put_device(idev); 1321 isci_put_device(idev);
1420 return ret; 1322 return ret;
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index 1b27b3797c6..7b6d0e32fd9 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -86,8 +86,6 @@ enum isci_tmf_function_codes {
86 isci_tmf_func_none = 0, 86 isci_tmf_func_none = 0,
87 isci_tmf_ssp_task_abort = TMF_ABORT_TASK, 87 isci_tmf_ssp_task_abort = TMF_ABORT_TASK,
88 isci_tmf_ssp_lun_reset = TMF_LU_RESET, 88 isci_tmf_ssp_lun_reset = TMF_LU_RESET,
89 isci_tmf_sata_srst_high = TMF_LU_RESET + 0x100, /* Non SCSI */
90 isci_tmf_sata_srst_low = TMF_LU_RESET + 0x101 /* Non SCSI */
91}; 89};
92/** 90/**
93 * struct isci_tmf - This class represents the task management object which 91 * struct isci_tmf - This class represents the task management object which
@@ -210,8 +208,6 @@ int isci_queuecommand(
210 struct scsi_cmnd *scsi_cmd, 208 struct scsi_cmnd *scsi_cmd,
211 void (*donefunc)(struct scsi_cmnd *)); 209 void (*donefunc)(struct scsi_cmnd *));
212 210
213int isci_bus_reset_handler(struct scsi_cmnd *cmd);
214
215/** 211/**
216 * enum isci_completion_selection - This enum defines the possible actions to 212 * enum isci_completion_selection - This enum defines the possible actions to
217 * take with respect to a given request's notification back to libsas. 213 * take with respect to a given request's notification back to libsas.
@@ -321,40 +317,4 @@ isci_task_set_completion_status(
321 return task_notification_selection; 317 return task_notification_selection;
322 318
323} 319}
324/**
325* isci_execpath_callback() - This function is called from the task
326* execute path when the task needs to callback libsas about the submit-time
327* task failure. The callback occurs either through the task's done function
328* or through sas_task_abort. In the case of regular non-discovery SATA/STP I/O
329* requests, libsas takes the host lock before calling execute task. Therefore
330* in this situation the host lock must be managed before calling the func.
331*
332* @ihost: This parameter is the controller to which the I/O request was sent.
333* @task: This parameter is the I/O request.
334* @func: This parameter is the function to call in the correct context.
335* @status: This parameter is the status code for the completed task.
336*
337*/
338static inline void isci_execpath_callback(struct isci_host *ihost,
339 struct sas_task *task,
340 void (*func)(struct sas_task *))
341{
342 struct domain_device *dev = task->dev;
343
344 if (dev_is_sata(dev) && task->uldd_task) {
345 unsigned long flags;
346
347 /* Since we are still in the submit path, and since
348 * libsas takes the host lock on behalf of SATA
349 * devices before I/O starts (in the non-discovery case),
350 * we need to unlock before we can call the callback function.
351 */
352 raw_local_irq_save(flags);
353 spin_unlock(dev->sata_dev.ap->lock);
354 func(task);
355 spin_lock(dev->sata_dev.ap->lock);
356 raw_local_irq_restore(flags);
357 } else
358 func(task);
359}
360#endif /* !defined(_SCI_TASK_H_) */ 320#endif /* !defined(_SCI_TASK_H_) */
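The helper removed above existed only to drop the host lock that libsas takes
on the SATA submit path before invoking a completion callback that may itself
need that lock; with the SRST paths gone, the direct task_done()/
sas_task_abort() calls in task.c suffice. The underlying drop-call-reacquire
pattern, as a generic pthread sketch with hypothetical names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* A callback that is written to take the lock itself. */
static void callback(void *arg)
{
        pthread_mutex_lock(&lock);
        printf("callback ran: %s\n", (const char *)arg);
        pthread_mutex_unlock(&lock);
}

/* Caller already holds the lock, so it must drop it across the callback
 * to avoid self-deadlock, then re-take it to restore the invariant its
 * own caller expects. */
static void invoke_locked(void (*cb)(void *), void *arg)
{
        pthread_mutex_unlock(&lock);
        cb(arg);
        pthread_mutex_lock(&lock);
}

int main(void)
{
        pthread_mutex_lock(&lock);
        invoke_locked(callback, "submit-time failure");
        pthread_mutex_unlock(&lock);
        return 0;
}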
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index db47158e0dd..453a740fa68 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -684,10 +684,8 @@ static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
684 int buflen) 684 int buflen)
685{ 685{
686 struct iscsi_conn *conn = cls_conn->dd_data; 686 struct iscsi_conn *conn = cls_conn->dd_data;
687 struct iscsi_session *session = conn->session;
688 struct iscsi_tcp_conn *tcp_conn = conn->dd_data; 687 struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
689 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; 688 struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
690 int value;
691 689
692 switch(param) { 690 switch(param) {
693 case ISCSI_PARAM_HDRDGST_EN: 691 case ISCSI_PARAM_HDRDGST_EN:
@@ -699,16 +697,7 @@ static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
699 sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage; 697 sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
700 break; 698 break;
701 case ISCSI_PARAM_MAX_R2T: 699 case ISCSI_PARAM_MAX_R2T:
702 sscanf(buf, "%d", &value); 700 return iscsi_tcp_set_max_r2t(conn, buf);
703 if (value <= 0 || !is_power_of_2(value))
704 return -EINVAL;
705 if (session->max_r2t == value)
706 break;
707 iscsi_tcp_r2tpool_free(session);
708 iscsi_set_param(cls_conn, param, buf, buflen);
709 if (iscsi_tcp_r2tpool_alloc(session))
710 return -ENOMEM;
711 break;
712 default: 701 default:
713 return iscsi_set_param(cls_conn, param, buf, buflen); 702 return iscsi_set_param(cls_conn, param, buf, buflen);
714 } 703 }
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 1d1b0c9da29..8e561e6a557 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -337,6 +337,13 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
337 schedule_delayed_work(&disc->disc_work, delay); 337 schedule_delayed_work(&disc->disc_work, delay);
338 } else 338 } else
339 fc_disc_done(disc, DISC_EV_FAILED); 339 fc_disc_done(disc, DISC_EV_FAILED);
340 } else if (PTR_ERR(fp) == -FC_EX_CLOSED) {
341 /*
342 * if discovery fails due to lport reset, clear
343 * pending flag so that subsequent discovery can
344 * continue
345 */
346 disc->pending = 0;
340 } 347 }
341} 348}
342 349
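fc_disc_error() receives no separate error code: libfc encodes failures such
as -FC_EX_CLOSED in the frame pointer itself, using the kernel's
ERR_PTR()/PTR_ERR() idiom. A userspace re-creation of the idiom (EX_CLOSED is
a stand-in value here):

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *err_ptr(long error)
{
        return (void *)error;
}

static inline long ptr_err(const void *ptr)
{
        return (long)ptr;
}

/* Error values live in the top page of the address space, where no
 * valid object can be allocated. */
static inline int is_err(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

#define EX_CLOSED 3     /* stand-in for -FC_EX_CLOSED */

static void handle_frame(void *fp)
{
        if (is_err(fp) && ptr_err(fp) == -EX_CLOSED)
                printf("exchange closed: clear pending, allow rediscovery\n");
        else if (is_err(fp))
                printf("other error %ld\n", ptr_err(fp));
        else
                printf("got a real frame at %p\n", fp);
}

int main(void)
{
        int frame = 42;

        handle_frame(&frame);
        handle_frame(err_ptr(-EX_CLOSED));
        return 0;
}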
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index e17a28d324d..c2384d50147 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -56,8 +56,7 @@ struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did,
56 rc = fc_els_fill(lport, did, fp, op, &r_ctl, &fh_type); 56 rc = fc_els_fill(lport, did, fp, op, &r_ctl, &fh_type);
57 else { 57 else {
58 /* CT requests */ 58 /* CT requests */
59 rc = fc_ct_fill(lport, did, fp, op, &r_ctl, &fh_type); 59 rc = fc_ct_fill(lport, did, fp, op, &r_ctl, &fh_type, &did);
60 did = FC_FID_DIR_SERV;
61 } 60 }
62 61
63 if (rc) { 62 if (rc) {
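The did out-parameter lets fc_ct_fill() choose the destination based on the
command, so FDMI CT requests can reach the management server instead of the
caller always hard-coding the directory server. A hypothetical sketch of that
shape, using the well-known fabric addresses:

#include <stdio.h>

#define FID_DIR_SERV  0xFFFFFCu  /* well-known directory server address */
#define FID_MGMT_SERV 0xFFFFFAu  /* well-known management server address */

enum ct_cmd { CT_GPN_FT, CT_FDMI_RHBA };

/* The fill helper knows which service owns the command, so it reports
 * the destination ID back to the caller (hypothetical simplification). */
static int ct_fill(enum ct_cmd op, unsigned int *did)
{
        *did = (op == CT_FDMI_RHBA) ? FID_MGMT_SERV : FID_DIR_SERV;
        return 0;
}

int main(void)
{
        unsigned int did = 0;

        ct_fill(CT_FDMI_RHBA, &did);
        printf("send CT request to %06X\n", did);
        return 0;
}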
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 4d70d96fa5d..630291f0182 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1642,9 +1642,10 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
1642 case FC_RCTL_ACK_0: 1642 case FC_RCTL_ACK_0:
1643 break; 1643 break;
1644 default: 1644 default:
1645 FC_EXCH_DBG(ep, "BLS rctl %x - %s received", 1645 if (ep)
1646 fh->fh_r_ctl, 1646 FC_EXCH_DBG(ep, "BLS rctl %x - %s received",
1647 fc_exch_rctl_name(fh->fh_r_ctl)); 1647 fh->fh_r_ctl,
1648 fc_exch_rctl_name(fh->fh_r_ctl));
1648 break; 1649 break;
1649 } 1650 }
1650 fc_frame_free(fp); 1651 fc_frame_free(fp);
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index c1a808cc592..bd5d31d022d 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -116,6 +116,8 @@ static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state);
116static void fc_lport_enter_scr(struct fc_lport *); 116static void fc_lport_enter_scr(struct fc_lport *);
117static void fc_lport_enter_ready(struct fc_lport *); 117static void fc_lport_enter_ready(struct fc_lport *);
118static void fc_lport_enter_logo(struct fc_lport *); 118static void fc_lport_enter_logo(struct fc_lport *);
119static void fc_lport_enter_fdmi(struct fc_lport *lport);
120static void fc_lport_enter_ms(struct fc_lport *, enum fc_lport_state);
119 121
120static const char *fc_lport_state_names[] = { 122static const char *fc_lport_state_names[] = {
121 [LPORT_ST_DISABLED] = "disabled", 123 [LPORT_ST_DISABLED] = "disabled",
@@ -126,6 +128,11 @@ static const char *fc_lport_state_names[] = {
126 [LPORT_ST_RSPN_ID] = "RSPN_ID", 128 [LPORT_ST_RSPN_ID] = "RSPN_ID",
127 [LPORT_ST_RFT_ID] = "RFT_ID", 129 [LPORT_ST_RFT_ID] = "RFT_ID",
128 [LPORT_ST_RFF_ID] = "RFF_ID", 130 [LPORT_ST_RFF_ID] = "RFF_ID",
131 [LPORT_ST_FDMI] = "FDMI",
132 [LPORT_ST_RHBA] = "RHBA",
133 [LPORT_ST_RPA] = "RPA",
134 [LPORT_ST_DHBA] = "DHBA",
135 [LPORT_ST_DPRT] = "DPRT",
129 [LPORT_ST_SCR] = "SCR", 136 [LPORT_ST_SCR] = "SCR",
130 [LPORT_ST_READY] = "Ready", 137 [LPORT_ST_READY] = "Ready",
131 [LPORT_ST_LOGO] = "LOGO", 138 [LPORT_ST_LOGO] = "LOGO",
@@ -183,11 +190,14 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
183 if (lport->state == LPORT_ST_DNS) { 190 if (lport->state == LPORT_ST_DNS) {
184 lport->dns_rdata = rdata; 191 lport->dns_rdata = rdata;
185 fc_lport_enter_ns(lport, LPORT_ST_RNN_ID); 192 fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
193 } else if (lport->state == LPORT_ST_FDMI) {
194 lport->ms_rdata = rdata;
195 fc_lport_enter_ms(lport, LPORT_ST_DHBA);
186 } else { 196 } else {
187 FC_LPORT_DBG(lport, "Received a READY event " 197
188 "on port (%6.6x) for the directory " 198 "on port (%6.6x) for the directory "
189 "server, but the lport is not " 199 "server, but the lport is not "
190 "in the DNS state, it's in the " 200 "in the DNS or FDMI state, it's in the "
191 "%d state", rdata->ids.port_id, 201 "%d state", rdata->ids.port_id,
192 lport->state); 202 lport->state);
193 lport->tt.rport_logoff(rdata); 203 lport->tt.rport_logoff(rdata);
@@ -196,7 +206,10 @@ static void fc_lport_rport_callback(struct fc_lport *lport,
196 case RPORT_EV_LOGO: 206 case RPORT_EV_LOGO:
197 case RPORT_EV_FAILED: 207 case RPORT_EV_FAILED:
198 case RPORT_EV_STOP: 208 case RPORT_EV_STOP:
199 lport->dns_rdata = NULL; 209 if (rdata->ids.port_id == FC_FID_DIR_SERV)
210 lport->dns_rdata = NULL;
211 else if (rdata->ids.port_id == FC_FID_MGMT_SERV)
212 lport->ms_rdata = NULL;
200 break; 213 break;
201 case RPORT_EV_NONE: 214 case RPORT_EV_NONE:
202 break; 215 break;
@@ -1148,7 +1161,10 @@ static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
1148 fc_lport_enter_ns(lport, LPORT_ST_RFF_ID); 1161 fc_lport_enter_ns(lport, LPORT_ST_RFF_ID);
1149 break; 1162 break;
1150 case LPORT_ST_RFF_ID: 1163 case LPORT_ST_RFF_ID:
1151 fc_lport_enter_scr(lport); 1164 if (lport->fdmi_enabled)
1165 fc_lport_enter_fdmi(lport);
1166 else
1167 fc_lport_enter_scr(lport);
1152 break; 1168 break;
1153 default: 1169 default:
1154 /* should have already been caught by state checks */ 1170 /* should have already been caught by state checks */
@@ -1163,6 +1179,85 @@ err:
1163} 1179}
1164 1180
1165/** 1181/**
1182 * fc_lport_ms_resp() - Handle response to a management server
1183 * exchange
1184 * @sp: current sequence in exchange
1185 * @fp: response frame
1186 * @lp_arg: Fibre Channel host port instance
1187 *
1188 * Locking Note: This function will be called without the lport lock
1189 * held, but it will lock, call an _enter_* function or fc_lport_error()
1190 * and then unlock the lport.
1191 */
1192static void fc_lport_ms_resp(struct fc_seq *sp, struct fc_frame *fp,
1193 void *lp_arg)
1194{
1195 struct fc_lport *lport = lp_arg;
1196 struct fc_frame_header *fh;
1197 struct fc_ct_hdr *ct;
1198
1199 FC_LPORT_DBG(lport, "Received a ms %s\n", fc_els_resp_type(fp));
1200
1201 if (fp == ERR_PTR(-FC_EX_CLOSED))
1202 return;
1203
1204 mutex_lock(&lport->lp_mutex);
1205
1206 if (lport->state < LPORT_ST_RHBA || lport->state > LPORT_ST_DPRT) {
1207 FC_LPORT_DBG(lport, "Received a management server response, "
1208 "but in state %s\n", fc_lport_state(lport));
1209 if (IS_ERR(fp))
1210 goto err;
1211 goto out;
1212 }
1213
1214 if (IS_ERR(fp)) {
1215 fc_lport_error(lport, fp);
1216 goto err;
1217 }
1218
1219 fh = fc_frame_header_get(fp);
1220 ct = fc_frame_payload_get(fp, sizeof(*ct));
1221
1222 if (fh && ct && fh->fh_type == FC_TYPE_CT &&
1223 ct->ct_fs_type == FC_FST_MGMT &&
1224 ct->ct_fs_subtype == FC_FDMI_SUBTYPE) {
1225 FC_LPORT_DBG(lport, "Received a management server response, "
1226 "reason=%d explain=%d\n",
1227 ct->ct_reason,
1228 ct->ct_explan);
1229
1230 switch (lport->state) {
1231 case LPORT_ST_RHBA:
1232 if (ntohs(ct->ct_cmd) == FC_FS_ACC)
1233 fc_lport_enter_ms(lport, LPORT_ST_RPA);
1234 else /* Error Skip RPA */
1235 fc_lport_enter_scr(lport);
1236 break;
1237 case LPORT_ST_RPA:
1238 fc_lport_enter_scr(lport);
1239 break;
1240 case LPORT_ST_DPRT:
1241 fc_lport_enter_ms(lport, LPORT_ST_RHBA);
1242 break;
1243 case LPORT_ST_DHBA:
1244 fc_lport_enter_ms(lport, LPORT_ST_DPRT);
1245 break;
1246 default:
1247 /* should have already been caught by state checks */
1248 break;
1249 }
1250 } else {
1251 /* Invalid Frame? */
1252 fc_lport_error(lport, fp);
1253 }
1254out:
1255 fc_frame_free(fp);
1256err:
1257 mutex_unlock(&lport->lp_mutex);
1258}
1259
1260/**
1166 * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request 1261 * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
1167 * @sp: current sequence in SCR exchange 1262 * @sp: current sequence in SCR exchange
1168 * @fp: response frame 1263 * @fp: response frame
@@ -1339,6 +1434,123 @@ err:
1339} 1434}
1340 1435
1341/** 1436/**
 1437 * fc_lport_enter_ms() - Send a management server (FDMI) request
1438 * @lport: Fibre Channel local port to register
1439 *
1440 * Locking Note: The lport lock is expected to be held before calling
1441 * this routine.
1442 */
1443static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
1444{
1445 struct fc_frame *fp;
1446 enum fc_fdmi_req cmd;
1447 int size = sizeof(struct fc_ct_hdr);
1448 size_t len;
1449 int numattrs;
1450
1451 FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
1452 fc_lport_state_names[state],
1453 fc_lport_state(lport));
1454
1455 fc_lport_state_enter(lport, state);
1456
1457 switch (state) {
1458 case LPORT_ST_RHBA:
1459 cmd = FC_FDMI_RHBA;
1460 /* Number of HBA Attributes */
1461 numattrs = 10;
1462 len = sizeof(struct fc_fdmi_rhba);
1463 len -= sizeof(struct fc_fdmi_attr_entry);
1464 len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
1465 len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
1466 len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
1467 len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
1468 len += FC_FDMI_HBA_ATTR_MODEL_LEN;
1469 len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN;
1470 len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN;
1471 len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN;
1472 len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
1473 len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
1474 len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
1475
1476 size += len;
1477 break;
1478 case LPORT_ST_RPA:
1479 cmd = FC_FDMI_RPA;
1480 /* Number of Port Attributes */
1481 numattrs = 6;
1482 len = sizeof(struct fc_fdmi_rpa);
1483 len -= sizeof(struct fc_fdmi_attr_entry);
1484 len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
1485 len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
1486 len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
1487 len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
1488 len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN;
1489 len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
1490 len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
1491
1492 size += len;
1493 break;
1494 case LPORT_ST_DPRT:
1495 cmd = FC_FDMI_DPRT;
1496 len = sizeof(struct fc_fdmi_dprt);
1497 size += len;
1498 break;
1499 case LPORT_ST_DHBA:
1500 cmd = FC_FDMI_DHBA;
1501 len = sizeof(struct fc_fdmi_dhba);
1502 size += len;
1503 break;
1504 default:
1505 fc_lport_error(lport, NULL);
1506 return;
1507 }
1508
1509 FC_LPORT_DBG(lport, "Cmd=0x%x Len %d size %d\n",
1510 cmd, (int)len, size);
1511 fp = fc_frame_alloc(lport, size);
1512 if (!fp) {
1513 fc_lport_error(lport, fp);
1514 return;
1515 }
1516
1517 if (!lport->tt.elsct_send(lport, FC_FID_MGMT_SERV, fp, cmd,
1518 fc_lport_ms_resp,
1519 lport, 3 * lport->r_a_tov))
1520 fc_lport_error(lport, fp);
1521}
1522
1523/**
 1524 * fc_lport_enter_fdmi() - Create an fc_rport for the management server
1525 * @lport: The local port requesting a remote port for the management server
1526 *
1527 * Locking Note: The lport lock is expected to be held before calling
1528 * this routine.
1529 */
1530static void fc_lport_enter_fdmi(struct fc_lport *lport)
1531{
1532 struct fc_rport_priv *rdata;
1533
1534 FC_LPORT_DBG(lport, "Entered FDMI state from %s state\n",
1535 fc_lport_state(lport));
1536
1537 fc_lport_state_enter(lport, LPORT_ST_FDMI);
1538
1539 mutex_lock(&lport->disc.disc_mutex);
1540 rdata = lport->tt.rport_create(lport, FC_FID_MGMT_SERV);
1541 mutex_unlock(&lport->disc.disc_mutex);
1542 if (!rdata)
1543 goto err;
1544
1545 rdata->ops = &fc_lport_rport_ops;
1546 lport->tt.rport_login(rdata);
1547 return;
1548
1549err:
1550 fc_lport_error(lport, NULL);
1551}
1552
1553/**
1342 * fc_lport_timeout() - Handler for the retry_work timer 1554 * fc_lport_timeout() - Handler for the retry_work timer
1343 * @work: The work struct of the local port 1555 * @work: The work struct of the local port
1344 */ 1556 */
@@ -1371,6 +1583,15 @@ static void fc_lport_timeout(struct work_struct *work)
1371 case LPORT_ST_RFF_ID: 1583 case LPORT_ST_RFF_ID:
1372 fc_lport_enter_ns(lport, lport->state); 1584 fc_lport_enter_ns(lport, lport->state);
1373 break; 1585 break;
1586 case LPORT_ST_FDMI:
1587 fc_lport_enter_fdmi(lport);
1588 break;
1589 case LPORT_ST_RHBA:
1590 case LPORT_ST_RPA:
1591 case LPORT_ST_DHBA:
1592 case LPORT_ST_DPRT:
1593 fc_lport_enter_ms(lport, lport->state);
1594 break;
1374 case LPORT_ST_SCR: 1595 case LPORT_ST_SCR:
1375 fc_lport_enter_scr(lport); 1596 fc_lport_enter_scr(lport);
1376 break; 1597 break;
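Taken together, fc_lport_enter_ms() and fc_lport_ms_resp() implement a small
FDMI sub-machine: DHBA -> DPRT -> RHBA -> RPA -> SCR, where a rejected RHBA
skips RPA and falls straight through to SCR. A compact sketch of just that
progression:

#include <stdio.h>

enum lp_state { ST_FDMI, ST_DHBA, ST_DPRT, ST_RHBA, ST_RPA, ST_SCR };

/* Next state on a management-server response, mirroring the switch in
 * fc_lport_ms_resp(); 'accepted' matters only for RHBA. */
static enum lp_state ms_next(enum lp_state cur, int accepted)
{
        switch (cur) {
        case ST_FDMI: return ST_DHBA;   /* entry point after RFF_ID */
        case ST_DHBA: return ST_DPRT;
        case ST_DPRT: return ST_RHBA;
        case ST_RHBA: return accepted ? ST_RPA : ST_SCR; /* skip RPA on error */
        case ST_RPA:  /* fall through */
        default:      return ST_SCR;
        }
}

int main(void)
{
        static const char *names[] = { "FDMI", "DHBA", "DPRT",
                                       "RHBA", "RPA", "SCR" };
        enum lp_state s = ST_FDMI;

        while (s != ST_SCR) {
                s = ms_next(s, 1);
                printf("-> %s\n", names[s]);
        }
        return 0;
}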
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 143bbe448be..82c3fd4bc93 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1909,6 +1909,16 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
1909 ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc); 1909 ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);
1910 1910
1911 spin_lock(&session->lock); 1911 spin_lock(&session->lock);
1912 task = (struct iscsi_task *)sc->SCp.ptr;
1913 if (!task) {
1914 /*
1915 * Raced with completion. Blk layer has taken ownership
1916 * so let timeout code complete it now.
1917 */
1918 rc = BLK_EH_HANDLED;
1919 goto done;
1920 }
1921
1912 if (session->state != ISCSI_STATE_LOGGED_IN) { 1922 if (session->state != ISCSI_STATE_LOGGED_IN) {
1913 /* 1923 /*
1914 * We are probably in the middle of iscsi recovery so let 1924 * We are probably in the middle of iscsi recovery so let
@@ -1925,16 +1935,6 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
1925 goto done; 1935 goto done;
1926 } 1936 }
1927 1937
1928 task = (struct iscsi_task *)sc->SCp.ptr;
1929 if (!task) {
1930 /*
1931 * Raced with completion. Just reset timer, and let it
1932 * complete normally
1933 */
1934 rc = BLK_EH_RESET_TIMER;
1935 goto done;
1936 }
1937
1938 /* 1938 /*
1939 * If we have sent (at least queued to the network layer) a pdu or 1939 * If we have sent (at least queued to the network layer) a pdu or
1940 * recvd one for the task since the last timeout ask for 1940 * recvd one for the task since the last timeout ask for
@@ -2807,6 +2807,7 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
2807 kfree(session->username); 2807 kfree(session->username);
2808 kfree(session->username_in); 2808 kfree(session->username_in);
2809 kfree(session->targetname); 2809 kfree(session->targetname);
2810 kfree(session->targetalias);
2810 kfree(session->initiatorname); 2811 kfree(session->initiatorname);
2811 kfree(session->ifacename); 2812 kfree(session->ifacename);
2812 2813
@@ -3200,7 +3201,7 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
3200 sscanf(buf, "%d", &session->initial_r2t_en); 3201 sscanf(buf, "%d", &session->initial_r2t_en);
3201 break; 3202 break;
3202 case ISCSI_PARAM_MAX_R2T: 3203 case ISCSI_PARAM_MAX_R2T:
3203 sscanf(buf, "%d", &session->max_r2t); 3204 sscanf(buf, "%hu", &session->max_r2t);
3204 break; 3205 break;
3205 case ISCSI_PARAM_IMM_DATA_EN: 3206 case ISCSI_PARAM_IMM_DATA_EN:
3206 sscanf(buf, "%d", &session->imm_data_en); 3207 sscanf(buf, "%d", &session->imm_data_en);
@@ -3233,6 +3234,8 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
3233 return iscsi_switch_str_param(&session->password_in, buf); 3234 return iscsi_switch_str_param(&session->password_in, buf);
3234 case ISCSI_PARAM_TARGET_NAME: 3235 case ISCSI_PARAM_TARGET_NAME:
3235 return iscsi_switch_str_param(&session->targetname, buf); 3236 return iscsi_switch_str_param(&session->targetname, buf);
3237 case ISCSI_PARAM_TARGET_ALIAS:
3238 return iscsi_switch_str_param(&session->targetalias, buf);
3236 case ISCSI_PARAM_TPGT: 3239 case ISCSI_PARAM_TPGT:
3237 sscanf(buf, "%d", &session->tpgt); 3240 sscanf(buf, "%d", &session->tpgt);
3238 break; 3241 break;
@@ -3299,6 +3302,9 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
3299 case ISCSI_PARAM_TARGET_NAME: 3302 case ISCSI_PARAM_TARGET_NAME:
3300 len = sprintf(buf, "%s\n", session->targetname); 3303 len = sprintf(buf, "%s\n", session->targetname);
3301 break; 3304 break;
3305 case ISCSI_PARAM_TARGET_ALIAS:
3306 len = sprintf(buf, "%s\n", session->targetalias);
3307 break;
3302 case ISCSI_PARAM_TPGT: 3308 case ISCSI_PARAM_TPGT:
3303 len = sprintf(buf, "%d\n", session->tpgt); 3309 len = sprintf(buf, "%d\n", session->tpgt);
3304 break; 3310 break;
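The %d -> %hu switch is a correctness fix, not cosmetics: sscanf() writes
through its pointer at the width implied by the conversion, so scanning %d
into what is evidently a 16-bit max_r2t field can clobber adjacent bytes. A
self-contained illustration:

#include <stdio.h>

int main(void)
{
        struct {
                unsigned short max_r2t; /* 16-bit, like the session field */
                unsigned short neighbor;
        } s = { 0, 0xBEEF };

        /* Correct: %hu writes exactly sizeof(unsigned short). */
        sscanf("16", "%hu", &s.max_r2t);
        printf("max_r2t=%u neighbor=0x%X\n", s.max_r2t, s.neighbor);

        /* Wrong (undefined behavior, shown only as a comment):
         * sscanf("16", "%d", (int *)&s.max_r2t) would write 4 bytes
         * and could overwrite 'neighbor' on many ABIs. */
        return 0;
}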
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 7f0465b9623..552e8a2b6f5 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -1170,6 +1170,24 @@ void iscsi_tcp_r2tpool_free(struct iscsi_session *session)
1170} 1170}
1171EXPORT_SYMBOL_GPL(iscsi_tcp_r2tpool_free); 1171EXPORT_SYMBOL_GPL(iscsi_tcp_r2tpool_free);
1172 1172
1173int iscsi_tcp_set_max_r2t(struct iscsi_conn *conn, char *buf)
1174{
1175 struct iscsi_session *session = conn->session;
1176 unsigned short r2ts = 0;
1177
1178 sscanf(buf, "%hu", &r2ts);
1179 if (session->max_r2t == r2ts)
1180 return 0;
1181
1182 if (!r2ts || !is_power_of_2(r2ts))
1183 return -EINVAL;
1184
1185 session->max_r2t = r2ts;
1186 iscsi_tcp_r2tpool_free(session);
1187 return iscsi_tcp_r2tpool_alloc(session);
1188}
1189EXPORT_SYMBOL_GPL(iscsi_tcp_set_max_r2t);
1190
1173void iscsi_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn, 1191void iscsi_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
1174 struct iscsi_stats *stats) 1192 struct iscsi_stats *stats)
1175{ 1193{
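The new helper validates before it rebuilds: a value equal to the current
max_r2t is a no-op, and zero or non-power-of-two values are rejected
(is_power_of_2() reduces to the classic x && !(x & (x - 1)) test). The same
logic as a standalone sketch, with hypothetical stand-ins for the r2t pool
calls:

#include <stdio.h>

static int is_power_of_2(unsigned long n)
{
        return n != 0 && (n & (n - 1)) == 0;
}

/* Hypothetical stand-ins for iscsi_tcp_r2tpool_free()/_alloc(). */
static void pool_free(void) { printf("pool freed\n"); }
static int pool_alloc(void) { printf("pool rebuilt\n"); return 0; }

static int set_max_r2t(unsigned short *max_r2t, const char *buf)
{
        unsigned short r2ts = 0;

        sscanf(buf, "%hu", &r2ts);
        if (*max_r2t == r2ts)           /* no change: nothing to rebuild */
                return 0;
        if (!r2ts || !is_power_of_2(r2ts))
                return -1;              /* -EINVAL in the kernel version */

        *max_r2t = r2ts;
        pool_free();
        return pool_alloc();
}

int main(void)
{
        unsigned short max_r2t = 4;

        printf("%d\n", set_max_r2t(&max_r2t, "16"));    /* ok */
        printf("%d\n", set_max_r2t(&max_r2t, "12"));    /* rejected */
        return 0;
}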
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index db9238f2ecb..bc0cecc6ad6 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -23,6 +23,8 @@
23 23
24#include <linux/scatterlist.h> 24#include <linux/scatterlist.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/async.h>
27#include <linux/export.h>
26 28
27#include <scsi/sas_ata.h> 29#include <scsi/sas_ata.h>
28#include "sas_internal.h" 30#include "sas_internal.h"
@@ -93,22 +95,47 @@ static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
93static void sas_ata_task_done(struct sas_task *task) 95static void sas_ata_task_done(struct sas_task *task)
94{ 96{
95 struct ata_queued_cmd *qc = task->uldd_task; 97 struct ata_queued_cmd *qc = task->uldd_task;
96 struct domain_device *dev; 98 struct domain_device *dev = task->dev;
97 struct task_status_struct *stat = &task->task_status; 99 struct task_status_struct *stat = &task->task_status;
98 struct ata_task_resp *resp = (struct ata_task_resp *)stat->buf; 100 struct ata_task_resp *resp = (struct ata_task_resp *)stat->buf;
99 struct sas_ha_struct *sas_ha; 101 struct sas_ha_struct *sas_ha = dev->port->ha;
100 enum ata_completion_errors ac; 102 enum ata_completion_errors ac;
101 unsigned long flags; 103 unsigned long flags;
102 struct ata_link *link; 104 struct ata_link *link;
105 struct ata_port *ap;
106
107 spin_lock_irqsave(&dev->done_lock, flags);
108 if (test_bit(SAS_HA_FROZEN, &sas_ha->state))
109 task = NULL;
110 else if (qc && qc->scsicmd)
111 ASSIGN_SAS_TASK(qc->scsicmd, NULL);
112 spin_unlock_irqrestore(&dev->done_lock, flags);
113
114 /* check if libsas-eh got to the task before us */
115 if (unlikely(!task))
116 return;
103 117
104 if (!qc) 118 if (!qc)
105 goto qc_already_gone; 119 goto qc_already_gone;
106 120
107 dev = qc->ap->private_data; 121 ap = qc->ap;
108 sas_ha = dev->port->ha; 122 link = &ap->link;
109 link = &dev->sata_dev.ap->link; 123
124 spin_lock_irqsave(ap->lock, flags);
125 /* check if we lost the race with libata/sas_ata_post_internal() */
126 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) {
127 spin_unlock_irqrestore(ap->lock, flags);
128 if (qc->scsicmd)
129 goto qc_already_gone;
130 else {
131 /* if eh is not involved and the port is frozen then the
132 * ata internal abort process has taken responsibility
133 * for this sas_task
134 */
135 return;
136 }
137 }
110 138
111 spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
112 if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD || 139 if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
113 ((stat->stat == SAM_STAT_CHECK_CONDITION && 140 ((stat->stat == SAM_STAT_CHECK_CONDITION &&
114 dev->sata_dev.command_set == ATAPI_COMMAND_SET))) { 141 dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
@@ -121,10 +148,6 @@ static void sas_ata_task_done(struct sas_task *task)
121 if (unlikely(link->eh_info.err_mask)) 148 if (unlikely(link->eh_info.err_mask))
122 qc->flags |= ATA_QCFLAG_FAILED; 149 qc->flags |= ATA_QCFLAG_FAILED;
123 } 150 }
124
125 dev->sata_dev.sstatus = resp->sstatus;
126 dev->sata_dev.serror = resp->serror;
127 dev->sata_dev.scontrol = resp->scontrol;
128 } else { 151 } else {
129 ac = sas_to_ata_err(stat); 152 ac = sas_to_ata_err(stat);
130 if (ac) { 153 if (ac) {
@@ -144,24 +167,8 @@ static void sas_ata_task_done(struct sas_task *task)
144 } 167 }
145 168
146 qc->lldd_task = NULL; 169 qc->lldd_task = NULL;
147 if (qc->scsicmd)
148 ASSIGN_SAS_TASK(qc->scsicmd, NULL);
149 ata_qc_complete(qc); 170 ata_qc_complete(qc);
150 spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags); 171 spin_unlock_irqrestore(ap->lock, flags);
151
152 /*
153 * If the sas_task has an ata qc, a scsi_cmnd and the aborted
154 * flag is set, then we must have come in via the libsas EH
155 * functions. When we exit this function, we need to put the
156 * scsi_cmnd on the list of finished errors. The ata_qc_complete
157 * call cleans up the libata side of things but we're protected
158 * from the scsi_cmnd going away because the scsi_cmnd is owned
159 * by the EH, making libata's call to scsi_done a NOP.
160 */
161 spin_lock_irqsave(&task->task_state_lock, flags);
162 if (qc->scsicmd && task->task_state_flags & SAS_TASK_STATE_ABORTED)
163 scsi_eh_finish_cmd(qc->scsicmd, &sas_ha->eh_done_q);
164 spin_unlock_irqrestore(&task->task_state_lock, flags);
165 172
166qc_already_gone: 173qc_already_gone:
167 list_del_init(&task->list); 174 list_del_init(&task->list);
@@ -170,23 +177,30 @@ qc_already_gone:
170 177
171static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) 178static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
172{ 179{
173 int res; 180 unsigned long flags;
174 struct sas_task *task; 181 struct sas_task *task;
175 struct domain_device *dev = qc->ap->private_data; 182 struct scatterlist *sg;
183 int ret = AC_ERR_SYSTEM;
184 unsigned int si, xfer = 0;
185 struct ata_port *ap = qc->ap;
186 struct domain_device *dev = ap->private_data;
176 struct sas_ha_struct *sas_ha = dev->port->ha; 187 struct sas_ha_struct *sas_ha = dev->port->ha;
177 struct Scsi_Host *host = sas_ha->core.shost; 188 struct Scsi_Host *host = sas_ha->core.shost;
178 struct sas_internal *i = to_sas_internal(host->transportt); 189 struct sas_internal *i = to_sas_internal(host->transportt);
179 struct scatterlist *sg; 190
180 unsigned int xfer = 0; 191 /* TODO: audit callers to ensure they are ready for qc_issue to
181 unsigned int si; 192 * unconditionally re-enable interrupts
193 */
194 local_irq_save(flags);
195 spin_unlock(ap->lock);
182 196
183 /* If the device fell off, no sense in issuing commands */ 197 /* If the device fell off, no sense in issuing commands */
184 if (dev->gone) 198 if (test_bit(SAS_DEV_GONE, &dev->state))
185 return AC_ERR_SYSTEM; 199 goto out;
186 200
187 task = sas_alloc_task(GFP_ATOMIC); 201 task = sas_alloc_task(GFP_ATOMIC);
188 if (!task) 202 if (!task)
189 return AC_ERR_SYSTEM; 203 goto out;
190 task->dev = dev; 204 task->dev = dev;
191 task->task_proto = SAS_PROTOCOL_STP; 205 task->task_proto = SAS_PROTOCOL_STP;
192 task->task_done = sas_ata_task_done; 206 task->task_done = sas_ata_task_done;
@@ -231,21 +245,24 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
231 ASSIGN_SAS_TASK(qc->scsicmd, task); 245 ASSIGN_SAS_TASK(qc->scsicmd, task);
232 246
233 if (sas_ha->lldd_max_execute_num < 2) 247 if (sas_ha->lldd_max_execute_num < 2)
234 res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC); 248 ret = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
235 else 249 else
236 res = sas_queue_up(task); 250 ret = sas_queue_up(task);
237 251
238 /* Examine */ 252 /* Examine */
239 if (res) { 253 if (ret) {
240 SAS_DPRINTK("lldd_execute_task returned: %d\n", res); 254 SAS_DPRINTK("lldd_execute_task returned: %d\n", ret);
241 255
242 if (qc->scsicmd) 256 if (qc->scsicmd)
243 ASSIGN_SAS_TASK(qc->scsicmd, NULL); 257 ASSIGN_SAS_TASK(qc->scsicmd, NULL);
244 sas_free_task(task); 258 sas_free_task(task);
245 return AC_ERR_SYSTEM; 259 ret = AC_ERR_SYSTEM;
246 } 260 }
247 261
248 return 0; 262 out:
263 spin_lock(ap->lock);
264 local_irq_restore(flags);
265 return ret;
249} 266}
250 267
251static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc) 268static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
@@ -256,83 +273,222 @@ static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
256 return true; 273 return true;
257} 274}
258 275
259static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class, 276static struct sas_internal *dev_to_sas_internal(struct domain_device *dev)
260 unsigned long deadline) 277{
278 return to_sas_internal(dev->port->ha->core.shost->transportt);
279}
280
281static void sas_get_ata_command_set(struct domain_device *dev);
282
283int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
261{ 284{
285 if (phy->attached_tproto & SAS_PROTOCOL_STP)
286 dev->tproto = phy->attached_tproto;
287 if (phy->attached_sata_dev)
288 dev->tproto |= SATA_DEV;
289
290 if (phy->attached_dev_type == SATA_PENDING)
291 dev->dev_type = SATA_PENDING;
292 else {
293 int res;
294
295 dev->dev_type = SATA_DEV;
296 res = sas_get_report_phy_sata(dev->parent, phy->phy_id,
297 &dev->sata_dev.rps_resp);
298 if (res) {
299 SAS_DPRINTK("report phy sata to %016llx:0x%x returned "
300 "0x%x\n", SAS_ADDR(dev->parent->sas_addr),
301 phy->phy_id, res);
302 return res;
303 }
304 memcpy(dev->frame_rcvd, &dev->sata_dev.rps_resp.rps.fis,
305 sizeof(struct dev_to_host_fis));
306 /* TODO switch to ata_dev_classify() */
307 sas_get_ata_command_set(dev);
308 }
309 return 0;
310}
311
312static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy)
313{
314 int res;
315
316 /* we weren't pending, so successfully end the reset sequence now */
317 if (dev->dev_type != SATA_PENDING)
318 return 1;
319
320 /* hmmm, if this succeeds do we need to repost the domain_device to the
321 * lldd so it can pick up new parameters?
322 */
323 res = sas_get_ata_info(dev, phy);
324 if (res)
325 return 0; /* retry */
326 else
327 return 1;
328}
329
330static int smp_ata_check_ready(struct ata_link *link)
331{
332 int res;
262 struct ata_port *ap = link->ap; 333 struct ata_port *ap = link->ap;
263 struct domain_device *dev = ap->private_data; 334 struct domain_device *dev = ap->private_data;
264 struct sas_internal *i = 335 struct domain_device *ex_dev = dev->parent;
265 to_sas_internal(dev->port->ha->core.shost->transportt); 336 struct sas_phy *phy = sas_get_local_phy(dev);
266 int res = TMF_RESP_FUNC_FAILED; 337 struct ex_phy *ex_phy = &ex_dev->ex_dev.ex_phy[phy->number];
267 int ret = 0;
268 338
269 if (i->dft->lldd_I_T_nexus_reset) 339 res = sas_ex_phy_discover(ex_dev, phy->number);
270 res = i->dft->lldd_I_T_nexus_reset(dev); 340 sas_put_local_phy(phy);
271 341
272 if (res != TMF_RESP_FUNC_COMPLETE) { 342 /* break the wait early if the expander is unreachable,
273 SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __func__); 343 * otherwise keep polling
274 ret = -EAGAIN; 344 */
345 if (res == -ECOMM)
346 return res;
347 if (res != SMP_RESP_FUNC_ACC)
348 return 0;
349
350 switch (ex_phy->attached_dev_type) {
351 case SATA_PENDING:
352 return 0;
353 case SAS_END_DEV:
354 if (ex_phy->attached_sata_dev)
355 return sas_ata_clear_pending(dev, ex_phy);
356 default:
357 return -ENODEV;
275 } 358 }
359}
276 360
277 switch (dev->sata_dev.command_set) { 361static int local_ata_check_ready(struct ata_link *link)
278 case ATA_COMMAND_SET: 362{
279 SAS_DPRINTK("%s: Found ATA device.\n", __func__); 363 struct ata_port *ap = link->ap;
280 *class = ATA_DEV_ATA; 364 struct domain_device *dev = ap->private_data;
281 break; 365 struct sas_internal *i = dev_to_sas_internal(dev);
282 case ATAPI_COMMAND_SET: 366
283 SAS_DPRINTK("%s: Found ATAPI device.\n", __func__); 367 if (i->dft->lldd_ata_check_ready)
284 *class = ATA_DEV_ATAPI; 368 return i->dft->lldd_ata_check_ready(dev);
285 break; 369 else {
286 default: 370 /* lldd's that don't implement 'ready' checking get the
287 SAS_DPRINTK("%s: Unknown SATA command set: %d.\n", 371 * old default behavior of not coordinating reset
288 __func__, 372 * recovery with libata
289 dev->sata_dev.command_set); 373 */
290 *class = ATA_DEV_UNKNOWN; 374 return 1;
291 break;
292 } 375 }
376}
293 377
294 ap->cbl = ATA_CBL_SATA; 378static int sas_ata_printk(const char *level, const struct domain_device *ddev,
295 return ret; 379 const char *fmt, ...)
380{
381 struct ata_port *ap = ddev->sata_dev.ap;
382 struct device *dev = &ddev->rphy->dev;
383 struct va_format vaf;
384 va_list args;
385 int r;
386
387 va_start(args, fmt);
388
389 vaf.fmt = fmt;
390 vaf.va = &args;
391
392 r = printk("%ssas: ata%u: %s: %pV",
393 level, ap->print_id, dev_name(dev), &vaf);
394
395 va_end(args);
396
397 return r;
296} 398}
297 399
298static int sas_ata_soft_reset(struct ata_link *link, unsigned int *class, 400static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
299 unsigned long deadline) 401 unsigned long deadline)
300{ 402{
403 int ret = 0, res;
404 struct sas_phy *phy;
301 struct ata_port *ap = link->ap; 405 struct ata_port *ap = link->ap;
406 int (*check_ready)(struct ata_link *link);
302 struct domain_device *dev = ap->private_data; 407 struct domain_device *dev = ap->private_data;
303 struct sas_internal *i = 408 struct sas_internal *i = dev_to_sas_internal(dev);
304 to_sas_internal(dev->port->ha->core.shost->transportt);
305 int res = TMF_RESP_FUNC_FAILED;
306 int ret = 0;
307 409
308 if (i->dft->lldd_ata_soft_reset) 410 res = i->dft->lldd_I_T_nexus_reset(dev);
309 res = i->dft->lldd_ata_soft_reset(dev); 411 if (res == -ENODEV)
412 return res;
310 413
311 if (res != TMF_RESP_FUNC_COMPLETE) { 414 if (res != TMF_RESP_FUNC_COMPLETE)
312 SAS_DPRINTK("%s: Unable to soft reset\n", __func__); 415 sas_ata_printk(KERN_DEBUG, dev, "Unable to reset ata device?\n");
313 ret = -EAGAIN; 416
314 } 417 phy = sas_get_local_phy(dev);
418 if (scsi_is_sas_phy_local(phy))
419 check_ready = local_ata_check_ready;
420 else
421 check_ready = smp_ata_check_ready;
422 sas_put_local_phy(phy);
423
424 ret = ata_wait_after_reset(link, deadline, check_ready);
425 if (ret && ret != -EAGAIN)
426 sas_ata_printk(KERN_ERR, dev, "reset failed (errno=%d)\n", ret);
315 427
428 /* XXX: if the class changes during the reset the upper layer
429 * should be informed, if the device has gone away we assume
430 * libsas will eventually delete it
431 */
316 switch (dev->sata_dev.command_set) { 432 switch (dev->sata_dev.command_set) {
317 case ATA_COMMAND_SET: 433 case ATA_COMMAND_SET:
318 SAS_DPRINTK("%s: Found ATA device.\n", __func__);
319 *class = ATA_DEV_ATA; 434 *class = ATA_DEV_ATA;
320 break; 435 break;
321 case ATAPI_COMMAND_SET: 436 case ATAPI_COMMAND_SET:
322 SAS_DPRINTK("%s: Found ATAPI device.\n", __func__);
323 *class = ATA_DEV_ATAPI; 437 *class = ATA_DEV_ATAPI;
324 break; 438 break;
325 default:
326 SAS_DPRINTK("%s: Unknown SATA command set: %d.\n",
327 __func__, dev->sata_dev.command_set);
328 *class = ATA_DEV_UNKNOWN;
329 break;
330 } 439 }
331 440
332 ap->cbl = ATA_CBL_SATA; 441 ap->cbl = ATA_CBL_SATA;
333 return ret; 442 return ret;
334} 443}
335 444
445/*
446 * notify the lldd to forget the sas_task for this internal ata command
447 * that bypasses scsi-eh
448 */
449static void sas_ata_internal_abort(struct sas_task *task)
450{
451 struct sas_internal *si = dev_to_sas_internal(task->dev);
452 unsigned long flags;
453 int res;
454
455 spin_lock_irqsave(&task->task_state_lock, flags);
456 if (task->task_state_flags & SAS_TASK_STATE_ABORTED ||
457 task->task_state_flags & SAS_TASK_STATE_DONE) {
458 spin_unlock_irqrestore(&task->task_state_lock, flags);
459 SAS_DPRINTK("%s: Task %p already finished.\n", __func__,
460 task);
461 goto out;
462 }
463 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
464 spin_unlock_irqrestore(&task->task_state_lock, flags);
465
466 res = si->dft->lldd_abort_task(task);
467
468 spin_lock_irqsave(&task->task_state_lock, flags);
469 if (task->task_state_flags & SAS_TASK_STATE_DONE ||
470 res == TMF_RESP_FUNC_COMPLETE) {
471 spin_unlock_irqrestore(&task->task_state_lock, flags);
472 goto out;
473 }
474
475 /* XXX we are not prepared to deal with ->lldd_abort_task()
476 * failures. TODO: lldds need to unconditionally forget about
477 * aborted ata tasks, otherwise we (likely) leak the sas task
478 * here
479 */
480 SAS_DPRINTK("%s: Task %p leaked.\n", __func__, task);
481
482 if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
483 task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
484 spin_unlock_irqrestore(&task->task_state_lock, flags);
485
486 return;
487 out:
488 list_del_init(&task->list);
489 sas_free_task(task);
490}
491
336static void sas_ata_post_internal(struct ata_queued_cmd *qc) 492static void sas_ata_post_internal(struct ata_queued_cmd *qc)
337{ 493{
338 if (qc->flags & ATA_QCFLAG_FAILED) 494 if (qc->flags & ATA_QCFLAG_FAILED)
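The reworked hard reset no longer guesses at the outcome: it picks a
check_ready() implementation (local_ata_check_ready() for a direct-attached
phy, smp_ata_check_ready() behind an expander) and lets
ata_wait_after_reset() poll it against the deadline. Per the code above, the
callback contract is: positive means ready, zero means keep polling, negative
aborts the wait early. A userspace sketch of that polling contract (helper
names hypothetical):

#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Contract borrowed from the diff: return 1 when the link is ready,
 * 0 to keep polling, negative errno to abort the wait early. */
typedef int (*check_ready_fn)(void);

static int wait_after_reset(time_t deadline, check_ready_fn check)
{
        for (;;) {
                int rc = check();

                if (rc)                 /* ready (>0) or hard error (<0) */
                        return rc > 0 ? 0 : rc;
                if (time(NULL) >= deadline)
                        return -1;      /* -EAGAIN-style timeout */
                usleep(100 * 1000);     /* poll interval */
        }
}

static int polls;
static int fake_check_ready(void)
{
        return ++polls >= 3;    /* becomes ready on the third poll */
}

int main(void)
{
        int rc = wait_after_reset(time(NULL) + 5, fake_check_ready);

        printf("reset wait rc=%d after %d polls\n", rc, polls);
        return 0;
}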
@@ -340,30 +496,35 @@ static void sas_ata_post_internal(struct ata_queued_cmd *qc)
340 496
341 if (qc->err_mask) { 497 if (qc->err_mask) {
342 /* 498 /*
343 * Find the sas_task and kill it. By this point, 499 * Find the sas_task and kill it. By this point, libata
344 * libata has decided to kill the qc, so we needn't 500 * has decided to kill the qc and has frozen the port.
345 * bother with sas_ata_task_done. But we still 501 * In this state sas_ata_task_done() will no longer free
346 * ought to abort the task. 502 * the sas_task, so we need to notify the lldd (via
503 * ->lldd_abort_task) that the task is dead and free it
504 * ourselves.
347 */ 505 */
348 struct sas_task *task = qc->lldd_task; 506 struct sas_task *task = qc->lldd_task;
349 unsigned long flags;
350 507
351 qc->lldd_task = NULL; 508 qc->lldd_task = NULL;
352 if (task) { 509 if (!task)
353 /* Should this be a AT(API) device reset? */ 510 return;
354 spin_lock_irqsave(&task->task_state_lock, flags); 511 task->uldd_task = NULL;
355 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; 512 sas_ata_internal_abort(task);
356 spin_unlock_irqrestore(&task->task_state_lock, flags);
357
358 task->uldd_task = NULL;
359 __sas_task_abort(task);
360 }
361 } 513 }
362} 514}
363 515
516
517static void sas_ata_set_dmamode(struct ata_port *ap, struct ata_device *ata_dev)
518{
519 struct domain_device *dev = ap->private_data;
520 struct sas_internal *i = dev_to_sas_internal(dev);
521
522 if (i->dft->lldd_ata_set_dmamode)
523 i->dft->lldd_ata_set_dmamode(dev);
524}
525
364static struct ata_port_operations sas_sata_ops = { 526static struct ata_port_operations sas_sata_ops = {
365 .prereset = ata_std_prereset, 527 .prereset = ata_std_prereset,
366 .softreset = sas_ata_soft_reset,
367 .hardreset = sas_ata_hard_reset, 528 .hardreset = sas_ata_hard_reset,
368 .postreset = ata_std_postreset, 529 .postreset = ata_std_postreset,
369 .error_handler = ata_std_error_handler, 530 .error_handler = ata_std_error_handler,
@@ -374,6 +535,7 @@ static struct ata_port_operations sas_sata_ops = {
374 .qc_fill_rtf = sas_ata_qc_fill_rtf, 535 .qc_fill_rtf = sas_ata_qc_fill_rtf,
375 .port_start = ata_sas_port_start, 536 .port_start = ata_sas_port_start,
376 .port_stop = ata_sas_port_stop, 537 .port_stop = ata_sas_port_stop,
538 .set_dmamode = sas_ata_set_dmamode,
377}; 539};
378 540
379static struct ata_port_info sata_port_info = { 541static struct ata_port_info sata_port_info = {
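sas_ata_set_dmamode() forwards to the LLDD only when lldd_ata_set_dmamode was
provided, the standard optional-method convention for ops tables. A minimal
sketch of that convention (names hypothetical):

#include <stdio.h>

struct lldd_ops {
        void (*set_dmamode)(int mode);  /* optional: may be NULL */
};

static void forward_set_dmamode(const struct lldd_ops *ops, int mode)
{
        if (ops->set_dmamode)           /* NULL means "not implemented" */
                ops->set_dmamode(mode);
}

static void my_set_dmamode(int mode)
{
        printf("lldd programs DMA mode %d\n", mode);
}

int main(void)
{
        struct lldd_ops with = { .set_dmamode = my_set_dmamode };
        struct lldd_ops without = { 0 };

        forward_set_dmamode(&with, 4);
        forward_set_dmamode(&without, 4);       /* silently skipped */
        return 0;
}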
@@ -384,11 +546,10 @@ static struct ata_port_info sata_port_info = {
384 .port_ops = &sas_sata_ops 546 .port_ops = &sas_sata_ops
385}; 547};
386 548
387int sas_ata_init_host_and_port(struct domain_device *found_dev, 549int sas_ata_init_host_and_port(struct domain_device *found_dev)
388 struct scsi_target *starget)
389{ 550{
390 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 551 struct sas_ha_struct *ha = found_dev->port->ha;
391 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); 552 struct Scsi_Host *shost = ha->core.shost;
392 struct ata_port *ap; 553 struct ata_port *ap;
393 554
394 ata_host_init(&found_dev->sata_dev.ata_host, 555 ata_host_init(&found_dev->sata_dev.ata_host,
@@ -406,6 +567,8 @@ int sas_ata_init_host_and_port(struct domain_device *found_dev,
406 ap->private_data = found_dev; 567 ap->private_data = found_dev;
407 ap->cbl = ATA_CBL_SATA; 568 ap->cbl = ATA_CBL_SATA;
408 ap->scsi_host = shost; 569 ap->scsi_host = shost;
570 /* publish initialized ata port */
571 smp_wmb();
409 found_dev->sata_dev.ap = ap; 572 found_dev->sata_dev.ap = ap;
410 573
411 return 0; 574 return 0;
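The smp_wmb() before the ->ap assignment orders port initialization against
publication, so a lock-free reader that observes sata_dev.ap non-NULL also
observes the initialized fields (the read side needs a matching
barrier/acquire). A C11 userspace analogue of the publish pattern:

#include <stdatomic.h>
#include <stdio.h>

struct port {
        int cbl;
        void *scsi_host;
};

static struct port the_port;
static _Atomic(struct port *) published;

static void publisher(void)
{
        the_port.cbl = 1;               /* fully initialize first... */
        the_port.scsi_host = &the_port;
        /* ...then publish; the release store orders the init before the
         * pointer becomes visible, playing the role of smp_wmb() here. */
        atomic_store_explicit(&published, &the_port, memory_order_release);
}

static void reader(void)
{
        struct port *p = atomic_load_explicit(&published,
                                              memory_order_acquire);
        if (p)  /* acquire guarantees we see the initialized fields */
                printf("port ready, cbl=%d\n", p->cbl);
}

int main(void)
{
        publisher();
        reader();
        return 0;
}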
@@ -436,168 +599,14 @@ void sas_ata_task_abort(struct sas_task *task)
436 complete(waiting); 599 complete(waiting);
437} 600}
438 601
439static void sas_task_timedout(unsigned long _task)
440{
441 struct sas_task *task = (void *) _task;
442 unsigned long flags;
443
444 spin_lock_irqsave(&task->task_state_lock, flags);
445 if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
446 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
447 spin_unlock_irqrestore(&task->task_state_lock, flags);
448
449 complete(&task->completion);
450}
451
452static void sas_disc_task_done(struct sas_task *task)
453{
454 if (!del_timer(&task->timer))
455 return;
456 complete(&task->completion);
457}
458
459#define SAS_DEV_TIMEOUT 10
460
461/**
462 * sas_execute_task -- Basic task processing for discovery
463 * @task: the task to be executed
464 * @buffer: pointer to buffer to do I/O
465 * @size: size of @buffer
466 * @dma_dir: DMA direction. DMA_xxx
467 */
468static int sas_execute_task(struct sas_task *task, void *buffer, int size,
469 enum dma_data_direction dma_dir)
470{
471 int res = 0;
472 struct scatterlist *scatter = NULL;
473 struct task_status_struct *ts = &task->task_status;
474 int num_scatter = 0;
475 int retries = 0;
476 struct sas_internal *i =
477 to_sas_internal(task->dev->port->ha->core.shost->transportt);
478
479 if (dma_dir != DMA_NONE) {
480 scatter = kzalloc(sizeof(*scatter), GFP_KERNEL);
481 if (!scatter)
482 goto out;
483
484 sg_init_one(scatter, buffer, size);
485 num_scatter = 1;
486 }
487
488 task->task_proto = task->dev->tproto;
489 task->scatter = scatter;
490 task->num_scatter = num_scatter;
491 task->total_xfer_len = size;
492 task->data_dir = dma_dir;
493 task->task_done = sas_disc_task_done;
494 if (dma_dir != DMA_NONE &&
495 sas_protocol_ata(task->task_proto)) {
496 task->num_scatter = dma_map_sg(task->dev->port->ha->dev,
497 task->scatter,
498 task->num_scatter,
499 task->data_dir);
500 }
501
502 for (retries = 0; retries < 5; retries++) {
503 task->task_state_flags = SAS_TASK_STATE_PENDING;
504 init_completion(&task->completion);
505
506 task->timer.data = (unsigned long) task;
507 task->timer.function = sas_task_timedout;
508 task->timer.expires = jiffies + SAS_DEV_TIMEOUT*HZ;
509 add_timer(&task->timer);
510
511 res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL);
512 if (res) {
513 del_timer(&task->timer);
514 SAS_DPRINTK("executing SAS discovery task failed:%d\n",
515 res);
516 goto ex_err;
517 }
518 wait_for_completion(&task->completion);
519 res = -ECOMM;
520 if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
521 int res2;
522 SAS_DPRINTK("task aborted, flags:0x%x\n",
523 task->task_state_flags);
524 res2 = i->dft->lldd_abort_task(task);
525 SAS_DPRINTK("came back from abort task\n");
526 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
527 if (res2 == TMF_RESP_FUNC_COMPLETE)
528 continue; /* Retry the task */
529 else
530 goto ex_err;
531 }
532 }
533 if (task->task_status.stat == SAM_STAT_BUSY ||
534 task->task_status.stat == SAM_STAT_TASK_SET_FULL ||
535 task->task_status.stat == SAS_QUEUE_FULL) {
536 SAS_DPRINTK("task: q busy, sleeping...\n");
537 schedule_timeout_interruptible(HZ);
538 } else if (task->task_status.stat == SAM_STAT_CHECK_CONDITION) {
539 struct scsi_sense_hdr shdr;
540
541 if (!scsi_normalize_sense(ts->buf, ts->buf_valid_size,
542 &shdr)) {
543 SAS_DPRINTK("couldn't normalize sense\n");
544 continue;
545 }
546 if ((shdr.sense_key == 6 && shdr.asc == 0x29) ||
547 (shdr.sense_key == 2 && shdr.asc == 4 &&
548 shdr.ascq == 1)) {
549 SAS_DPRINTK("device %016llx LUN: %016llx "
550 "powering up or not ready yet, "
551 "sleeping...\n",
552 SAS_ADDR(task->dev->sas_addr),
553 SAS_ADDR(task->ssp_task.LUN));
554
555 schedule_timeout_interruptible(5*HZ);
556 } else if (shdr.sense_key == 1) {
557 res = 0;
558 break;
559 } else if (shdr.sense_key == 5) {
560 break;
561 } else {
562 SAS_DPRINTK("dev %016llx LUN: %016llx "
563 "sense key:0x%x ASC:0x%x ASCQ:0x%x"
564 "\n",
565 SAS_ADDR(task->dev->sas_addr),
566 SAS_ADDR(task->ssp_task.LUN),
567 shdr.sense_key,
568 shdr.asc, shdr.ascq);
569 }
570 } else if (task->task_status.resp != SAS_TASK_COMPLETE ||
571 task->task_status.stat != SAM_STAT_GOOD) {
572 SAS_DPRINTK("task finished with resp:0x%x, "
573 "stat:0x%x\n",
574 task->task_status.resp,
575 task->task_status.stat);
576 goto ex_err;
577 } else {
578 res = 0;
579 break;
580 }
581 }
582ex_err:
583 if (dma_dir != DMA_NONE) {
584 if (sas_protocol_ata(task->task_proto))
585 dma_unmap_sg(task->dev->port->ha->dev,
586 task->scatter, task->num_scatter,
587 task->data_dir);
588 kfree(scatter);
589 }
590out:
591 return res;
592}
593
594/* ---------- SATA ---------- */
595
596static void sas_get_ata_command_set(struct domain_device *dev) 602static void sas_get_ata_command_set(struct domain_device *dev)
597{ 603{
598 struct dev_to_host_fis *fis = 604 struct dev_to_host_fis *fis =
599 (struct dev_to_host_fis *) dev->frame_rcvd; 605 (struct dev_to_host_fis *) dev->frame_rcvd;
600 606
607 if (dev->dev_type == SATA_PENDING)
608 return;
609
601 if ((fis->sector_count == 1 && /* ATA */ 610 if ((fis->sector_count == 1 && /* ATA */
602 fis->lbal == 1 && 611 fis->lbal == 1 &&
603 fis->lbam == 0 && 612 fis->lbam == 0 &&
@@ -636,224 +645,152 @@ static void sas_get_ata_command_set(struct domain_device *dev)
636 dev->sata_dev.command_set = ATAPI_COMMAND_SET; 645 dev->sata_dev.command_set = ATAPI_COMMAND_SET;
637} 646}
638 647
639/** 648void sas_probe_sata(struct asd_sas_port *port)
640 * sas_issue_ata_cmd -- Basic SATA command processing for discovery
641 * @dev: the device to send the command to
642 * @command: the command register
643 * @features: the features register
644 * @buffer: pointer to buffer to do I/O
645 * @size: size of @buffer
646 * @dma_dir: DMA direction. DMA_xxx
647 */
648static int sas_issue_ata_cmd(struct domain_device *dev, u8 command,
649 u8 features, void *buffer, int size,
650 enum dma_data_direction dma_dir)
651{
652 int res = 0;
653 struct sas_task *task;
654 struct dev_to_host_fis *d2h_fis = (struct dev_to_host_fis *)
655 &dev->frame_rcvd[0];
656
657 res = -ENOMEM;
658 task = sas_alloc_task(GFP_KERNEL);
659 if (!task)
660 goto out;
661
662 task->dev = dev;
663
664 task->ata_task.fis.fis_type = 0x27;
665 task->ata_task.fis.command = command;
666 task->ata_task.fis.features = features;
667 task->ata_task.fis.device = d2h_fis->device;
668 task->ata_task.retry_count = 1;
669
670 res = sas_execute_task(task, buffer, size, dma_dir);
671
672 sas_free_task(task);
673out:
674 return res;
675}
676
677#define ATA_IDENTIFY_DEV 0xEC
678#define ATA_IDENTIFY_PACKET_DEV 0xA1
679#define ATA_SET_FEATURES 0xEF
680#define ATA_FEATURE_PUP_STBY_SPIN_UP 0x07
681
682/**
683 * sas_discover_sata_dev -- discover a STP/SATA device (SATA_DEV)
684 * @dev: STP/SATA device of interest (ATA/ATAPI)
685 *
686 * The LLDD has already been notified of this device, so that we can
687 * send FISes to it. Here we try to get IDENTIFY DEVICE or IDENTIFY
688 * PACKET DEVICE, if ATAPI device, so that the LLDD can fine-tune its
689 * performance for this device.
690 */
691static int sas_discover_sata_dev(struct domain_device *dev)
692{ 649{
693 int res; 650 struct domain_device *dev, *n;
694 __le16 *identify_x; 651 int err;
695 u8 command;
696 652
697 identify_x = kzalloc(512, GFP_KERNEL); 653 mutex_lock(&port->ha->disco_mutex);
698 if (!identify_x) 654 list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) {
699 return -ENOMEM; 655 if (!dev_is_sata(dev))
700 656 continue;
701 if (dev->sata_dev.command_set == ATA_COMMAND_SET) {
702 dev->sata_dev.identify_device = identify_x;
703 command = ATA_IDENTIFY_DEV;
704 } else {
705 dev->sata_dev.identify_packet_device = identify_x;
706 command = ATA_IDENTIFY_PACKET_DEV;
707 }
708 657
709 res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512, 658 err = sas_ata_init_host_and_port(dev);
710 DMA_FROM_DEVICE); 659 if (err)
711 if (res) 660 sas_fail_probe(dev, __func__, err);
712 goto out_err; 661 else
713 662 ata_sas_async_port_init(dev->sata_dev.ap);
714 /* lives on the media? */
715 if (le16_to_cpu(identify_x[0]) & 4) {
716 /* incomplete response */
717 SAS_DPRINTK("sending SET FEATURE/PUP_STBY_SPIN_UP to "
718 "dev %llx\n", SAS_ADDR(dev->sas_addr));
719 if (!(identify_x[83] & cpu_to_le16(1<<6)))
720 goto cont1;
721 res = sas_issue_ata_cmd(dev, ATA_SET_FEATURES,
722 ATA_FEATURE_PUP_STBY_SPIN_UP,
723 NULL, 0, DMA_NONE);
724 if (res)
725 goto cont1;
726
727 schedule_timeout_interruptible(5*HZ); /* More time? */
728 res = sas_issue_ata_cmd(dev, command, 0, identify_x, 512,
729 DMA_FROM_DEVICE);
730 if (res)
731 goto out_err;
732 } 663 }
733cont1: 664 mutex_unlock(&port->ha->disco_mutex);
734 /* XXX Hint: register this SATA device with SATL.
735 When this returns, dev->sata_dev->lu is alive and
736 present.
737 sas_satl_register_dev(dev);
738 */
739 665
740 sas_fill_in_rphy(dev, dev->rphy); 666 list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) {
667 if (!dev_is_sata(dev))
668 continue;
741 669
742 return 0; 670 sas_ata_wait_eh(dev);
743out_err:
744 dev->sata_dev.identify_packet_device = NULL;
745 dev->sata_dev.identify_device = NULL;
746 kfree(identify_x);
747 return res;
748}
749 671
750static int sas_discover_sata_pm(struct domain_device *dev) 672 /* if libata could not bring the link up, don't surface
751{ 673 * the device
752 return -ENODEV; 674 */
675 if (ata_dev_disabled(sas_to_ata_dev(dev)))
676 sas_fail_probe(dev, __func__, -ENODEV);
677 }
753} 678}
754 679
755/** 680/**
756 * sas_discover_sata -- discover an STP/SATA domain device 681 * sas_discover_sata -- discover an STP/SATA domain device
757 * @dev: pointer to struct domain_device of interest 682 * @dev: pointer to struct domain_device of interest
758 * 683 *
 759 * First we notify the LLDD of this device, so we can send frames to 684 * Devices directly attached to an HA port have no parents. All other
760 * it. Then depending on the type of device we call the appropriate 685 * devices do, and should have their "parent" pointer set appropriately
761 * discover functions. Once device discover is done, we notify the 686 * before calling this function.
762 * LLDD so that it can fine-tune its parameters for the device, by
763 * removing it and then adding it. That is, the second time around,
764 * the driver would have certain fields, that it is looking at, set.
765 * Finally we initialize the kobj so that the device can be added to
766 * the system at registration time. Devices directly attached to a HA
767 * port, have no parents. All other devices do, and should have their
768 * "parent" pointer set appropriately before calling this function.
769 */ 687 */
770int sas_discover_sata(struct domain_device *dev) 688int sas_discover_sata(struct domain_device *dev)
771{ 689{
772 int res; 690 int res;
773 691
692 if (dev->dev_type == SATA_PM)
693 return -ENODEV;
694
774 sas_get_ata_command_set(dev); 695 sas_get_ata_command_set(dev);
696 sas_fill_in_rphy(dev, dev->rphy);
775 697
776 res = sas_notify_lldd_dev_found(dev); 698 res = sas_notify_lldd_dev_found(dev);
777 if (res) 699 if (res)
778 return res; 700 return res;
779 701
780 switch (dev->dev_type) { 702 sas_discover_event(dev->port, DISCE_PROBE);
781 case SATA_DEV: 703 return 0;
782 res = sas_discover_sata_dev(dev);
783 break;
784 case SATA_PM:
785 res = sas_discover_sata_pm(dev);
786 break;
787 default:
788 break;
789 }
790 sas_notify_lldd_dev_gone(dev);
791 if (!res) {
792 sas_notify_lldd_dev_found(dev);
793 res = sas_rphy_add(dev->rphy);
794 }
795
796 return res;
797} 704}
798 705
799void sas_ata_strategy_handler(struct Scsi_Host *shost) 706static void async_sas_ata_eh(void *data, async_cookie_t cookie)
800{ 707{
801 struct scsi_device *sdev; 708 struct domain_device *dev = data;
709 struct ata_port *ap = dev->sata_dev.ap;
710 struct sas_ha_struct *ha = dev->port->ha;
802 711
803 shost_for_each_device(sdev, shost) { 712 /* hold a reference over eh since we may be racing with final
804 struct domain_device *ddev = sdev_to_domain_dev(sdev); 713 * remove once all commands are completed
805 struct ata_port *ap = ddev->sata_dev.ap; 714 */
715 kref_get(&dev->kref);
716 sas_ata_printk(KERN_DEBUG, dev, "dev error handler\n");
717 ata_scsi_port_error_handler(ha->core.shost, ap);
718 sas_put_device(dev);
719}
806 720
807 if (!dev_is_sata(ddev)) 721static bool sas_ata_dev_eh_valid(struct domain_device *dev)
808 continue; 722{
723 struct ata_port *ap;
809 724
810 ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata port error handler"); 725 if (!dev_is_sata(dev))
811 ata_scsi_port_error_handler(shost, ap); 726 return false;
812 } 727 ap = dev->sata_dev.ap;
728 /* consume fully initialized ata ports */
729 smp_rmb();
730 return !!ap;
813} 731}
814 732
815int sas_ata_timed_out(struct scsi_cmnd *cmd, struct sas_task *task, 733void sas_ata_strategy_handler(struct Scsi_Host *shost)
816 enum blk_eh_timer_return *rtn)
817{ 734{
818 struct domain_device *ddev = cmd_to_domain_dev(cmd); 735 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
736 LIST_HEAD(async);
737 int i;
738
 739 /* it's ok to defer revalidation events during ata eh; these
740 * disks are in one of three states:
741 * 1/ present for initial domain discovery, and these
742 * resets will cause bcn flutters
743 * 2/ hot removed, we'll discover that after eh fails
744 * 3/ hot added after initial discovery, lost the race, and need
745 * to catch the next train.
746 */
747 sas_disable_revalidation(sas_ha);
819 748
820 if (!dev_is_sata(ddev) || task) 749 spin_lock_irq(&sas_ha->phy_port_lock);
821 return 0; 750 for (i = 0; i < sas_ha->num_phys; i++) {
751 struct asd_sas_port *port = sas_ha->sas_port[i];
752 struct domain_device *dev;
822 753
823 /* we're a sata device with no task, so this must be a libata 754 spin_lock(&port->dev_list_lock);
824 * eh timeout. Ideally should hook into libata timeout 755 list_for_each_entry(dev, &port->dev_list, dev_list_node) {
825 * handling, but there's no point, it just wants to activate 756 if (!sas_ata_dev_eh_valid(dev))
826 * the eh thread */ 757 continue;
827 *rtn = BLK_EH_NOT_HANDLED; 758 async_schedule_domain(async_sas_ata_eh, dev, &async);
828 return 1; 759 }
760 spin_unlock(&port->dev_list_lock);
761 }
762 spin_unlock_irq(&sas_ha->phy_port_lock);
763
764 async_synchronize_full_domain(&async);
765
766 sas_enable_revalidation(sas_ha);
829} 767}
830 768
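
sas_ata_strategy_handler() above fans recovery out with one async callback per ATA device and joins the whole batch with async_synchronize_full_domain(), all while revalidation is held off. A reduced sketch of that fan-out/join shape under the same era's async API; my_dev, my_eh and recover_all are illustrative names:

#include <linux/async.h>
#include <linux/kernel.h>
#include <linux/list.h>

struct my_dev {
	struct list_head node;
	int id;
};

/* runs concurrently with the callbacks queued for the other devices */
static void my_eh(void *data, async_cookie_t cookie)
{
	struct my_dev *dev = data;

	pr_debug("recovering device %d\n", dev->id);
}

static void recover_all(struct list_head *devs)
{
	LIST_HEAD(domain);	/* private domain: join only our own work */
	struct my_dev *dev;

	list_for_each_entry(dev, devs, node)
		async_schedule_domain(my_eh, dev, &domain);

	/* waits for the callbacks scheduled above and nothing else */
	async_synchronize_full_domain(&domain);
}

Using a private domain keeps the join from blocking on unrelated async work, such as libata's own async probing.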
831int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q, 769void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
832 struct list_head *done_q) 770 struct list_head *done_q)
833{ 771{
834 int rtn = 0;
835 struct scsi_cmnd *cmd, *n; 772 struct scsi_cmnd *cmd, *n;
836 struct ata_port *ap; 773 struct domain_device *eh_dev;
837 774
838 do { 775 do {
839 LIST_HEAD(sata_q); 776 LIST_HEAD(sata_q);
840 777 eh_dev = NULL;
841 ap = NULL;
842 778
843 list_for_each_entry_safe(cmd, n, work_q, eh_entry) { 779 list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
844 struct domain_device *ddev = cmd_to_domain_dev(cmd); 780 struct domain_device *ddev = cmd_to_domain_dev(cmd);
845 781
846 if (!dev_is_sata(ddev) || TO_SAS_TASK(cmd)) 782 if (!dev_is_sata(ddev) || TO_SAS_TASK(cmd))
847 continue; 783 continue;
848 if (ap && ap != ddev->sata_dev.ap) 784 if (eh_dev && eh_dev != ddev)
849 continue; 785 continue;
850 ap = ddev->sata_dev.ap; 786 eh_dev = ddev;
851 rtn = 1;
852 list_move(&cmd->eh_entry, &sata_q); 787 list_move(&cmd->eh_entry, &sata_q);
853 } 788 }
854 789
855 if (!list_empty(&sata_q)) { 790 if (!list_empty(&sata_q)) {
856 ata_port_printk(ap, KERN_DEBUG, "sas eh calling libata cmd error handler\n"); 791 struct ata_port *ap = eh_dev->sata_dev.ap;
792
793 sas_ata_printk(KERN_DEBUG, eh_dev, "cmd error handler\n");
857 ata_scsi_cmd_error_handler(shost, ap, &sata_q); 794 ata_scsi_cmd_error_handler(shost, ap, &sata_q);
858 /* 795 /*
859 * ata's error handler may leave the cmd on the list 796 * ata's error handler may leave the cmd on the list
@@ -869,7 +806,36 @@ int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
869 while (!list_empty(&sata_q)) 806 while (!list_empty(&sata_q))
870 list_del_init(sata_q.next); 807 list_del_init(sata_q.next);
871 } 808 }
872 } while (ap); 809 } while (eh_dev);
810}
811
812void sas_ata_schedule_reset(struct domain_device *dev)
813{
814 struct ata_eh_info *ehi;
815 struct ata_port *ap;
816 unsigned long flags;
817
818 if (!dev_is_sata(dev))
819 return;
820
821 ap = dev->sata_dev.ap;
822 ehi = &ap->link.eh_info;
823
824 spin_lock_irqsave(ap->lock, flags);
825 ehi->err_mask |= AC_ERR_TIMEOUT;
826 ehi->action |= ATA_EH_RESET;
827 ata_port_schedule_eh(ap);
828 spin_unlock_irqrestore(ap->lock, flags);
829}
830EXPORT_SYMBOL_GPL(sas_ata_schedule_reset);
831
832void sas_ata_wait_eh(struct domain_device *dev)
833{
834 struct ata_port *ap;
835
836 if (!dev_is_sata(dev))
837 return;
873 838
874 return rtn; 839 ap = dev->sata_dev.ap;
840 ata_port_wait_eh(ap);
875} 841}
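
sas_ata_schedule_reset() above shows the standard way to kick libata error handling from outside the port: flag the link's eh_info under ap->lock, then call ata_port_schedule_eh(). The EXPORT_SYMBOL_GPL suggests LLDD use; a hypothetical caller from a driver's timeout path might look like this (my_lldd_task_timed_out is illustrative, not part of the patch):

#include <scsi/libsas.h>
#include <scsi/sas_ata.h>

/* hypothetical LLDD fragment: escalate a stuck command on an ATA
 * device to libata error handling instead of aborting it locally */
static void my_lldd_task_timed_out(struct sas_task *task)
{
	/* no-op for non-ATA devices; otherwise flags AC_ERR_TIMEOUT,
	 * requests ATA_EH_RESET and wakes the port's EH machinery */
	sas_ata_schedule_reset(task->dev);
}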
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 54a5199ceb5..36467967560 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -30,29 +30,30 @@
30 30
31#include <scsi/scsi_transport.h> 31#include <scsi/scsi_transport.h>
32#include <scsi/scsi_transport_sas.h> 32#include <scsi/scsi_transport_sas.h>
33#include <scsi/sas_ata.h>
33#include "../scsi_sas_internal.h" 34#include "../scsi_sas_internal.h"
34 35
35/* ---------- Basic task processing for discovery purposes ---------- */ 36/* ---------- Basic task processing for discovery purposes ---------- */
36 37
37void sas_init_dev(struct domain_device *dev) 38void sas_init_dev(struct domain_device *dev)
38{ 39{
39 INIT_LIST_HEAD(&dev->siblings); 40 switch (dev->dev_type) {
40 INIT_LIST_HEAD(&dev->dev_list_node); 41 case SAS_END_DEV:
41 switch (dev->dev_type) { 42 break;
42 case SAS_END_DEV: 43 case EDGE_DEV:
43 break; 44 case FANOUT_DEV:
44 case EDGE_DEV: 45 INIT_LIST_HEAD(&dev->ex_dev.children);
45 case FANOUT_DEV: 46 mutex_init(&dev->ex_dev.cmd_mutex);
46 INIT_LIST_HEAD(&dev->ex_dev.children); 47 break;
47 break; 48 case SATA_DEV:
48 case SATA_DEV: 49 case SATA_PM:
49 case SATA_PM: 50 case SATA_PM_PORT:
50 case SATA_PM_PORT: 51 case SATA_PENDING:
51 INIT_LIST_HEAD(&dev->sata_dev.children); 52 INIT_LIST_HEAD(&dev->sata_dev.children);
52 break; 53 break;
53 default: 54 default:
54 break; 55 break;
55 } 56 }
56} 57}
57 58
58/* ---------- Domain device discovery ---------- */ 59/* ---------- Domain device discovery ---------- */
@@ -68,19 +69,18 @@ void sas_init_dev(struct domain_device *dev)
68 */ 69 */
69static int sas_get_port_device(struct asd_sas_port *port) 70static int sas_get_port_device(struct asd_sas_port *port)
70{ 71{
71 unsigned long flags;
72 struct asd_sas_phy *phy; 72 struct asd_sas_phy *phy;
73 struct sas_rphy *rphy; 73 struct sas_rphy *rphy;
74 struct domain_device *dev; 74 struct domain_device *dev;
75 75
76 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 76 dev = sas_alloc_device();
77 if (!dev) 77 if (!dev)
78 return -ENOMEM; 78 return -ENOMEM;
79 79
80 spin_lock_irqsave(&port->phy_list_lock, flags); 80 spin_lock_irq(&port->phy_list_lock);
81 if (list_empty(&port->phy_list)) { 81 if (list_empty(&port->phy_list)) {
82 spin_unlock_irqrestore(&port->phy_list_lock, flags); 82 spin_unlock_irq(&port->phy_list_lock);
83 kfree(dev); 83 sas_put_device(dev);
84 return -ENODEV; 84 return -ENODEV;
85 } 85 }
86 phy = container_of(port->phy_list.next, struct asd_sas_phy, port_phy_el); 86 phy = container_of(port->phy_list.next, struct asd_sas_phy, port_phy_el);
@@ -88,7 +88,7 @@ static int sas_get_port_device(struct asd_sas_port *port)
88 memcpy(dev->frame_rcvd, phy->frame_rcvd, min(sizeof(dev->frame_rcvd), 88 memcpy(dev->frame_rcvd, phy->frame_rcvd, min(sizeof(dev->frame_rcvd),
89 (size_t)phy->frame_rcvd_size)); 89 (size_t)phy->frame_rcvd_size));
90 spin_unlock(&phy->frame_rcvd_lock); 90 spin_unlock(&phy->frame_rcvd_lock);
91 spin_unlock_irqrestore(&port->phy_list_lock, flags); 91 spin_unlock_irq(&port->phy_list_lock);
92 92
93 if (dev->frame_rcvd[0] == 0x34 && port->oob_mode == SATA_OOB_MODE) { 93 if (dev->frame_rcvd[0] == 0x34 && port->oob_mode == SATA_OOB_MODE) {
94 struct dev_to_host_fis *fis = 94 struct dev_to_host_fis *fis =
@@ -130,9 +130,14 @@ static int sas_get_port_device(struct asd_sas_port *port)
130 } 130 }
131 131
132 if (!rphy) { 132 if (!rphy) {
133 kfree(dev); 133 sas_put_device(dev);
134 return -ENODEV; 134 return -ENODEV;
135 } 135 }
136
137 spin_lock_irq(&port->phy_list_lock);
138 list_for_each_entry(phy, &port->phy_list, port_phy_el)
139 sas_phy_set_target(phy, dev);
140 spin_unlock_irq(&port->phy_list_lock);
136 rphy->identify.phy_identifier = phy->phy->identify.phy_identifier; 141 rphy->identify.phy_identifier = phy->phy->identify.phy_identifier;
137 memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE); 142 memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE);
138 sas_fill_in_rphy(dev, rphy); 143 sas_fill_in_rphy(dev, rphy);
@@ -147,11 +152,17 @@ static int sas_get_port_device(struct asd_sas_port *port)
147 memset(port->disc.eeds_a, 0, SAS_ADDR_SIZE); 152 memset(port->disc.eeds_a, 0, SAS_ADDR_SIZE);
148 memset(port->disc.eeds_b, 0, SAS_ADDR_SIZE); 153 memset(port->disc.eeds_b, 0, SAS_ADDR_SIZE);
149 port->disc.max_level = 0; 154 port->disc.max_level = 0;
155 sas_device_set_phy(dev, port->port);
150 156
151 dev->rphy = rphy; 157 dev->rphy = rphy;
152 spin_lock_irq(&port->dev_list_lock); 158
153 list_add_tail(&dev->dev_list_node, &port->dev_list); 159 if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV)
154 spin_unlock_irq(&port->dev_list_lock); 160 list_add_tail(&dev->disco_list_node, &port->disco_list);
161 else {
162 spin_lock_irq(&port->dev_list_lock);
163 list_add_tail(&dev->dev_list_node, &port->dev_list);
164 spin_unlock_irq(&port->dev_list_lock);
165 }
155 166
156 return 0; 167 return 0;
157} 168}
@@ -173,6 +184,7 @@ int sas_notify_lldd_dev_found(struct domain_device *dev)
173 dev_name(sas_ha->dev), 184 dev_name(sas_ha->dev),
174 SAS_ADDR(dev->sas_addr), res); 185 SAS_ADDR(dev->sas_addr), res);
175 } 186 }
187 kref_get(&dev->kref);
176 } 188 }
177 return res; 189 return res;
178} 190}
@@ -184,12 +196,40 @@ void sas_notify_lldd_dev_gone(struct domain_device *dev)
184 struct Scsi_Host *shost = sas_ha->core.shost; 196 struct Scsi_Host *shost = sas_ha->core.shost;
185 struct sas_internal *i = to_sas_internal(shost->transportt); 197 struct sas_internal *i = to_sas_internal(shost->transportt);
186 198
187 if (i->dft->lldd_dev_gone) 199 if (i->dft->lldd_dev_gone) {
188 i->dft->lldd_dev_gone(dev); 200 i->dft->lldd_dev_gone(dev);
201 sas_put_device(dev);
202 }
189} 203}
190 204
191/* ---------- Common/dispatchers ---------- */ 205static void sas_probe_devices(struct work_struct *work)
206{
207 struct domain_device *dev, *n;
208 struct sas_discovery_event *ev =
209 container_of(work, struct sas_discovery_event, work);
210 struct asd_sas_port *port = ev->port;
211
212 clear_bit(DISCE_PROBE, &port->disc.pending);
192 213
214 /* devices must be domain members before link recovery and probe */
215 list_for_each_entry(dev, &port->disco_list, disco_list_node) {
216 spin_lock_irq(&port->dev_list_lock);
217 list_add_tail(&dev->dev_list_node, &port->dev_list);
218 spin_unlock_irq(&port->dev_list_lock);
219 }
220
221 sas_probe_sata(port);
222
223 list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) {
224 int err;
225
226 err = sas_rphy_add(dev->rphy);
227 if (err)
228 sas_fail_probe(dev, __func__, err);
229 else
230 list_del_init(&dev->disco_list_node);
231 }
232}
193 233
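
sas_probe_devices() above formalizes a staged probe: candidates first become full domain members, SATA links then recover (sas_probe_sata()), and only the survivors are surfaced with sas_rphy_add(). The skeleton of that two-list hand-off, reduced to illustrative types; struct obj, surface() and the list names are not from the patch:

#include <linux/list.h>
#include <linux/spinlock.h>

struct obj {
	struct list_head disco_node;	/* found, not yet surfaced */
	struct list_head dev_node;	/* full domain membership */
};

struct port {
	spinlock_t lock;
	struct list_head disco_list;
	struct list_head dev_list;
};

static int surface(struct obj *o)
{
	return 0;	/* stands in for sas_rphy_add() */
}

static void probe_devices(struct port *port)
{
	struct obj *o, *n;

	/* 1) make every candidate visible to the rest of the domain */
	list_for_each_entry(o, &port->disco_list, disco_node) {
		spin_lock_irq(&port->lock);
		list_add_tail(&o->dev_node, &port->dev_list);
		spin_unlock_irq(&port->lock);
	}

	/* 2) link recovery would run here, cf. sas_probe_sata() */

	/* 3) surface survivors; failures are left for teardown */
	list_for_each_entry_safe(o, n, &port->disco_list, disco_node) {
		if (surface(o) == 0)
			list_del_init(&o->disco_node);
	}
}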
194/** 234/**
195 * sas_discover_end_dev -- discover an end device (SSP, etc) 235 * sas_discover_end_dev -- discover an end device (SSP, etc)
@@ -203,22 +243,36 @@ int sas_discover_end_dev(struct domain_device *dev)
203 243
204 res = sas_notify_lldd_dev_found(dev); 244 res = sas_notify_lldd_dev_found(dev);
205 if (res) 245 if (res)
206 goto out_err2; 246 return res;
207 247 sas_discover_event(dev->port, DISCE_PROBE);
208 res = sas_rphy_add(dev->rphy);
209 if (res)
210 goto out_err;
211 248
212 return 0; 249 return 0;
213
214out_err:
215 sas_notify_lldd_dev_gone(dev);
216out_err2:
217 return res;
218} 250}
219 251
220/* ---------- Device registration and unregistration ---------- */ 252/* ---------- Device registration and unregistration ---------- */
221 253
254void sas_free_device(struct kref *kref)
255{
256 struct domain_device *dev = container_of(kref, typeof(*dev), kref);
257
258 if (dev->parent)
259 sas_put_device(dev->parent);
260
261 sas_port_put_phy(dev->phy);
262 dev->phy = NULL;
263
264 /* remove the phys and ports, everything else should be gone */
265 if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV)
266 kfree(dev->ex_dev.ex_phy);
267
268 if (dev_is_sata(dev) && dev->sata_dev.ap) {
269 ata_sas_port_destroy(dev->sata_dev.ap);
270 dev->sata_dev.ap = NULL;
271 }
272
273 kfree(dev);
274}
275
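
Note that sas_free_device() above, the kref release callback, drops the reference the child held on its parent, so releasing a leaf device can cascade up an otherwise-idle expander tree. A minimal sketch of that counted parent-pointer convention; struct node and friends are illustrative:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct node {
	struct kref kref;
	struct node *parent;	/* counted: each child pins its parent */
};

static void node_release(struct kref *kref)
{
	struct node *n = container_of(kref, struct node, kref);
	struct node *parent = n->parent;

	kfree(n);
	if (parent)		/* may recurse up an idle tree */
		kref_put(&parent->kref, node_release);
}

static struct node *node_alloc(struct node *parent)
{
	struct node *n = kzalloc(sizeof(*n), GFP_KERNEL);

	if (!n)
		return NULL;
	kref_init(&n->kref);
	if (parent)
		kref_get(&parent->kref);
	n->parent = parent;
	return n;
}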
222static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_device *dev) 276static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_device *dev)
223{ 277{
224 sas_notify_lldd_dev_gone(dev); 278 sas_notify_lldd_dev_gone(dev);
@@ -230,34 +284,84 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d
230 spin_lock_irq(&port->dev_list_lock); 284 spin_lock_irq(&port->dev_list_lock);
231 list_del_init(&dev->dev_list_node); 285 list_del_init(&dev->dev_list_node);
232 spin_unlock_irq(&port->dev_list_lock); 286 spin_unlock_irq(&port->dev_list_lock);
287
288 sas_put_device(dev);
233} 289}
234 290
235void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev) 291static void sas_destruct_devices(struct work_struct *work)
236{ 292{
237 if (dev->rphy) { 293 struct domain_device *dev, *n;
294 struct sas_discovery_event *ev =
295 container_of(work, struct sas_discovery_event, work);
296 struct asd_sas_port *port = ev->port;
297
298 clear_bit(DISCE_DESTRUCT, &port->disc.pending);
299
300 list_for_each_entry_safe(dev, n, &port->destroy_list, disco_list_node) {
301 list_del_init(&dev->disco_list_node);
302
238 sas_remove_children(&dev->rphy->dev); 303 sas_remove_children(&dev->rphy->dev);
239 sas_rphy_delete(dev->rphy); 304 sas_rphy_delete(dev->rphy);
240 dev->rphy = NULL; 305 dev->rphy = NULL;
306 sas_unregister_common_dev(port, dev);
241 } 307 }
242 if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) { 308}
243 /* remove the phys and ports, everything else should be gone */ 309
244 kfree(dev->ex_dev.ex_phy); 310void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
245 dev->ex_dev.ex_phy = NULL; 311{
312 if (!test_bit(SAS_DEV_DESTROY, &dev->state) &&
313 !list_empty(&dev->disco_list_node)) {
314 /* this rphy never saw sas_rphy_add */
315 list_del_init(&dev->disco_list_node);
316 sas_rphy_free(dev->rphy);
317 dev->rphy = NULL;
318 sas_unregister_common_dev(port, dev);
319 }
320
321 if (dev->rphy && !test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) {
322 sas_rphy_unlink(dev->rphy);
323 list_move_tail(&dev->disco_list_node, &port->destroy_list);
324 sas_discover_event(dev->port, DISCE_DESTRUCT);
246 } 325 }
247 sas_unregister_common_dev(port, dev);
248} 326}
249 327
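
The rewritten sas_unregister_dev() above splits teardown into two cases: a device that never reached sas_rphy_add() is torn down synchronously, while a published one is only moved to destroy_list, and test_and_set_bit(SAS_DEV_DESTROY, ...) makes that hand-off idempotent so racing callers queue the device exactly once. A sketch of the idempotent-queue idiom with illustrative names:

#include <linux/bitops.h>
#include <linux/list.h>

#define DEV_DESTROY 0	/* bit index in dev->state */

struct dev {
	unsigned long state;
	struct list_head node;
};

/* callable from several paths; only the first caller queues */
static void queue_destruction(struct dev *d, struct list_head *destroy_list)
{
	if (test_and_set_bit(DEV_DESTROY, &d->state))
		return;		/* someone already queued it */

	list_move_tail(&d->node, destroy_list);
	/* then schedule the destruct work, cf. DISCE_DESTRUCT */
}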
250void sas_unregister_domain_devices(struct asd_sas_port *port) 328void sas_unregister_domain_devices(struct asd_sas_port *port, int gone)
251{ 329{
252 struct domain_device *dev, *n; 330 struct domain_device *dev, *n;
253 331
254 list_for_each_entry_safe_reverse(dev, n, &port->dev_list, dev_list_node) 332 list_for_each_entry_safe_reverse(dev, n, &port->dev_list, dev_list_node) {
333 if (gone)
334 set_bit(SAS_DEV_GONE, &dev->state);
335 sas_unregister_dev(port, dev);
336 }
337
338 list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node)
255 sas_unregister_dev(port, dev); 339 sas_unregister_dev(port, dev);
256 340
257 port->port->rphy = NULL; 341 port->port->rphy = NULL;
258 342
259} 343}
260 344
345void sas_device_set_phy(struct domain_device *dev, struct sas_port *port)
346{
347 struct sas_ha_struct *ha;
348 struct sas_phy *new_phy;
349
350 if (!dev)
351 return;
352
353 ha = dev->port->ha;
354 new_phy = sas_port_get_phy(port);
355
356 /* pin and record last seen phy */
357 spin_lock_irq(&ha->phy_port_lock);
358 if (new_phy) {
359 sas_port_put_phy(dev->phy);
360 dev->phy = new_phy;
361 }
362 spin_unlock_irq(&ha->phy_port_lock);
363}
364
261/* ---------- Discovery and Revalidation ---------- */ 365/* ---------- Discovery and Revalidation ---------- */
262 366
263/** 367/**
@@ -277,8 +381,7 @@ static void sas_discover_domain(struct work_struct *work)
277 container_of(work, struct sas_discovery_event, work); 381 container_of(work, struct sas_discovery_event, work);
278 struct asd_sas_port *port = ev->port; 382 struct asd_sas_port *port = ev->port;
279 383
280 sas_begin_event(DISCE_DISCOVER_DOMAIN, &port->disc.disc_event_lock, 384 clear_bit(DISCE_DISCOVER_DOMAIN, &port->disc.pending);
281 &port->disc.pending);
282 385
283 if (port->port_dev) 386 if (port->port_dev)
284 return; 387 return;
@@ -318,11 +421,12 @@ static void sas_discover_domain(struct work_struct *work)
318 sas_rphy_free(dev->rphy); 421 sas_rphy_free(dev->rphy);
319 dev->rphy = NULL; 422 dev->rphy = NULL;
320 423
424 list_del_init(&dev->disco_list_node);
321 spin_lock_irq(&port->dev_list_lock); 425 spin_lock_irq(&port->dev_list_lock);
322 list_del_init(&dev->dev_list_node); 426 list_del_init(&dev->dev_list_node);
323 spin_unlock_irq(&port->dev_list_lock); 427 spin_unlock_irq(&port->dev_list_lock);
324 428
325 kfree(dev); /* not kobject_register-ed yet */ 429 sas_put_device(dev);
326 port->port_dev = NULL; 430 port->port_dev = NULL;
327 } 431 }
328 432
@@ -336,21 +440,51 @@ static void sas_revalidate_domain(struct work_struct *work)
336 struct sas_discovery_event *ev = 440 struct sas_discovery_event *ev =
337 container_of(work, struct sas_discovery_event, work); 441 container_of(work, struct sas_discovery_event, work);
338 struct asd_sas_port *port = ev->port; 442 struct asd_sas_port *port = ev->port;
443 struct sas_ha_struct *ha = port->ha;
444
445 /* prevent revalidation from finding sata links in recovery */
446 mutex_lock(&ha->disco_mutex);
447 if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
448 SAS_DPRINTK("REVALIDATION DEFERRED on port %d, pid:%d\n",
449 port->id, task_pid_nr(current));
450 goto out;
451 }
339 452
340 sas_begin_event(DISCE_REVALIDATE_DOMAIN, &port->disc.disc_event_lock, 453 clear_bit(DISCE_REVALIDATE_DOMAIN, &port->disc.pending);
341 &port->disc.pending);
342 454
343 SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id, 455 SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
344 task_pid_nr(current)); 456 task_pid_nr(current));
457
345 if (port->port_dev) 458 if (port->port_dev)
346 res = sas_ex_revalidate_domain(port->port_dev); 459 res = sas_ex_revalidate_domain(port->port_dev);
347 460
348 SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n", 461 SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
349 port->id, task_pid_nr(current), res); 462 port->id, task_pid_nr(current), res);
463 out:
464 mutex_unlock(&ha->disco_mutex);
350} 465}
351 466
352/* ---------- Events ---------- */ 467/* ---------- Events ---------- */
353 468
469static void sas_chain_work(struct sas_ha_struct *ha, struct work_struct *work)
470{
 471 /* chained work is not subject to SAS_HA_DRAINING or SAS_HA_REGISTERED */
472 scsi_queue_work(ha->core.shost, work);
473}
474
475static void sas_chain_event(int event, unsigned long *pending,
476 struct work_struct *work,
477 struct sas_ha_struct *ha)
478{
479 if (!test_and_set_bit(event, pending)) {
480 unsigned long flags;
481
482 spin_lock_irqsave(&ha->state_lock, flags);
483 sas_chain_work(ha, work);
484 spin_unlock_irqrestore(&ha->state_lock, flags);
485 }
486}
487
354int sas_discover_event(struct asd_sas_port *port, enum discover_event ev) 488int sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
355{ 489{
356 struct sas_discovery *disc; 490 struct sas_discovery *disc;
@@ -361,8 +495,7 @@ int sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
361 495
362 BUG_ON(ev >= DISC_NUM_EVENTS); 496 BUG_ON(ev >= DISC_NUM_EVENTS);
363 497
364 sas_queue_event(ev, &disc->disc_event_lock, &disc->pending, 498 sas_chain_event(ev, &disc->pending, &disc->disc_work[ev].work, port->ha);
365 &disc->disc_work[ev].work, port->ha);
366 499
367 return 0; 500 return 0;
368} 501}
@@ -380,9 +513,10 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
380 static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = { 513 static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = {
381 [DISCE_DISCOVER_DOMAIN] = sas_discover_domain, 514 [DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
382 [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain, 515 [DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
516 [DISCE_PROBE] = sas_probe_devices,
517 [DISCE_DESTRUCT] = sas_destruct_devices,
383 }; 518 };
384 519
385 spin_lock_init(&disc->disc_event_lock);
386 disc->pending = 0; 520 disc->pending = 0;
387 for (i = 0; i < DISC_NUM_EVENTS; i++) { 521 for (i = 0; i < DISC_NUM_EVENTS; i++) {
388 INIT_WORK(&disc->disc_work[i].work, sas_event_fns[i]); 522 INIT_WORK(&disc->disc_work[i].work, sas_event_fns[i]);
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 9db30fb5caf..16639bbae62 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -22,15 +22,103 @@
22 * 22 *
23 */ 23 */
24 24
25#include <linux/export.h>
25#include <scsi/scsi_host.h> 26#include <scsi/scsi_host.h>
26#include "sas_internal.h" 27#include "sas_internal.h"
27#include "sas_dump.h" 28#include "sas_dump.h"
28 29
30void sas_queue_work(struct sas_ha_struct *ha, struct work_struct *work)
31{
32 if (!test_bit(SAS_HA_REGISTERED, &ha->state))
33 return;
34
35 if (test_bit(SAS_HA_DRAINING, &ha->state))
36 list_add(&work->entry, &ha->defer_q);
37 else
38 scsi_queue_work(ha->core.shost, work);
39}
40
41static void sas_queue_event(int event, unsigned long *pending,
42 struct work_struct *work,
43 struct sas_ha_struct *ha)
44{
45 if (!test_and_set_bit(event, pending)) {
46 unsigned long flags;
47
48 spin_lock_irqsave(&ha->state_lock, flags);
49 sas_queue_work(ha, work);
50 spin_unlock_irqrestore(&ha->state_lock, flags);
51 }
52}
53
54
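
sas_queue_event() above coalesces event storms: each event type owns a pending bit, so between the first notification and the moment the handler clears its bit, further notifications are no-ops, and the state_lock section makes the queueing atomic with respect to the drain logic below. A single-event sketch of the level-triggered pattern; struct notifier and the helpers are illustrative:

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct notifier {
	unsigned long pending;	/* bit 0: event already queued */
	struct work_struct work;
};

static void notify(struct notifier *n)
{
	/* coalesce: re-notification before the handler runs is a no-op */
	if (!test_and_set_bit(0, &n->pending))
		schedule_work(&n->work);
}

static void handler(struct work_struct *w)
{
	struct notifier *n = container_of(w, struct notifier, work);

	/* re-arm first: an event arriving mid-handling queues new work */
	clear_bit(0, &n->pending);

	/* process the (level-triggered) condition here */
}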
55void __sas_drain_work(struct sas_ha_struct *ha)
56{
57 struct workqueue_struct *wq = ha->core.shost->work_q;
58 struct work_struct *w, *_w;
59
60 set_bit(SAS_HA_DRAINING, &ha->state);
61 /* flush submitters */
62 spin_lock_irq(&ha->state_lock);
63 spin_unlock_irq(&ha->state_lock);
64
65 drain_workqueue(wq);
66
67 spin_lock_irq(&ha->state_lock);
68 clear_bit(SAS_HA_DRAINING, &ha->state);
69 list_for_each_entry_safe(w, _w, &ha->defer_q, entry) {
70 list_del_init(&w->entry);
71 sas_queue_work(ha, w);
72 }
73 spin_unlock_irq(&ha->state_lock);
74}
75
76int sas_drain_work(struct sas_ha_struct *ha)
77{
78 int err;
79
80 err = mutex_lock_interruptible(&ha->drain_mutex);
81 if (err)
82 return err;
83 if (test_bit(SAS_HA_REGISTERED, &ha->state))
84 __sas_drain_work(ha);
85 mutex_unlock(&ha->drain_mutex);
86
87 return 0;
88}
89EXPORT_SYMBOL_GPL(sas_drain_work);
90
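
__sas_drain_work() above is a compact defer-and-drain: once the draining bit is set, submitters park new work on defer_q instead of the workqueue; the empty lock/unlock of state_lock flushes any submitter already inside its critical section; drain_workqueue() then runs everything in flight, including work that re-queues itself; finally the deferred items are replayed. The idiom in isolation, with struct drainer and friends as illustrative stand-ins:

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct drainer {
	spinlock_t lock;
	unsigned long draining;		/* bit 0 */
	struct list_head defer_q;
	struct workqueue_struct *wq;
};

static void submit(struct drainer *d, struct work_struct *w)
{
	spin_lock_irq(&d->lock);
	if (test_bit(0, &d->draining))
		list_add_tail(&w->entry, &d->defer_q);	/* park it */
	else
		queue_work(d->wq, w);
	spin_unlock_irq(&d->lock);
}

static void drain(struct drainer *d)
{
	struct work_struct *w, *n;

	set_bit(0, &d->draining);
	spin_lock_irq(&d->lock);	/* flush submitters already past the check */
	spin_unlock_irq(&d->lock);

	drain_workqueue(d->wq);		/* runs queued and self-requeued work */

	spin_lock_irq(&d->lock);
	clear_bit(0, &d->draining);
	list_for_each_entry_safe(w, n, &d->defer_q, entry) {
		list_del_init(&w->entry);
		queue_work(d->wq, w);	/* replay deferred work */
	}
	spin_unlock_irq(&d->lock);
}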
91void sas_disable_revalidation(struct sas_ha_struct *ha)
92{
93 mutex_lock(&ha->disco_mutex);
94 set_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state);
95 mutex_unlock(&ha->disco_mutex);
96}
97
98void sas_enable_revalidation(struct sas_ha_struct *ha)
99{
100 int i;
101
102 mutex_lock(&ha->disco_mutex);
103 clear_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state);
104 for (i = 0; i < ha->num_phys; i++) {
105 struct asd_sas_port *port = ha->sas_port[i];
106 const int ev = DISCE_REVALIDATE_DOMAIN;
107 struct sas_discovery *d = &port->disc;
108
109 if (!test_and_clear_bit(ev, &d->pending))
110 continue;
111
112 sas_queue_event(ev, &d->pending, &d->disc_work[ev].work, ha);
113 }
114 mutex_unlock(&ha->disco_mutex);
115}
116
29static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event) 117static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event)
30{ 118{
31 BUG_ON(event >= HA_NUM_EVENTS); 119 BUG_ON(event >= HA_NUM_EVENTS);
32 120
33 sas_queue_event(event, &sas_ha->event_lock, &sas_ha->pending, 121 sas_queue_event(event, &sas_ha->pending,
34 &sas_ha->ha_events[event].work, sas_ha); 122 &sas_ha->ha_events[event].work, sas_ha);
35} 123}
36 124
@@ -40,7 +128,7 @@ static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
40 128
41 BUG_ON(event >= PORT_NUM_EVENTS); 129 BUG_ON(event >= PORT_NUM_EVENTS);
42 130
43 sas_queue_event(event, &ha->event_lock, &phy->port_events_pending, 131 sas_queue_event(event, &phy->port_events_pending,
44 &phy->port_events[event].work, ha); 132 &phy->port_events[event].work, ha);
45} 133}
46 134
@@ -50,7 +138,7 @@ static void notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
50 138
51 BUG_ON(event >= PHY_NUM_EVENTS); 139 BUG_ON(event >= PHY_NUM_EVENTS);
52 140
53 sas_queue_event(event, &ha->event_lock, &phy->phy_events_pending, 141 sas_queue_event(event, &phy->phy_events_pending,
54 &phy->phy_events[event].work, ha); 142 &phy->phy_events[event].work, ha);
55} 143}
56 144
@@ -62,8 +150,6 @@ int sas_init_events(struct sas_ha_struct *sas_ha)
62 150
63 int i; 151 int i;
64 152
65 spin_lock_init(&sas_ha->event_lock);
66
67 for (i = 0; i < HA_NUM_EVENTS; i++) { 153 for (i = 0; i < HA_NUM_EVENTS; i++) {
68 INIT_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]); 154 INIT_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]);
69 sas_ha->ha_events[i].ha = sas_ha; 155 sas_ha->ha_events[i].ha = sas_ha;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 1b831c55ec6..05acd9e35fc 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -28,6 +28,7 @@
28 28
29#include "sas_internal.h" 29#include "sas_internal.h"
30 30
31#include <scsi/sas_ata.h>
31#include <scsi/scsi_transport.h> 32#include <scsi/scsi_transport.h>
32#include <scsi/scsi_transport_sas.h> 33#include <scsi/scsi_transport_sas.h>
33#include "../scsi_sas_internal.h" 34#include "../scsi_sas_internal.h"
@@ -71,11 +72,18 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
71 struct sas_internal *i = 72 struct sas_internal *i =
72 to_sas_internal(dev->port->ha->core.shost->transportt); 73 to_sas_internal(dev->port->ha->core.shost->transportt);
73 74
75 mutex_lock(&dev->ex_dev.cmd_mutex);
74 for (retry = 0; retry < 3; retry++) { 76 for (retry = 0; retry < 3; retry++) {
75 task = sas_alloc_task(GFP_KERNEL); 77 if (test_bit(SAS_DEV_GONE, &dev->state)) {
76 if (!task) 78 res = -ECOMM;
77 return -ENOMEM; 79 break;
80 }
78 81
82 task = sas_alloc_task(GFP_KERNEL);
83 if (!task) {
84 res = -ENOMEM;
85 break;
86 }
79 task->dev = dev; 87 task->dev = dev;
80 task->task_proto = dev->tproto; 88 task->task_proto = dev->tproto;
81 sg_init_one(&task->smp_task.smp_req, req, req_size); 89 sg_init_one(&task->smp_task.smp_req, req, req_size);
@@ -93,7 +101,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
93 if (res) { 101 if (res) {
94 del_timer(&task->timer); 102 del_timer(&task->timer);
95 SAS_DPRINTK("executing SMP task failed:%d\n", res); 103 SAS_DPRINTK("executing SMP task failed:%d\n", res);
96 goto ex_err; 104 break;
97 } 105 }
98 106
99 wait_for_completion(&task->completion); 107 wait_for_completion(&task->completion);
@@ -103,24 +111,30 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
103 i->dft->lldd_abort_task(task); 111 i->dft->lldd_abort_task(task);
104 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { 112 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
105 SAS_DPRINTK("SMP task aborted and not done\n"); 113 SAS_DPRINTK("SMP task aborted and not done\n");
106 goto ex_err; 114 break;
107 } 115 }
108 } 116 }
109 if (task->task_status.resp == SAS_TASK_COMPLETE && 117 if (task->task_status.resp == SAS_TASK_COMPLETE &&
110 task->task_status.stat == SAM_STAT_GOOD) { 118 task->task_status.stat == SAM_STAT_GOOD) {
111 res = 0; 119 res = 0;
112 break; 120 break;
113 } if (task->task_status.resp == SAS_TASK_COMPLETE && 121 }
114 task->task_status.stat == SAS_DATA_UNDERRUN) { 122 if (task->task_status.resp == SAS_TASK_COMPLETE &&
123 task->task_status.stat == SAS_DATA_UNDERRUN) {
115 /* no error, but return the number of bytes of 124 /* no error, but return the number of bytes of
116 * underrun */ 125 * underrun */
117 res = task->task_status.residual; 126 res = task->task_status.residual;
118 break; 127 break;
119 } if (task->task_status.resp == SAS_TASK_COMPLETE && 128 }
120 task->task_status.stat == SAS_DATA_OVERRUN) { 129 if (task->task_status.resp == SAS_TASK_COMPLETE &&
130 task->task_status.stat == SAS_DATA_OVERRUN) {
121 res = -EMSGSIZE; 131 res = -EMSGSIZE;
122 break; 132 break;
123 } else { 133 }
134 if (task->task_status.resp == SAS_TASK_UNDELIVERED &&
135 task->task_status.stat == SAS_DEVICE_UNKNOWN)
136 break;
137 else {
124 SAS_DPRINTK("%s: task to dev %016llx response: 0x%x " 138 SAS_DPRINTK("%s: task to dev %016llx response: 0x%x "
125 "status 0x%x\n", __func__, 139 "status 0x%x\n", __func__,
126 SAS_ADDR(dev->sas_addr), 140 SAS_ADDR(dev->sas_addr),
@@ -130,11 +144,10 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
130 task = NULL; 144 task = NULL;
131 } 145 }
132 } 146 }
133ex_err: 147 mutex_unlock(&dev->ex_dev.cmd_mutex);
148
134 BUG_ON(retry == 3 && task != NULL); 149 BUG_ON(retry == 3 && task != NULL);
135 if (task != NULL) { 150 sas_free_task(task);
136 sas_free_task(task);
137 }
138 return res; 151 return res;
139} 152}
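
The smp_execute_task() changes above put the whole retry loop under ex_dev.cmd_mutex and check SAS_DEV_GONE on every pass, so each expander has at most one SMP frame outstanding and no new frames are issued to a device that has left the domain. The serialized, gone-aware command loop in miniature; names are illustrative:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/mutex.h>

#define DEV_GONE 0	/* bit index in ex->state */

struct exp {
	struct mutex cmd_mutex;	/* one command in flight per expander */
	unsigned long state;
};

static int send_one(struct exp *ex)
{
	return 0;	/* stands in for building and issuing one frame */
}

static int execute_cmd(struct exp *ex)
{
	int res = -ECOMM;
	int retry;

	mutex_lock(&ex->cmd_mutex);
	for (retry = 0; retry < 3; retry++) {
		if (test_bit(DEV_GONE, &ex->state)) {
			res = -ECOMM;	/* device left the domain */
			break;
		}
		res = send_one(ex);
		if (res != -EAGAIN)
			break;		/* done: success or hard failure */
	}
	mutex_unlock(&ex->cmd_mutex);
	return res;
}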
140 153
@@ -153,19 +166,49 @@ static inline void *alloc_smp_resp(int size)
153 return kzalloc(size, GFP_KERNEL); 166 return kzalloc(size, GFP_KERNEL);
154} 167}
155 168
156/* ---------- Expander configuration ---------- */ 169static char sas_route_char(struct domain_device *dev, struct ex_phy *phy)
170{
171 switch (phy->routing_attr) {
172 case TABLE_ROUTING:
173 if (dev->ex_dev.t2t_supp)
174 return 'U';
175 else
176 return 'T';
177 case DIRECT_ROUTING:
178 return 'D';
179 case SUBTRACTIVE_ROUTING:
180 return 'S';
181 default:
182 return '?';
183 }
184}
185
186static enum sas_dev_type to_dev_type(struct discover_resp *dr)
187{
 188 /* This detects a failure to transmit the initial dev-to-host
 189 * FIS, as described in section J.5 of sas-2 r16
190 */
191 if (dr->attached_dev_type == NO_DEVICE && dr->attached_sata_dev &&
192 dr->linkrate >= SAS_LINK_RATE_1_5_GBPS)
193 return SATA_PENDING;
194 else
195 return dr->attached_dev_type;
196}
157 197
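
to_dev_type() above encodes the sas-2 J.5 case: the expander reports no attached device type even though a SATA device negotiated a valid rate, which means the initial D2H FIS never arrived; classifying that as SATA_PENDING lets discovery record the phy and leave the link to libata recovery, replacing the link-reset-and-msleep retry loop that sas_ex_phy_discover_helper() sheds further down. The classification step in isolation; the enum and struct names are illustrative stand-ins for the DISCOVER response fields:

enum dev_type { TYPE_NONE, TYPE_END, TYPE_EXPANDER, TYPE_SATA_PENDING };

struct disc_reply {
	enum dev_type attached_dev_type;
	int attached_sata_dev;
	int linkrate;			/* negotiated rate, 0 if none */
};

static enum dev_type classify(const struct disc_reply *dr, int min_rate)
{
	/* "nothing attached" plus a SATA device on a live link means
	 * the D2H FIS is missing: report the device as pending */
	if (dr->attached_dev_type == TYPE_NONE && dr->attached_sata_dev &&
	    dr->linkrate >= min_rate)
		return TYPE_SATA_PENDING;
	return dr->attached_dev_type;
}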
158static void sas_set_ex_phy(struct domain_device *dev, int phy_id, 198static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
159 void *disc_resp)
160{ 199{
200 enum sas_dev_type dev_type;
201 enum sas_linkrate linkrate;
202 u8 sas_addr[SAS_ADDR_SIZE];
203 struct smp_resp *resp = rsp;
204 struct discover_resp *dr = &resp->disc;
161 struct expander_device *ex = &dev->ex_dev; 205 struct expander_device *ex = &dev->ex_dev;
162 struct ex_phy *phy = &ex->ex_phy[phy_id]; 206 struct ex_phy *phy = &ex->ex_phy[phy_id];
163 struct smp_resp *resp = disc_resp;
164 struct discover_resp *dr = &resp->disc;
165 struct sas_rphy *rphy = dev->rphy; 207 struct sas_rphy *rphy = dev->rphy;
166 int rediscover = (phy->phy != NULL); 208 bool new_phy = !phy->phy;
209 char *type;
167 210
168 if (!rediscover) { 211 if (new_phy) {
169 phy->phy = sas_phy_alloc(&rphy->dev, phy_id); 212 phy->phy = sas_phy_alloc(&rphy->dev, phy_id);
170 213
171 /* FIXME: error_handling */ 214 /* FIXME: error_handling */
@@ -184,8 +227,13 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
184 break; 227 break;
185 } 228 }
186 229
 230 /* snapshot state so the debug below can be squelched if nothing important changed */
231 dev_type = phy->attached_dev_type;
232 linkrate = phy->linkrate;
233 memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
234
235 phy->attached_dev_type = to_dev_type(dr);
187 phy->phy_id = phy_id; 236 phy->phy_id = phy_id;
188 phy->attached_dev_type = dr->attached_dev_type;
189 phy->linkrate = dr->linkrate; 237 phy->linkrate = dr->linkrate;
190 phy->attached_sata_host = dr->attached_sata_host; 238 phy->attached_sata_host = dr->attached_sata_host;
191 phy->attached_sata_dev = dr->attached_sata_dev; 239 phy->attached_sata_dev = dr->attached_sata_dev;
@@ -200,9 +248,11 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
200 phy->last_da_index = -1; 248 phy->last_da_index = -1;
201 249
202 phy->phy->identify.sas_address = SAS_ADDR(phy->attached_sas_addr); 250 phy->phy->identify.sas_address = SAS_ADDR(phy->attached_sas_addr);
203 phy->phy->identify.device_type = phy->attached_dev_type; 251 phy->phy->identify.device_type = dr->attached_dev_type;
204 phy->phy->identify.initiator_port_protocols = phy->attached_iproto; 252 phy->phy->identify.initiator_port_protocols = phy->attached_iproto;
205 phy->phy->identify.target_port_protocols = phy->attached_tproto; 253 phy->phy->identify.target_port_protocols = phy->attached_tproto;
254 if (!phy->attached_tproto && dr->attached_sata_dev)
255 phy->phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
206 phy->phy->identify.phy_identifier = phy_id; 256 phy->phy->identify.phy_identifier = phy_id;
207 phy->phy->minimum_linkrate_hw = dr->hmin_linkrate; 257 phy->phy->minimum_linkrate_hw = dr->hmin_linkrate;
208 phy->phy->maximum_linkrate_hw = dr->hmax_linkrate; 258 phy->phy->maximum_linkrate_hw = dr->hmax_linkrate;
@@ -210,20 +260,76 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
210 phy->phy->maximum_linkrate = dr->pmax_linkrate; 260 phy->phy->maximum_linkrate = dr->pmax_linkrate;
211 phy->phy->negotiated_linkrate = phy->linkrate; 261 phy->phy->negotiated_linkrate = phy->linkrate;
212 262
213 if (!rediscover) 263 if (new_phy)
214 if (sas_phy_add(phy->phy)) { 264 if (sas_phy_add(phy->phy)) {
215 sas_phy_free(phy->phy); 265 sas_phy_free(phy->phy);
216 return; 266 return;
217 } 267 }
218 268
219 SAS_DPRINTK("ex %016llx phy%02d:%c attached: %016llx\n", 269 switch (phy->attached_dev_type) {
270 case SATA_PENDING:
271 type = "stp pending";
272 break;
273 case NO_DEVICE:
274 type = "no device";
275 break;
276 case SAS_END_DEV:
277 if (phy->attached_iproto) {
278 if (phy->attached_tproto)
279 type = "host+target";
280 else
281 type = "host";
282 } else {
283 if (dr->attached_sata_dev)
284 type = "stp";
285 else
286 type = "ssp";
287 }
288 break;
289 case EDGE_DEV:
290 case FANOUT_DEV:
291 type = "smp";
292 break;
293 default:
294 type = "unknown";
295 }
296
297 /* this routine is polled by libata error recovery so filter
298 * unimportant messages
299 */
300 if (new_phy || phy->attached_dev_type != dev_type ||
301 phy->linkrate != linkrate ||
302 SAS_ADDR(phy->attached_sas_addr) != SAS_ADDR(sas_addr))
303 /* pass */;
304 else
305 return;
306
307 SAS_DPRINTK("ex %016llx phy%02d:%c:%X attached: %016llx (%s)\n",
220 SAS_ADDR(dev->sas_addr), phy->phy_id, 308 SAS_ADDR(dev->sas_addr), phy->phy_id,
221 phy->routing_attr == TABLE_ROUTING ? 'T' : 309 sas_route_char(dev, phy), phy->linkrate,
222 phy->routing_attr == DIRECT_ROUTING ? 'D' : 310 SAS_ADDR(phy->attached_sas_addr), type);
223 phy->routing_attr == SUBTRACTIVE_ROUTING ? 'S' : '?', 311}
224 SAS_ADDR(phy->attached_sas_addr)); 312
313/* check if we have an existing attached ata device on this expander phy */
314struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id)
315{
316 struct ex_phy *ex_phy = &ex_dev->ex_dev.ex_phy[phy_id];
317 struct domain_device *dev;
318 struct sas_rphy *rphy;
319
320 if (!ex_phy->port)
321 return NULL;
225 322
226 return; 323 rphy = ex_phy->port->rphy;
324 if (!rphy)
325 return NULL;
326
327 dev = sas_find_dev_by_rphy(rphy);
328
329 if (dev && dev_is_sata(dev))
330 return dev;
331
332 return NULL;
227} 333}
228 334
229#define DISCOVER_REQ_SIZE 16 335#define DISCOVER_REQ_SIZE 16
@@ -232,39 +338,25 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
232static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req, 338static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
233 u8 *disc_resp, int single) 339 u8 *disc_resp, int single)
234{ 340{
235 int i, res; 341 struct discover_resp *dr;
342 int res;
236 343
237 disc_req[9] = single; 344 disc_req[9] = single;
238 for (i = 1 ; i < 3; i++) {
239 struct discover_resp *dr;
240 345
241 res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE, 346 res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE,
242 disc_resp, DISCOVER_RESP_SIZE); 347 disc_resp, DISCOVER_RESP_SIZE);
243 if (res) 348 if (res)
244 return res; 349 return res;
245 /* This is detecting a failure to transmit initial 350 dr = &((struct smp_resp *)disc_resp)->disc;
246 * dev to host FIS as described in section G.5 of 351 if (memcmp(dev->sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE) == 0) {
247 * sas-2 r 04b */ 352 sas_printk("Found loopback topology, just ignore it!\n");
248 dr = &((struct smp_resp *)disc_resp)->disc; 353 return 0;
249 if (memcmp(dev->sas_addr, dr->attached_sas_addr,
250 SAS_ADDR_SIZE) == 0) {
251 sas_printk("Found loopback topology, just ignore it!\n");
252 return 0;
253 }
254 if (!(dr->attached_dev_type == 0 &&
255 dr->attached_sata_dev))
256 break;
257 /* In order to generate the dev to host FIS, we
258 * send a link reset to the expander port */
259 sas_smp_phy_control(dev, single, PHY_FUNC_LINK_RESET, NULL);
260 /* Wait for the reset to trigger the negotiation */
261 msleep(500);
262 } 354 }
263 sas_set_ex_phy(dev, single, disc_resp); 355 sas_set_ex_phy(dev, single, disc_resp);
264 return 0; 356 return 0;
265} 357}
266 358
267static int sas_ex_phy_discover(struct domain_device *dev, int single) 359int sas_ex_phy_discover(struct domain_device *dev, int single)
268{ 360{
269 struct expander_device *ex = &dev->ex_dev; 361 struct expander_device *ex = &dev->ex_dev;
270 int res = 0; 362 int res = 0;
@@ -569,9 +661,8 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
569#define RPS_REQ_SIZE 16 661#define RPS_REQ_SIZE 16
570#define RPS_RESP_SIZE 60 662#define RPS_RESP_SIZE 60
571 663
572static int sas_get_report_phy_sata(struct domain_device *dev, 664int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
573 int phy_id, 665 struct smp_resp *rps_resp)
574 struct smp_resp *rps_resp)
575{ 666{
576 int res; 667 int res;
577 u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE); 668 u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE);
@@ -657,10 +748,11 @@ static struct domain_device *sas_ex_discover_end_dev(
657 if (phy->attached_sata_host || phy->attached_sata_ps) 748 if (phy->attached_sata_host || phy->attached_sata_ps)
658 return NULL; 749 return NULL;
659 750
660 child = kzalloc(sizeof(*child), GFP_KERNEL); 751 child = sas_alloc_device();
661 if (!child) 752 if (!child)
662 return NULL; 753 return NULL;
663 754
755 kref_get(&parent->kref);
664 child->parent = parent; 756 child->parent = parent;
665 child->port = parent->port; 757 child->port = parent->port;
666 child->iproto = phy->attached_iproto; 758 child->iproto = phy->attached_iproto;
@@ -676,24 +768,13 @@ static struct domain_device *sas_ex_discover_end_dev(
676 } 768 }
677 } 769 }
678 sas_ex_get_linkrate(parent, child, phy); 770 sas_ex_get_linkrate(parent, child, phy);
771 sas_device_set_phy(child, phy->port);
679 772
680#ifdef CONFIG_SCSI_SAS_ATA 773#ifdef CONFIG_SCSI_SAS_ATA
681 if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) { 774 if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) {
682 child->dev_type = SATA_DEV; 775 res = sas_get_ata_info(child, phy);
683 if (phy->attached_tproto & SAS_PROTOCOL_STP) 776 if (res)
684 child->tproto = phy->attached_tproto;
685 if (phy->attached_sata_dev)
686 child->tproto |= SATA_DEV;
687 res = sas_get_report_phy_sata(parent, phy_id,
688 &child->sata_dev.rps_resp);
689 if (res) {
690 SAS_DPRINTK("report phy sata to %016llx:0x%x returned "
691 "0x%x\n", SAS_ADDR(parent->sas_addr),
692 phy_id, res);
693 goto out_free; 777 goto out_free;
694 }
695 memcpy(child->frame_rcvd, &child->sata_dev.rps_resp.rps.fis,
696 sizeof(struct dev_to_host_fis));
697 778
698 rphy = sas_end_device_alloc(phy->port); 779 rphy = sas_end_device_alloc(phy->port);
699 if (unlikely(!rphy)) 780 if (unlikely(!rphy))
@@ -703,9 +784,7 @@ static struct domain_device *sas_ex_discover_end_dev(
703 784
704 child->rphy = rphy; 785 child->rphy = rphy;
705 786
706 spin_lock_irq(&parent->port->dev_list_lock); 787 list_add_tail(&child->disco_list_node, &parent->port->disco_list);
707 list_add_tail(&child->dev_list_node, &parent->port->dev_list);
708 spin_unlock_irq(&parent->port->dev_list_lock);
709 788
710 res = sas_discover_sata(child); 789 res = sas_discover_sata(child);
711 if (res) { 790 if (res) {
@@ -729,9 +808,7 @@ static struct domain_device *sas_ex_discover_end_dev(
729 child->rphy = rphy; 808 child->rphy = rphy;
730 sas_fill_in_rphy(child, rphy); 809 sas_fill_in_rphy(child, rphy);
731 810
732 spin_lock_irq(&parent->port->dev_list_lock); 811 list_add_tail(&child->disco_list_node, &parent->port->disco_list);
733 list_add_tail(&child->dev_list_node, &parent->port->dev_list);
734 spin_unlock_irq(&parent->port->dev_list_lock);
735 812
736 res = sas_discover_end_dev(child); 813 res = sas_discover_end_dev(child);
737 if (res) { 814 if (res) {
@@ -755,6 +832,7 @@ static struct domain_device *sas_ex_discover_end_dev(
755 sas_rphy_free(child->rphy); 832 sas_rphy_free(child->rphy);
756 child->rphy = NULL; 833 child->rphy = NULL;
757 834
835 list_del(&child->disco_list_node);
758 spin_lock_irq(&parent->port->dev_list_lock); 836 spin_lock_irq(&parent->port->dev_list_lock);
759 list_del(&child->dev_list_node); 837 list_del(&child->dev_list_node);
760 spin_unlock_irq(&parent->port->dev_list_lock); 838 spin_unlock_irq(&parent->port->dev_list_lock);
@@ -762,7 +840,7 @@ static struct domain_device *sas_ex_discover_end_dev(
762 sas_port_delete(phy->port); 840 sas_port_delete(phy->port);
763 out_err: 841 out_err:
764 phy->port = NULL; 842 phy->port = NULL;
765 kfree(child); 843 sas_put_device(child);
766 return NULL; 844 return NULL;
767} 845}
768 846
@@ -809,7 +887,7 @@ static struct domain_device *sas_ex_discover_expander(
809 phy->attached_phy_id); 887 phy->attached_phy_id);
810 return NULL; 888 return NULL;
811 } 889 }
812 child = kzalloc(sizeof(*child), GFP_KERNEL); 890 child = sas_alloc_device();
813 if (!child) 891 if (!child)
814 return NULL; 892 return NULL;
815 893
@@ -835,6 +913,7 @@ static struct domain_device *sas_ex_discover_expander(
835 child->rphy = rphy; 913 child->rphy = rphy;
836 edev = rphy_to_expander_device(rphy); 914 edev = rphy_to_expander_device(rphy);
837 child->dev_type = phy->attached_dev_type; 915 child->dev_type = phy->attached_dev_type;
916 kref_get(&parent->kref);
838 child->parent = parent; 917 child->parent = parent;
839 child->port = port; 918 child->port = port;
840 child->iproto = phy->attached_iproto; 919 child->iproto = phy->attached_iproto;
@@ -858,7 +937,7 @@ static struct domain_device *sas_ex_discover_expander(
858 spin_lock_irq(&parent->port->dev_list_lock); 937 spin_lock_irq(&parent->port->dev_list_lock);
859 list_del(&child->dev_list_node); 938 list_del(&child->dev_list_node);
860 spin_unlock_irq(&parent->port->dev_list_lock); 939 spin_unlock_irq(&parent->port->dev_list_lock);
861 kfree(child); 940 sas_put_device(child);
862 return NULL; 941 return NULL;
863 } 942 }
864 list_add_tail(&child->siblings, &parent->ex_dev.children); 943 list_add_tail(&child->siblings, &parent->ex_dev.children);
@@ -908,7 +987,8 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
908 987
909 if (ex_phy->attached_dev_type != SAS_END_DEV && 988 if (ex_phy->attached_dev_type != SAS_END_DEV &&
910 ex_phy->attached_dev_type != FANOUT_DEV && 989 ex_phy->attached_dev_type != FANOUT_DEV &&
911 ex_phy->attached_dev_type != EDGE_DEV) { 990 ex_phy->attached_dev_type != EDGE_DEV &&
991 ex_phy->attached_dev_type != SATA_PENDING) {
912 SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx " 992 SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx "
913 "phy 0x%x\n", ex_phy->attached_dev_type, 993 "phy 0x%x\n", ex_phy->attached_dev_type,
914 SAS_ADDR(dev->sas_addr), 994 SAS_ADDR(dev->sas_addr),
@@ -934,6 +1014,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
934 1014
935 switch (ex_phy->attached_dev_type) { 1015 switch (ex_phy->attached_dev_type) {
936 case SAS_END_DEV: 1016 case SAS_END_DEV:
1017 case SATA_PENDING:
937 child = sas_ex_discover_end_dev(dev, phy_id); 1018 child = sas_ex_discover_end_dev(dev, phy_id);
938 break; 1019 break;
939 case FANOUT_DEV: 1020 case FANOUT_DEV:
@@ -1128,32 +1209,25 @@ static void sas_print_parent_topology_bug(struct domain_device *child,
1128 struct ex_phy *parent_phy, 1209 struct ex_phy *parent_phy,
1129 struct ex_phy *child_phy) 1210 struct ex_phy *child_phy)
1130{ 1211{
1131 static const char ra_char[] = {
1132 [DIRECT_ROUTING] = 'D',
1133 [SUBTRACTIVE_ROUTING] = 'S',
1134 [TABLE_ROUTING] = 'T',
1135 };
1136 static const char *ex_type[] = { 1212 static const char *ex_type[] = {
1137 [EDGE_DEV] = "edge", 1213 [EDGE_DEV] = "edge",
1138 [FANOUT_DEV] = "fanout", 1214 [FANOUT_DEV] = "fanout",
1139 }; 1215 };
1140 struct domain_device *parent = child->parent; 1216 struct domain_device *parent = child->parent;
1141 1217
1142 sas_printk("%s ex %016llx (T2T supp:%d) phy 0x%x <--> %s ex %016llx " 1218 sas_printk("%s ex %016llx phy 0x%x <--> %s ex %016llx "
1143 "(T2T supp:%d) phy 0x%x has %c:%c routing link!\n", 1219 "phy 0x%x has %c:%c routing link!\n",
1144 1220
1145 ex_type[parent->dev_type], 1221 ex_type[parent->dev_type],
1146 SAS_ADDR(parent->sas_addr), 1222 SAS_ADDR(parent->sas_addr),
1147 parent->ex_dev.t2t_supp,
1148 parent_phy->phy_id, 1223 parent_phy->phy_id,
1149 1224
1150 ex_type[child->dev_type], 1225 ex_type[child->dev_type],
1151 SAS_ADDR(child->sas_addr), 1226 SAS_ADDR(child->sas_addr),
1152 child->ex_dev.t2t_supp,
1153 child_phy->phy_id, 1227 child_phy->phy_id,
1154 1228
1155 ra_char[parent_phy->routing_attr], 1229 sas_route_char(parent, parent_phy),
1156 ra_char[child_phy->routing_attr]); 1230 sas_route_char(child, child_phy));
1157} 1231}
1158 1232
1159static int sas_check_eeds(struct domain_device *child, 1233static int sas_check_eeds(struct domain_device *child,
@@ -1610,8 +1684,8 @@ static int sas_get_phy_change_count(struct domain_device *dev,
1610 return res; 1684 return res;
1611} 1685}
1612 1686
1613static int sas_get_phy_attached_sas_addr(struct domain_device *dev, 1687static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
1614 int phy_id, u8 *attached_sas_addr) 1688 u8 *sas_addr, enum sas_dev_type *type)
1615{ 1689{
1616 int res; 1690 int res;
1617 struct smp_resp *disc_resp; 1691 struct smp_resp *disc_resp;
@@ -1623,10 +1697,11 @@ static int sas_get_phy_attached_sas_addr(struct domain_device *dev,
1623 dr = &disc_resp->disc; 1697 dr = &disc_resp->disc;
1624 1698
1625 res = sas_get_phy_discover(dev, phy_id, disc_resp); 1699 res = sas_get_phy_discover(dev, phy_id, disc_resp);
1626 if (!res) { 1700 if (res == 0) {
1627 memcpy(attached_sas_addr,disc_resp->disc.attached_sas_addr,8); 1701 memcpy(sas_addr, disc_resp->disc.attached_sas_addr, 8);
1628 if (dr->attached_dev_type == 0) 1702 *type = to_dev_type(dr);
1629 memset(attached_sas_addr, 0, 8); 1703 if (*type == 0)
1704 memset(sas_addr, 0, 8);
1630 } 1705 }
1631 kfree(disc_resp); 1706 kfree(disc_resp);
1632 return res; 1707 return res;
@@ -1748,7 +1823,7 @@ static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_devi
1748 struct domain_device *child, *n; 1823 struct domain_device *child, *n;
1749 1824
1750 list_for_each_entry_safe(child, n, &ex->children, siblings) { 1825 list_for_each_entry_safe(child, n, &ex->children, siblings) {
1751 child->gone = 1; 1826 set_bit(SAS_DEV_GONE, &child->state);
1752 if (child->dev_type == EDGE_DEV || 1827 if (child->dev_type == EDGE_DEV ||
1753 child->dev_type == FANOUT_DEV) 1828 child->dev_type == FANOUT_DEV)
1754 sas_unregister_ex_tree(port, child); 1829 sas_unregister_ex_tree(port, child);
@@ -1763,27 +1838,28 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
1763{ 1838{
1764 struct expander_device *ex_dev = &parent->ex_dev; 1839 struct expander_device *ex_dev = &parent->ex_dev;
1765 struct ex_phy *phy = &ex_dev->ex_phy[phy_id]; 1840 struct ex_phy *phy = &ex_dev->ex_phy[phy_id];
1766 struct domain_device *child, *n; 1841 struct domain_device *child, *n, *found = NULL;
1767 if (last) { 1842 if (last) {
1768 list_for_each_entry_safe(child, n, 1843 list_for_each_entry_safe(child, n,
1769 &ex_dev->children, siblings) { 1844 &ex_dev->children, siblings) {
1770 if (SAS_ADDR(child->sas_addr) == 1845 if (SAS_ADDR(child->sas_addr) ==
1771 SAS_ADDR(phy->attached_sas_addr)) { 1846 SAS_ADDR(phy->attached_sas_addr)) {
1772 child->gone = 1; 1847 set_bit(SAS_DEV_GONE, &child->state);
1773 if (child->dev_type == EDGE_DEV || 1848 if (child->dev_type == EDGE_DEV ||
1774 child->dev_type == FANOUT_DEV) 1849 child->dev_type == FANOUT_DEV)
1775 sas_unregister_ex_tree(parent->port, child); 1850 sas_unregister_ex_tree(parent->port, child);
1776 else 1851 else
1777 sas_unregister_dev(parent->port, child); 1852 sas_unregister_dev(parent->port, child);
1853 found = child;
1778 break; 1854 break;
1779 } 1855 }
1780 } 1856 }
1781 parent->gone = 1;
1782 sas_disable_routing(parent, phy->attached_sas_addr); 1857 sas_disable_routing(parent, phy->attached_sas_addr);
1783 } 1858 }
1784 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); 1859 memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
1785 if (phy->port) { 1860 if (phy->port) {
1786 sas_port_delete_phy(phy->port, phy->phy); 1861 sas_port_delete_phy(phy->port, phy->phy);
1862 sas_device_set_phy(found, phy->port);
1787 if (phy->port->num_phys == 0) 1863 if (phy->port->num_phys == 0)
1788 sas_port_delete(phy->port); 1864 sas_port_delete(phy->port);
1789 phy->port = NULL; 1865 phy->port = NULL;
@@ -1874,39 +1950,71 @@ out:
1874 return res; 1950 return res;
1875} 1951}
1876 1952
1953static bool dev_type_flutter(enum sas_dev_type new, enum sas_dev_type old)
1954{
1955 if (old == new)
1956 return true;
1957
1958 /* treat device directed resets as flutter, if we went
1959 * SAS_END_DEV to SATA_PENDING the link needs recovery
1960 */
1961 if ((old == SATA_PENDING && new == SAS_END_DEV) ||
1962 (old == SAS_END_DEV && new == SATA_PENDING))
1963 return true;
1964
1965 return false;
1966}
1967
1877static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) 1968static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
1878{ 1969{
1879 struct expander_device *ex = &dev->ex_dev; 1970 struct expander_device *ex = &dev->ex_dev;
1880 struct ex_phy *phy = &ex->ex_phy[phy_id]; 1971 struct ex_phy *phy = &ex->ex_phy[phy_id];
1881 u8 attached_sas_addr[8]; 1972 enum sas_dev_type type = NO_DEVICE;
1973 u8 sas_addr[8];
1882 int res; 1974 int res;
1883 1975
1884 res = sas_get_phy_attached_sas_addr(dev, phy_id, attached_sas_addr); 1976 res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type);
1885 switch (res) { 1977 switch (res) {
1886 case SMP_RESP_NO_PHY: 1978 case SMP_RESP_NO_PHY:
1887 phy->phy_state = PHY_NOT_PRESENT; 1979 phy->phy_state = PHY_NOT_PRESENT;
1888 sas_unregister_devs_sas_addr(dev, phy_id, last); 1980 sas_unregister_devs_sas_addr(dev, phy_id, last);
1889 goto out; break; 1981 return res;
1890 case SMP_RESP_PHY_VACANT: 1982 case SMP_RESP_PHY_VACANT:
1891 phy->phy_state = PHY_VACANT; 1983 phy->phy_state = PHY_VACANT;
1892 sas_unregister_devs_sas_addr(dev, phy_id, last); 1984 sas_unregister_devs_sas_addr(dev, phy_id, last);
1893 goto out; break; 1985 return res;
1894 case SMP_RESP_FUNC_ACC: 1986 case SMP_RESP_FUNC_ACC:
1895 break; 1987 break;
1896 } 1988 }
1897 1989
1898 if (SAS_ADDR(attached_sas_addr) == 0) { 1990 if (SAS_ADDR(sas_addr) == 0) {
1899 phy->phy_state = PHY_EMPTY; 1991 phy->phy_state = PHY_EMPTY;
1900 sas_unregister_devs_sas_addr(dev, phy_id, last); 1992 sas_unregister_devs_sas_addr(dev, phy_id, last);
1901 } else if (SAS_ADDR(attached_sas_addr) == 1993 return res;
1902 SAS_ADDR(phy->attached_sas_addr)) { 1994 } else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) &&
1903 SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter\n", 1995 dev_type_flutter(type, phy->attached_dev_type)) {
1904 SAS_ADDR(dev->sas_addr), phy_id); 1996 struct domain_device *ata_dev = sas_ex_to_ata(dev, phy_id);
1997 char *action = "";
1998
1905 sas_ex_phy_discover(dev, phy_id); 1999 sas_ex_phy_discover(dev, phy_id);
1906 } else 2000
1907 res = sas_discover_new(dev, phy_id); 2001 if (ata_dev && phy->attached_dev_type == SATA_PENDING)
1908out: 2002 action = ", needs recovery";
1909 return res; 2003 SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n",
2004 SAS_ADDR(dev->sas_addr), phy_id, action);
2005 return res;
2006 }
2007
2008 /* delete the old link */
2009 if (SAS_ADDR(phy->attached_sas_addr) &&
2010 SAS_ADDR(sas_addr) != SAS_ADDR(phy->attached_sas_addr)) {
2011 SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n",
2012 SAS_ADDR(dev->sas_addr), phy_id,
2013 SAS_ADDR(phy->attached_sas_addr));
2014 sas_unregister_devs_sas_addr(dev, phy_id, last);
2015 }
2016
2017 return sas_discover_new(dev, phy_id);
1910} 2018}
1911 2019
1912/** 2020/**
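
The sas_expander.c changes above teach rediscovery to tell "flutter" — a broadcast event after which the phy's attached address and effective device type are unchanged, including a SATA device bouncing between SAS_END_DEV and SATA_PENDING across a device-directed reset — apart from a genuine topology change, which now explicitly unregisters the stale device before running fresh discovery. A standalone sketch of the classification, using local stand-ins for the kernel's enum sas_dev_type:

    #include <stdbool.h>
    #include <stdio.h>

    /* local stand-ins for the kernel's enum sas_dev_type (illustrative) */
    enum sas_dev_type { NO_DEVICE, SAS_END_DEV, SATA_PENDING };

    /* mirrors dev_type_flutter() above: same type, or an END_DEV <->
     * SATA_PENDING bounce from a device-directed reset, is flutter */
    static bool dev_type_flutter(enum sas_dev_type new, enum sas_dev_type old)
    {
            if (old == new)
                    return true;
            return (old == SATA_PENDING && new == SAS_END_DEV) ||
                   (old == SAS_END_DEV && new == SATA_PENDING);
    }

    int main(void)
    {
            /* flutter: just re-run sas_ex_phy_discover() on the phy */
            printf("%d\n", dev_type_flutter(SATA_PENDING, SAS_END_DEV)); /* 1 */
            /* not flutter: unregister the old device, then discover anew */
            printf("%d\n", dev_type_flutter(SAS_END_DEV, NO_DEVICE));    /* 0 */
            return 0;
    }

In the flutter case sas_rediscover_dev() additionally flags SATA_PENDING phys as needing link recovery, which ties into the ATA error-handling hooks introduced below.
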
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index 3814d3eed40..d2479257516 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -187,11 +187,14 @@ static void sas_phy_control(struct sas_ha_struct *sas_ha, u8 phy_id,
187 struct sas_internal *i = 187 struct sas_internal *i =
188 to_sas_internal(sas_ha->core.shost->transportt); 188 to_sas_internal(sas_ha->core.shost->transportt);
189 struct sas_phy_linkrates rates; 189 struct sas_phy_linkrates rates;
190 struct asd_sas_phy *asd_phy;
190 191
191 if (phy_id >= sas_ha->num_phys) { 192 if (phy_id >= sas_ha->num_phys) {
192 resp_data[2] = SMP_RESP_NO_PHY; 193 resp_data[2] = SMP_RESP_NO_PHY;
193 return; 194 return;
194 } 195 }
196
197 asd_phy = sas_ha->sas_phy[phy_id];
195 switch (phy_op) { 198 switch (phy_op) {
196 case PHY_FUNC_NOP: 199 case PHY_FUNC_NOP:
197 case PHY_FUNC_LINK_RESET: 200 case PHY_FUNC_LINK_RESET:
@@ -210,7 +213,13 @@ static void sas_phy_control(struct sas_ha_struct *sas_ha, u8 phy_id,
210 rates.minimum_linkrate = min; 213 rates.minimum_linkrate = min;
211 rates.maximum_linkrate = max; 214 rates.maximum_linkrate = max;
212 215
213 if (i->dft->lldd_control_phy(sas_ha->sas_phy[phy_id], phy_op, &rates)) 216 /* filter reset requests through libata eh */
217 if (phy_op == PHY_FUNC_LINK_RESET && sas_try_ata_reset(asd_phy) == 0) {
218 resp_data[2] = SMP_RESP_FUNC_ACC;
219 return;
220 }
221
222 if (i->dft->lldd_control_phy(asd_phy, phy_op, &rates))
214 resp_data[2] = SMP_RESP_FUNC_FAILED; 223 resp_data[2] = SMP_RESP_FUNC_FAILED;
215 else 224 else
216 resp_data[2] = SMP_RESP_FUNC_ACC; 225 resp_data[2] = SMP_RESP_FUNC_ACC;
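
In sas_host_smp.c, SMP PHY CONTROL link resets aimed at the host's own phys now detour through libata error handling when the attached device is SATA, so the ATA link is recovered coherently instead of being reset underneath libata. Only PHY_FUNC_LINK_RESET is filtered; other operations still go straight to the LLDD. A hedged sketch of that dispatch shape, with stubbed stand-ins for the libsas hooks (all names here are illustrative, not the kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    enum { SMP_RESP_FUNC_ACC = 0x00, SMP_RESP_FUNC_FAILED = 0x02 };
    enum { PHY_FUNC_NOP, PHY_FUNC_LINK_RESET, PHY_FUNC_HARD_RESET };

    /* stubbed stand-ins for the hooks this sketch assumes */
    static bool phy_has_ata_device(int phy_id) { return phy_id == 0; }
    static int schedule_ata_eh_reset(int phy_id) { (void)phy_id; return 0; }
    static int lldd_control_phy(int phy_id, int op) { (void)phy_id; (void)op; return 0; }

    /* link resets detour through ATA EH; everything else hits the LLDD */
    static int control_phy(int phy_id, int op)
    {
            if (op == PHY_FUNC_LINK_RESET && phy_has_ata_device(phy_id) &&
                schedule_ata_eh_reset(phy_id) == 0)
                    return SMP_RESP_FUNC_ACC;

            return lldd_control_phy(phy_id, op) ? SMP_RESP_FUNC_FAILED
                                                : SMP_RESP_FUNC_ACC;
    }

    int main(void)
    {
            printf("phy0 reset -> 0x%02x (via ATA EH)\n",
                   control_phy(0, PHY_FUNC_LINK_RESET));
            printf("phy1 reset -> 0x%02x (via LLDD)\n",
                   control_phy(1, PHY_FUNC_LINK_RESET));
            return 0;
    }
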
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index d81c3b1989f..120bff64be3 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -28,6 +28,7 @@
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/device.h> 29#include <linux/device.h>
30#include <linux/spinlock.h> 30#include <linux/spinlock.h>
31#include <scsi/sas_ata.h>
31#include <scsi/scsi_host.h> 32#include <scsi/scsi_host.h>
32#include <scsi/scsi_device.h> 33#include <scsi/scsi_device.h>
33#include <scsi/scsi_transport.h> 34#include <scsi/scsi_transport.h>
@@ -97,14 +98,14 @@ void sas_hae_reset(struct work_struct *work)
97 container_of(work, struct sas_ha_event, work); 98 container_of(work, struct sas_ha_event, work);
98 struct sas_ha_struct *ha = ev->ha; 99 struct sas_ha_struct *ha = ev->ha;
99 100
100 sas_begin_event(HAE_RESET, &ha->event_lock, 101 clear_bit(HAE_RESET, &ha->pending);
101 &ha->pending);
102} 102}
103 103
104int sas_register_ha(struct sas_ha_struct *sas_ha) 104int sas_register_ha(struct sas_ha_struct *sas_ha)
105{ 105{
106 int error = 0; 106 int error = 0;
107 107
108 mutex_init(&sas_ha->disco_mutex);
108 spin_lock_init(&sas_ha->phy_port_lock); 109 spin_lock_init(&sas_ha->phy_port_lock);
109 sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr); 110 sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr);
110 111
@@ -113,8 +114,10 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
113 else if (sas_ha->lldd_queue_size == -1) 114 else if (sas_ha->lldd_queue_size == -1)
114 sas_ha->lldd_queue_size = 128; /* Sanity */ 115 sas_ha->lldd_queue_size = 128; /* Sanity */
115 116
116 sas_ha->state = SAS_HA_REGISTERED; 117 set_bit(SAS_HA_REGISTERED, &sas_ha->state);
117 spin_lock_init(&sas_ha->state_lock); 118 spin_lock_init(&sas_ha->state_lock);
119 mutex_init(&sas_ha->drain_mutex);
120 INIT_LIST_HEAD(&sas_ha->defer_q);
118 121
119 error = sas_register_phys(sas_ha); 122 error = sas_register_phys(sas_ha);
120 if (error) { 123 if (error) {
@@ -144,6 +147,7 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
144 } 147 }
145 148
146 INIT_LIST_HEAD(&sas_ha->eh_done_q); 149 INIT_LIST_HEAD(&sas_ha->eh_done_q);
150 INIT_LIST_HEAD(&sas_ha->eh_ata_q);
147 151
148 return 0; 152 return 0;
149 153
@@ -156,17 +160,23 @@ Undo_phys:
156 160
157int sas_unregister_ha(struct sas_ha_struct *sas_ha) 161int sas_unregister_ha(struct sas_ha_struct *sas_ha)
158{ 162{
159 unsigned long flags; 163 /* Set the state to unregistered to avoid further unchained
160 164 * events to be queued, and flush any in-progress drainers
161 /* Set the state to unregistered to avoid further 165 */
162 * events to be queued */ 166 mutex_lock(&sas_ha->drain_mutex);
163 spin_lock_irqsave(&sas_ha->state_lock, flags); 167 spin_lock_irq(&sas_ha->state_lock);
164 sas_ha->state = SAS_HA_UNREGISTERED; 168 clear_bit(SAS_HA_REGISTERED, &sas_ha->state);
165 spin_unlock_irqrestore(&sas_ha->state_lock, flags); 169 spin_unlock_irq(&sas_ha->state_lock);
166 scsi_flush_work(sas_ha->core.shost); 170 __sas_drain_work(sas_ha);
171 mutex_unlock(&sas_ha->drain_mutex);
167 172
168 sas_unregister_ports(sas_ha); 173 sas_unregister_ports(sas_ha);
169 174
175 /* flush unregistration work */
176 mutex_lock(&sas_ha->drain_mutex);
177 __sas_drain_work(sas_ha);
178 mutex_unlock(&sas_ha->drain_mutex);
179
170 if (sas_ha->lldd_max_execute_num > 1) { 180 if (sas_ha->lldd_max_execute_num > 1) {
171 sas_shutdown_queue(sas_ha); 181 sas_shutdown_queue(sas_ha);
172 sas_ha->lldd_max_execute_num = 1; 182 sas_ha->lldd_max_execute_num = 1;
@@ -190,15 +200,75 @@ static int sas_get_linkerrors(struct sas_phy *phy)
190 return sas_smp_get_phy_events(phy); 200 return sas_smp_get_phy_events(phy);
191} 201}
192 202
193int sas_phy_enable(struct sas_phy *phy, int enable) 203int sas_try_ata_reset(struct asd_sas_phy *asd_phy)
204{
205 struct domain_device *dev = NULL;
206
207 /* try to route user requested link resets through libata */
208 if (asd_phy->port)
209 dev = asd_phy->port->port_dev;
210
211 /* validate that dev has been probed */
212 if (dev)
213 dev = sas_find_dev_by_rphy(dev->rphy);
214
215 if (dev && dev_is_sata(dev)) {
216 sas_ata_schedule_reset(dev);
217 sas_ata_wait_eh(dev);
218 return 0;
219 }
220
221 return -ENODEV;
222}
223
224/**
225 * transport_sas_phy_reset - reset a phy and permit libata to manage the link
226 *
227 * phy reset request via sysfs in host workqueue context so we know we
228 * can block on eh and safely traverse the domain_device topology
229 */
230static int transport_sas_phy_reset(struct sas_phy *phy, int hard_reset)
231{
232 enum phy_func reset_type;
233
234 if (hard_reset)
235 reset_type = PHY_FUNC_HARD_RESET;
236 else
237 reset_type = PHY_FUNC_LINK_RESET;
238
239 if (scsi_is_sas_phy_local(phy)) {
240 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
241 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
242 struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
243 struct sas_internal *i =
244 to_sas_internal(sas_ha->core.shost->transportt);
245
246 if (!hard_reset && sas_try_ata_reset(asd_phy) == 0)
247 return 0;
248 return i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
249 } else {
250 struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
251 struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
252 struct domain_device *ata_dev = sas_ex_to_ata(ddev, phy->number);
253
254 if (ata_dev && !hard_reset) {
255 sas_ata_schedule_reset(ata_dev);
256 sas_ata_wait_eh(ata_dev);
257 return 0;
258 } else
259 return sas_smp_phy_control(ddev, phy->number, reset_type, NULL);
260 }
261}
262
263static int sas_phy_enable(struct sas_phy *phy, int enable)
194{ 264{
195 int ret; 265 int ret;
196 enum phy_func command; 266 enum phy_func cmd;
197 267
198 if (enable) 268 if (enable)
199 command = PHY_FUNC_LINK_RESET; 269 cmd = PHY_FUNC_LINK_RESET;
200 else 270 else
201 command = PHY_FUNC_DISABLE; 271 cmd = PHY_FUNC_DISABLE;
202 272
203 if (scsi_is_sas_phy_local(phy)) { 273 if (scsi_is_sas_phy_local(phy)) {
204 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); 274 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
@@ -207,15 +277,18 @@ int sas_phy_enable(struct sas_phy *phy, int enable)
207 struct sas_internal *i = 277 struct sas_internal *i =
208 to_sas_internal(sas_ha->core.shost->transportt); 278 to_sas_internal(sas_ha->core.shost->transportt);
209 279
210 if (!enable) { 280 if (enable)
211 sas_phy_disconnected(asd_phy); 281 ret = transport_sas_phy_reset(phy, 0);
212 sas_ha->notify_phy_event(asd_phy, PHYE_LOSS_OF_SIGNAL); 282 else
213 } 283 ret = i->dft->lldd_control_phy(asd_phy, cmd, NULL);
214 ret = i->dft->lldd_control_phy(asd_phy, command, NULL);
215 } else { 284 } else {
216 struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent); 285 struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
217 struct domain_device *ddev = sas_find_dev_by_rphy(rphy); 286 struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
218 ret = sas_smp_phy_control(ddev, phy->number, command, NULL); 287
288 if (enable)
289 ret = transport_sas_phy_reset(phy, 0);
290 else
291 ret = sas_smp_phy_control(ddev, phy->number, cmd, NULL);
219 } 292 }
220 return ret; 293 return ret;
221} 294}
@@ -225,6 +298,9 @@ int sas_phy_reset(struct sas_phy *phy, int hard_reset)
225 int ret; 298 int ret;
226 enum phy_func reset_type; 299 enum phy_func reset_type;
227 300
301 if (!phy->enabled)
302 return -ENODEV;
303
228 if (hard_reset) 304 if (hard_reset)
229 reset_type = PHY_FUNC_HARD_RESET; 305 reset_type = PHY_FUNC_HARD_RESET;
230 else 306 else
@@ -285,9 +361,101 @@ int sas_set_phy_speed(struct sas_phy *phy,
285 return ret; 361 return ret;
286} 362}
287 363
364static void sas_phy_release(struct sas_phy *phy)
365{
366 kfree(phy->hostdata);
367 phy->hostdata = NULL;
368}
369
370static void phy_reset_work(struct work_struct *work)
371{
372 struct sas_phy_data *d = container_of(work, typeof(*d), reset_work);
373
374 d->reset_result = transport_sas_phy_reset(d->phy, d->hard_reset);
375}
376
377static void phy_enable_work(struct work_struct *work)
378{
379 struct sas_phy_data *d = container_of(work, typeof(*d), enable_work);
380
381 d->enable_result = sas_phy_enable(d->phy, d->enable);
382}
383
384static int sas_phy_setup(struct sas_phy *phy)
385{
386 struct sas_phy_data *d = kzalloc(sizeof(*d), GFP_KERNEL);
387
388 if (!d)
389 return -ENOMEM;
390
391 mutex_init(&d->event_lock);
392 INIT_WORK(&d->reset_work, phy_reset_work);
393 INIT_WORK(&d->enable_work, phy_enable_work);
394 d->phy = phy;
395 phy->hostdata = d;
396
397 return 0;
398}
399
400static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
401{
402 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
403 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
404 struct sas_phy_data *d = phy->hostdata;
405 int rc;
406
407 if (!d)
408 return -ENOMEM;
409
410 /* libsas workqueue coordinates ata-eh reset with discovery */
411 mutex_lock(&d->event_lock);
412 d->reset_result = 0;
413 d->hard_reset = hard_reset;
414
415 spin_lock_irq(&ha->state_lock);
416 sas_queue_work(ha, &d->reset_work);
417 spin_unlock_irq(&ha->state_lock);
418
419 rc = sas_drain_work(ha);
420 if (rc == 0)
421 rc = d->reset_result;
422 mutex_unlock(&d->event_lock);
423
424 return rc;
425}
426
427static int queue_phy_enable(struct sas_phy *phy, int enable)
428{
429 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
430 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
431 struct sas_phy_data *d = phy->hostdata;
432 int rc;
433
434 if (!d)
435 return -ENOMEM;
436
437 /* libsas workqueue coordinates ata-eh reset with discovery */
438 mutex_lock(&d->event_lock);
439 d->enable_result = 0;
440 d->enable = enable;
441
442 spin_lock_irq(&ha->state_lock);
443 sas_queue_work(ha, &d->enable_work);
444 spin_unlock_irq(&ha->state_lock);
445
446 rc = sas_drain_work(ha);
447 if (rc == 0)
448 rc = d->enable_result;
449 mutex_unlock(&d->event_lock);
450
451 return rc;
452}
453
288static struct sas_function_template sft = { 454static struct sas_function_template sft = {
289 .phy_enable = sas_phy_enable, 455 .phy_enable = queue_phy_enable,
290 .phy_reset = sas_phy_reset, 456 .phy_reset = queue_phy_reset,
457 .phy_setup = sas_phy_setup,
458 .phy_release = sas_phy_release,
291 .set_phy_speed = sas_set_phy_speed, 459 .set_phy_speed = sas_set_phy_speed,
292 .get_linkerrors = sas_get_linkerrors, 460 .get_linkerrors = sas_get_linkerrors,
293 .smp_handler = sas_smp_handler, 461 .smp_handler = sas_smp_handler,
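
The sas_init.c rework moves sysfs-initiated phy reset/enable into libsas workqueue context via queue_phy_reset()/queue_phy_enable(): the sysfs thread stashes the request in the per-phy sas_phy_data, queues the work item, drains the queue, and only then reads the result back. A loose pthread-based userspace sketch of that queue-then-drain-then-collect pattern, where pthread_join() stands in for sas_drain_work() (which in the kernel flushes a shared workqueue rather than joining a dedicated thread):

    #include <pthread.h>
    #include <stdio.h>

    /* per-phy context, loosely modeled on struct sas_phy_data */
    struct phy_data {
            pthread_mutex_t event_lock; /* serializes requests per phy */
            int hard_reset;
            int reset_result;
    };

    /* stands in for the queued reset_work body */
    static void *reset_work(void *arg)
    {
            struct phy_data *d = arg;

            /* ...here the real work blocks on ATA EH or the LLDD... */
            d->reset_result = 0; /* pretend the reset succeeded */
            return NULL;
    }

    /* queue the work, then "drain" before reading the result back */
    static int queue_phy_reset(struct phy_data *d, int hard_reset)
    {
            pthread_t worker;
            int rc;

            pthread_mutex_lock(&d->event_lock);
            d->reset_result = 0;
            d->hard_reset = hard_reset;

            pthread_create(&worker, NULL, reset_work, d);
            pthread_join(worker, NULL); /* analogous to sas_drain_work() */

            rc = d->reset_result;
            pthread_mutex_unlock(&d->event_lock);
            return rc;
    }

    int main(void)
    {
            struct phy_data d = { .event_lock = PTHREAD_MUTEX_INITIALIZER };

            printf("reset rc=%d\n", queue_phy_reset(&d, 1));
            return 0;
    }

Running the reset in queued context is what lets transport_sas_phy_reset() block on libata EH and walk the domain_device topology safely, per its kernel-doc above.
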
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 14e21b5fb8b..f05c6387994 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -30,6 +30,7 @@
30#include <scsi/scsi_host.h> 30#include <scsi/scsi_host.h>
31#include <scsi/scsi_transport_sas.h> 31#include <scsi/scsi_transport_sas.h>
32#include <scsi/libsas.h> 32#include <scsi/libsas.h>
33#include <scsi/sas_ata.h>
33 34
34#define sas_printk(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__) 35#define sas_printk(fmt, ...) printk(KERN_NOTICE "sas: " fmt, ## __VA_ARGS__)
35 36
@@ -38,6 +39,18 @@
38#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble) 39#define TO_SAS_TASK(_scsi_cmd) ((void *)(_scsi_cmd)->host_scribble)
39#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0) 40#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0)
40 41
42struct sas_phy_data {
43 /* let reset be performed in sas_queue_work() context */
44 struct sas_phy *phy;
45 struct mutex event_lock;
46 int hard_reset;
47 int reset_result;
48 struct work_struct reset_work;
49 int enable;
50 int enable_result;
51 struct work_struct enable_work;
52};
53
41void sas_scsi_recover_host(struct Scsi_Host *shost); 54void sas_scsi_recover_host(struct Scsi_Host *shost);
42 55
43int sas_show_class(enum sas_class class, char *buf); 56int sas_show_class(enum sas_class class, char *buf);
@@ -56,6 +69,9 @@ enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
56int sas_init_queue(struct sas_ha_struct *sas_ha); 69int sas_init_queue(struct sas_ha_struct *sas_ha);
57int sas_init_events(struct sas_ha_struct *sas_ha); 70int sas_init_events(struct sas_ha_struct *sas_ha);
58void sas_shutdown_queue(struct sas_ha_struct *sas_ha); 71void sas_shutdown_queue(struct sas_ha_struct *sas_ha);
72void sas_disable_revalidation(struct sas_ha_struct *ha);
73void sas_enable_revalidation(struct sas_ha_struct *ha);
74void __sas_drain_work(struct sas_ha_struct *ha);
59 75
60void sas_deform_port(struct asd_sas_phy *phy, int gone); 76void sas_deform_port(struct asd_sas_phy *phy, int gone);
61 77
@@ -64,6 +80,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work);
64void sas_porte_link_reset_err(struct work_struct *work); 80void sas_porte_link_reset_err(struct work_struct *work);
65void sas_porte_timer_event(struct work_struct *work); 81void sas_porte_timer_event(struct work_struct *work);
66void sas_porte_hard_reset(struct work_struct *work); 82void sas_porte_hard_reset(struct work_struct *work);
83void sas_queue_work(struct sas_ha_struct *ha, struct work_struct *work);
67 84
68int sas_notify_lldd_dev_found(struct domain_device *); 85int sas_notify_lldd_dev_found(struct domain_device *);
69void sas_notify_lldd_dev_gone(struct domain_device *); 86void sas_notify_lldd_dev_gone(struct domain_device *);
@@ -72,10 +89,17 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id,
72 enum phy_func phy_func, struct sas_phy_linkrates *); 89 enum phy_func phy_func, struct sas_phy_linkrates *);
73int sas_smp_get_phy_events(struct sas_phy *phy); 90int sas_smp_get_phy_events(struct sas_phy *phy);
74 91
92void sas_device_set_phy(struct domain_device *dev, struct sas_port *port);
75struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy); 93struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
76 94struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id);
95int sas_ex_phy_discover(struct domain_device *dev, int single);
96int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
97 struct smp_resp *rps_resp);
98int sas_try_ata_reset(struct asd_sas_phy *phy);
77void sas_hae_reset(struct work_struct *work); 99void sas_hae_reset(struct work_struct *work);
78 100
101void sas_free_device(struct kref *kref);
102
79#ifdef CONFIG_SCSI_SAS_HOST_SMP 103#ifdef CONFIG_SCSI_SAS_HOST_SMP
80extern int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req, 104extern int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
81 struct request *rsp); 105 struct request *rsp);
@@ -90,36 +114,13 @@ static inline int sas_smp_host_handler(struct Scsi_Host *shost,
90} 114}
91#endif 115#endif
92 116
93static inline void sas_queue_event(int event, spinlock_t *lock, 117static inline void sas_fail_probe(struct domain_device *dev, const char *func, int err)
94 unsigned long *pending,
95 struct work_struct *work,
96 struct sas_ha_struct *sas_ha)
97{ 118{
98 unsigned long flags; 119 SAS_DPRINTK("%s: for %s device %16llx returned %d\n",
99 120 func, dev->parent ? "exp-attached" :
100 spin_lock_irqsave(lock, flags); 121 "direct-attached",
101 if (test_bit(event, pending)) { 122 SAS_ADDR(dev->sas_addr), err);
102 spin_unlock_irqrestore(lock, flags); 123 sas_unregister_dev(dev->port, dev);
103 return;
104 }
105 __set_bit(event, pending);
106 spin_unlock_irqrestore(lock, flags);
107
108 spin_lock_irqsave(&sas_ha->state_lock, flags);
109 if (sas_ha->state != SAS_HA_UNREGISTERED) {
110 scsi_queue_work(sas_ha->core.shost, work);
111 }
112 spin_unlock_irqrestore(&sas_ha->state_lock, flags);
113}
114
115static inline void sas_begin_event(int event, spinlock_t *lock,
116 unsigned long *pending)
117{
118 unsigned long flags;
119
120 spin_lock_irqsave(lock, flags);
121 __clear_bit(event, pending);
122 spin_unlock_irqrestore(lock, flags);
123} 124}
124 125
125static inline void sas_fill_in_rphy(struct domain_device *dev, 126static inline void sas_fill_in_rphy(struct domain_device *dev,
@@ -132,6 +133,7 @@ static inline void sas_fill_in_rphy(struct domain_device *dev,
132 case SATA_DEV: 133 case SATA_DEV:
133 /* FIXME: need sata device type */ 134 /* FIXME: need sata device type */
134 case SAS_END_DEV: 135 case SAS_END_DEV:
136 case SATA_PENDING:
135 rphy->identify.device_type = SAS_END_DEVICE; 137 rphy->identify.device_type = SAS_END_DEVICE;
136 break; 138 break;
137 case EDGE_DEV: 139 case EDGE_DEV:
@@ -146,6 +148,22 @@ static inline void sas_fill_in_rphy(struct domain_device *dev,
146 } 148 }
147} 149}
148 150
151static inline void sas_phy_set_target(struct asd_sas_phy *p, struct domain_device *dev)
152{
153 struct sas_phy *phy = p->phy;
154
155 if (dev) {
156 if (dev_is_sata(dev))
157 phy->identify.device_type = SAS_END_DEVICE;
158 else
159 phy->identify.device_type = dev->dev_type;
160 phy->identify.target_port_protocols = dev->tproto;
161 } else {
162 phy->identify.device_type = SAS_PHY_UNUSED;
163 phy->identify.target_port_protocols = 0;
164 }
165}
166
149static inline void sas_add_parent_port(struct domain_device *dev, int phy_id) 167static inline void sas_add_parent_port(struct domain_device *dev, int phy_id)
150{ 168{
151 struct expander_device *ex = &dev->ex_dev; 169 struct expander_device *ex = &dev->ex_dev;
@@ -161,4 +179,23 @@ static inline void sas_add_parent_port(struct domain_device *dev, int phy_id)
161 sas_port_add_phy(ex->parent_port, ex_phy->phy); 179 sas_port_add_phy(ex->parent_port, ex_phy->phy);
162} 180}
163 181
182static inline struct domain_device *sas_alloc_device(void)
183{
184 struct domain_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
185
186 if (dev) {
187 INIT_LIST_HEAD(&dev->siblings);
188 INIT_LIST_HEAD(&dev->dev_list_node);
189 INIT_LIST_HEAD(&dev->disco_list_node);
190 kref_init(&dev->kref);
191 spin_lock_init(&dev->done_lock);
192 }
193 return dev;
194}
195
196static inline void sas_put_device(struct domain_device *dev)
197{
198 kref_put(&dev->kref, sas_free_device);
199}
200
164#endif /* _SAS_INTERNAL_H_ */ 201#endif /* _SAS_INTERNAL_H_ */
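
sas_internal.h now gives domain devices kref-based lifetimes: sas_alloc_device() starts the count at one, holders such as sas_target_alloc() take extra references with kref_get(), and the final sas_put_device() invokes sas_free_device(). Since the kernel's kref is not available in userspace, here is a self-contained sketch of the same pattern with C11 atomics (names illustrative):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct device_obj {
            atomic_int refcount;
            /* ... device state ... */
    };

    static struct device_obj *dev_alloc(void)
    {
            struct device_obj *dev = calloc(1, sizeof(*dev));

            if (dev)
                    atomic_init(&dev->refcount, 1); /* like kref_init() */
            return dev;
    }

    static void dev_get(struct device_obj *dev)
    {
            atomic_fetch_add(&dev->refcount, 1); /* like kref_get() */
    }

    static void dev_put(struct device_obj *dev)
    {
            /* like kref_put(): dropping the last reference frees it */
            if (atomic_fetch_sub(&dev->refcount, 1) == 1)
                    free(dev);
    }

    int main(void)
    {
            struct device_obj *dev = dev_alloc();

            dev_get(dev); /* e.g. target_alloc pinning the device */
            dev_put(dev); /* target_destroy */
            dev_put(dev); /* discovery teardown drops the initial ref */
            printf("done\n");
            return 0;
    }

The payoff is that any path still holding a reference can keep dereferencing the device after it leaves the topology lists, which underpins sas_get_local_phy() in sas_scsi_host.c below.
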
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index e0f5018e907..dcfd4a9105c 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -36,8 +36,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work)
36 container_of(work, struct asd_sas_event, work); 36 container_of(work, struct asd_sas_event, work);
37 struct asd_sas_phy *phy = ev->phy; 37 struct asd_sas_phy *phy = ev->phy;
38 38
39 sas_begin_event(PHYE_LOSS_OF_SIGNAL, &phy->ha->event_lock, 39 clear_bit(PHYE_LOSS_OF_SIGNAL, &phy->phy_events_pending);
40 &phy->phy_events_pending);
41 phy->error = 0; 40 phy->error = 0;
42 sas_deform_port(phy, 1); 41 sas_deform_port(phy, 1);
43} 42}
@@ -48,8 +47,7 @@ static void sas_phye_oob_done(struct work_struct *work)
48 container_of(work, struct asd_sas_event, work); 47 container_of(work, struct asd_sas_event, work);
49 struct asd_sas_phy *phy = ev->phy; 48 struct asd_sas_phy *phy = ev->phy;
50 49
51 sas_begin_event(PHYE_OOB_DONE, &phy->ha->event_lock, 50 clear_bit(PHYE_OOB_DONE, &phy->phy_events_pending);
52 &phy->phy_events_pending);
53 phy->error = 0; 51 phy->error = 0;
54} 52}
55 53
@@ -63,8 +61,7 @@ static void sas_phye_oob_error(struct work_struct *work)
63 struct sas_internal *i = 61 struct sas_internal *i =
64 to_sas_internal(sas_ha->core.shost->transportt); 62 to_sas_internal(sas_ha->core.shost->transportt);
65 63
66 sas_begin_event(PHYE_OOB_ERROR, &phy->ha->event_lock, 64 clear_bit(PHYE_OOB_ERROR, &phy->phy_events_pending);
67 &phy->phy_events_pending);
68 65
69 sas_deform_port(phy, 1); 66 sas_deform_port(phy, 1);
70 67
@@ -95,8 +92,7 @@ static void sas_phye_spinup_hold(struct work_struct *work)
95 struct sas_internal *i = 92 struct sas_internal *i =
96 to_sas_internal(sas_ha->core.shost->transportt); 93 to_sas_internal(sas_ha->core.shost->transportt);
97 94
98 sas_begin_event(PHYE_SPINUP_HOLD, &phy->ha->event_lock, 95 clear_bit(PHYE_SPINUP_HOLD, &phy->phy_events_pending);
99 &phy->phy_events_pending);
100 96
101 phy->error = 0; 97 phy->error = 0;
102 i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL); 98 i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);
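
The sas_phy.c handlers above replace the event_lock-protected sas_begin_event() with a bare clear_bit() on the phy's pending mask: the queueing side (changed elsewhere in this series, not shown in this file) sets the bit and only schedules work when it was previously clear, and the handler clears it on entry so a new event edge can queue again — coalescing without a dedicated spinlock. A userspace sketch of that pattern with C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define PHYE_LOSS_OF_SIGNAL 0

    static atomic_ulong pending; /* like phy->phy_events_pending */

    /* queue side: only the first setter actually schedules work */
    static bool queue_event(int event)
    {
            unsigned long bit = 1UL << event;

            if (atomic_fetch_or(&pending, bit) & bit)
                    return false; /* already pending, coalesce */
            /* ... schedule the handler ... */
            return true;
    }

    /* handler side: clear the bit first so a new edge re-queues */
    static void handle_event(int event)
    {
            atomic_fetch_and(&pending, ~(1UL << event));
            /* ... process the event ... */
    }

    int main(void)
    {
            printf("%d\n", queue_event(PHYE_LOSS_OF_SIGNAL)); /* 1: queued */
            printf("%d\n", queue_event(PHYE_LOSS_OF_SIGNAL)); /* 0: coalesced */
            handle_event(PHYE_LOSS_OF_SIGNAL);
            printf("%d\n", queue_event(PHYE_LOSS_OF_SIGNAL)); /* 1: queued again */
            return 0;
    }
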
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c
index 42fd1f25b66..eb19c016d50 100644
--- a/drivers/scsi/libsas/sas_port.c
+++ b/drivers/scsi/libsas/sas_port.c
@@ -104,13 +104,11 @@ static void sas_form_port(struct asd_sas_phy *phy)
104 104
105 /* add the phy to the port */ 105 /* add the phy to the port */
106 list_add_tail(&phy->port_phy_el, &port->phy_list); 106 list_add_tail(&phy->port_phy_el, &port->phy_list);
107 sas_phy_set_target(phy, port->port_dev);
107 phy->port = port; 108 phy->port = port;
108 port->num_phys++; 109 port->num_phys++;
109 port->phy_mask |= (1U << phy->id); 110 port->phy_mask |= (1U << phy->id);
110 111
111 if (!port->phy)
112 port->phy = phy->phy;
113
114 if (*(u64 *)port->attached_sas_addr == 0) { 112 if (*(u64 *)port->attached_sas_addr == 0) {
115 port->class = phy->class; 113 port->class = phy->class;
116 memcpy(port->attached_sas_addr, phy->attached_sas_addr, 114 memcpy(port->attached_sas_addr, phy->attached_sas_addr,
@@ -125,7 +123,7 @@ static void sas_form_port(struct asd_sas_phy *phy)
125 spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags); 123 spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
126 124
127 if (!port->port) { 125 if (!port->port) {
128 port->port = sas_port_alloc(phy->phy->dev.parent, port->id); 126 port->port = sas_port_alloc(phy->phy->dev.parent, phy->id);
129 BUG_ON(!port->port); 127 BUG_ON(!port->port);
130 sas_port_add(port->port); 128 sas_port_add(port->port);
131 } 129 }
@@ -170,13 +168,13 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
170 dev->pathways--; 168 dev->pathways--;
171 169
172 if (port->num_phys == 1) { 170 if (port->num_phys == 1) {
173 if (dev && gone) 171 sas_unregister_domain_devices(port, gone);
174 dev->gone = 1;
175 sas_unregister_domain_devices(port);
176 sas_port_delete(port->port); 172 sas_port_delete(port->port);
177 port->port = NULL; 173 port->port = NULL;
178 } else 174 } else {
179 sas_port_delete_phy(port->port, phy->phy); 175 sas_port_delete_phy(port->port, phy->phy);
176 sas_device_set_phy(dev, port->port);
177 }
180 178
181 if (si->dft->lldd_port_deformed) 179 if (si->dft->lldd_port_deformed)
182 si->dft->lldd_port_deformed(phy); 180 si->dft->lldd_port_deformed(phy);
@@ -185,6 +183,7 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone)
185 spin_lock(&port->phy_list_lock); 183 spin_lock(&port->phy_list_lock);
186 184
187 list_del_init(&phy->port_phy_el); 185 list_del_init(&phy->port_phy_el);
186 sas_phy_set_target(phy, NULL);
188 phy->port = NULL; 187 phy->port = NULL;
189 port->num_phys--; 188 port->num_phys--;
190 port->phy_mask &= ~(1U << phy->id); 189 port->phy_mask &= ~(1U << phy->id);
@@ -213,8 +212,7 @@ void sas_porte_bytes_dmaed(struct work_struct *work)
213 container_of(work, struct asd_sas_event, work); 212 container_of(work, struct asd_sas_event, work);
214 struct asd_sas_phy *phy = ev->phy; 213 struct asd_sas_phy *phy = ev->phy;
215 214
216 sas_begin_event(PORTE_BYTES_DMAED, &phy->ha->event_lock, 215 clear_bit(PORTE_BYTES_DMAED, &phy->port_events_pending);
217 &phy->port_events_pending);
218 216
219 sas_form_port(phy); 217 sas_form_port(phy);
220} 218}
@@ -227,8 +225,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work)
227 unsigned long flags; 225 unsigned long flags;
228 u32 prim; 226 u32 prim;
229 227
230 sas_begin_event(PORTE_BROADCAST_RCVD, &phy->ha->event_lock, 228 clear_bit(PORTE_BROADCAST_RCVD, &phy->port_events_pending);
231 &phy->port_events_pending);
232 229
233 spin_lock_irqsave(&phy->sas_prim_lock, flags); 230 spin_lock_irqsave(&phy->sas_prim_lock, flags);
234 prim = phy->sas_prim; 231 prim = phy->sas_prim;
@@ -244,8 +241,7 @@ void sas_porte_link_reset_err(struct work_struct *work)
244 container_of(work, struct asd_sas_event, work); 241 container_of(work, struct asd_sas_event, work);
245 struct asd_sas_phy *phy = ev->phy; 242 struct asd_sas_phy *phy = ev->phy;
246 243
247 sas_begin_event(PORTE_LINK_RESET_ERR, &phy->ha->event_lock, 244 clear_bit(PORTE_LINK_RESET_ERR, &phy->port_events_pending);
248 &phy->port_events_pending);
249 245
250 sas_deform_port(phy, 1); 246 sas_deform_port(phy, 1);
251} 247}
@@ -256,8 +252,7 @@ void sas_porte_timer_event(struct work_struct *work)
256 container_of(work, struct asd_sas_event, work); 252 container_of(work, struct asd_sas_event, work);
257 struct asd_sas_phy *phy = ev->phy; 253 struct asd_sas_phy *phy = ev->phy;
258 254
259 sas_begin_event(PORTE_TIMER_EVENT, &phy->ha->event_lock, 255 clear_bit(PORTE_TIMER_EVENT, &phy->port_events_pending);
260 &phy->port_events_pending);
261 256
262 sas_deform_port(phy, 1); 257 sas_deform_port(phy, 1);
263} 258}
@@ -268,8 +263,7 @@ void sas_porte_hard_reset(struct work_struct *work)
268 container_of(work, struct asd_sas_event, work); 263 container_of(work, struct asd_sas_event, work);
269 struct asd_sas_phy *phy = ev->phy; 264 struct asd_sas_phy *phy = ev->phy;
270 265
271 sas_begin_event(PORTE_HARD_RESET, &phy->ha->event_lock, 266 clear_bit(PORTE_HARD_RESET, &phy->port_events_pending);
272 &phy->port_events_pending);
273 267
274 sas_deform_port(phy, 1); 268 sas_deform_port(phy, 1);
275} 269}
@@ -282,6 +276,8 @@ static void sas_init_port(struct asd_sas_port *port,
282 memset(port, 0, sizeof(*port)); 276 memset(port, 0, sizeof(*port));
283 port->id = i; 277 port->id = i;
284 INIT_LIST_HEAD(&port->dev_list); 278 INIT_LIST_HEAD(&port->dev_list);
279 INIT_LIST_HEAD(&port->disco_list);
280 INIT_LIST_HEAD(&port->destroy_list);
285 spin_lock_init(&port->phy_list_lock); 281 spin_lock_init(&port->phy_list_lock);
286 INIT_LIST_HEAD(&port->phy_list); 282 INIT_LIST_HEAD(&port->phy_list);
287 port->ha = sas_ha; 283 port->ha = sas_ha;
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index b6e233d9a0a..f0b9b7bf188 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -49,27 +49,12 @@
49#include <linux/scatterlist.h> 49#include <linux/scatterlist.h>
50#include <linux/libata.h> 50#include <linux/libata.h>
51 51
52/* ---------- SCSI Host glue ---------- */ 52/* record final status and free the task */
53 53static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
54static void sas_scsi_task_done(struct sas_task *task)
55{ 54{
56 struct task_status_struct *ts = &task->task_status; 55 struct task_status_struct *ts = &task->task_status;
57 struct scsi_cmnd *sc = task->uldd_task;
58 int hs = 0, stat = 0; 56 int hs = 0, stat = 0;
59 57
60 if (unlikely(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
61 /* Aborted tasks will be completed by the error handler */
62 SAS_DPRINTK("task done but aborted\n");
63 return;
64 }
65
66 if (unlikely(!sc)) {
67 SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n");
68 list_del_init(&task->list);
69 sas_free_task(task);
70 return;
71 }
72
73 if (ts->resp == SAS_TASK_UNDELIVERED) { 58 if (ts->resp == SAS_TASK_UNDELIVERED) {
74 /* transport error */ 59 /* transport error */
75 hs = DID_NO_CONNECT; 60 hs = DID_NO_CONNECT;
@@ -124,10 +109,41 @@ static void sas_scsi_task_done(struct sas_task *task)
124 break; 109 break;
125 } 110 }
126 } 111 }
127 ASSIGN_SAS_TASK(sc, NULL); 112
128 sc->result = (hs << 16) | stat; 113 sc->result = (hs << 16) | stat;
114 ASSIGN_SAS_TASK(sc, NULL);
129 list_del_init(&task->list); 115 list_del_init(&task->list);
130 sas_free_task(task); 116 sas_free_task(task);
117}
118
119static void sas_scsi_task_done(struct sas_task *task)
120{
121 struct scsi_cmnd *sc = task->uldd_task;
122 struct domain_device *dev = task->dev;
123 struct sas_ha_struct *ha = dev->port->ha;
124 unsigned long flags;
125
126 spin_lock_irqsave(&dev->done_lock, flags);
127 if (test_bit(SAS_HA_FROZEN, &ha->state))
128 task = NULL;
129 else
130 ASSIGN_SAS_TASK(sc, NULL);
131 spin_unlock_irqrestore(&dev->done_lock, flags);
132
133 if (unlikely(!task)) {
134 /* task will be completed by the error handler */
135 SAS_DPRINTK("task done but aborted\n");
136 return;
137 }
138
139 if (unlikely(!sc)) {
140 SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n");
141 list_del_init(&task->list);
142 sas_free_task(task);
143 return;
144 }
145
146 sas_end_task(sc, task);
131 sc->scsi_done(sc); 147 sc->scsi_done(sc);
132} 148}
133 149
@@ -192,17 +208,15 @@ int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
192 int res = 0; 208 int res = 0;
193 209
194 /* If the device fell off, no sense in issuing commands */ 210 /* If the device fell off, no sense in issuing commands */
195 if (dev->gone) { 211 if (test_bit(SAS_DEV_GONE, &dev->state)) {
196 cmd->result = DID_BAD_TARGET << 16; 212 cmd->result = DID_BAD_TARGET << 16;
197 goto out_done; 213 goto out_done;
198 } 214 }
199 215
200 if (dev_is_sata(dev)) { 216 if (dev_is_sata(dev)) {
201 unsigned long flags; 217 spin_lock_irq(dev->sata_dev.ap->lock);
202
203 spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
204 res = ata_sas_queuecmd(cmd, dev->sata_dev.ap); 218 res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
205 spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags); 219 spin_unlock_irq(dev->sata_dev.ap->lock);
206 return res; 220 return res;
207 } 221 }
208 222
@@ -235,24 +249,38 @@ out_done:
235 249
236static void sas_eh_finish_cmd(struct scsi_cmnd *cmd) 250static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
237{ 251{
238 struct sas_task *task = TO_SAS_TASK(cmd);
239 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host); 252 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
253 struct sas_task *task = TO_SAS_TASK(cmd);
254
255 /* At this point, we only get called following an actual abort
256 * of the task, so we should be guaranteed not to be racing with
257 * any completions from the LLD. Task is freed after this.
258 */
259 sas_end_task(cmd, task);
240 260
241 /* remove the aborted task flag to allow the task to be
242 * completed now. At this point, we only get called following
243 * an actual abort of the task, so we should be guaranteed not
244 * to be racing with any completions from the LLD (hence we
245 * don't need the task state lock to clear the flag) */
246 task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
247 /* Now call task_done. However, task will be free'd after
248 * this */
249 task->task_done(task);
250 /* now finish the command and move it on to the error 261 /* now finish the command and move it on to the error
251 * handler done list, this also takes it off the 262 * handler done list, this also takes it off the
252 * error handler pending list */ 263 * error handler pending list.
264 */
253 scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q); 265 scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
254} 266}
255 267
268static void sas_eh_defer_cmd(struct scsi_cmnd *cmd)
269{
270 struct domain_device *dev = cmd_to_domain_dev(cmd);
271 struct sas_ha_struct *ha = dev->port->ha;
272 struct sas_task *task = TO_SAS_TASK(cmd);
273
274 if (!dev_is_sata(dev)) {
275 sas_eh_finish_cmd(cmd);
276 return;
277 }
278
279 /* report the timeout to libata */
280 sas_end_task(cmd, task);
281 list_move_tail(&cmd->eh_entry, &ha->eh_ata_q);
282}
283
256static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd) 284static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
257{ 285{
258 struct scsi_cmnd *cmd, *n; 286 struct scsi_cmnd *cmd, *n;
@@ -260,7 +288,7 @@ static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd
260 list_for_each_entry_safe(cmd, n, error_q, eh_entry) { 288 list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
261 if (cmd->device->sdev_target == my_cmd->device->sdev_target && 289 if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
262 cmd->device->lun == my_cmd->device->lun) 290 cmd->device->lun == my_cmd->device->lun)
263 sas_eh_finish_cmd(cmd); 291 sas_eh_defer_cmd(cmd);
264 } 292 }
265} 293}
266 294
@@ -295,6 +323,7 @@ enum task_disposition {
295 TASK_IS_DONE, 323 TASK_IS_DONE,
296 TASK_IS_ABORTED, 324 TASK_IS_ABORTED,
297 TASK_IS_AT_LU, 325 TASK_IS_AT_LU,
326 TASK_IS_NOT_AT_HA,
298 TASK_IS_NOT_AT_LU, 327 TASK_IS_NOT_AT_LU,
299 TASK_ABORT_FAILED, 328 TASK_ABORT_FAILED,
300}; 329};
@@ -311,19 +340,18 @@ static enum task_disposition sas_scsi_find_task(struct sas_task *task)
311 struct scsi_core *core = &ha->core; 340 struct scsi_core *core = &ha->core;
312 struct sas_task *t, *n; 341 struct sas_task *t, *n;
313 342
343 mutex_lock(&core->task_queue_flush);
314 spin_lock_irqsave(&core->task_queue_lock, flags); 344 spin_lock_irqsave(&core->task_queue_lock, flags);
315 list_for_each_entry_safe(t, n, &core->task_queue, list) { 345 list_for_each_entry_safe(t, n, &core->task_queue, list)
316 if (task == t) { 346 if (task == t) {
317 list_del_init(&t->list); 347 list_del_init(&t->list);
318 spin_unlock_irqrestore(&core->task_queue_lock, 348 break;
319 flags);
320 SAS_DPRINTK("%s: task 0x%p aborted from "
321 "task_queue\n",
322 __func__, task);
323 return TASK_IS_ABORTED;
324 } 349 }
325 }
326 spin_unlock_irqrestore(&core->task_queue_lock, flags); 350 spin_unlock_irqrestore(&core->task_queue_lock, flags);
351 mutex_unlock(&core->task_queue_flush);
352
353 if (task == t)
354 return TASK_IS_NOT_AT_HA;
327 } 355 }
328 356
329 for (i = 0; i < 5; i++) { 357 for (i = 0; i < 5; i++) {
@@ -411,30 +439,26 @@ static int sas_recover_I_T(struct domain_device *dev)
411 return res; 439 return res;
412} 440}
413 441
414/* Find the sas_phy that's attached to this device */ 442/* take a reference on the last known good phy for this device */
415struct sas_phy *sas_find_local_phy(struct domain_device *dev) 443struct sas_phy *sas_get_local_phy(struct domain_device *dev)
416{ 444{
417 struct domain_device *pdev = dev->parent; 445 struct sas_ha_struct *ha = dev->port->ha;
418 struct ex_phy *exphy = NULL; 446 struct sas_phy *phy;
419 int i; 447 unsigned long flags;
420 448
421 /* Directly attached device */ 449 /* a published domain device always has a valid phy, it may be
422 if (!pdev) 450 * stale, but it is never NULL
423 return dev->port->phy; 451 */
452 BUG_ON(!dev->phy);
424 453
425 /* Otherwise look in the expander */ 454 spin_lock_irqsave(&ha->phy_port_lock, flags);
426 for (i = 0; i < pdev->ex_dev.num_phys; i++) 455 phy = dev->phy;
427 if (!memcmp(dev->sas_addr, 456 get_device(&phy->dev);
428 pdev->ex_dev.ex_phy[i].attached_sas_addr, 457 spin_unlock_irqrestore(&ha->phy_port_lock, flags);
429 SAS_ADDR_SIZE)) {
430 exphy = &pdev->ex_dev.ex_phy[i];
431 break;
432 }
433 458
434 BUG_ON(!exphy); 459 return phy;
435 return exphy->phy;
436} 460}
437EXPORT_SYMBOL_GPL(sas_find_local_phy); 461EXPORT_SYMBOL_GPL(sas_get_local_phy);
438 462
439/* Attempt to send a LUN reset message to a device */ 463/* Attempt to send a LUN reset message to a device */
440int sas_eh_device_reset_handler(struct scsi_cmnd *cmd) 464int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
@@ -461,7 +485,7 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
461int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd) 485int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
462{ 486{
463 struct domain_device *dev = cmd_to_domain_dev(cmd); 487 struct domain_device *dev = cmd_to_domain_dev(cmd);
464 struct sas_phy *phy = sas_find_local_phy(dev); 488 struct sas_phy *phy = sas_get_local_phy(dev);
465 int res; 489 int res;
466 490
467 res = sas_phy_reset(phy, 1); 491 res = sas_phy_reset(phy, 1);
@@ -469,6 +493,8 @@ int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
469 SAS_DPRINTK("Bus reset of %s failed 0x%x\n", 493 SAS_DPRINTK("Bus reset of %s failed 0x%x\n",
470 kobject_name(&phy->dev.kobj), 494 kobject_name(&phy->dev.kobj),
471 res); 495 res);
496 sas_put_local_phy(phy);
497
472 if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE) 498 if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
473 return SUCCESS; 499 return SUCCESS;
474 500
@@ -495,9 +521,7 @@ try_bus_reset:
495 return FAILED; 521 return FAILED;
496} 522}
497 523
498static int sas_eh_handle_sas_errors(struct Scsi_Host *shost, 524static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *work_q)
499 struct list_head *work_q,
500 struct list_head *done_q)
501{ 525{
502 struct scsi_cmnd *cmd, *n; 526 struct scsi_cmnd *cmd, *n;
503 enum task_disposition res = TASK_IS_DONE; 527 enum task_disposition res = TASK_IS_DONE;
@@ -505,13 +529,28 @@ static int sas_eh_handle_sas_errors(struct Scsi_Host *shost,
505 struct sas_internal *i = to_sas_internal(shost->transportt); 529 struct sas_internal *i = to_sas_internal(shost->transportt);
506 unsigned long flags; 530 unsigned long flags;
507 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); 531 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
532 LIST_HEAD(done);
508 533
509Again: 534 /* clean out any commands that won the completion vs eh race */
510 list_for_each_entry_safe(cmd, n, work_q, eh_entry) { 535 list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
511 struct sas_task *task = TO_SAS_TASK(cmd); 536 struct domain_device *dev = cmd_to_domain_dev(cmd);
537 struct sas_task *task;
538
539 spin_lock_irqsave(&dev->done_lock, flags);
540 /* by this point the lldd has either observed
541 * SAS_HA_FROZEN and is leaving the task alone, or has
542 * won the race with eh and decided to complete it
543 */
544 task = TO_SAS_TASK(cmd);
545 spin_unlock_irqrestore(&dev->done_lock, flags);
512 546
513 if (!task) 547 if (!task)
514 continue; 548 list_move_tail(&cmd->eh_entry, &done);
549 }
550
551 Again:
552 list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
553 struct sas_task *task = TO_SAS_TASK(cmd);
515 554
516 list_del_init(&cmd->eh_entry); 555 list_del_init(&cmd->eh_entry);
517 556
@@ -531,15 +570,23 @@ Again:
531 cmd->eh_eflags = 0; 570 cmd->eh_eflags = 0;
532 571
533 switch (res) { 572 switch (res) {
573 case TASK_IS_NOT_AT_HA:
574 SAS_DPRINTK("%s: task 0x%p is not at ha: %s\n",
575 __func__, task,
576 cmd->retries ? "retry" : "aborted");
577 if (cmd->retries)
578 cmd->retries--;
579 sas_eh_finish_cmd(cmd);
580 continue;
534 case TASK_IS_DONE: 581 case TASK_IS_DONE:
535 SAS_DPRINTK("%s: task 0x%p is done\n", __func__, 582 SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
536 task); 583 task);
537 sas_eh_finish_cmd(cmd); 584 sas_eh_defer_cmd(cmd);
538 continue; 585 continue;
539 case TASK_IS_ABORTED: 586 case TASK_IS_ABORTED:
540 SAS_DPRINTK("%s: task 0x%p is aborted\n", 587 SAS_DPRINTK("%s: task 0x%p is aborted\n",
541 __func__, task); 588 __func__, task);
542 sas_eh_finish_cmd(cmd); 589 sas_eh_defer_cmd(cmd);
543 continue; 590 continue;
544 case TASK_IS_AT_LU: 591 case TASK_IS_AT_LU:
545 SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task); 592 SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
@@ -550,7 +597,7 @@ Again:
550 "recovered\n", 597 "recovered\n",
551 SAS_ADDR(task->dev), 598 SAS_ADDR(task->dev),
552 cmd->device->lun); 599 cmd->device->lun);
553 sas_eh_finish_cmd(cmd); 600 sas_eh_defer_cmd(cmd);
554 sas_scsi_clear_queue_lu(work_q, cmd); 601 sas_scsi_clear_queue_lu(work_q, cmd);
555 goto Again; 602 goto Again;
556 } 603 }
@@ -560,7 +607,8 @@ Again:
560 SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n", 607 SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n",
561 task); 608 task);
562 tmf_resp = sas_recover_I_T(task->dev); 609 tmf_resp = sas_recover_I_T(task->dev);
563 if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { 610 if (tmf_resp == TMF_RESP_FUNC_COMPLETE ||
611 tmf_resp == -ENODEV) {
564 struct domain_device *dev = task->dev; 612 struct domain_device *dev = task->dev;
565 SAS_DPRINTK("I_T %016llx recovered\n", 613 SAS_DPRINTK("I_T %016llx recovered\n",
566 SAS_ADDR(task->dev->sas_addr)); 614 SAS_ADDR(task->dev->sas_addr));
@@ -607,13 +655,16 @@ Again:
607 goto clear_q; 655 goto clear_q;
608 } 656 }
609 } 657 }
610 return list_empty(work_q); 658 out:
611clear_q: 659 list_splice_tail(&done, work_q);
660 list_splice_tail_init(&ha->eh_ata_q, work_q);
661 return;
662
663 clear_q:
612 SAS_DPRINTK("--- Exit %s -- clear_q\n", __func__); 664 SAS_DPRINTK("--- Exit %s -- clear_q\n", __func__);
613 list_for_each_entry_safe(cmd, n, work_q, eh_entry) 665 list_for_each_entry_safe(cmd, n, work_q, eh_entry)
614 sas_eh_finish_cmd(cmd); 666 sas_eh_finish_cmd(cmd);
615 667 goto out;
616 return list_empty(work_q);
617} 668}
618 669
619void sas_scsi_recover_host(struct Scsi_Host *shost) 670void sas_scsi_recover_host(struct Scsi_Host *shost)
@@ -627,12 +678,17 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
627 shost->host_eh_scheduled = 0; 678 shost->host_eh_scheduled = 0;
628 spin_unlock_irqrestore(shost->host_lock, flags); 679 spin_unlock_irqrestore(shost->host_lock, flags);
629 680
630 SAS_DPRINTK("Enter %s\n", __func__); 681 SAS_DPRINTK("Enter %s busy: %d failed: %d\n",
682 __func__, shost->host_busy, shost->host_failed);
631 /* 683 /*
632 * Deal with commands that still have SAS tasks (i.e. they didn't 684 * Deal with commands that still have SAS tasks (i.e. they didn't
633 * complete via the normal sas_task completion mechanism) 685 * complete via the normal sas_task completion mechanism),
686 * SAS_HA_FROZEN gives eh dominion over all sas_task completion.
634 */ 687 */
635 if (sas_eh_handle_sas_errors(shost, &eh_work_q, &ha->eh_done_q)) 688 set_bit(SAS_HA_FROZEN, &ha->state);
689 sas_eh_handle_sas_errors(shost, &eh_work_q);
690 clear_bit(SAS_HA_FROZEN, &ha->state);
691 if (list_empty(&eh_work_q))
636 goto out; 692 goto out;
637 693
638 /* 694 /*
@@ -641,59 +697,26 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
641 * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any 697 * scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
642 * command we see here has no sas_task and is thus unknown to the HA. 698 * command we see here has no sas_task and is thus unknown to the HA.
643 */ 699 */
644 if (!sas_ata_eh(shost, &eh_work_q, &ha->eh_done_q)) 700 sas_ata_eh(shost, &eh_work_q, &ha->eh_done_q);
645 if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q)) 701 if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
646 scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q); 702 scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);
647 703
648out: 704out:
705 if (ha->lldd_max_execute_num > 1)
706 wake_up_process(ha->core.queue_thread);
707
649 /* now link into libata eh --- if we have any ata devices */ 708 /* now link into libata eh --- if we have any ata devices */
650 sas_ata_strategy_handler(shost); 709 sas_ata_strategy_handler(shost);
651 710
652 scsi_eh_flush_done_q(&ha->eh_done_q); 711 scsi_eh_flush_done_q(&ha->eh_done_q);
653 712
654 SAS_DPRINTK("--- Exit %s\n", __func__); 713 SAS_DPRINTK("--- Exit %s: busy: %d failed: %d\n",
655 return; 714 __func__, shost->host_busy, shost->host_failed);
656} 715}
657 716
658enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd) 717enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
659{ 718{
660 struct sas_task *task = TO_SAS_TASK(cmd); 719 scmd_printk(KERN_DEBUG, cmd, "command %p timed out\n", cmd);
661 unsigned long flags;
662 enum blk_eh_timer_return rtn;
663
664 if (sas_ata_timed_out(cmd, task, &rtn))
665 return rtn;
666
667 if (!task) {
668 cmd->request->timeout /= 2;
669 SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n",
670 cmd, task, (cmd->request->timeout ?
671 "BLK_EH_RESET_TIMER" : "BLK_EH_NOT_HANDLED"));
672 if (!cmd->request->timeout)
673 return BLK_EH_NOT_HANDLED;
674 return BLK_EH_RESET_TIMER;
675 }
676
677 spin_lock_irqsave(&task->task_state_lock, flags);
678 BUG_ON(task->task_state_flags & SAS_TASK_STATE_ABORTED);
679 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
680 spin_unlock_irqrestore(&task->task_state_lock, flags);
681 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: "
682 "BLK_EH_HANDLED\n", cmd, task);
683 return BLK_EH_HANDLED;
684 }
685 if (!(task->task_state_flags & SAS_TASK_AT_INITIATOR)) {
686 spin_unlock_irqrestore(&task->task_state_lock, flags);
687 SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: "
688 "BLK_EH_RESET_TIMER\n",
689 cmd, task);
690 return BLK_EH_RESET_TIMER;
691 }
692 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
693 spin_unlock_irqrestore(&task->task_state_lock, flags);
694
695 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: BLK_EH_NOT_HANDLED\n",
696 cmd, task);
697 720
698 return BLK_EH_NOT_HANDLED; 721 return BLK_EH_NOT_HANDLED;
699} 722}
@@ -737,27 +760,15 @@ struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
737 return found_dev; 760 return found_dev;
738} 761}
739 762
740static inline struct domain_device *sas_find_target(struct scsi_target *starget)
741{
742 struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
743
744 return sas_find_dev_by_rphy(rphy);
745}
746
747int sas_target_alloc(struct scsi_target *starget) 763int sas_target_alloc(struct scsi_target *starget)
748{ 764{
749 struct domain_device *found_dev = sas_find_target(starget); 765 struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
750 int res; 766 struct domain_device *found_dev = sas_find_dev_by_rphy(rphy);
751 767
752 if (!found_dev) 768 if (!found_dev)
753 return -ENODEV; 769 return -ENODEV;
754 770
755 if (dev_is_sata(found_dev)) { 771 kref_get(&found_dev->kref);
756 res = sas_ata_init_host_and_port(found_dev, starget);
757 if (res)
758 return res;
759 }
760
761 starget->hostdata = found_dev; 772 starget->hostdata = found_dev;
762 return 0; 773 return 0;
763} 774}
@@ -797,14 +808,6 @@ int sas_slave_configure(struct scsi_device *scsi_dev)
797 return 0; 808 return 0;
798} 809}
799 810
800void sas_slave_destroy(struct scsi_device *scsi_dev)
801{
802 struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
803
804 if (dev_is_sata(dev))
805 sas_to_ata_dev(dev)->class = ATA_DEV_NONE;
806}
807
808int sas_change_queue_depth(struct scsi_device *sdev, int depth, int reason) 811int sas_change_queue_depth(struct scsi_device *sdev, int depth, int reason)
809{ 812{
810 struct domain_device *dev = sdev_to_domain_dev(sdev); 813 struct domain_device *dev = sdev_to_domain_dev(sdev);
@@ -871,9 +874,11 @@ static void sas_queue(struct sas_ha_struct *sas_ha)
871 int res; 874 int res;
872 struct sas_internal *i = to_sas_internal(core->shost->transportt); 875 struct sas_internal *i = to_sas_internal(core->shost->transportt);
873 876
877 mutex_lock(&core->task_queue_flush);
874 spin_lock_irqsave(&core->task_queue_lock, flags); 878 spin_lock_irqsave(&core->task_queue_lock, flags);
875 while (!kthread_should_stop() && 879 while (!kthread_should_stop() &&
876 !list_empty(&core->task_queue)) { 880 !list_empty(&core->task_queue) &&
881 !test_bit(SAS_HA_FROZEN, &sas_ha->state)) {
877 882
878 can_queue = sas_ha->lldd_queue_size - core->task_queue_size; 883 can_queue = sas_ha->lldd_queue_size - core->task_queue_size;
879 if (can_queue >= 0) { 884 if (can_queue >= 0) {
@@ -909,6 +914,7 @@ static void sas_queue(struct sas_ha_struct *sas_ha)
909 } 914 }
910 } 915 }
911 spin_unlock_irqrestore(&core->task_queue_lock, flags); 916 spin_unlock_irqrestore(&core->task_queue_lock, flags);
917 mutex_unlock(&core->task_queue_flush);
912} 918}
913 919
914/** 920/**
@@ -935,6 +941,7 @@ int sas_init_queue(struct sas_ha_struct *sas_ha)
935 struct scsi_core *core = &sas_ha->core; 941 struct scsi_core *core = &sas_ha->core;
936 942
937 spin_lock_init(&core->task_queue_lock); 943 spin_lock_init(&core->task_queue_lock);
944 mutex_init(&core->task_queue_flush);
938 core->task_queue_size = 0; 945 core->task_queue_size = 0;
939 INIT_LIST_HEAD(&core->task_queue); 946 INIT_LIST_HEAD(&core->task_queue);
940 947
@@ -972,49 +979,6 @@ void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
972} 979}
973 980
974/* 981/*
975 * Call the LLDD task abort routine directly. This function is intended for
976 * use by upper layers that need to tell the LLDD to abort a task.
977 */
978int __sas_task_abort(struct sas_task *task)
979{
980 struct sas_internal *si =
981 to_sas_internal(task->dev->port->ha->core.shost->transportt);
982 unsigned long flags;
983 int res;
984
985 spin_lock_irqsave(&task->task_state_lock, flags);
986 if (task->task_state_flags & SAS_TASK_STATE_ABORTED ||
987 task->task_state_flags & SAS_TASK_STATE_DONE) {
988 spin_unlock_irqrestore(&task->task_state_lock, flags);
989 SAS_DPRINTK("%s: Task %p already finished.\n", __func__,
990 task);
991 return 0;
992 }
993 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
994 spin_unlock_irqrestore(&task->task_state_lock, flags);
995
996 if (!si->dft->lldd_abort_task)
997 return -ENODEV;
998
999 res = si->dft->lldd_abort_task(task);
1000
1001 spin_lock_irqsave(&task->task_state_lock, flags);
1002 if ((task->task_state_flags & SAS_TASK_STATE_DONE) ||
1003 (res == TMF_RESP_FUNC_COMPLETE))
1004 {
1005 spin_unlock_irqrestore(&task->task_state_lock, flags);
1006 task->task_done(task);
1007 return 0;
1008 }
1009
1010 if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
1011 task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
1012 spin_unlock_irqrestore(&task->task_state_lock, flags);
1013
1014 return -EAGAIN;
1015}
1016
1017/*
1018 * Tell an upper layer that it needs to initiate an abort for a given task. 982 * Tell an upper layer that it needs to initiate an abort for a given task.
1019 * This should only ever be called by an LLDD. 983 * This should only ever be called by an LLDD.
1020 */ 984 */
@@ -1043,27 +1007,15 @@ void sas_task_abort(struct sas_task *task)
1043 } 1007 }
1044} 1008}
1045 1009
1046int sas_slave_alloc(struct scsi_device *scsi_dev)
1047{
1048 struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
1049
1050 if (dev_is_sata(dev))
1051 return ata_sas_port_init(dev->sata_dev.ap);
1052
1053 return 0;
1054}
1055
1056void sas_target_destroy(struct scsi_target *starget) 1010void sas_target_destroy(struct scsi_target *starget)
1057{ 1011{
1058 struct domain_device *found_dev = sas_find_target(starget); 1012 struct domain_device *found_dev = starget->hostdata;
1059 1013
1060 if (!found_dev) 1014 if (!found_dev)
1061 return; 1015 return;
1062 1016
1063 if (dev_is_sata(found_dev)) 1017 starget->hostdata = NULL;
1064 ata_sas_port_destroy(found_dev->sata_dev.ap); 1018 sas_put_device(found_dev);
1065
1066 return;
1067} 1019}
1068 1020
1069static void sas_parse_addr(u8 *sas_addr, const char *p) 1021static void sas_parse_addr(u8 *sas_addr, const char *p)
@@ -1108,16 +1060,12 @@ EXPORT_SYMBOL_GPL(sas_request_addr);
1108EXPORT_SYMBOL_GPL(sas_queuecommand); 1060EXPORT_SYMBOL_GPL(sas_queuecommand);
1109EXPORT_SYMBOL_GPL(sas_target_alloc); 1061EXPORT_SYMBOL_GPL(sas_target_alloc);
1110EXPORT_SYMBOL_GPL(sas_slave_configure); 1062EXPORT_SYMBOL_GPL(sas_slave_configure);
1111EXPORT_SYMBOL_GPL(sas_slave_destroy);
1112EXPORT_SYMBOL_GPL(sas_change_queue_depth); 1063EXPORT_SYMBOL_GPL(sas_change_queue_depth);
1113EXPORT_SYMBOL_GPL(sas_change_queue_type); 1064EXPORT_SYMBOL_GPL(sas_change_queue_type);
1114EXPORT_SYMBOL_GPL(sas_bios_param); 1065EXPORT_SYMBOL_GPL(sas_bios_param);
1115EXPORT_SYMBOL_GPL(__sas_task_abort);
1116EXPORT_SYMBOL_GPL(sas_task_abort); 1066EXPORT_SYMBOL_GPL(sas_task_abort);
1117EXPORT_SYMBOL_GPL(sas_phy_reset); 1067EXPORT_SYMBOL_GPL(sas_phy_reset);
1118EXPORT_SYMBOL_GPL(sas_phy_enable);
1119EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler); 1068EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
1120EXPORT_SYMBOL_GPL(sas_eh_bus_reset_handler); 1069EXPORT_SYMBOL_GPL(sas_eh_bus_reset_handler);
1121EXPORT_SYMBOL_GPL(sas_slave_alloc);
1122EXPORT_SYMBOL_GPL(sas_target_destroy); 1070EXPORT_SYMBOL_GPL(sas_target_destroy);
1123EXPORT_SYMBOL_GPL(sas_ioctl); 1071EXPORT_SYMBOL_GPL(sas_ioctl);
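The SAS_HA_FROZEN bit and task_queue_flush mutex added above work as a pair: the bit stops sas_queue() from dispatching further tasks, and the mutex lets another thread wait out any dispatch pass already in flight. A minimal sketch of how an error-handling path could drain the queue with them (the helper name and the wake-up detail are illustrative, not part of this patch):

	static void sas_drain_queue_sketch(struct sas_ha_struct *sas_ha)
	{
		struct scsi_core *core = &sas_ha->core;

		/* Stop new dispatch, then wait for a pass of sas_queue()
		 * that may already hold the flush mutex to finish.
		 */
		set_bit(SAS_HA_FROZEN, &sas_ha->state);
		mutex_lock(&core->task_queue_flush);
		mutex_unlock(&core->task_queue_flush);

		/* ... fail or re-issue the sas_tasks left on core->task_queue ... */

		clear_bit(SAS_HA_FROZEN, &sas_ha->state);
		wake_up_process(core->queue_thread);	/* resume dispatch */
	}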
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 825f9307417..5fc044ff656 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -534,6 +534,7 @@ struct lpfc_hba {
534 void (*lpfc_scsi_prep_cmnd) 534 void (*lpfc_scsi_prep_cmnd)
535 (struct lpfc_vport *, struct lpfc_scsi_buf *, 535 (struct lpfc_vport *, struct lpfc_scsi_buf *,
536 struct lpfc_nodelist *); 536 struct lpfc_nodelist *);
537
537 /* IOCB interface function jump table entries */ 538 /* IOCB interface function jump table entries */
538 int (*__lpfc_sli_issue_iocb) 539 int (*__lpfc_sli_issue_iocb)
539 (struct lpfc_hba *, uint32_t, 540 (struct lpfc_hba *, uint32_t,
@@ -541,8 +542,6 @@ struct lpfc_hba {
541 void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *, 542 void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
542 struct lpfc_iocbq *); 543 struct lpfc_iocbq *);
543 int (*lpfc_hba_down_post)(struct lpfc_hba *phba); 544 int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
544
545
546 IOCB_t * (*lpfc_get_iocb_from_iocbq) 545 IOCB_t * (*lpfc_get_iocb_from_iocbq)
547 (struct lpfc_iocbq *); 546 (struct lpfc_iocbq *);
548 void (*lpfc_scsi_cmd_iocb_cmpl) 547 void (*lpfc_scsi_cmd_iocb_cmpl)
@@ -551,10 +550,12 @@ struct lpfc_hba {
551 /* MBOX interface function jump table entries */ 550 /* MBOX interface function jump table entries */
552 int (*lpfc_sli_issue_mbox) 551 int (*lpfc_sli_issue_mbox)
553 (struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t); 552 (struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
553
554 /* Slow-path IOCB process function jump table entries */ 554 /* Slow-path IOCB process function jump table entries */
555 void (*lpfc_sli_handle_slow_ring_event) 555 void (*lpfc_sli_handle_slow_ring_event)
556 (struct lpfc_hba *phba, struct lpfc_sli_ring *pring, 556 (struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
557 uint32_t mask); 557 uint32_t mask);
558
558 /* INIT device interface function jump table entries */ 559 /* INIT device interface function jump table entries */
559 int (*lpfc_sli_hbq_to_firmware) 560 int (*lpfc_sli_hbq_to_firmware)
560 (struct lpfc_hba *, uint32_t, struct hbq_dmabuf *); 561 (struct lpfc_hba *, uint32_t, struct hbq_dmabuf *);
@@ -573,6 +574,10 @@ struct lpfc_hba {
573 int (*lpfc_selective_reset) 574 int (*lpfc_selective_reset)
574 (struct lpfc_hba *); 575 (struct lpfc_hba *);
575 576
577 int (*lpfc_bg_scsi_prep_dma_buf)
578 (struct lpfc_hba *, struct lpfc_scsi_buf *);
579 /* Add new entries here */
580
576 /* SLI4 specific HBA data structure */ 581 /* SLI4 specific HBA data structure */
577 struct lpfc_sli4_hba sli4_hba; 582 struct lpfc_sli4_hba sli4_hba;
578 583
@@ -838,6 +843,7 @@ struct lpfc_hba {
838 struct dentry *debug_writeGuard; /* inject write guard_tag errors */ 843 struct dentry *debug_writeGuard; /* inject write guard_tag errors */
839 struct dentry *debug_writeApp; /* inject write app_tag errors */ 844 struct dentry *debug_writeApp; /* inject write app_tag errors */
840 struct dentry *debug_writeRef; /* inject write ref_tag errors */ 845 struct dentry *debug_writeRef; /* inject write ref_tag errors */
846 struct dentry *debug_readGuard; /* inject read guard_tag errors */
841 struct dentry *debug_readApp; /* inject read app_tag errors */ 847 struct dentry *debug_readApp; /* inject read app_tag errors */
842 struct dentry *debug_readRef; /* inject read ref_tag errors */ 848 struct dentry *debug_readRef; /* inject read ref_tag errors */
843 849
@@ -845,10 +851,11 @@ struct lpfc_hba {
845 uint32_t lpfc_injerr_wgrd_cnt; 851 uint32_t lpfc_injerr_wgrd_cnt;
846 uint32_t lpfc_injerr_wapp_cnt; 852 uint32_t lpfc_injerr_wapp_cnt;
847 uint32_t lpfc_injerr_wref_cnt; 853 uint32_t lpfc_injerr_wref_cnt;
854 uint32_t lpfc_injerr_rgrd_cnt;
848 uint32_t lpfc_injerr_rapp_cnt; 855 uint32_t lpfc_injerr_rapp_cnt;
849 uint32_t lpfc_injerr_rref_cnt; 856 uint32_t lpfc_injerr_rref_cnt;
850 sector_t lpfc_injerr_lba; 857 sector_t lpfc_injerr_lba;
851#define LPFC_INJERR_LBA_OFF (sector_t)0xffffffffffffffff 858#define LPFC_INJERR_LBA_OFF (sector_t)(-1)
852 859
853 struct dentry *debug_slow_ring_trc; 860 struct dentry *debug_slow_ring_trc;
854 struct lpfc_debugfs_trc *slow_ring_trc; 861 struct lpfc_debugfs_trc *slow_ring_trc;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index f6697cb0e21..296ad5bc424 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -353,7 +353,7 @@ lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
353 struct lpfc_hba *phba = vport->phba; 353 struct lpfc_hba *phba = vport->phba;
354 uint32_t if_type; 354 uint32_t if_type;
355 uint8_t sli_family; 355 uint8_t sli_family;
356 char fwrev[32]; 356 char fwrev[FW_REV_STR_SIZE];
357 int len; 357 int len;
358 358
359 lpfc_decode_firmware_rev(phba, fwrev, 1); 359 lpfc_decode_firmware_rev(phba, fwrev, 1);
@@ -922,11 +922,15 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
922 rc = lpfc_sli4_pdev_status_reg_wait(phba); 922 rc = lpfc_sli4_pdev_status_reg_wait(phba);
923 923
924 if (rc == -EPERM) { 924 if (rc == -EPERM) {
925 /* no privilege for reset, restore if needed */ 925 /* no privilege for reset */
926 if (before_fc_flag & FC_OFFLINE_MODE) 926 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
927 goto out; 928 "3150 No privilege to perform the requested "
928 "access: x%x\n", reg_val);
928 } else if (rc == -EIO) { 929 } else if (rc == -EIO) {
929 /* reset failed, there is nothing more we can do */ 930 /* reset failed, there is nothing more we can do */
931 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
932 "3153 Fail to perform the requested "
933 "access: x%x\n", reg_val);
930 return rc; 934 return rc;
931 } 935 }
932 936
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 56a86baece5..141e4b40bb5 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -589,7 +589,10 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job)
589 } 589 }
590 cmdiocbq->iocb.un.elsreq64.bdl.bdeSize = 590 cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
591 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64); 591 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
592 cmdiocbq->iocb.ulpContext = rpi; 592 if (phba->sli_rev == LPFC_SLI_REV4)
593 cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
594 else
595 cmdiocbq->iocb.ulpContext = rpi;
593 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC; 596 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
594 cmdiocbq->context1 = NULL; 597 cmdiocbq->context1 = NULL;
595 cmdiocbq->context2 = NULL; 598 cmdiocbq->context2 = NULL;
@@ -1768,7 +1771,7 @@ lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
1768 bf_set(lpfc_mbx_set_diag_state_link_type, 1771 bf_set(lpfc_mbx_set_diag_state_link_type,
1769 &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp); 1772 &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
1770 bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req, 1773 bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
1771 LPFC_DIAG_LOOPBACK_TYPE_SERDES); 1774 LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
1772 1775
1773 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO); 1776 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1774 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) { 1777 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
@@ -3977,7 +3980,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
3977 case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES: 3980 case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
3978 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3981 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3979 "3106 Handled SLI_CONFIG " 3982 "3106 Handled SLI_CONFIG "
3980 "subsys_fcoe, opcode:x%x\n", 3983 "subsys_comn, opcode:x%x\n",
3981 opcode); 3984 opcode);
3982 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job, 3985 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
3983 nemb_mse, dmabuf); 3986 nemb_mse, dmabuf);
@@ -3985,7 +3988,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
3985 default: 3988 default:
3986 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC, 3989 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3987 "3107 Reject SLI_CONFIG " 3990 "3107 Reject SLI_CONFIG "
3988 "subsys_fcoe, opcode:x%x\n", 3991 "subsys_comn, opcode:x%x\n",
3989 opcode); 3992 opcode);
3990 rc = -EPERM; 3993 rc = -EPERM;
3991 break; 3994 break;
@@ -4556,7 +4559,12 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4556 + sizeof(MAILBOX_t)); 4559 + sizeof(MAILBOX_t));
4557 } 4560 }
4558 } else if (phba->sli_rev == LPFC_SLI_REV4) { 4561 } else if (phba->sli_rev == LPFC_SLI_REV4) {
4559 if (pmb->mbxCommand == MBX_DUMP_MEMORY) { 4562 /* Let type 4 (well known data) through because the data is
4563 * returned in varwords[4-8]
4564 * otherwise check the receive length and fetch the buffer addr
4565 */
4566 if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
4567 (pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
4560 /* rebuild the command for sli4 using our own buffers 4568 /* rebuild the command for sli4 using our own buffers
4561 * like we do for biu diags 4569 * like we do for biu diags
4562 */ 4570 */
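The ulpContext fix above captures the SLI4 rule that the driver-side RPI is only an index: the value programmed into the IOCB must come from the phba->sli4_hba.rpi_ids[] translation table. Reduced to a hypothetical helper (lpfc_rpi_to_hw is not a driver function, just a name for the pattern):

	static inline uint16_t lpfc_rpi_to_hw(struct lpfc_hba *phba, uint16_t rpi)
	{
		/* SLI3 programs the RPI directly; SLI4 indirects through
		 * the rpi_ids[] table populated at initialization.
		 */
		if (phba->sli_rev == LPFC_SLI_REV4)
			return phba->sli4_hba.rpi_ids[rpi];
		return rpi;
	}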
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 26924b7a6cd..330dd7192a7 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -462,3 +462,4 @@ int lpfc_issue_unreg_vfi(struct lpfc_vport *);
462int lpfc_selective_reset(struct lpfc_hba *); 462int lpfc_selective_reset(struct lpfc_hba *);
463int lpfc_sli4_read_config(struct lpfc_hba *phba); 463int lpfc_sli4_read_config(struct lpfc_hba *phba);
464int lpfc_scsi_buf_update(struct lpfc_hba *phba); 464int lpfc_scsi_buf_update(struct lpfc_hba *phba);
465void lpfc_sli4_node_prep(struct lpfc_hba *phba);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 707081d0a22..93e96b3c909 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1076,7 +1076,7 @@ int
1076lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol, 1076lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
1077 size_t size) 1077 size_t size)
1078{ 1078{
1079 char fwrev[16]; 1079 char fwrev[FW_REV_STR_SIZE];
1080 int n; 1080 int n;
1081 1081
1082 lpfc_decode_firmware_rev(vport->phba, fwrev, 0); 1082 lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
@@ -1834,7 +1834,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
1834 uint8_t *fwname; 1834 uint8_t *fwname;
1835 1835
1836 if (phba->sli_rev == LPFC_SLI_REV4) 1836 if (phba->sli_rev == LPFC_SLI_REV4)
1837 sprintf(fwrevision, "%s", vp->rev.opFwName); 1837 snprintf(fwrevision, FW_REV_STR_SIZE, "%s", vp->rev.opFwName);
1838 else if (vp->rev.rBit) { 1838 else if (vp->rev.rBit) {
1839 if (psli->sli_flag & LPFC_SLI_ACTIVE) 1839 if (psli->sli_flag & LPFC_SLI_ACTIVE)
1840 rev = vp->rev.sli2FwRev; 1840 rev = vp->rev.sli2FwRev;
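The switch from sprintf to snprintf bounded by FW_REV_STR_SIZE matters because vp->rev.opFwName comes from the adapter and is not guaranteed to fit the old 16-byte buffer; snprintf truncates and always NUL-terminates. The same defensive idiom, as a standalone sketch:

	#include <stdio.h>

	#define FW_REV_STR_SIZE 32	/* from lpfc_hw.h, added by this patch */

	static void format_fwrev(char *dst, const char *op_fw_name)
	{
		/* Writes at most FW_REV_STR_SIZE bytes including the NUL. */
		snprintf(dst, FW_REV_STR_SIZE, "%s", op_fw_name);
	}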
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 3587a3fe8fc..22e17be04d8 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1019,6 +1019,8 @@ lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
1019 cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wapp_cnt); 1019 cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wapp_cnt);
1020 else if (dent == phba->debug_writeRef) 1020 else if (dent == phba->debug_writeRef)
1021 cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wref_cnt); 1021 cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wref_cnt);
1022 else if (dent == phba->debug_readGuard)
1023 cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rgrd_cnt);
1022 else if (dent == phba->debug_readApp) 1024 else if (dent == phba->debug_readApp)
1023 cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rapp_cnt); 1025 cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rapp_cnt);
1024 else if (dent == phba->debug_readRef) 1026 else if (dent == phba->debug_readRef)
@@ -1057,6 +1059,8 @@ lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
1057 phba->lpfc_injerr_wapp_cnt = (uint32_t)tmp; 1059 phba->lpfc_injerr_wapp_cnt = (uint32_t)tmp;
1058 else if (dent == phba->debug_writeRef) 1060 else if (dent == phba->debug_writeRef)
1059 phba->lpfc_injerr_wref_cnt = (uint32_t)tmp; 1061 phba->lpfc_injerr_wref_cnt = (uint32_t)tmp;
1062 else if (dent == phba->debug_readGuard)
1063 phba->lpfc_injerr_rgrd_cnt = (uint32_t)tmp;
1060 else if (dent == phba->debug_readApp) 1064 else if (dent == phba->debug_readApp)
1061 phba->lpfc_injerr_rapp_cnt = (uint32_t)tmp; 1065 phba->lpfc_injerr_rapp_cnt = (uint32_t)tmp;
1062 else if (dent == phba->debug_readRef) 1066 else if (dent == phba->debug_readRef)
@@ -3978,6 +3982,17 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
3978 goto debug_failed; 3982 goto debug_failed;
3979 } 3983 }
3980 3984
3985 snprintf(name, sizeof(name), "readGuardInjErr");
3986 phba->debug_readGuard =
3987 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
3988 phba->hba_debugfs_root,
3989 phba, &lpfc_debugfs_op_dif_err);
3990 if (!phba->debug_readGuard) {
3991 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3992 "0808 Cannot create debugfs readGuard\n");
3993 goto debug_failed;
3994 }
3995
3981 snprintf(name, sizeof(name), "readAppInjErr"); 3996 snprintf(name, sizeof(name), "readAppInjErr");
3982 phba->debug_readApp = 3997 phba->debug_readApp =
3983 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, 3998 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
@@ -4318,6 +4333,10 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
4318 debugfs_remove(phba->debug_writeRef); /* writeRef */ 4333 debugfs_remove(phba->debug_writeRef); /* writeRef */
4319 phba->debug_writeRef = NULL; 4334 phba->debug_writeRef = NULL;
4320 } 4335 }
4336 if (phba->debug_readGuard) {
4337 debugfs_remove(phba->debug_readGuard); /* readGuard */
4338 phba->debug_readGuard = NULL;
4339 }
4321 if (phba->debug_readApp) { 4340 if (phba->debug_readApp) {
4322 debugfs_remove(phba->debug_readApp); /* readApp */ 4341 debugfs_remove(phba->debug_readApp); /* readApp */
4323 phba->debug_readApp = NULL; 4342 phba->debug_readApp = NULL;
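Once the readGuard node exists, arming a guard-tag error on a read is a plain debugfs write; the driver decrements lpfc_injerr_rgrd_cnt as errors are injected. A userspace sketch, assuming debugfs is mounted at /sys/kernel/debug; the per-HBA directory name in the comment is a placeholder, since the exact lpfc debugfs path depends on the instance:

	#include <fcntl.h>
	#include <unistd.h>

	/* Arm one read guard-tag injection via the new debugfs node. */
	static int arm_read_guard_err(const char *node)
	{
		int fd = open(node, O_WRONLY); /* e.g. .../lpfc/fnN/readGuardInjErr */
		if (fd < 0)
			return -1;
		if (write(fd, "1\n", 2) != 2) {
			close(fd);
			return -1;
		}
		return close(fd);
	}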
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 7afc757338d..8db2fb3b45e 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1526,7 +1526,6 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1526 memcpy(&ndlp->active_rrqs.xri_bitmap, 1526 memcpy(&ndlp->active_rrqs.xri_bitmap,
1527 &rrq.xri_bitmap, 1527 &rrq.xri_bitmap,
1528 sizeof(ndlp->active_rrqs.xri_bitmap)); 1528 sizeof(ndlp->active_rrqs.xri_bitmap));
1529 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1530 /* Since we are swapping the ndlp passed in with the new one 1529 /* Since we are swapping the ndlp passed in with the new one
1531 * and the did has already been swapped, copy over the 1530 * and the did has already been swapped, copy over the
1532 * state and names. 1531 * state and names.
@@ -1536,6 +1535,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1536 memcpy(&new_ndlp->nlp_nodename, &ndlp->nlp_nodename, 1535 memcpy(&new_ndlp->nlp_nodename, &ndlp->nlp_nodename,
1537 sizeof(struct lpfc_name)); 1536 sizeof(struct lpfc_name));
1538 new_ndlp->nlp_state = ndlp->nlp_state; 1537 new_ndlp->nlp_state = ndlp->nlp_state;
1538 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1539 /* Fix up the rport accordingly */ 1539 /* Fix up the rport accordingly */
1540 rport = ndlp->rport; 1540 rport = ndlp->rport;
1541 if (rport) { 1541 if (rport) {
@@ -7172,7 +7172,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
7172 goto out; 7172 goto out;
7173 /* FDISC failed */ 7173 /* FDISC failed */
7174 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 7174 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
7175 "0126 FDISC failed. (%d/%d)\n", 7175 "0126 FDISC failed. (x%x/x%x)\n",
7176 irsp->ulpStatus, irsp->un.ulpWord[4]); 7176 irsp->ulpStatus, irsp->un.ulpWord[4]);
7177 goto fdisc_failed; 7177 goto fdisc_failed;
7178 } 7178 }
@@ -7283,6 +7283,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
7283 int rc; 7283 int rc;
7284 7284
7285 vport->port_state = LPFC_FDISC; 7285 vport->port_state = LPFC_FDISC;
7286 vport->fc_myDID = 0;
7286 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm)); 7287 cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
7287 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did, 7288 elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
7288 ELS_CMD_FDISC); 7289 ELS_CMD_FDISC);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 678a4b11059..343d87ba4df 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -2977,9 +2977,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
2977 "topology\n"); 2977 "topology\n");
2978 /* Get Loop Map information */ 2978 /* Get Loop Map information */
2979 if (bf_get(lpfc_mbx_read_top_il, la)) { 2979 if (bf_get(lpfc_mbx_read_top_il, la)) {
2980 spin_lock_irq(shost->host_lock); 2980 spin_lock(shost->host_lock);
2981 vport->fc_flag |= FC_LBIT; 2981 vport->fc_flag |= FC_LBIT;
2982 spin_unlock_irq(shost->host_lock); 2982 spin_unlock(shost->host_lock);
2983 } 2983 }
2984 2984
2985 vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la); 2985 vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
@@ -3029,9 +3029,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
3029 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 3029 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
3030 } 3030 }
3031 vport->fc_myDID = phba->fc_pref_DID; 3031 vport->fc_myDID = phba->fc_pref_DID;
3032 spin_lock_irq(shost->host_lock); 3032 spin_lock(shost->host_lock);
3033 vport->fc_flag |= FC_LBIT; 3033 vport->fc_flag |= FC_LBIT;
3034 spin_unlock_irq(shost->host_lock); 3034 spin_unlock(shost->host_lock);
3035 } 3035 }
3036 spin_unlock_irq(&phba->hbalock); 3036 spin_unlock_irq(&phba->hbalock);
3037 3037
@@ -5332,6 +5332,10 @@ lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
5332{ 5332{
5333 uint16_t *rpi = param; 5333 uint16_t *rpi = param;
5334 5334
5335 /* check for active node */
5336 if (!NLP_CHK_NODE_ACT(ndlp))
5337 return 0;
5338
5335 return ndlp->nlp_rpi == *rpi; 5339 return ndlp->nlp_rpi == *rpi;
5336} 5340}
5337 5341
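The spin_lock_irq to spin_lock changes above are nested-locking fixes: both sites already run inside spin_lock_irq(&phba->hbalock), so taking host_lock with the _irq variant was redundant on entry and, worse, its spin_unlock_irq re-enabled interrupts while hbalock was still held. The intended shape, as a sketch:

	spin_lock_irq(&phba->hbalock);		/* interrupts now off */
	/* ... */
	spin_lock(shost->host_lock);		/* nested: plain variant */
	vport->fc_flag |= FC_LBIT;
	spin_unlock(shost->host_lock);		/* interrupts stay off */
	/* ... */
	spin_unlock_irq(&phba->hbalock);	/* interrupts back on here */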
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 7245bead375..5f280b5ae3d 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2010 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2011 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * * 7 * *
@@ -70,6 +70,7 @@
70/* vendor ID used in SCSI netlink calls */ 70/* vendor ID used in SCSI netlink calls */
71#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX) 71#define LPFC_NL_VENDOR_ID (SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX)
72 72
73#define FW_REV_STR_SIZE 32
73/* Common Transport structures and definitions */ 74/* Common Transport structures and definitions */
74 75
75union CtRevisionId { 76union CtRevisionId {
@@ -2567,6 +2568,8 @@ typedef struct {
2567 2568
2568#define DMP_MEM_REG 0x1 2569#define DMP_MEM_REG 0x1
2569#define DMP_NV_PARAMS 0x2 2570#define DMP_NV_PARAMS 0x2
2571#define DMP_LMSD 0x3 /* Link Module Serial Data */
2572#define DMP_WELL_KNOWN 0x4
2570 2573
2571#define DMP_REGION_VPD 0xe 2574#define DMP_REGION_VPD 0xe
2572#define DMP_VPD_SIZE 0x400 /* maximum amount of VPD */ 2575#define DMP_VPD_SIZE 0x400 /* maximum amount of VPD */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index e5bfa7f334e..9e2b9b227e1 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -321,6 +321,10 @@ struct lpfc_cqe {
321#define CQE_STATUS_CMD_REJECT 0xb 321#define CQE_STATUS_CMD_REJECT 0xb
322#define CQE_STATUS_FCP_TGT_LENCHECK 0xc 322#define CQE_STATUS_FCP_TGT_LENCHECK 0xc
323#define CQE_STATUS_NEED_BUFF_ENTRY 0xf 323#define CQE_STATUS_NEED_BUFF_ENTRY 0xf
324#define CQE_STATUS_DI_ERROR 0x16
325
326/* Used when mapping CQE status to IOCB */
327#define LPFC_IOCB_STATUS_MASK 0xf
324 328
325/* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). */ 329/* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). */
326#define CQE_HW_STATUS_NO_ERR 0x0 330#define CQE_HW_STATUS_NO_ERR 0x0
@@ -348,6 +352,21 @@ struct lpfc_wcqe_complete {
348#define lpfc_wcqe_c_hw_status_WORD word0 352#define lpfc_wcqe_c_hw_status_WORD word0
349 uint32_t total_data_placed; 353 uint32_t total_data_placed;
350 uint32_t parameter; 354 uint32_t parameter;
355#define lpfc_wcqe_c_bg_edir_SHIFT 5
356#define lpfc_wcqe_c_bg_edir_MASK 0x00000001
357#define lpfc_wcqe_c_bg_edir_WORD parameter
358#define lpfc_wcqe_c_bg_tdpv_SHIFT 3
359#define lpfc_wcqe_c_bg_tdpv_MASK 0x00000001
360#define lpfc_wcqe_c_bg_tdpv_WORD parameter
361#define lpfc_wcqe_c_bg_re_SHIFT 2
362#define lpfc_wcqe_c_bg_re_MASK 0x00000001
363#define lpfc_wcqe_c_bg_re_WORD parameter
364#define lpfc_wcqe_c_bg_ae_SHIFT 1
365#define lpfc_wcqe_c_bg_ae_MASK 0x00000001
366#define lpfc_wcqe_c_bg_ae_WORD parameter
367#define lpfc_wcqe_c_bg_ge_SHIFT 0
368#define lpfc_wcqe_c_bg_ge_MASK 0x00000001
369#define lpfc_wcqe_c_bg_ge_WORD parameter
351 uint32_t word3; 370 uint32_t word3;
352#define lpfc_wcqe_c_valid_SHIFT lpfc_cqe_valid_SHIFT 371#define lpfc_wcqe_c_valid_SHIFT lpfc_cqe_valid_SHIFT
353#define lpfc_wcqe_c_valid_MASK lpfc_cqe_valid_MASK 372#define lpfc_wcqe_c_valid_MASK lpfc_cqe_valid_MASK
@@ -359,8 +378,8 @@ struct lpfc_wcqe_complete {
359#define lpfc_wcqe_c_pv_MASK 0x00000001 378#define lpfc_wcqe_c_pv_MASK 0x00000001
360#define lpfc_wcqe_c_pv_WORD word3 379#define lpfc_wcqe_c_pv_WORD word3
361#define lpfc_wcqe_c_priority_SHIFT 24 380#define lpfc_wcqe_c_priority_SHIFT 24
362#define lpfc_wcqe_c_priority_MASK 0x00000007 381#define lpfc_wcqe_c_priority_MASK 0x00000007
363#define lpfc_wcqe_c_priority_WORD word3 382#define lpfc_wcqe_c_priority_WORD word3
364#define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT 383#define lpfc_wcqe_c_code_SHIFT lpfc_cqe_code_SHIFT
365#define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK 384#define lpfc_wcqe_c_code_MASK lpfc_cqe_code_MASK
366#define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD 385#define lpfc_wcqe_c_code_WORD lpfc_cqe_code_WORD
@@ -715,12 +734,20 @@ struct lpfc_register {
715#define lpfc_eqcq_doorbell_eqci_SHIFT 9 734#define lpfc_eqcq_doorbell_eqci_SHIFT 9
716#define lpfc_eqcq_doorbell_eqci_MASK 0x0001 735#define lpfc_eqcq_doorbell_eqci_MASK 0x0001
717#define lpfc_eqcq_doorbell_eqci_WORD word0 736#define lpfc_eqcq_doorbell_eqci_WORD word0
718#define lpfc_eqcq_doorbell_cqid_SHIFT 0 737#define lpfc_eqcq_doorbell_cqid_lo_SHIFT 0
719#define lpfc_eqcq_doorbell_cqid_MASK 0x03FF 738#define lpfc_eqcq_doorbell_cqid_lo_MASK 0x03FF
720#define lpfc_eqcq_doorbell_cqid_WORD word0 739#define lpfc_eqcq_doorbell_cqid_lo_WORD word0
721#define lpfc_eqcq_doorbell_eqid_SHIFT 0 740#define lpfc_eqcq_doorbell_cqid_hi_SHIFT 11
722#define lpfc_eqcq_doorbell_eqid_MASK 0x01FF 741#define lpfc_eqcq_doorbell_cqid_hi_MASK 0x001F
723#define lpfc_eqcq_doorbell_eqid_WORD word0 742#define lpfc_eqcq_doorbell_cqid_hi_WORD word0
743#define lpfc_eqcq_doorbell_eqid_lo_SHIFT 0
744#define lpfc_eqcq_doorbell_eqid_lo_MASK 0x01FF
745#define lpfc_eqcq_doorbell_eqid_lo_WORD word0
746#define lpfc_eqcq_doorbell_eqid_hi_SHIFT 11
747#define lpfc_eqcq_doorbell_eqid_hi_MASK 0x001F
748#define lpfc_eqcq_doorbell_eqid_hi_WORD word0
749#define LPFC_CQID_HI_FIELD_SHIFT 10
750#define LPFC_EQID_HI_FIELD_SHIFT 9
724 751
725#define LPFC_BMBX 0x0160 752#define LPFC_BMBX 0x0160
726#define lpfc_bmbx_addr_SHIFT 2 753#define lpfc_bmbx_addr_SHIFT 2
@@ -3313,7 +3340,11 @@ struct xmit_bls_rsp64_wqe {
3313 uint32_t rsrvd4; 3340 uint32_t rsrvd4;
3314 struct wqe_did wqe_dest; 3341 struct wqe_did wqe_dest;
3315 struct wqe_common wqe_com; /* words 6-11 */ 3342 struct wqe_common wqe_com; /* words 6-11 */
3316 uint32_t rsvd_12_15[4]; 3343 uint32_t word12;
3344#define xmit_bls_rsp64_temprpi_SHIFT 0
3345#define xmit_bls_rsp64_temprpi_MASK 0x0000ffff
3346#define xmit_bls_rsp64_temprpi_WORD word12
3347 uint32_t rsvd_13_15[3];
3317}; 3348};
3318 3349
3319struct wqe_rctl_dfctl { 3350struct wqe_rctl_dfctl {
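The _lo/_hi split of the doorbell ID fields exists because the hardware grew queue IDs wider than the original 10-bit (CQ) and 9-bit (EQ) fields; bits above LPFC_CQID_HI_FIELD_SHIFT / LPFC_EQID_HI_FIELD_SHIFT land in the new high field at bit 11. A sketch of composing a CQ doorbell with the new macros, assuming lpfc's bf_set masks the value to the field width as it does elsewhere in the driver:

	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	/* low 10 bits of the queue id */
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, cqid);
	/* remaining high bits, placed starting at bit 11 */
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (cqid >> LPFC_CQID_HI_FIELD_SHIFT));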
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index dfea2dada02..b38f99f3be3 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -32,6 +32,7 @@
32#include <linux/aer.h> 32#include <linux/aer.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/firmware.h> 34#include <linux/firmware.h>
35#include <linux/miscdevice.h>
35 36
36#include <scsi/scsi.h> 37#include <scsi/scsi.h>
37#include <scsi/scsi_device.h> 38#include <scsi/scsi_device.h>
@@ -1474,8 +1475,12 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1474 phba->sli4_hba.u.if_type2.STATUSregaddr, 1475 phba->sli4_hba.u.if_type2.STATUSregaddr,
1475 &portstat_reg.word0); 1476 &portstat_reg.word0);
1476 /* consider PCI bus read error as pci_channel_offline */ 1477 /* consider PCI bus read error as pci_channel_offline */
1477 if (pci_rd_rc1 == -EIO) 1478 if (pci_rd_rc1 == -EIO) {
1479 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1480 "3151 PCI bus read access failure: x%x\n",
1481 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
1478 return; 1482 return;
1483 }
1479 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr); 1484 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
1480 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr); 1485 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
1481 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) { 1486 if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
@@ -1525,6 +1530,9 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1525 } 1530 }
1526 /* fall through for not able to recover */ 1531 /* fall through for not able to recover */
1527 } 1532 }
1533 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1534 "3152 Unrecoverable error, bring the port "
1535 "offline\n");
1528 lpfc_sli4_offline_eratt(phba); 1536 lpfc_sli4_offline_eratt(phba);
1529 break; 1537 break;
1530 case LPFC_SLI_INTF_IF_TYPE_1: 1538 case LPFC_SLI_INTF_IF_TYPE_1:
@@ -2333,13 +2341,20 @@ lpfc_cleanup(struct lpfc_vport *vport)
2333 continue; 2341 continue;
2334 } 2342 }
2335 2343
2344 /* take care of nodes in unused state before the state
2345 * machine takes action.
2346 */
2347 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
2348 lpfc_nlp_put(ndlp);
2349 continue;
2350 }
2351
2336 if (ndlp->nlp_type & NLP_FABRIC) 2352 if (ndlp->nlp_type & NLP_FABRIC)
2337 lpfc_disc_state_machine(vport, ndlp, NULL, 2353 lpfc_disc_state_machine(vport, ndlp, NULL,
2338 NLP_EVT_DEVICE_RECOVERY); 2354 NLP_EVT_DEVICE_RECOVERY);
2339 2355
2340 lpfc_disc_state_machine(vport, ndlp, NULL, 2356 lpfc_disc_state_machine(vport, ndlp, NULL,
2341 NLP_EVT_DEVICE_RM); 2357 NLP_EVT_DEVICE_RM);
2342
2343 } 2358 }
2344 2359
2345 /* At this point, ALL ndlp's should be gone 2360 /* At this point, ALL ndlp's should be gone
@@ -2513,6 +2528,42 @@ lpfc_block_mgmt_io(struct lpfc_hba * phba)
2513} 2528}
2514 2529
2515/** 2530/**
2531 * lpfc_sli4_node_prep - Assign RPIs for active nodes.
2532 * @phba: pointer to lpfc hba data structure.
2533 *
2534 * Allocate RPIs for all active remote nodes. This is needed whenever
2535 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
2536 * is to fix up the temporary RPI assignments.
2537 **/
2538void
2539lpfc_sli4_node_prep(struct lpfc_hba *phba)
2540{
2541 struct lpfc_nodelist *ndlp, *next_ndlp;
2542 struct lpfc_vport **vports;
2543 int i;
2544
2545 if (phba->sli_rev != LPFC_SLI_REV4)
2546 return;
2547
2548 vports = lpfc_create_vport_work_array(phba);
2549 if (vports != NULL) {
2550 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2551 if (vports[i]->load_flag & FC_UNLOADING)
2552 continue;
2553
2554 list_for_each_entry_safe(ndlp, next_ndlp,
2555 &vports[i]->fc_nodes,
2556 nlp_listp) {
2557 if (NLP_CHK_NODE_ACT(ndlp))
2558 ndlp->nlp_rpi =
2559 lpfc_sli4_alloc_rpi(phba);
2560 }
2561 }
2562 }
2563 lpfc_destroy_vport_work_array(phba, vports);
2564}
2565
2566/**
2516 * lpfc_online - Initialize and bring a HBA online 2567 * lpfc_online - Initialize and bring a HBA online
2517 * @phba: pointer to lpfc hba data structure. 2568 * @phba: pointer to lpfc hba data structure.
2518 * 2569 *
@@ -2653,6 +2704,15 @@ lpfc_offline_prep(struct lpfc_hba * phba)
2653 } 2704 }
2654 spin_lock_irq(shost->host_lock); 2705 spin_lock_irq(shost->host_lock);
2655 ndlp->nlp_flag &= ~NLP_NPR_ADISC; 2706 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
2707
2708 /*
2709 * Whenever an SLI4 port goes offline, free the
2710 * RPI. A new RPI is assigned when the adapter
2711 * port comes back online.
2712 */
2713 if (phba->sli_rev == LPFC_SLI_REV4)
2714 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
2715
2656 spin_unlock_irq(shost->host_lock); 2716 spin_unlock_irq(shost->host_lock);
2657 lpfc_unreg_rpi(vports[i], ndlp); 2717 lpfc_unreg_rpi(vports[i], ndlp);
2658 } 2718 }
@@ -4327,6 +4387,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4327 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; 4387 uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
4328 struct lpfc_mqe *mqe; 4388 struct lpfc_mqe *mqe;
4329 int longs, sli_family; 4389 int longs, sli_family;
4390 int sges_per_segment;
4330 4391
4331 /* Before proceed, wait for POST done and device ready */ 4392 /* Before proceed, wait for POST done and device ready */
4332 rc = lpfc_sli4_post_status_check(phba); 4393 rc = lpfc_sli4_post_status_check(phba);
@@ -4390,6 +4451,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4390 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; 4451 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4391 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; 4452 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4392 4453
4454 /* With BlockGuard we can have multiple SGEs per Data Segment */
4455 sges_per_segment = 1;
4456 if (phba->cfg_enable_bg)
4457 sges_per_segment = 2;
4458
4393 /* 4459 /*
4394 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size 4460 * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
4395 * used to create the sg_dma_buf_pool must be dynamically calculated. 4461 * used to create the sg_dma_buf_pool must be dynamically calculated.
4395 * used to create the sg_dma_buf_pool must be dynamically calculated. 4461 * used to create the sg_dma_buf_pool must be dynamically calculated.
@@ -4398,7 +4464,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4398 * sgl sizes of must be a power of 2. 4464 * sgl sizes of must be a power of 2.
4399 */ 4465 */
4400 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + 4466 buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
4401 ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge))); 4467 (((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) *
4468 sizeof(struct sli4_sge)));
4402 4469
4403 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); 4470 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
4404 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE; 4471 max_buf_size = LPFC_SLI4_MAX_BUF_SIZE;
@@ -4415,6 +4482,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4415 default: 4482 default:
4416 break; 4483 break;
4417 } 4484 }
4485
4418 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE; 4486 for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE;
4419 dma_buf_size < max_buf_size && buf_size > dma_buf_size; 4487 dma_buf_size < max_buf_size && buf_size > dma_buf_size;
4420 dma_buf_size = dma_buf_size << 1) 4488 dma_buf_size = dma_buf_size << 1)
@@ -7223,19 +7291,17 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
7223 rc = -ENODEV; 7291 rc = -ENODEV;
7224 goto out; 7292 goto out;
7225 } 7293 }
7226 if (bf_get(lpfc_sliport_status_rdy, &reg_data)) 7294 if (bf_get(lpfc_sliport_status_rn, &reg_data))
7227 break;
7228 if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
7229 reset_again++; 7295 reset_again++;
7296 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
7230 break; 7297 break;
7231 }
7232 } 7298 }
7233 7299
7234 /* 7300 /*
7235 * If the port responds to the init request with 7301 * If the port responds to the init request with
7236 * reset needed, delay for a bit and restart the loop. 7302 * reset needed, delay for a bit and restart the loop.
7237 */ 7303 */
7238 if (reset_again) { 7304 if (reset_again && (rdy_chk < 1000)) {
7239 msleep(10); 7305 msleep(10);
7240 reset_again = 0; 7306 reset_again = 0;
7241 continue; 7307 continue;
@@ -8112,6 +8178,9 @@ lpfc_unset_hba(struct lpfc_hba *phba)
8112 vport->load_flag |= FC_UNLOADING; 8178 vport->load_flag |= FC_UNLOADING;
8113 spin_unlock_irq(shost->host_lock); 8179 spin_unlock_irq(shost->host_lock);
8114 8180
8181 kfree(phba->vpi_bmask);
8182 kfree(phba->vpi_ids);
8183
8115 lpfc_stop_hba_timers(phba); 8184 lpfc_stop_hba_timers(phba);
8116 8185
8117 phba->pport->work_port_events = 0; 8186 phba->pport->work_port_events = 0;
@@ -8644,6 +8713,9 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
8644 /* Final cleanup of txcmplq and reset the HBA */ 8713 /* Final cleanup of txcmplq and reset the HBA */
8645 lpfc_sli_brdrestart(phba); 8714 lpfc_sli_brdrestart(phba);
8646 8715
8716 kfree(phba->vpi_bmask);
8717 kfree(phba->vpi_ids);
8718
8647 lpfc_stop_hba_timers(phba); 8719 lpfc_stop_hba_timers(phba);
8648 spin_lock_irq(&phba->hbalock); 8720 spin_lock_irq(&phba->hbalock);
8649 list_del_init(&vport->listentry); 8721 list_del_init(&vport->listentry);
@@ -9058,7 +9130,7 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
9058int 9130int
9059lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw) 9131lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
9060{ 9132{
9061 char fwrev[32]; 9133 char fwrev[FW_REV_STR_SIZE];
9062 struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data; 9134 struct lpfc_grp_hdr *image = (struct lpfc_grp_hdr *)fw->data;
9063 struct list_head dma_buffer_list; 9135 struct list_head dma_buffer_list;
9064 int i, rc = 0; 9136 int i, rc = 0;
@@ -10012,6 +10084,36 @@ lpfc_io_resume(struct pci_dev *pdev)
10012 return; 10084 return;
10013} 10085}
10014 10086
10087/**
10088 * lpfc_mgmt_open - method called when 'lpfcmgmt' is opened from userspace
10089 * @inode: pointer to the inode representing the lpfcmgmt device
10090 * @filep: pointer to the file representing the open lpfcmgmt device
10091 *
10092 * This routine takes a reference on the lpfc module whenever the
10093 * character device is opened
10094 **/
10095static int
10096lpfc_mgmt_open(struct inode *inode, struct file *filep)
10097{
10098 try_module_get(THIS_MODULE);
10099 return 0;
10100}
10101
10102/**
10103 * lpfc_mgmt_release - method called when 'lpfcmgmt' is closed in userspace
10104 * @inode: pointer to the inode representing the lpfcmgmt device
10105 * @filep: pointer to the file representing the open lpfcmgmt device
10106 *
10107 * This routine drops the lpfc module reference when the
10108 * character device is closed
10109 **/
10110static int
10111lpfc_mgmt_release(struct inode *inode, struct file *filep)
10112{
10113 module_put(THIS_MODULE);
10114 return 0;
10115}
10116
10015static struct pci_device_id lpfc_id_table[] = { 10117static struct pci_device_id lpfc_id_table[] = {
10016 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER, 10118 {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
10017 PCI_ANY_ID, PCI_ANY_ID, }, 10119 PCI_ANY_ID, PCI_ANY_ID, },
@@ -10124,6 +10226,17 @@ static struct pci_driver lpfc_driver = {
10124 .err_handler = &lpfc_err_handler, 10226 .err_handler = &lpfc_err_handler,
10125}; 10227};
10126 10228
10229static const struct file_operations lpfc_mgmt_fop = {
10230 .open = lpfc_mgmt_open,
10231 .release = lpfc_mgmt_release,
10232};
10233
10234static struct miscdevice lpfc_mgmt_dev = {
10235 .minor = MISC_DYNAMIC_MINOR,
10236 .name = "lpfcmgmt",
10237 .fops = &lpfc_mgmt_fop,
10238};
10239
10127/** 10240/**
10128 * lpfc_init - lpfc module initialization routine 10241 * lpfc_init - lpfc module initialization routine
10129 * 10242 *
@@ -10144,6 +10257,11 @@ lpfc_init(void)
10144 printk(LPFC_MODULE_DESC "\n"); 10257 printk(LPFC_MODULE_DESC "\n");
10145 printk(LPFC_COPYRIGHT "\n"); 10258 printk(LPFC_COPYRIGHT "\n");
10146 10259
10260 error = misc_register(&lpfc_mgmt_dev);
10261 if (error)
10262 printk(KERN_ERR "Could not register lpfcmgmt device, "
10263 "misc_register returned with status %d", error);
10264
10147 if (lpfc_enable_npiv) { 10265 if (lpfc_enable_npiv) {
10148 lpfc_transport_functions.vport_create = lpfc_vport_create; 10266 lpfc_transport_functions.vport_create = lpfc_vport_create;
10149 lpfc_transport_functions.vport_delete = lpfc_vport_delete; 10267 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
@@ -10180,6 +10298,7 @@ lpfc_init(void)
10180static void __exit 10298static void __exit
10181lpfc_exit(void) 10299lpfc_exit(void)
10182{ 10300{
10301 misc_deregister(&lpfc_mgmt_dev);
10183 pci_unregister_driver(&lpfc_driver); 10302 pci_unregister_driver(&lpfc_driver);
10184 fc_release_transport(lpfc_transport_template); 10303 fc_release_transport(lpfc_transport_template);
10185 if (lpfc_enable_npiv) 10304 if (lpfc_enable_npiv)
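The lpfcmgmt node registered above is a dynamic-minor misc device whose only contract in this patch is open/release, used to pin the module while a management tool holds it. From userspace that looks like (sketch):

	#include <fcntl.h>
	#include <unistd.h>

	/* While this fd is open, the lpfc module cannot be unloaded. */
	static int open_lpfcmgmt(void)
	{
		return open("/dev/lpfcmgmt", O_RDWR);
	}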
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index e8bb0055994..7b6b2aa5795 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -48,6 +48,10 @@ static int
48lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 48lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
49 struct lpfc_name *nn, struct lpfc_name *pn) 49 struct lpfc_name *nn, struct lpfc_name *pn)
50{ 50{
51 /* First, we MUST have a RPI registered */
52 if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED))
53 return 0;
54
51 /* Compare the ADISC rsp WWNN / WWPN matches our internal node 55 /* Compare the ADISC rsp WWNN / WWPN matches our internal node
52 * table entry for that node. 56 * table entry for that node.
53 */ 57 */
@@ -385,6 +389,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
385 if (!mbox) 389 if (!mbox)
386 goto out; 390 goto out;
387 391
392 /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */
393 if (phba->sli_rev == LPFC_SLI_REV4)
394 lpfc_unreg_rpi(vport, ndlp);
395
388 rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID, 396 rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
389 (uint8_t *) sp, mbox, ndlp->nlp_rpi); 397 (uint8_t *) sp, mbox, ndlp->nlp_rpi);
390 if (rc) { 398 if (rc) {
@@ -445,11 +453,43 @@ out:
445 return 0; 453 return 0;
446} 454}
447 455
456/**
457 * lpfc_mbx_cmpl_resume_rpi - Resume RPI completion routine
458 * @phba: pointer to lpfc hba data structure.
459 * @mboxq: pointer to mailbox object
460 *
461 * This routine is invoked to issue a completion to a rcv'ed
462 * ADISC or PDISC after the paused RPI has been resumed.
463 **/
464static void
465lpfc_mbx_cmpl_resume_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
466{
467 struct lpfc_vport *vport;
468 struct lpfc_iocbq *elsiocb;
469 struct lpfc_nodelist *ndlp;
470 uint32_t cmd;
471
472 elsiocb = (struct lpfc_iocbq *)mboxq->context1;
473 ndlp = (struct lpfc_nodelist *) mboxq->context2;
474 vport = mboxq->vport;
475 cmd = elsiocb->drvrTimeout;
476
477 if (cmd == ELS_CMD_ADISC) {
478 lpfc_els_rsp_adisc_acc(vport, elsiocb, ndlp);
479 } else {
480 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, elsiocb,
481 ndlp, NULL);
482 }
483 kfree(elsiocb);
484 mempool_free(mboxq, phba->mbox_mem_pool);
485}
486
448static int 487static int
449lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 488lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
450 struct lpfc_iocbq *cmdiocb) 489 struct lpfc_iocbq *cmdiocb)
451{ 490{
452 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 491 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
492 struct lpfc_iocbq *elsiocb;
453 struct lpfc_dmabuf *pcmd; 493 struct lpfc_dmabuf *pcmd;
454 struct serv_parm *sp; 494 struct serv_parm *sp;
455 struct lpfc_name *pnn, *ppn; 495 struct lpfc_name *pnn, *ppn;
@@ -475,12 +515,43 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
475 515
476 icmd = &cmdiocb->iocb; 516 icmd = &cmdiocb->iocb;
477 if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) { 517 if (icmd->ulpStatus == 0 && lpfc_check_adisc(vport, ndlp, pnn, ppn)) {
518
519 /*
520 * As soon as we send ACC, the remote NPort can
521 * start sending us data. Thus, for SLI4 we must
522 * resume the RPI before the ACC goes out.
523 */
524 if (vport->phba->sli_rev == LPFC_SLI_REV4) {
525 elsiocb = kmalloc(sizeof(struct lpfc_iocbq),
526 GFP_KERNEL);
527 if (elsiocb) {
528
529 /* Save info from cmd IOCB used in rsp */
530 memcpy((uint8_t *)elsiocb, (uint8_t *)cmdiocb,
531 sizeof(struct lpfc_iocbq));
532
533 /* Save the ELS cmd */
534 elsiocb->drvrTimeout = cmd;
535
536 lpfc_sli4_resume_rpi(ndlp,
537 lpfc_mbx_cmpl_resume_rpi, elsiocb);
538 goto out;
539 }
540 }
541
478 if (cmd == ELS_CMD_ADISC) { 542 if (cmd == ELS_CMD_ADISC) {
479 lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp); 543 lpfc_els_rsp_adisc_acc(vport, cmdiocb, ndlp);
480 } else { 544 } else {
481 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, 545 lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
482 NULL); 546 ndlp, NULL);
483 } 547 }
548out:
549 /* If we are authenticated, move to the proper state */
550 if (ndlp->nlp_type & NLP_FCP_TARGET)
551 lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
552 else
553 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
554
484 return 1; 555 return 1;
485 } 556 }
486 /* Reject this request because invalid parameters */ 557 /* Reject this request because invalid parameters */
@@ -1229,7 +1300,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
1229 } 1300 }
1230 1301
1231 if (phba->sli_rev == LPFC_SLI_REV4) { 1302 if (phba->sli_rev == LPFC_SLI_REV4) {
1232 rc = lpfc_sli4_resume_rpi(ndlp); 1303 rc = lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
1233 if (rc) { 1304 if (rc) {
1234 /* Stay in state and retry. */ 1305 /* Stay in state and retry. */
1235 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; 1306 ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
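The padisc rework above hinges on ordering: the remote NPort may transmit as soon as it sees the ACC, so on SLI4 the paused RPI must be resumed first and the ACC sent from the mailbox completion. Compressed to its control flow (error paths elided; names are from the patch):

	/* 1. clone the received IOCB so it survives until the callback,
	 * 2. stash the ELS command in drvrTimeout for the callback,
	 * 3. resume the RPI; lpfc_mbx_cmpl_resume_rpi sends the ACC.
	 */
	elsiocb = kmalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
	if (elsiocb) {
		memcpy(elsiocb, cmdiocb, sizeof(struct lpfc_iocbq));
		elsiocb->drvrTimeout = cmd;
		lpfc_sli4_resume_rpi(ndlp, lpfc_mbx_cmpl_resume_rpi, elsiocb);
	}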
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c60f5d0b386..efc055b6bac 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
1/******************************************************************* 1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2011 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2012 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -1280,31 +1280,45 @@ lpfc_cmd_blksize(struct scsi_cmnd *sc)
1280} 1280}
1281 1281
1282#ifdef CONFIG_SCSI_LPFC_DEBUG_FS 1282#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1283/* 1283
1284 * Given a scsi cmnd, determine the BlockGuard tags to be used with it 1284#define BG_ERR_INIT 1
1285#define BG_ERR_TGT 2
1286#define BG_ERR_SWAP 3
1287#define BG_ERR_CHECK 4
1288
1289/**
1290 * lpfc_bg_err_inject - Determine if we should inject an error
1291 * @phba: The Hba for which this call is being executed.
1285 * @sc: The SCSI command to examine 1292 * @sc: The SCSI command to examine
1286 * @reftag: (out) BlockGuard reference tag for transmitted data 1293 * @reftag: (out) BlockGuard reference tag for transmitted data
1287 * @apptag: (out) BlockGuard application tag for transmitted data 1294 * @apptag: (out) BlockGuard application tag for transmitted data
1288 * @new_guard (in) Value to replace CRC with if needed 1295 * @new_guard (in) Value to replace CRC with if needed
1289 * 1296 *
1290 * Returns (1) if error injection was performed, (0) otherwise 1297 * Returns (1) if error injection is detected by Initiator
1291 */ 1298 * Returns (2) if error injection is detected by Target
1299 * Returns (3) if swapping CSUM->CRC is required for error injection
1300 * Returns (4) if disabling Guard/Ref/App checking is required for error injection
1301 **/
1292static int 1302static int
1293lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, 1303lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1294 uint32_t *reftag, uint16_t *apptag, uint32_t new_guard) 1304 uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
1295{ 1305{
1296 struct scatterlist *sgpe; /* s/g prot entry */ 1306 struct scatterlist *sgpe; /* s/g prot entry */
1297 struct scatterlist *sgde; /* s/g data entry */ 1307 struct scatterlist *sgde; /* s/g data entry */
1298 struct scsi_dif_tuple *src; 1308 struct scsi_dif_tuple *src = NULL;
1299 uint32_t op = scsi_get_prot_op(sc); 1309 uint32_t op = scsi_get_prot_op(sc);
1300 uint32_t blksize; 1310 uint32_t blksize;
1301 uint32_t numblks; 1311 uint32_t numblks;
1302 sector_t lba; 1312 sector_t lba;
1303 int rc = 0; 1313 int rc = 0;
1314 int blockoff = 0;
1304 1315
1305 if (op == SCSI_PROT_NORMAL) 1316 if (op == SCSI_PROT_NORMAL)
1306 return 0; 1317 return 0;
1307 1318
1319 sgpe = scsi_prot_sglist(sc);
1320 sgde = scsi_sglist(sc);
1321
1308 lba = scsi_get_lba(sc); 1322 lba = scsi_get_lba(sc);
1309 if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) { 1323 if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
1310 blksize = lpfc_cmd_blksize(sc); 1324 blksize = lpfc_cmd_blksize(sc);
@@ -1314,142 +1328,296 @@ lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1314 if ((phba->lpfc_injerr_lba < lba) || 1328 if ((phba->lpfc_injerr_lba < lba) ||
1315 (phba->lpfc_injerr_lba >= (lba + numblks))) 1329 (phba->lpfc_injerr_lba >= (lba + numblks)))
1316 return 0; 1330 return 0;
1331 if (sgpe) {
1332 blockoff = phba->lpfc_injerr_lba - lba;
1333 numblks = sg_dma_len(sgpe) /
1334 sizeof(struct scsi_dif_tuple);
1335 if (numblks < blockoff)
1336 blockoff = numblks;
1337 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1338 src += blockoff;
1339 }
1317 } 1340 }
1318 1341
1319 sgpe = scsi_prot_sglist(sc);
1320 sgde = scsi_sglist(sc);
1321
1322 /* Should we change the Reference Tag */ 1342 /* Should we change the Reference Tag */
1323 if (reftag) { 1343 if (reftag) {
1324 /* 1344 if (phba->lpfc_injerr_wref_cnt) {
1325 * If we are SCSI_PROT_WRITE_STRIP, the protection data is 1345 switch (op) {
1326 * being stripped from the wire, thus it doesn't matter. 1346 case SCSI_PROT_WRITE_PASS:
1327 */ 1347 if (blockoff && src) {
1328 if ((op == SCSI_PROT_WRITE_PASS) || 1348 /* Insert error in middle of the IO */
1329 (op == SCSI_PROT_WRITE_INSERT)) { 1349
1330 if (phba->lpfc_injerr_wref_cnt) { 1350 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1351 "9076 BLKGRD: Injecting reftag error: "
1352 "write lba x%lx + x%x oldrefTag x%x\n",
1353 (unsigned long)lba, blockoff,
1354 src->ref_tag);
1355
1356 /*
1357 * NOTE, this will change ref tag in
1358 * the memory location forever!
1359 */
1360 src->ref_tag = 0xDEADBEEF;
1361 phba->lpfc_injerr_wref_cnt--;
1362 phba->lpfc_injerr_lba =
1363 LPFC_INJERR_LBA_OFF;
1364 rc = BG_ERR_CHECK;
1365 break;
1366 }
1367 /* Drop thru */
1368 case SCSI_PROT_WRITE_STRIP:
1369 /*
1370 * For WRITE_STRIP and WRITE_PASS,
1371 * force the error on data
1372 * being copied from SLI-Host to SLI-Port.
1373 */
1374 *reftag = 0xDEADBEEF;
1375 phba->lpfc_injerr_wref_cnt--;
1376 phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
1377 rc = BG_ERR_INIT;
1331 1378
1379 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1380 "9077 BLKGRD: Injecting reftag error: "
1381 "write lba x%lx\n", (unsigned long)lba);
1382 break;
1383 case SCSI_PROT_WRITE_INSERT:
1384 /*
1385 * For WRITE_INSERT, force the
1386 * error to be sent on the wire. It should be
1387 * detected by the Target.
1388 */
1332 /* DEADBEEF will be the reftag on the wire */ 1389 /* DEADBEEF will be the reftag on the wire */
1333 *reftag = 0xDEADBEEF; 1390 *reftag = 0xDEADBEEF;
1334 phba->lpfc_injerr_wref_cnt--; 1391 phba->lpfc_injerr_wref_cnt--;
1335 phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; 1392 phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
1336 rc = 1; 1393 rc = BG_ERR_TGT;
1337 1394
1338 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1395 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1339 "9081 BLKGRD: Injecting reftag error: " 1396 "9078 BLKGRD: Injecting reftag error: "
1340 "write lba x%lx\n", (unsigned long)lba); 1397 "write lba x%lx\n", (unsigned long)lba);
1398 break;
1341 } 1399 }
1342 } else { 1400 }
1343 if (phba->lpfc_injerr_rref_cnt) { 1401 if (phba->lpfc_injerr_rref_cnt) {
1402 switch (op) {
1403 case SCSI_PROT_READ_INSERT:
1404 /*
1405 * For READ_INSERT, it doesn't make sense
1406 * to change the reftag.
1407 */
1408 break;
1409 case SCSI_PROT_READ_STRIP:
1410 case SCSI_PROT_READ_PASS:
1411 /*
1412 * For READ_STRIP and READ_PASS, force the
1413 * error on data being read off the wire. It
1414 * should force an IO error to the driver.
1415 */
1344 *reftag = 0xDEADBEEF; 1416 *reftag = 0xDEADBEEF;
1345 phba->lpfc_injerr_rref_cnt--; 1417 phba->lpfc_injerr_rref_cnt--;
1346 phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; 1418 phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
1347 rc = 1; 1419 rc = BG_ERR_INIT;
1348 1420
1349 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 1421 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1350 "9076 BLKGRD: Injecting reftag error: " 1422 "9079 BLKGRD: Injecting reftag error: "
1351 "read lba x%lx\n", (unsigned long)lba); 1423 "read lba x%lx\n", (unsigned long)lba);
1424 break;
1352 } 1425 }
1353 } 1426 }
1354 } 1427 }
1355 1428
1356 /* Should we change the Application Tag */ 1429 /* Should we change the Application Tag */
1357 if (apptag) { 1430 if (apptag) {
1358 /* 1431 if (phba->lpfc_injerr_wapp_cnt) {
1359 * If we are SCSI_PROT_WRITE_STRIP, the protection data is 1432 switch (op) {
1360 * being stripped from the wire, thus it doesn't matter. 1433 case SCSI_PROT_WRITE_PASS:
1361 */ 1434 if (blockoff && src) {
1362 if ((op == SCSI_PROT_WRITE_PASS) || 1435 /* Insert error in middle of the IO */
1363 (op == SCSI_PROT_WRITE_INSERT)) { 1436
1364 if (phba->lpfc_injerr_wapp_cnt) { 1437 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1438 "9080 BLKGRD: Injecting apptag error: "
1439 "write lba x%lx + x%x oldappTag x%x\n",
1440 (unsigned long)lba, blockoff,
1441 src->app_tag);
1365 1442
1443 /*
1444 * NOTE, this will change app tag in
1445 * the memory location forever!
1446 */
1447 src->app_tag = 0xDEAD;
1448 phba->lpfc_injerr_wapp_cnt--;
1449 phba->lpfc_injerr_lba =
1450 LPFC_INJERR_LBA_OFF;
1451 rc = BG_ERR_CHECK;
+			break;
+		}
+		/* Drop thru */
+	case SCSI_PROT_WRITE_STRIP:
+		/*
+		 * For WRITE_STRIP and WRITE_PASS,
+		 * force the error on data
+		 * being copied from SLI-Host to SLI-Port.
+		 */
+		*apptag = 0xDEAD;
+		phba->lpfc_injerr_wapp_cnt--;
+		phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+		rc = BG_ERR_INIT;
+
+		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+			"0812 BLKGRD: Injecting apptag error: "
+			"write lba x%lx\n", (unsigned long)lba);
+		break;
+	case SCSI_PROT_WRITE_INSERT:
+		/*
+		 * For WRITE_INSERT, force the
+		 * error to be sent on the wire. It should be
+		 * detected by the Target.
+		 */
 		/* DEAD will be the apptag on the wire */
 		*apptag = 0xDEAD;
 		phba->lpfc_injerr_wapp_cnt--;
 		phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
-		rc = 1;
+		rc = BG_ERR_TGT;
 
 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-			"9077 BLKGRD: Injecting apptag error: "
+			"0813 BLKGRD: Injecting apptag error: "
 			"write lba x%lx\n", (unsigned long)lba);
+		break;
 	}
-} else {
+}
 if (phba->lpfc_injerr_rapp_cnt) {
+	switch (op) {
+	case SCSI_PROT_READ_INSERT:
+		/*
+		 * For READ_INSERT, it doesn't make sense
+		 * to change the apptag.
+		 */
+		break;
+	case SCSI_PROT_READ_STRIP:
+	case SCSI_PROT_READ_PASS:
+		/*
+		 * For READ_STRIP and READ_PASS, force the
+		 * error on data being read off the wire. It
+		 * should force an IO error to the driver.
+		 */
 		*apptag = 0xDEAD;
 		phba->lpfc_injerr_rapp_cnt--;
 		phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
-		rc = 1;
+		rc = BG_ERR_INIT;
 
 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-			"9078 BLKGRD: Injecting apptag error: "
+			"0814 BLKGRD: Injecting apptag error: "
 			"read lba x%lx\n", (unsigned long)lba);
+		break;
+	}
 }
 }
 
+
 /* Should we change the Guard Tag */
-
-/*
- * If we are SCSI_PROT_WRITE_INSERT, the protection data is
- * being on the wire is being fully generated on the HBA.
- * The host cannot change it or force an error.
- */
-if (((op == SCSI_PROT_WRITE_STRIP) ||
-    (op == SCSI_PROT_WRITE_PASS)) &&
-    phba->lpfc_injerr_wgrd_cnt) {
-	if (sgpe) {
-		src = (struct scsi_dif_tuple *)sg_virt(sgpe);
-		/*
-		 * Just inject an error in the first
-		 * prot block.
-		 */
-		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-			"9079 BLKGRD: Injecting guard error: "
-			"write lba x%lx oldGuard x%x refTag x%x\n",
-			(unsigned long)lba, src->guard_tag,
-			src->ref_tag);
-
-		src->guard_tag = (uint16_t)new_guard;
-		phba->lpfc_injerr_wgrd_cnt--;
-		phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
-		rc = 1;
-
-	} else {
-		blksize = lpfc_cmd_blksize(sc);
-		/*
-		 * Jump past the first data block
-		 * and inject an error in the
-		 * prot data. The prot data is already
-		 * embedded after the regular data.
-		 */
-		src = (struct scsi_dif_tuple *)
-			(sg_virt(sgde) + blksize);
-
-		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
-			"9080 BLKGRD: Injecting guard error: "
-			"write lba x%lx oldGuard x%x refTag x%x\n",
-			(unsigned long)lba, src->guard_tag,
-			src->ref_tag);
-
-		src->guard_tag = (uint16_t)new_guard;
-		phba->lpfc_injerr_wgrd_cnt--;
-		phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
-		rc = 1;
-	}
-}
+if (new_guard) {
+	if (phba->lpfc_injerr_wgrd_cnt) {
+		switch (op) {
+		case SCSI_PROT_WRITE_PASS:
+			if (blockoff && src) {
+				/* Insert error in middle of the IO */
+
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"0815 BLKGRD: Injecting guard error: "
+					"write lba x%lx + x%x oldgrdTag x%x\n",
+					(unsigned long)lba, blockoff,
+					src->guard_tag);
+
+				/*
+				 * NOTE, this will change guard tag in
+				 * the memory location forever!
+				 */
+				src->guard_tag = 0xDEAD;
+				phba->lpfc_injerr_wgrd_cnt--;
+				phba->lpfc_injerr_lba =
+					LPFC_INJERR_LBA_OFF;
+				rc = BG_ERR_CHECK;
+				break;
+			}
+			/* Drop thru */
+		case SCSI_PROT_WRITE_STRIP:
+			/*
+			 * For WRITE_STRIP and WRITE_PASS,
+			 * force the error on data
+			 * being copied from SLI-Host to SLI-Port.
+			 */
+			phba->lpfc_injerr_wgrd_cnt--;
+			phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+
+			rc = BG_ERR_SWAP;
+			/* Signals the caller to swap CRC->CSUM */
+
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"0816 BLKGRD: Injecting guard error: "
+				"write lba x%lx\n", (unsigned long)lba);
+			break;
+		case SCSI_PROT_WRITE_INSERT:
+			/*
+			 * For WRITE_INSERT, force the
+			 * error to be sent on the wire. It should be
+			 * detected by the Target.
+			 */
+			phba->lpfc_injerr_wgrd_cnt--;
+			phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+
+			rc = BG_ERR_SWAP;
+			/* Signals the caller to swap CRC->CSUM */
+
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"0817 BLKGRD: Injecting guard error: "
+				"write lba x%lx\n", (unsigned long)lba);
+			break;
+		}
+	}
+	if (phba->lpfc_injerr_rgrd_cnt) {
+		switch (op) {
+		case SCSI_PROT_READ_INSERT:
+			/*
+			 * For READ_INSERT, it doesn't make sense
+			 * to change the guard tag.
+			 */
+			break;
+		case SCSI_PROT_READ_STRIP:
+		case SCSI_PROT_READ_PASS:
+			/*
+			 * For READ_STRIP and READ_PASS, force the
+			 * error on data being read off the wire. It
+			 * should force an IO error to the driver.
+			 */
+			*apptag = 0xDEAD;
+			phba->lpfc_injerr_rgrd_cnt--;
+			phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+
+			rc = BG_ERR_SWAP;
+			/* Signals the caller to swap CRC->CSUM */
+
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"0818 BLKGRD: Injecting guard error: "
+				"read lba x%lx\n", (unsigned long)lba);
+		}
+	}
+}
+
 return rc;
 }
 #endif
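The injection routine now reports how the error was planted through distinct return codes instead of a bare 1, and the setup routines act on them. A minimal standalone sketch of that caller-side contract; the enum values here are illustrative stand-ins, not the driver's real definitions:

#include <stdio.h>

/* Illustrative stand-ins; the real constants live in the lpfc headers. */
enum bg_err { BG_ERR_NONE = 0, BG_ERR_INIT, BG_ERR_TGT, BG_ERR_SWAP, BG_ERR_CHECK };

/* Sketch of what a buffer-list setup routine does with the result. */
static void handle_injection(enum bg_err rc, int *checking, int *swap_opcodes)
{
	*checking = 1;          /* default: HBA verifies guard/ref tags */
	*swap_opcodes = 0;

	if (rc == BG_ERR_SWAP)  /* corrupt guard by mismatching CRC/CSUM ops */
		*swap_opcodes = 1;
	if (rc == BG_ERR_CHECK) /* tag already corrupted in host memory:
				 * disable HBA checking so it reaches the wire */
		*checking = 0;
}

int main(void)
{
	int checking, swap;
	handle_injection(BG_ERR_CHECK, &checking, &swap);
	printf("checking=%d swap=%d\n", checking, swap); /* 0 0 */
	return 0;
}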
 
-/*
- * Given a scsi cmnd, determine the BlockGuard opcodes to be used with it
+/**
+ * lpfc_sc_to_bg_opcodes - Determine the BlockGuard opcodes to be used with
+ * the specified SCSI command.
+ * @phba: The Hba for which this call is being executed.
  * @sc: The SCSI command to examine
  * @txop: (out) BlockGuard operation for transmitted data
  * @rxop: (out) BlockGuard operation for received data
  *
  * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
  *
- */
+ **/
 static int
 lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		uint8_t *txop, uint8_t *rxop)
@@ -1519,8 +1687,88 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 	return ret;
 }
 
-/*
- * This function sets up buffer list for protection groups of
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+/**
+ * lpfc_bg_err_opcodes - Redetermine the BlockGuard opcodes to be used with
+ * the specified SCSI command in order to force a guard tag error.
+ * @phba: The Hba for which this call is being executed.
+ * @sc: The SCSI command to examine
+ * @txop: (out) BlockGuard operation for transmitted data
+ * @rxop: (out) BlockGuard operation for received data
+ *
+ * Returns: zero on success; non-zero if tx and/or rx op cannot be determined
+ *
+ **/
+static int
+lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+		uint8_t *txop, uint8_t *rxop)
+{
+	uint8_t guard_type = scsi_host_get_guard(sc->device->host);
+	uint8_t ret = 0;
+
+	if (guard_type == SHOST_DIX_GUARD_IP) {
+		switch (scsi_get_prot_op(sc)) {
+		case SCSI_PROT_READ_INSERT:
+		case SCSI_PROT_WRITE_STRIP:
+			*txop = BG_OP_IN_CRC_OUT_NODIF;
+			*rxop = BG_OP_IN_NODIF_OUT_CRC;
+			break;
+
+		case SCSI_PROT_READ_STRIP:
+		case SCSI_PROT_WRITE_INSERT:
+			*txop = BG_OP_IN_NODIF_OUT_CSUM;
+			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
+			break;
+
+		case SCSI_PROT_READ_PASS:
+		case SCSI_PROT_WRITE_PASS:
+			*txop = BG_OP_IN_CRC_OUT_CRC;
+			*rxop = BG_OP_IN_CRC_OUT_CRC;
+			break;
+
+		case SCSI_PROT_NORMAL:
+		default:
+			break;
+
+		}
+	} else {
+		switch (scsi_get_prot_op(sc)) {
+		case SCSI_PROT_READ_STRIP:
+		case SCSI_PROT_WRITE_INSERT:
+			*txop = BG_OP_IN_NODIF_OUT_CSUM;
+			*rxop = BG_OP_IN_CSUM_OUT_NODIF;
+			break;
+
+		case SCSI_PROT_READ_PASS:
+		case SCSI_PROT_WRITE_PASS:
+			*txop = BG_OP_IN_CSUM_OUT_CRC;
+			*rxop = BG_OP_IN_CRC_OUT_CSUM;
+			break;
+
+		case SCSI_PROT_READ_INSERT:
+		case SCSI_PROT_WRITE_STRIP:
+			*txop = BG_OP_IN_CSUM_OUT_NODIF;
+			*rxop = BG_OP_IN_NODIF_OUT_CSUM;
+			break;
+
+		case SCSI_PROT_NORMAL:
+		default:
+			break;
+		}
+	}
+
+	return ret;
+}
+#endif
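The swap works because a DIX initiator can compute the guard as either a T10 CRC or an IP checksum; deliberately programming the HBA with the opposite flavor from what the host wrote into the tuples guarantees a guard miscompare. A standalone sketch of the two guard flavors over one 512-byte block; the CRC polynomial/seed follow the usual T10-DIF convention and should be treated as assumptions here:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* T10-DIF guard: CRC16, polynomial 0x8BB7, seed 0 (bitwise model). */
static uint16_t crc_t10dif(const uint8_t *buf, size_t len)
{
	uint16_t crc = 0;
	for (size_t i = 0; i < len; i++) {
		crc ^= (uint16_t)buf[i] << 8;
		for (int b = 0; b < 8; b++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x8BB7 : crc << 1;
	}
	return crc;
}

/* DIX alternative guard: 16-bit ones-complement (IP) checksum. */
static uint16_t ip_csum(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	for (size_t i = 0; i + 1 < len; i += 2)
		sum += ((uint16_t)buf[i] << 8) | buf[i + 1];
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t sector[512];
	memset(sector, 0xA5, sizeof(sector));
	/* The two flavors disagree, which is what BG_ERR_SWAP exploits. */
	printf("crc=0x%04x csum=0x%04x\n",
	       crc_t10dif(sector, sizeof(sector)), ip_csum(sector, sizeof(sector)));
	return 0;
}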
+
+/**
+ * lpfc_bg_setup_bpl - Setup BlockGuard BPL with no protection data
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ * @bpl: pointer to buffer list for protection groups
+ * @datacnt: number of segments of data that have been dma mapped
+ *
+ * This function sets up BPL buffer list for protection groups of
  * type LPFC_PG_TYPE_NO_DIF
  *
  * This is usually used when the HBA is instructed to generate
@@ -1539,12 +1787,11 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
  * |more Data BDE's ... (opt)|
  * +-------------------------+
  *
- * @sc: pointer to scsi command we're working on
- * @bpl: pointer to buffer list for protection groups
- * @datacnt: number of segments of data that have been dma mapped
  *
  * Note: Data s/g buffers have been dma mapped
- */
+ *
+ * Returns the number of BDEs added to the BPL.
+ **/
 static int
 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		struct ulp_bde64 *bpl, int datasegcnt)
@@ -1555,6 +1802,8 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 	dma_addr_t physaddr;
 	int i = 0, num_bde = 0, status;
 	int datadir = sc->sc_data_direction;
+	uint32_t rc;
+	uint32_t checking = 1;
 	uint32_t reftag;
 	unsigned blksize;
 	uint8_t txop, rxop;
@@ -1565,11 +1814,16 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 
 	/* extract some info from the scsi command for pde*/
 	blksize = lpfc_cmd_blksize(sc);
-	reftag = scsi_get_lba(sc) & 0xffffffff;
+	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-	/* reftag is the only error we can inject here */
-	lpfc_bg_err_inject(phba, sc, &reftag, 0, 0);
+	rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1);
+	if (rc) {
+		if (rc == BG_ERR_SWAP)
+			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
+		if (rc == BG_ERR_CHECK)
+			checking = 0;
+	}
 #endif
 
 	/* setup PDE5 with what we have */
@@ -1592,8 +1846,8 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 	bf_set(pde6_optx, pde6, txop);
 	bf_set(pde6_oprx, pde6, rxop);
 	if (datadir == DMA_FROM_DEVICE) {
-		bf_set(pde6_ce, pde6, 1);
-		bf_set(pde6_re, pde6, 1);
+		bf_set(pde6_ce, pde6, checking);
+		bf_set(pde6_re, pde6, checking);
 	}
 	bf_set(pde6_ai, pde6, 1);
 	bf_set(pde6_ae, pde6, 0);
@@ -1627,9 +1881,16 @@ out:
 	return num_bde;
 }
 
-/*
- * This function sets up buffer list for protection groups of
- * type LPFC_PG_TYPE_DIF_BUF
+/**
+ * lpfc_bg_setup_bpl_prot - Setup BlockGuard BPL with protection data
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ * @bpl: pointer to buffer list for protection groups
+ * @datacnt: number of segments of data that have been dma mapped
+ * @protcnt: number of segments of protection data that have been dma mapped
+ *
+ * This function sets up BPL buffer list for protection groups of
+ * type LPFC_PG_TYPE_DIF
  *
  * This is usually used when DIFs are in their own buffers,
  * separate from the data. The HBA can then be instructed
@@ -1654,14 +1915,11 @@ out:
  * |          ...            |
  * +-------------------------+
  *
- * @sc: pointer to scsi command we're working on
- * @bpl: pointer to buffer list for protection groups
- * @datacnt: number of segments of data that have been dma mapped
- * @protcnt: number of segment of protection data that have been dma mapped
- *
  * Note: It is assumed that both data and protection s/g buffers have been
  * mapped for DMA
- */
+ *
+ * Returns the number of BDEs added to the BPL.
+ **/
 static int
 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		struct ulp_bde64 *bpl, int datacnt, int protcnt)
@@ -1681,6 +1939,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 	int datadir = sc->sc_data_direction;
 	unsigned char pgdone = 0, alldone = 0;
 	unsigned blksize;
+	uint32_t rc;
+	uint32_t checking = 1;
 	uint32_t reftag;
 	uint8_t txop, rxop;
 	int num_bde = 0;
@@ -1701,11 +1961,16 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 
 	/* extract some info from the scsi command */
 	blksize = lpfc_cmd_blksize(sc);
-	reftag = scsi_get_lba(sc) & 0xffffffff;
+	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
 
 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-	/* reftag / guard tag are the only errors we can inject here */
-	lpfc_bg_err_inject(phba, sc, &reftag, 0, 0xDEAD);
+	rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1);
+	if (rc) {
+		if (rc == BG_ERR_SWAP)
+			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
+		if (rc == BG_ERR_CHECK)
+			checking = 0;
+	}
 #endif
 
 	split_offset = 0;
@@ -1729,8 +1994,8 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
 		bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR);
 		bf_set(pde6_optx, pde6, txop);
 		bf_set(pde6_oprx, pde6, rxop);
-		bf_set(pde6_ce, pde6, 1);
-		bf_set(pde6_re, pde6, 1);
+		bf_set(pde6_ce, pde6, checking);
+		bf_set(pde6_re, pde6, checking);
 		bf_set(pde6_ai, pde6, 1);
 		bf_set(pde6_ae, pde6, 0);
 		bf_set(pde6_apptagval, pde6, 0);
@@ -1852,13 +2117,358 @@ out:
 	return num_bde;
 }
 
-/*
+/**
+ * lpfc_bg_setup_sgl - Setup BlockGuard SGL with no protection data
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ * @sgl: pointer to buffer list for protection groups
+ * @datacnt: number of segments of data that have been dma mapped
+ *
+ * This function sets up SGL buffer list for protection groups of
+ * type LPFC_PG_TYPE_NO_DIF
+ *
+ * This is usually used when the HBA is instructed to generate
+ * DIFs and insert them into data stream (or strip DIF from
+ * incoming data stream)
+ *
+ * The buffer list consists of just one protection group described
+ * below:
+ *       +-------------------------+
+ *   start of prot group -->  |         DI_SEED         |
+ *       +-------------------------+
+ *       |         Data SGE        |
+ *       +-------------------------+
+ *       |more Data SGE's ... (opt)|
+ *       +-------------------------+
+ *
+ *
+ * Note: Data s/g buffers have been dma mapped
+ *
+ * Returns the number of SGEs added to the SGL.
+ **/
+static int
+lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+		struct sli4_sge *sgl, int datasegcnt)
+{
+	struct scatterlist *sgde = NULL; /* s/g data entry */
+	struct sli4_sge_diseed *diseed = NULL;
+	dma_addr_t physaddr;
+	int i = 0, num_sge = 0, status;
+	int datadir = sc->sc_data_direction;
+	uint32_t reftag;
+	unsigned blksize;
+	uint8_t txop, rxop;
+	uint32_t rc;
+	uint32_t checking = 1;
+	uint32_t dma_len;
+	uint32_t dma_offset = 0;
+
+	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
+	if (status)
+		goto out;
+
+	/* extract some info from the scsi command */
+	blksize = lpfc_cmd_blksize(sc);
+	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1);
+	if (rc) {
+		if (rc == BG_ERR_SWAP)
+			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
+		if (rc == BG_ERR_CHECK)
+			checking = 0;
+	}
+#endif
+
+	/* setup DISEED with what we have */
+	diseed = (struct sli4_sge_diseed *) sgl;
+	memset(diseed, 0, sizeof(struct sli4_sge_diseed));
+	bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
+
+	/* Endianness conversion if necessary */
+	diseed->ref_tag = cpu_to_le32(reftag);
+	diseed->ref_tag_tran = diseed->ref_tag;
+
+	/* setup DISEED with the rest of the info */
+	bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
+	bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
+	if (datadir == DMA_FROM_DEVICE) {
+		bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
+		bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+	}
+	bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
+	bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
+
+	/* Endianness conversion if necessary for DISEED */
+	diseed->word2 = cpu_to_le32(diseed->word2);
+	diseed->word3 = cpu_to_le32(diseed->word3);
+
+	/* advance sgl and increment sge count */
+	num_sge++;
+	sgl++;
+
+	/* assumption: caller has already run dma_map_sg on command data */
+	scsi_for_each_sg(sc, sgde, datasegcnt, i) {
+		physaddr = sg_dma_address(sgde);
+		dma_len = sg_dma_len(sgde);
+		sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
+		sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
+		if ((i + 1) == datasegcnt)
+			bf_set(lpfc_sli4_sge_last, sgl, 1);
+		else
+			bf_set(lpfc_sli4_sge_last, sgl, 0);
+		bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+
+		sgl->sge_len = cpu_to_le32(dma_len);
+		dma_offset += dma_len;
+
+		sgl++;
+		num_sge++;
+	}
+
+out:
+	return num_sge;
+}
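In the no-DIF case the SGL is one DISEED followed by one SGE per mapped data segment, so a successful call returns datasegcnt + 1 and the caller's "num_bde < 2" test catches any failure. A tiny standalone model of that accounting:

#include <stdio.h>

/* One DISEED descriptor precedes the data SGEs; 0 models the error path. */
static int num_sges_no_dif(int datasegcnt)
{
	if (datasegcnt <= 0)
		return 0;
	return 1 /* DISEED */ + datasegcnt;
}

int main(void)
{
	for (int n = 1; n <= 4; n++)
		printf("segs=%d -> sges=%d\n", n, num_sges_no_dif(n));
	return 0;
}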
+
+/**
+ * lpfc_bg_setup_sgl_prot - Setup BlockGuard SGL with protection data
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ * @sgl: pointer to buffer list for protection groups
+ * @datacnt: number of segments of data that have been dma mapped
+ * @protcnt: number of segments of protection data that have been dma mapped
+ *
+ * This function sets up SGL buffer list for protection groups of
+ * type LPFC_PG_TYPE_DIF
+ *
+ * This is usually used when DIFs are in their own buffers,
+ * separate from the data. The HBA can then be instructed
+ * to place the DIFs in the outgoing stream. For read operations,
+ * the HBA could extract the DIFs and place them in DIF buffers.
+ *
+ * The buffer list for this type consists of one or more of the
+ * protection groups described below:
+ *       +-------------------------+
+ *   start of first prot group -->  |         DISEED          |
+ *       +-------------------------+
+ *       |      DIF (Prot SGE)     |
+ *       +-------------------------+
+ *       |         Data SGE        |
+ *       +-------------------------+
+ *       |more Data SGE's ... (opt)|
+ *       +-------------------------+
+ *   start of new prot group -->  |         DISEED          |
+ *       +-------------------------+
+ *       |          ...            |
+ *       +-------------------------+
+ *
+ * Note: It is assumed that both data and protection s/g buffers have been
+ *       mapped for DMA
+ *
+ * Returns the number of SGEs added to the SGL.
+ **/
+static int
+lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
+		struct sli4_sge *sgl, int datacnt, int protcnt)
+{
+	struct scatterlist *sgde = NULL; /* s/g data entry */
+	struct scatterlist *sgpe = NULL; /* s/g prot entry */
+	struct sli4_sge_diseed *diseed = NULL;
+	dma_addr_t dataphysaddr, protphysaddr;
+	unsigned short curr_data = 0, curr_prot = 0;
+	unsigned int split_offset;
+	unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
+	unsigned int protgrp_blks, protgrp_bytes;
+	unsigned int remainder, subtotal;
+	int status;
+	unsigned char pgdone = 0, alldone = 0;
+	unsigned blksize;
+	uint32_t reftag;
+	uint8_t txop, rxop;
+	uint32_t dma_len;
+	uint32_t rc;
+	uint32_t checking = 1;
+	uint32_t dma_offset = 0;
+	int num_sge = 0;
+
+	sgpe = scsi_prot_sglist(sc);
+	sgde = scsi_sglist(sc);
+
+	if (!sgpe || !sgde) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+				"9082 Invalid s/g entry: data=0x%p prot=0x%p\n",
+				sgpe, sgde);
+		return 0;
+	}
+
+	status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
+	if (status)
+		goto out;
+
+	/* extract some info from the scsi command */
+	blksize = lpfc_cmd_blksize(sc);
+	reftag = (uint32_t)scsi_get_lba(sc); /* Truncate LBA */
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+	rc = lpfc_bg_err_inject(phba, sc, &reftag, 0, 1);
+	if (rc) {
+		if (rc == BG_ERR_SWAP)
+			lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
+		if (rc == BG_ERR_CHECK)
+			checking = 0;
+	}
+#endif
+
+	split_offset = 0;
+	do {
+		/* setup DISEED with what we have */
+		diseed = (struct sli4_sge_diseed *) sgl;
+		memset(diseed, 0, sizeof(struct sli4_sge_diseed));
+		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DISEED);
+
+		/* Endianness conversion if necessary */
+		diseed->ref_tag = cpu_to_le32(reftag);
+		diseed->ref_tag_tran = diseed->ref_tag;
+
+		/* setup DISEED with the rest of the info */
+		bf_set(lpfc_sli4_sge_dif_optx, diseed, txop);
+		bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop);
+		bf_set(lpfc_sli4_sge_dif_ce, diseed, checking);
+		bf_set(lpfc_sli4_sge_dif_re, diseed, checking);
+		bf_set(lpfc_sli4_sge_dif_ai, diseed, 1);
+		bf_set(lpfc_sli4_sge_dif_me, diseed, 0);
+
+		/* Endianness conversion if necessary for DISEED */
+		diseed->word2 = cpu_to_le32(diseed->word2);
+		diseed->word3 = cpu_to_le32(diseed->word3);
+
+		/* advance sgl and increment sge count */
+		num_sge++;
+		sgl++;
+
+		/* setup the first SGE that points to protection buffer */
+		protphysaddr = sg_dma_address(sgpe) + protgroup_offset;
+		protgroup_len = sg_dma_len(sgpe) - protgroup_offset;
+
+		/* must be integer multiple of the DIF block length */
+		BUG_ON(protgroup_len % 8);
+
+		/* Now setup DIF SGE */
+		sgl->word2 = 0;
+		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DIF);
+		sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr));
+		sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr));
+		sgl->word2 = cpu_to_le32(sgl->word2);
+
+		protgrp_blks = protgroup_len / 8;
+		protgrp_bytes = protgrp_blks * blksize;
+
+		/* check if DIF SGE is crossing the 4K boundary; if so split */
+		if ((sgl->addr_lo & 0xfff) + protgroup_len > 0x1000) {
+			protgroup_remainder = 0x1000 - (sgl->addr_lo & 0xfff);
+			protgroup_offset += protgroup_remainder;
+			protgrp_blks = protgroup_remainder / 8;
+			protgrp_bytes = protgrp_blks * blksize;
+		} else {
+			protgroup_offset = 0;
+			curr_prot++;
+		}
+
+		num_sge++;
+
+		/* setup SGE's for data blocks associated with DIF data */
+		pgdone = 0;
+		subtotal = 0; /* total bytes processed for current prot grp */
+		while (!pgdone) {
+			if (!sgde) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9086 BLKGRD:%s Invalid data segment\n",
+						__func__);
+				return 0;
+			}
+			sgl++;
+			dataphysaddr = sg_dma_address(sgde) + split_offset;
+
+			remainder = sg_dma_len(sgde) - split_offset;
+
+			if ((subtotal + remainder) <= protgrp_bytes) {
+				/* we can use this whole buffer */
+				dma_len = remainder;
+				split_offset = 0;
+
+				if ((subtotal + remainder) == protgrp_bytes)
+					pgdone = 1;
+			} else {
+				/* must split this buffer with next prot grp */
+				dma_len = protgrp_bytes - subtotal;
+				split_offset += dma_len;
+			}
+
+			subtotal += dma_len;
+
+			sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr));
+			sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr));
+			bf_set(lpfc_sli4_sge_last, sgl, 0);
+			bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+			bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
+
+			sgl->sge_len = cpu_to_le32(dma_len);
+			dma_offset += dma_len;
+
+			num_sge++;
+			curr_data++;
+
+			if (split_offset)
+				break;
+
+			/* Move to the next s/g segment if possible */
+			sgde = sg_next(sgde);
+		}
+
+		if (protgroup_offset) {
+			/* update the reference tag */
+			reftag += protgrp_blks;
+			sgl++;
+			continue;
+		}
+
+		/* are we done ? */
+		if (curr_prot == protcnt) {
+			bf_set(lpfc_sli4_sge_last, sgl, 1);
+			alldone = 1;
+		} else if (curr_prot < protcnt) {
+			/* advance to next prot buffer */
+			sgpe = sg_next(sgpe);
+			sgl++;
+
+			/* update the reference tag */
+			reftag += protgrp_blks;
+		} else {
+			/* if we're here, we have a bug */
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+				"9085 BLKGRD: bug in %s\n", __func__);
+		}
+
+	} while (!alldone);
+
+out:
+
+	return num_sge;
+}
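The per-group arithmetic above is worth spelling out: each 8-byte DIF tuple covers one logical block, so a protection buffer of protgroup_len bytes describes protgroup_len/8 blocks and therefore (protgroup_len/8) * blksize bytes of data, and when the DIF SGE would cross a 4 KiB boundary only the bytes up to the boundary are consumed this round. A standalone sketch of that split, assuming a 512-byte block size:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned blksize = 512;
	uint32_t addr_lo = 0x0FE0;   /* DIF buffer offset within a page */
	unsigned protgroup_len = 64; /* 8 tuples -> 8 blocks */

	if ((addr_lo & 0xfff) + protgroup_len > 0x1000) {
		unsigned remainder = 0x1000 - (addr_lo & 0xfff); /* 32 bytes */
		unsigned blks = remainder / 8;                   /* 4 blocks */
		printf("split: %u DIF bytes now, covering %u data bytes (%u blocks)\n",
		       remainder, blks * blksize, blks);
	} else {
		unsigned blks = protgroup_len / 8;
		printf("no split: %u data bytes\n", blks * blksize);
	}
	return 0;
}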
+
+/**
+ * lpfc_prot_group_type - Get protection group type of SCSI command
+ * @phba: The Hba for which this call is being executed.
+ * @sc: pointer to scsi command we're working on
+ *
  * Given a SCSI command that supports DIF, determine composition of protection
  * groups involved in setting up buffer lists
  *
- * Returns:
- * for DIF (for both read and write)
- * */
+ * Returns: Protection group type (with or without DIF)
+ *
+ **/
 static int
 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
 {
@@ -1885,13 +2495,17 @@ lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
 	return ret;
 }
 
-/*
+/**
+ * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be prep'ed.
+ *
  * This is the protection/DIF aware version of
  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
  * two functions eventually, but for now, it's here
- */
+ **/
 static int
-lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
+lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
 		struct lpfc_scsi_buf *lpfc_cmd)
 {
 	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
@@ -2147,7 +2761,21 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
 	cmd->sense_buffer[8] = 0;     /* Information descriptor type */
 	cmd->sense_buffer[9] = 0xa;   /* Additional descriptor length */
 	cmd->sense_buffer[10] = 0x80; /* Validity bit */
-	bghm /= cmd->device->sector_size;
+
+	/* bghm is an "on the wire" FC frame based count */
+	switch (scsi_get_prot_op(cmd)) {
+	case SCSI_PROT_READ_INSERT:
+	case SCSI_PROT_WRITE_STRIP:
+		bghm /= cmd->device->sector_size;
+		break;
+	case SCSI_PROT_READ_STRIP:
+	case SCSI_PROT_WRITE_INSERT:
+	case SCSI_PROT_READ_PASS:
+	case SCSI_PROT_WRITE_PASS:
+		bghm /= (cmd->device->sector_size +
+			sizeof(struct scsi_dif_tuple));
+		break;
+	}
 
 	failing_sector = scsi_get_lba(cmd);
 	failing_sector += bghm;
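The distinction matters because for host-side INSERT/STRIP the frames carry bare sectors, while in the tag-carrying cases every sector travels with its 8-byte DIF tuple appended, so the byte count divides by sector_size + 8. A worked example with 512-byte sectors and a hypothetical byte count:

#include <stdio.h>

int main(void)
{
	unsigned sector_size = 512, tuple = 8; /* 8-byte T10 DIF tuple */
	unsigned bghm = 52000; /* hypothetical byte offset from the HBA */

	/* READ_INSERT / WRITE_STRIP: frames carry bare sectors */
	printf("plain sectors:  %u\n", bghm / sector_size);           /* 101 */
	/* *_PASS, READ_STRIP, WRITE_INSERT: sector + tuple per block */
	printf("sectors w/ DIF: %u\n", bghm / (sector_size + tuple)); /* 100 */
	return 0;
}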
@@ -2292,6 +2920,180 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 }
 
 /**
+ * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be adjusted.
+ *
+ * Adjust the data length to account for how much data
+ * is actually on the wire.
+ *
+ * returns the adjusted data length
+ **/
+static int
+lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
+		struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct scsi_cmnd *sc = lpfc_cmd->pCmd;
+	int diflen, fcpdl;
+	unsigned blksize;
+
+	fcpdl = scsi_bufflen(sc);
+
+	/* Check if there is protection data on the wire */
+	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
+		/* Read */
+		if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT)
+			return fcpdl;
+
+	} else {
+		/* Write */
+		if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP)
+			return fcpdl;
+	}
+
+	/* If protection data on the wire, adjust the count accordingly */
+	blksize = lpfc_cmd_blksize(sc);
+	diflen = (fcpdl / blksize) * 8;
+	fcpdl += diflen;
+	return fcpdl;
+}
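A quick sanity check of the adjustment: a 64 KiB transfer of 4 KiB blocks carries 16 tuples of 8 bytes each, so 128 bytes of DIF ride along and the FCP data length grows accordingly. A standalone check, with the block size passed in where the driver would call lpfc_cmd_blksize():

#include <stdio.h>

static int adjust_dl(int fcpdl, unsigned blksize, int prot_on_wire)
{
	if (!prot_on_wire) /* READ_INSERT / WRITE_STRIP cases */
		return fcpdl;
	return fcpdl + (fcpdl / blksize) * 8; /* one 8-byte tuple per block */
}

int main(void)
{
	printf("%d\n", adjust_dl(65536, 4096, 1)); /* 65664 */
	printf("%d\n", adjust_dl(65536, 4096, 0)); /* 65536 */
	return 0;
}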
+
+/**
+ * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This is the protection/DIF aware version of
+ * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
+ * two functions eventually, but for now, it's here
+ **/
+static int
+lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
+		struct lpfc_scsi_buf *lpfc_cmd)
+{
+	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+	struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
+	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+	uint32_t num_bde = 0;
+	int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
+	int prot_group_type = 0;
+	int fcpdl;
+
+	/*
+	 * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd
+	 * fcp_rsp regions to the first data bde entry
+	 */
+	if (scsi_sg_count(scsi_cmnd)) {
+		/*
+		 * The driver stores the segment count returned from pci_map_sg
+		 * because this is a count of dma-mappings used to map the
+		 * use_sg pages. They are not guaranteed to be the same for
+		 * those architectures that implement an IOMMU.
+		 */
+		datasegcnt = dma_map_sg(&phba->pcidev->dev,
+				scsi_sglist(scsi_cmnd),
+				scsi_sg_count(scsi_cmnd), datadir);
+		if (unlikely(!datasegcnt))
+			return 1;
+
+		sgl += 1;
+		/* clear the last flag in the fcp_rsp map entry */
+		sgl->word2 = le32_to_cpu(sgl->word2);
+		bf_set(lpfc_sli4_sge_last, sgl, 0);
+		sgl->word2 = cpu_to_le32(sgl->word2);
+
+		sgl += 1;
+		lpfc_cmd->seg_cnt = datasegcnt;
+		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9087 BLKGRD: %s: Too many sg segments"
+					" from dma_map_sg. Config %d, seg_cnt"
+					" %d\n",
+					__func__, phba->cfg_sg_seg_cnt,
+					lpfc_cmd->seg_cnt);
+			scsi_dma_unmap(scsi_cmnd);
+			return 1;
+		}
+
+		prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
+
+		switch (prot_group_type) {
+		case LPFC_PG_TYPE_NO_DIF:
+			num_bde = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
+					datasegcnt);
+			/* we should have 2 or more entries in buffer list */
+			if (num_bde < 2)
+				goto err;
+			break;
+		case LPFC_PG_TYPE_DIF_BUF:{
+			/*
+			 * This type indicates that protection buffers are
+			 * passed to the driver, so that needs to be prepared
+			 * for DMA
+			 */
+			protsegcnt = dma_map_sg(&phba->pcidev->dev,
+					scsi_prot_sglist(scsi_cmnd),
+					scsi_prot_sg_count(scsi_cmnd), datadir);
+			if (unlikely(!protsegcnt)) {
+				scsi_dma_unmap(scsi_cmnd);
+				return 1;
+			}
+
+			lpfc_cmd->prot_seg_cnt = protsegcnt;
+			if (lpfc_cmd->prot_seg_cnt
+			    > phba->cfg_prot_sg_seg_cnt) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+					"9088 BLKGRD: %s: Too many prot sg "
+					"segments from dma_map_sg. Config %d, "
+					"prot_seg_cnt %d\n", __func__,
+					phba->cfg_prot_sg_seg_cnt,
+					lpfc_cmd->prot_seg_cnt);
+				dma_unmap_sg(&phba->pcidev->dev,
+					scsi_prot_sglist(scsi_cmnd),
+					scsi_prot_sg_count(scsi_cmnd),
+					datadir);
+				scsi_dma_unmap(scsi_cmnd);
+				return 1;
+			}
+
+			num_bde = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
+					datasegcnt, protsegcnt);
+			/* we should have 3 or more entries in buffer list */
+			if (num_bde < 3)
+				goto err;
+			break;
+		}
+		case LPFC_PG_TYPE_INVALID:
+		default:
+			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+					"9083 Unexpected protection group %i\n",
+					prot_group_type);
+			return 1;
+		}
+	}
+
+	fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
+
+	fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
+
+	/*
+	 * Due to difference in data length between DIF/non-DIF paths,
+	 * we need to set word 4 of IOCB here
+	 */
+	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
+	lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF;
+
+	return 0;
+err:
+	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+			"9084 Could not setup all needed BDE's "
+			"prot_group_type=%d, num_bde=%d\n",
+			prot_group_type, num_bde);
+	return 1;
+}
+
+/**
  * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
  * @phba: The Hba for which this call is being executed.
  * @lpfc_cmd: The scsi buffer which is going to be mapped.
@@ -2310,6 +3112,25 @@ lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 }
 
 /**
+ * lpfc_bg_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
+ * using BlockGuard.
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine wraps the actual DMA mapping function pointer from the
+ * lpfc_hba struct.
+ *
+ * Return codes:
+ *	1 - Error
+ *	0 - Success
+ **/
+static inline int
+lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+	return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
+}
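The wrapper exists so the SLI-3 BPL and SLI-4 SGL builders can sit behind one call site; the api-table setup later in this patch binds the pointer per device group. A reduced sketch of the same dispatch idiom, with stand-in types and names:

#include <stdio.h>

struct hba;
typedef int (*prep_fn)(struct hba *);
struct hba { prep_fn bg_prep; };

static int prep_s3(struct hba *h) { (void)h; puts("BPL path (SLI-3)"); return 0; }
static int prep_s4(struct hba *h) { (void)h; puts("SGL path (SLI-4)"); return 0; }

/* Mirrors the api-table setup: bind once at init time... */
static void api_setup(struct hba *h, int dev_grp)
{
	h->bg_prep = dev_grp ? prep_s4 : prep_s3;
}

/* ...then every caller goes through one inline wrapper. */
static inline int bg_prep(struct hba *h) { return h->bg_prep(h); }

int main(void)
{
	struct hba h;
	api_setup(&h, 1);
	return bg_prep(&h);
}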
+
+/**
  * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
  * @phba: Pointer to hba context object.
  * @vport: Pointer to vport object.
@@ -3072,12 +3893,14 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
 	case LPFC_PCI_DEV_LP:
 		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
+		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
 		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
 		break;
 	case LPFC_PCI_DEV_OC:
 		phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
 		phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
+		phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
 		phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
 		phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
 		break;
@@ -3250,8 +4073,7 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	ndlp = rdata->pnode;
 
 	if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
-		(!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) ||
-		(phba->sli_rev == LPFC_SLI_REV4))) {
+		(!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
 
 		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
 				"9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 23a27592388..e0e4d8d1824 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -293,7 +293,9 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
 	}
 	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
 	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
-	bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
+	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
+			(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
+	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
 	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
 	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
 	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
@@ -372,7 +374,9 @@ lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
 	bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
 	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
 	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
-	bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
+	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
+			(q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
+	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
 	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
 	return released;
 }
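Splitting the queue id across _hi and _lo doorbell fields widens the addressable id space without growing the 32-bit doorbell word. A sketch of the encoding, assuming a 9-bit low field; the real width comes from LPFC_EQID_HI_FIELD_SHIFT, whose value this hunk does not show:

#include <stdio.h>
#include <stdint.h>

#define QID_LO_BITS 9 /* assumption for illustration */

int main(void)
{
	uint16_t queue_id = 0x2A7;
	uint16_t lo = queue_id & ((1 << QID_LO_BITS) - 1);
	uint16_t hi = queue_id >> QID_LO_BITS;

	printf("id=0x%x -> hi=0x%x lo=0x%x\n", queue_id, hi, lo);
	/* the consumer reassembles: (hi << QID_LO_BITS) | lo */
	printf("rebuilt=0x%x\n", (hi << QID_LO_BITS) | lo);
	return 0;
}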
@@ -554,81 +558,6 @@ __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
 }
 
 /**
- * __lpfc_set_rrq_active - set RRQ active bit in the ndlp's xri_bitmap.
- * @phba: Pointer to HBA context object.
- * @ndlp: nodelist pointer for this target.
- * @xritag: xri used in this exchange.
- * @rxid: Remote Exchange ID.
- * @send_rrq: Flag used to determine if we should send rrq els cmd.
- *
- * This function is called with hbalock held.
- * The active bit is set in the ndlp's active rrq xri_bitmap. Allocates an
- * rrq struct and adds it to the active_rrq_list.
- *
- * returns  0 for rrq slot for this xri
- *         < 0  Were not able to get rrq mem or invalid parameter.
- **/
-static int
-__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
-		uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
-{
-	struct lpfc_node_rrq *rrq;
-	int empty;
-	uint32_t did = 0;
-
-
-	if (!ndlp)
-		return -EINVAL;
-
-	if (!phba->cfg_enable_rrq)
-		return -EINVAL;
-
-	if (phba->pport->load_flag & FC_UNLOADING) {
-		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
-		goto out;
-	}
-	did = ndlp->nlp_DID;
-
-	/*
-	 * set the active bit even if there is no mem available.
-	 */
-	if (NLP_CHK_FREE_REQ(ndlp))
-		goto out;
-
-	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
-		goto out;
-
-	if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
-		goto out;
-
-	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
-	if (rrq) {
-		rrq->send_rrq = send_rrq;
-		rrq->xritag = xritag;
-		rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
-		rrq->ndlp = ndlp;
-		rrq->nlp_DID = ndlp->nlp_DID;
-		rrq->vport = ndlp->vport;
-		rrq->rxid = rxid;
-		empty = list_empty(&phba->active_rrq_list);
-		rrq->send_rrq = send_rrq;
-		list_add_tail(&rrq->list, &phba->active_rrq_list);
-		if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) {
-			phba->hba_flag |= HBA_RRQ_ACTIVE;
-			if (empty)
-				lpfc_worker_wake_up(phba);
-		}
-		return 0;
-	}
-out:
-	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
-			" DID:0x%x Send:%d\n",
-			xritag, rxid, did, send_rrq);
-	return -EINVAL;
-}
-
-/**
  * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
  * @phba: Pointer to HBA context object.
  * @xritag: xri used in this exchange.
@@ -856,15 +785,68 @@ lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
  **/
 int
 lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
 		uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
 {
-	int ret;
 	unsigned long iflags;
+	struct lpfc_node_rrq *rrq;
+	int empty;
+
+	if (!ndlp)
+		return -EINVAL;
+
+	if (!phba->cfg_enable_rrq)
+		return -EINVAL;
 
 	spin_lock_irqsave(&phba->hbalock, iflags);
-	ret = __lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq);
+	if (phba->pport->load_flag & FC_UNLOADING) {
+		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
+		goto out;
+	}
+
+	/*
+	 * set the active bit even if there is no mem available.
+	 */
+	if (NLP_CHK_FREE_REQ(ndlp))
+		goto out;
+
+	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
+		goto out;
+
+	if (test_and_set_bit(xritag, ndlp->active_rrqs.xri_bitmap))
+		goto out;
+
 	spin_unlock_irqrestore(&phba->hbalock, iflags);
-	return ret;
+	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
+	if (!rrq) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
+				" DID:0x%x Send:%d\n",
+				xritag, rxid, ndlp->nlp_DID, send_rrq);
+		return -EINVAL;
+	}
+	rrq->send_rrq = send_rrq;
+	rrq->xritag = xritag;
+	rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
+	rrq->ndlp = ndlp;
+	rrq->nlp_DID = ndlp->nlp_DID;
+	rrq->vport = ndlp->vport;
+	rrq->rxid = rxid;
+	rrq->send_rrq = send_rrq;
+	spin_lock_irqsave(&phba->hbalock, iflags);
+	empty = list_empty(&phba->active_rrq_list);
+	list_add_tail(&rrq->list, &phba->active_rrq_list);
+	phba->hba_flag |= HBA_RRQ_ACTIVE;
+	if (empty)
+		lpfc_worker_wake_up(phba);
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	return 0;
+out:
+	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
+			" DID:0x%x Send:%d\n",
+			xritag, rxid, ndlp->nlp_DID, send_rrq);
+	return -EINVAL;
 }
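The point of folding __lpfc_set_rrq_active into its caller is lock discipline: mempool_alloc(..., GFP_KERNEL) may sleep, so the rework drops the hbalock before allocating and retakes it only for the list insertion. The same shape in a reduced, self-contained form, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

struct rrq { int xritag; struct rrq *next; };
static struct rrq *active_list;

static int set_rrq_active(int xritag)
{
	struct rrq *rrq;

	pthread_mutex_lock(&lock);
	/* ...validations that need the lock would run here... */
	pthread_mutex_unlock(&lock);

	rrq = malloc(sizeof(*rrq));   /* sleeping allocation, lock NOT held */
	if (!rrq)
		return -1;
	rrq->xritag = xritag;

	pthread_mutex_lock(&lock);    /* retake only for the list update */
	rrq->next = active_list;
	active_list = rrq;
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	printf("%d\n", set_rrq_active(42));
	return 0;
}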
 
 /**
@@ -5596,6 +5578,8 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
 	for (i = 0; i < count; i++)
 		phba->sli4_hba.rpi_ids[i] = base + i;
 
+	lpfc_sli4_node_prep(phba);
+
 	/* VPIs. */
 	count = phba->sli4_hba.max_cfg_param.max_vpi;
 	base = phba->sli4_hba.max_cfg_param.vpi_base;
@@ -7555,6 +7539,8 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
 
 	sgl = (struct sli4_sge *)sglq->sgl;
 	icmd = &piocbq->iocb;
+	if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
+		return sglq->sli4_xritag;
 	if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
 		numBdes = icmd->un.genreq64.bdl.bdeSize /
 				sizeof(struct ulp_bde64);
@@ -7756,6 +7742,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
 			if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
 				*pcmd == ELS_CMD_SCR ||
+				*pcmd == ELS_CMD_FDISC ||
 				*pcmd == ELS_CMD_PLOGI)) {
 				bf_set(els_req64_sp, &wqe->els_req, 1);
 				bf_set(els_req64_sid, &wqe->els_req,
@@ -7763,7 +7750,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 				bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
 				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
 					phba->vpi_ids[phba->pport->vpi]);
-			} else if (iocbq->context1) {
+			} else if (pcmd && iocbq->context1) {
 				bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
 				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
 					phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
@@ -7830,12 +7817,16 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
 		/* Always open the exchange */
 		bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
-		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
 		bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
 		bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
 			LPFC_WQE_LENLOC_WORD4);
 		bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
 		bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
+		if (iocbq->iocb_flag & LPFC_IO_DIF) {
+			iocbq->iocb_flag &= ~LPFC_IO_DIF;
+			bf_set(wqe_dif, &wqe->generic.wqe_com, 1);
+		}
+		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
 		break;
 	case CMD_FCP_IREAD64_CR:
 		/* word3 iocb=iotag wqe=payload_offset_len */
@@ -7849,12 +7840,16 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
 		/* Always open the exchange */
 		bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
-		bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
 		bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
 		bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
 			LPFC_WQE_LENLOC_WORD4);
 		bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
 		bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
+		if (iocbq->iocb_flag & LPFC_IO_DIF) {
+			iocbq->iocb_flag &= ~LPFC_IO_DIF;
+			bf_set(wqe_dif, &wqe->generic.wqe_com, 1);
+		}
+		bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
 		break;
 	case CMD_FCP_ICMND64_CR:
 		/* word3 iocb=IO_TAG wqe=reserved */
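Both the IREAD and IWRITE paths now translate the software-side LPFC_IO_DIF marker (set by the BlockGuard prep routine) into the DIF bit of the work-queue entry, consuming the flag as they go. The test-clear-set shape in isolation; WQE_DIF_BIT is a placeholder for the real wqe_dif field:

#include <stdio.h>

#define LPFC_IO_DIF 0x400 /* from lpfc_sli.h in this patch */
#define WQE_DIF_BIT 0x1   /* placeholder for the wqe_dif field */

int main(void)
{
	unsigned iocb_flag = LPFC_IO_DIF;
	unsigned wqe_word = 0;

	if (iocb_flag & LPFC_IO_DIF) {
		iocb_flag &= ~LPFC_IO_DIF; /* one-shot: flag is consumed */
		wqe_word |= WQE_DIF_BIT;   /* stands in for bf_set(wqe_dif, ...) */
	}
	printf("flag=0x%x wqe=0x%x\n", iocb_flag, wqe_word);
	return 0;
}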
@@ -7982,6 +7977,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		xritag = 0;
 		break;
 	case CMD_XMIT_BLS_RSP64_CX:
+		ndlp = (struct lpfc_nodelist *)iocbq->context1;
 		/* As BLS ABTS RSP WQE is very different from other WQEs,
 		 * we re-construct this WQE here based on information in
 		 * iocbq from scratch.
@@ -8008,8 +8004,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
 		}
 		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
 		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
+
+		/* Use CT=VPI */
+		bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
+			ndlp->nlp_DID);
+		bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
+			iocbq->iocb.ulpContext);
+		bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
 		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
-			iocbq->iocb.ulpContext);
+			phba->vpi_ids[phba->pport->vpi]);
 		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
 		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
 			LPFC_WQE_LENLOC_NONE);
@@ -8073,8 +8076,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
 
 	if (piocb->sli4_xritag == NO_XRI) {
 		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
-			piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
-			piocb->iocb.ulpCommand == CMD_XMIT_BLS_RSP64_CX)
+			piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
 			sglq = NULL;
 		else {
 			if (pring->txq_cnt) {
@@ -8384,10 +8386,13 @@ lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
 {
 	struct lpfc_vport *vport;
 
-	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
 				"3115 Node Context not found, driver "
 				"ignoring abts err event\n");
+		return;
+	}
+
 	vport = ndlp->vport;
 	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
 			"3116 Port generated FCP XRI ABORT event on "
@@ -10653,12 +10658,14 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
 		struct lpfc_wcqe_complete *wcqe)
 {
 	unsigned long iflags;
+	uint32_t status;
 	size_t offset = offsetof(struct lpfc_iocbq, iocb);
 
 	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
 			sizeof(struct lpfc_iocbq) - offset);
 	/* Map WCQE parameters into irspiocb parameters */
-	pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
+	status = bf_get(lpfc_wcqe_c_status, wcqe);
+	pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
 	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
 		if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
 			pIocbIn->iocb.un.fcpi.fcpi_parm =
@@ -10671,6 +10678,44 @@ lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
 		pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
 	}
 
+	/* Convert BG errors for completion status */
+	if (status == CQE_STATUS_DI_ERROR) {
+		pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
+
+		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
+			pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
+		else
+			pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
+
+		pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
+		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
+			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+				BGS_GUARD_ERR_MASK;
+		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
+			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+				BGS_APPTAG_ERR_MASK;
+		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
+			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+				BGS_REFTAG_ERR_MASK;
+
+		/* Check to see if there was any good data before the error */
+		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
+			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+				BGS_HI_WATER_MARK_PRESENT_MASK;
+			pIocbIn->iocb.unsli3.sli3_bg.bghm =
+				wcqe->total_data_placed;
+		}
+
+		/*
+		 * Set ALL the error bits to indicate we don't know what
+		 * type of error it is.
+		 */
+		if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
+			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
+				(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
+				BGS_GUARD_ERR_MASK);
+	}
+
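Downstream, lpfc_parse_bg_err() keys off these same bgstat masks, so a completion carrying CQE_STATUS_DI_ERROR ends up indistinguishable from an SLI-3 BlockGuard error. A sketch of decoding the composite status; the mask values here are placeholders, the real ones come from the lpfc headers:

#include <stdio.h>
#include <stdint.h>

/* Placeholder bit assignments for illustration only. */
#define BGS_GUARD_ERR_MASK  0x1
#define BGS_APPTAG_ERR_MASK 0x2
#define BGS_REFTAG_ERR_MASK 0x4

static void decode_bgstat(uint32_t bgstat)
{
	if (bgstat & BGS_GUARD_ERR_MASK)
		puts("guard tag miscompare");
	if (bgstat & BGS_APPTAG_ERR_MASK)
		puts("app tag miscompare");
	if (bgstat & BGS_REFTAG_ERR_MASK)
		puts("ref tag miscompare");
}

int main(void)
{
	/* The "unknown DI error" fallback sets all three bits at once. */
	decode_bgstat(BGS_GUARD_ERR_MASK | BGS_APPTAG_ERR_MASK |
		      BGS_REFTAG_ERR_MASK);
	return 0;
}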
 	/* Pick up HBA exchange busy condition */
 	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
 		spin_lock_irqsave(&phba->hbalock, iflags);
@@ -14042,6 +14087,13 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
 {
 	if (cmd_iocbq)
 		lpfc_sli_release_iocbq(phba, cmd_iocbq);
+
+	/* Failure means BLS ABORT RSP did not get delivered to remote node */
+	if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+			"3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
+			rsp_iocbq->iocb.ulpStatus,
+			rsp_iocbq->iocb.un.ulpWord[4]);
 }
 
 /**
@@ -14748,7 +14800,8 @@ lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
  * provided rpi via a bitmask.
  **/
 int
-lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
+lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
+	void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
 {
 	LPFC_MBOXQ_t *mboxq;
 	struct lpfc_hba *phba = ndlp->phba;
@@ -14761,6 +14814,13 @@ lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
 
 	/* Post all rpi memory regions to the port. */
 	lpfc_resume_rpi(mboxq, ndlp);
+	if (cmpl) {
+		mboxq->mbox_cmpl = cmpl;
+		mboxq->context1 = arg;
+		mboxq->context2 = ndlp;
+	} else
+		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	mboxq->vport = ndlp->vport;
 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 	if (rc == MBX_NOT_FINISHED) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 29c13b63e32..3290b8e7ab6 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -69,6 +69,7 @@ struct lpfc_iocbq {
 #define LPFC_USE_FCPWQIDX	0x80    /* Submit to specified FCPWQ index */
 #define DSS_SECURITY_OP		0x100	/* security IO */
 #define LPFC_IO_ON_Q		0x200	/* The IO is still on the TXCMPLQ */
+#define LPFC_IO_DIF		0x400	/* T10 DIF IO */
 
 #define LPFC_FIP_ELS_ID_MASK	0xc000	/* ELS_ID range 0-3, non-shifted mask */
 #define LPFC_FIP_ELS_ID_SHIFT	14
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 3f266e2c54e..c19d139618b 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -633,7 +633,8 @@ void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
 void lpfc_sli4_remove_rpis(struct lpfc_hba *);
 void lpfc_sli4_async_event_proc(struct lpfc_hba *);
 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
-int lpfc_sli4_resume_rpi(struct lpfc_nodelist *);
+int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
+	void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
 void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index dd044d01a07..f2a2602e5c3 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.28"
+#define LPFC_DRIVER_VERSION "8.3.29"
 #define LPFC_DRIVER_NAME		"lpfc"
 #define LPFC_SP_DRIVER_HANDLER_NAME	"lpfc:sp"
 #define LPFC_FP_DRIVER_HANDLER_NAME	"lpfc:fp"
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index a78036f5e1a..82fa6ce481f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -2575,6 +2575,11 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
 
 	ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
 	    GFP_KERNEL, ioc->chain_pages);
+	if (!ioc->chain_lookup) {
+		printk(MPT2SAS_ERR_FMT "chain_lookup: get_free_pages failed, "
+		    "sz(%d)\n", ioc->name, (int)sz);
+		goto out;
+	}
 	ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
 	    ioc->request_sz, 16, 0);
 	if (!ioc->chain_dma_pool) {
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 193e33e28e4..d953a57e779 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -5744,7 +5744,7 @@ _scsih_sas_enclosure_dev_status_change_event(struct MPT2SAS_ADAPTER *ioc,
 }
 
 /**
- * _scsih_sas_broadcast_primative_event - handle broadcast events
+ * _scsih_sas_broadcast_primitive_event - handle broadcast events
  * @ioc: per adapter object
  * @fw_event: The fw_event_work object
  * Context: user.
@@ -5752,7 +5752,7 @@ _scsih_sas_enclosure_dev_status_change_event(struct MPT2SAS_ADAPTER *ioc,
  * Return nothing.
  */
 static void
-_scsih_sas_broadcast_primative_event(struct MPT2SAS_ADAPTER *ioc,
+_scsih_sas_broadcast_primitive_event(struct MPT2SAS_ADAPTER *ioc,
 	struct fw_event_work *fw_event)
 {
 	struct scsi_cmnd *scmd;
@@ -7263,7 +7263,7 @@ _firmware_event_work(struct work_struct *work)
 		    fw_event);
 		break;
 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
-		_scsih_sas_broadcast_primative_event(ioc,
+		_scsih_sas_broadcast_primitive_event(ioc,
 		    fw_event);
 		break;
 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 6f589195746..cc59dff3810 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -60,7 +60,6 @@ static struct scsi_host_template mvs_sht = {
 	.queuecommand		= sas_queuecommand,
 	.target_alloc		= sas_target_alloc,
 	.slave_configure	= sas_slave_configure,
-	.slave_destroy		= sas_slave_destroy,
 	.scan_finished		= mvs_scan_finished,
 	.scan_start		= mvs_scan_start,
 	.change_queue_depth	= sas_change_queue_depth,
@@ -74,7 +73,6 @@ static struct scsi_host_template mvs_sht = {
 	.use_clustering		= ENABLE_CLUSTERING,
 	.eh_device_reset_handler = sas_eh_device_reset_handler,
 	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
-	.slave_alloc		= sas_slave_alloc,
 	.target_destroy		= sas_target_destroy,
 	.ioctl			= sas_ioctl,
 	.shost_attrs		= mvst_host_attrs,
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 01ab9c4d346..fd3b2839843 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -308,7 +308,7 @@ int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
 	if (mvs_prv->scan_finished == 0)
 		return 0;
 
-	scsi_flush_work(shost);
+	sas_drain_work(sha);
 	return 1;
 }
 
@@ -893,9 +893,6 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
 
 	mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info;
 
-	if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL))
-		spin_unlock_irq(dev->sata_dev.ap->lock);
-
 	spin_lock_irqsave(&mvi->lock, flags);
 	rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass);
 	if (rc)
@@ -906,9 +903,6 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
 				(MVS_CHIP_SLOT_SZ - 1));
 	spin_unlock_irqrestore(&mvi->lock, flags);
 
-	if ((dev->dev_type == SATA_DEV) && (dev->sata_dev.ap != NULL))
-		spin_lock_irq(dev->sata_dev.ap->lock);
-
 	return rc;
 }
 
@@ -1480,10 +1474,11 @@ static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
 static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
 {
 	int rc;
-	struct sas_phy *phy = sas_find_local_phy(dev);
+	struct sas_phy *phy = sas_get_local_phy(dev);
 	int reset_type = (dev->dev_type == SATA_DEV ||
 			(dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
 	rc = sas_phy_reset(phy, reset_type);
+	sas_put_local_phy(phy);
 	msleep(2000);
 	return rc;
 }
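
The hunk above swaps sas_find_local_phy() for sas_get_local_phy(), which returns a referenced phy, so the new sas_put_local_phy() call is needed once the reset is done. A toy sketch of the get/use/put discipline follows; struct phy and the helpers here are hypothetical stand-ins, not the libsas API.

    #include <stdio.h>

    /* Hypothetical refcounted object standing in for struct sas_phy;
     * phy_get()/phy_put() mirror the sas_get_local_phy()/
     * sas_put_local_phy() pairing. */
    struct phy { int refcount; };

    static struct phy local_phy = { .refcount = 1 };

    static struct phy *phy_get(void) { local_phy.refcount++; return &local_phy; }
    static void phy_put(struct phy *p) { p->refcount--; }
    static int phy_reset(struct phy *p) { (void)p; return 0; /* stub */ }

    int main(void)
    {
    	struct phy *p = phy_get();	/* take a reference ... */
    	int rc = phy_reset(p);		/* ... use the object ... */
    	phy_put(p);			/* ... and always drop it, even on error */
    	printf("rc=%d refcount=%d\n", rc, local_phy.refcount);
    	return rc;
    }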
diff --git a/drivers/scsi/pm8001/pm8001_chips.h b/drivers/scsi/pm8001/pm8001_chips.h
index 4efa4d0950e..9241c782603 100644
--- a/drivers/scsi/pm8001/pm8001_chips.h
+++ b/drivers/scsi/pm8001/pm8001_chips.h
@@ -46,9 +46,9 @@ static inline u32 pm8001_read_32(void *virt_addr)
 	return *((u32 *)virt_addr);
 }
 
-static inline void pm8001_write_32(void *addr, u32 offset, u32 val)
+static inline void pm8001_write_32(void *addr, u32 offset, __le32 val)
 {
-	*((u32 *)(addr + offset)) = val;
+	*((__le32 *)(addr + offset)) = val;
 }
 
 static inline u32 pm8001_cr32(struct pm8001_hba_info *pm8001_ha, u32 bar,
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index e12c4f632a6..3619f6eeeed 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -338,26 +338,25 @@ update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, int number)
 }
 
 /**
- * bar4_shift - function is called to shift BAR base address
- * @pm8001_ha : our hba card information
+ * pm8001_bar4_shift - function is called to shift BAR base address
+ * @pm8001_ha : our hba card infomation
  * @shiftValue : shifting value in memory bar.
  */
-static int bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
+int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
 {
 	u32 regVal;
-	u32 max_wait_count;
+	unsigned long start;
 
 	/* program the inbound AXI translation Lower Address */
 	pm8001_cw32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW, shiftValue);
 
 	/* confirm the setting is written */
-	max_wait_count = 1 * 1000 * 1000;	/* 1 sec */
+	start = jiffies + HZ; /* 1 sec */
 	do {
-		udelay(1);
 		regVal = pm8001_cr32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW);
-	} while ((regVal != shiftValue) && (--max_wait_count));
+	} while ((regVal != shiftValue) && time_before(jiffies, start));
 
-	if (!max_wait_count) {
+	if (regVal != shiftValue) {
 		PM8001_INIT_DBG(pm8001_ha,
 			pm8001_printk("TIMEOUT:SPC_IBW_AXI_TRANSLATION_LOW"
 			" = 0x%x\n", regVal));
@@ -375,6 +374,7 @@ static void __devinit
 mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
 {
 	u32 value, offset, i;
+	unsigned long flags;
 
 #define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000
 #define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000
@@ -388,16 +388,23 @@ mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
  * Using shifted destination address 0x3_0000:0x1074 + 0x4000*N (N=0:3)
  * Using shifted destination address 0x4_0000:0x1074 + 0x4000*(N-4) (N=4:7)
  */
-	if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR))
+	spin_lock_irqsave(&pm8001_ha->lock, flags);
+	if (-1 == pm8001_bar4_shift(pm8001_ha,
+			SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR)) {
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		return;
+	}
 
 	for (i = 0; i < 4; i++) {
 		offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i;
 		pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
 	}
 	/* shift membase 3 for SAS2_SETTINGS_LOCAL_PHY 4 - 7 */
-	if (-1 == bar4_shift(pm8001_ha, SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR))
+	if (-1 == pm8001_bar4_shift(pm8001_ha,
+			SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR)) {
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		return;
+	}
 	for (i = 4; i < 8; i++) {
 		offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
 		pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
@@ -421,7 +428,8 @@ mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
 	pm8001_cw32(pm8001_ha, 2, 0xd8, 0x8000C016);
 
 	/*set the shifted destination address to 0x0 to avoid error operation */
-	bar4_shift(pm8001_ha, 0x0);
+	pm8001_bar4_shift(pm8001_ha, 0x0);
+	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 	return;
 }
 
@@ -437,6 +445,7 @@ mpi_set_open_retry_interval_reg(struct pm8001_hba_info *pm8001_ha,
 	u32 offset;
 	u32 value;
 	u32 i;
+	unsigned long flags;
 
 #define OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR 0x00030000
 #define OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR 0x00040000
@@ -445,24 +454,30 @@ mpi_set_open_retry_interval_reg(struct pm8001_hba_info *pm8001_ha,
 #define OPEN_RETRY_INTERVAL_REG_MASK 0x0000FFFF
 
 	value = interval & OPEN_RETRY_INTERVAL_REG_MASK;
+	spin_lock_irqsave(&pm8001_ha->lock, flags);
 	/* shift bar and set the OPEN_REJECT(RETRY) interval time of PHY 0 -3.*/
-	if (-1 == bar4_shift(pm8001_ha,
-			OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR))
+	if (-1 == pm8001_bar4_shift(pm8001_ha,
+			OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR)) {
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		return;
+	}
 	for (i = 0; i < 4; i++) {
 		offset = OPEN_RETRY_INTERVAL_PHY_0_3_OFFSET + 0x4000 * i;
 		pm8001_cw32(pm8001_ha, 2, offset, value);
 	}
 
-	if (-1 == bar4_shift(pm8001_ha,
-			OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR))
+	if (-1 == pm8001_bar4_shift(pm8001_ha,
+			OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR)) {
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		return;
+	}
 	for (i = 4; i < 8; i++) {
 		offset = OPEN_RETRY_INTERVAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
 		pm8001_cw32(pm8001_ha, 2, offset, value);
 	}
 	/*set the shifted destination address to 0x0 to avoid error operation */
-	bar4_shift(pm8001_ha, 0x0);
+	pm8001_bar4_shift(pm8001_ha, 0x0);
+	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 	return;
 }
 
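
Throughout these hunks, every retargeting of the shared BAR4 window is now bracketed by pm8001_ha->lock, and the window is restored to 0 before the lock is dropped, so concurrent paths never observe a stale translation. A toy sketch of that discipline, with a pthread mutex standing in for the spinlock and window_base for the translation register (both hypothetical):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned window_base;	/* hypothetical shared window register */

    static int shift_window(unsigned base) { window_base = base; return 0; }

    static int poke_through_window(unsigned base, unsigned off, unsigned val)
    {
    	int rc = 0;

    	pthread_mutex_lock(&lock);
    	if (shift_window(base) < 0) {	/* fail, but never hold the lock */
    		rc = -1;
    		goto out;
    	}
    	printf("write 0x%x at 0x%x+0x%x\n", val, window_base, off);
    out:
    	shift_window(0);		/* restore the window before unlocking */
    	pthread_mutex_unlock(&lock);
    	return rc;
    }

    int main(void) { return poke_through_window(0x30000, 0x1074, 0x80001501); }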
@@ -607,7 +622,8 @@ static int __devinit pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
 	update_inbnd_queue_table(pm8001_ha, 0);
 	update_outbnd_queue_table(pm8001_ha, 0);
 	mpi_set_phys_g3_with_ssc(pm8001_ha, 0);
-	mpi_set_open_retry_interval_reg(pm8001_ha, 7);
+	/* 7->130ms, 34->500ms, 119->1.5s */
+	mpi_set_open_retry_interval_reg(pm8001_ha, 119);
 	/* notify firmware update finished and check initialization status */
 	if (0 == mpi_init_check(pm8001_ha)) {
 		PM8001_INIT_DBG(pm8001_ha,
@@ -688,8 +704,11 @@ static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
 		PM8001_INIT_DBG(pm8001_ha,
 			pm8001_printk("Firmware is ready for reset .\n"));
 	} else {
-		/* Trigger NMI twice via RB6 */
-		if (-1 == bar4_shift(pm8001_ha, RB6_ACCESS_REG)) {
+		unsigned long flags;
+		/* Trigger NMI twice via RB6 */
+		spin_lock_irqsave(&pm8001_ha->lock, flags);
+		if (-1 == pm8001_bar4_shift(pm8001_ha, RB6_ACCESS_REG)) {
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 			PM8001_FAIL_DBG(pm8001_ha,
 				pm8001_printk("Shift Bar4 to 0x%x failed\n",
 					RB6_ACCESS_REG));
@@ -715,8 +734,10 @@ static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
 			PM8001_FAIL_DBG(pm8001_ha,
 				pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
 				pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)));
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 			return -1;
 		}
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 	}
 	return 0;
 }
@@ -733,6 +754,7 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
 	u32 regVal, toggleVal;
 	u32 max_wait_count;
 	u32 regVal1, regVal2, regVal3;
+	unsigned long flags;
 
 	/* step1: Check FW is ready for soft reset */
 	if (soft_reset_ready_check(pm8001_ha) != 0) {
@@ -743,7 +765,9 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
 	/* step 2: clear NMI status register on AAP1 and IOP, write the same
 	value to clear */
 	/* map 0x60000 to BAR4(0x20), BAR2(win) */
-	if (-1 == bar4_shift(pm8001_ha, MBIC_AAP1_ADDR_BASE)) {
+	spin_lock_irqsave(&pm8001_ha->lock, flags);
+	if (-1 == pm8001_bar4_shift(pm8001_ha, MBIC_AAP1_ADDR_BASE)) {
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		PM8001_FAIL_DBG(pm8001_ha,
 			pm8001_printk("Shift Bar4 to 0x%x failed\n",
 				MBIC_AAP1_ADDR_BASE));
@@ -754,7 +778,8 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
754 pm8001_printk("MBIC - NMI Enable VPE0 (IOP)= 0x%x\n", regVal)); 778 pm8001_printk("MBIC - NMI Enable VPE0 (IOP)= 0x%x\n", regVal));
755 pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP, 0x0); 779 pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP, 0x0);
756 /* map 0x70000 to BAR4(0x20), BAR2(win) */ 780 /* map 0x70000 to BAR4(0x20), BAR2(win) */
757 if (-1 == bar4_shift(pm8001_ha, MBIC_IOP_ADDR_BASE)) { 781 if (-1 == pm8001_bar4_shift(pm8001_ha, MBIC_IOP_ADDR_BASE)) {
782 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
758 PM8001_FAIL_DBG(pm8001_ha, 783 PM8001_FAIL_DBG(pm8001_ha,
759 pm8001_printk("Shift Bar4 to 0x%x failed\n", 784 pm8001_printk("Shift Bar4 to 0x%x failed\n",
760 MBIC_IOP_ADDR_BASE)); 785 MBIC_IOP_ADDR_BASE));
@@ -796,7 +821,8 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
 
 	/* read required registers for confirmming */
 	/* map 0x0700000 to BAR4(0x20), BAR2(win) */
-	if (-1 == bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
+	if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		PM8001_FAIL_DBG(pm8001_ha,
 			pm8001_printk("Shift Bar4 to 0x%x failed\n",
 				GSM_ADDR_BASE));
@@ -862,7 +888,8 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
 	/* step 5: delay 10 usec */
 	udelay(10);
 	/* step 5-b: set GPIO-0 output control to tristate anyway */
-	if (-1 == bar4_shift(pm8001_ha, GPIO_ADDR_BASE)) {
+	if (-1 == pm8001_bar4_shift(pm8001_ha, GPIO_ADDR_BASE)) {
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		PM8001_INIT_DBG(pm8001_ha,
 			pm8001_printk("Shift Bar4 to 0x%x failed\n",
 				GPIO_ADDR_BASE));
@@ -878,7 +905,8 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
 
 	/* Step 6: Reset the IOP and AAP1 */
 	/* map 0x00000 to BAR4(0x20), BAR2(win) */
-	if (-1 == bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
+	if (-1 == pm8001_bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		PM8001_FAIL_DBG(pm8001_ha,
 			pm8001_printk("SPC Shift Bar4 to 0x%x failed\n",
 				SPC_TOP_LEVEL_ADDR_BASE));
@@ -915,7 +943,8 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
 
 	/* step 11: reads and sets the GSM Configuration and Reset Register */
 	/* map 0x0700000 to BAR4(0x20), BAR2(win) */
-	if (-1 == bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
+	if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		PM8001_FAIL_DBG(pm8001_ha,
 			pm8001_printk("SPC Shift Bar4 to 0x%x failed\n",
 				GSM_ADDR_BASE));
@@ -968,7 +997,8 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
 
 	/* step 13: bring the IOP and AAP1 out of reset */
 	/* map 0x00000 to BAR4(0x20), BAR2(win) */
-	if (-1 == bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
+	if (-1 == pm8001_bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
+		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 		PM8001_FAIL_DBG(pm8001_ha,
 			pm8001_printk("Shift Bar4 to 0x%x failed\n",
 				SPC_TOP_LEVEL_ADDR_BASE));
@@ -1010,6 +1040,7 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
1010 pm8001_printk("SCRATCH_PAD3 value = 0x%x\n", 1040 pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
1011 pm8001_cr32(pm8001_ha, 0, 1041 pm8001_cr32(pm8001_ha, 0,
1012 MSGU_SCRATCH_PAD_3))); 1042 MSGU_SCRATCH_PAD_3)));
1043 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
1013 return -1; 1044 return -1;
1014 } 1045 }
1015 1046
@@ -1039,9 +1070,12 @@ pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
1039 pm8001_printk("SCRATCH_PAD3 value = 0x%x\n", 1070 pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
1040 pm8001_cr32(pm8001_ha, 0, 1071 pm8001_cr32(pm8001_ha, 0,
1041 MSGU_SCRATCH_PAD_3))); 1072 MSGU_SCRATCH_PAD_3)));
1073 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
1042 return -1; 1074 return -1;
1043 } 1075 }
1044 } 1076 }
1077 pm8001_bar4_shift(pm8001_ha, 0);
1078 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
1045 1079
1046 PM8001_INIT_DBG(pm8001_ha, 1080 PM8001_INIT_DBG(pm8001_ha,
1047 pm8001_printk("SPC soft reset Complete\n")); 1081 pm8001_printk("SPC soft reset Complete\n"));
@@ -1157,8 +1191,8 @@ pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha,
 	msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE;
 	msi_index += MSIX_TABLE_BASE;
 	pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_DISABLE);
-
 }
+
 /**
  * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
  * @pm8001_ha: our hba card information
@@ -1212,7 +1246,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
 	consumer_index = pm8001_read_32(circularQ->ci_virt);
 	circularQ->consumer_index = cpu_to_le32(consumer_index);
 	if (((circularQ->producer_idx + bcCount) % 256) ==
-		circularQ->consumer_index) {
+		le32_to_cpu(circularQ->consumer_index)) {
 		*messagePtr = NULL;
 		return -1;
 	}
@@ -1321,7 +1355,8 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
 	u32 header_tmp;
 	do {
 		/* If there are not-yet-delivered messages ... */
-		if (circularQ->producer_index != circularQ->consumer_idx) {
+		if (le32_to_cpu(circularQ->producer_index)
+			!= circularQ->consumer_idx) {
 			/*Get the pointer to the circular queue buffer element*/
 			msgHeader = (struct mpi_msg_hdr *)
 				(circularQ->base_virt +
@@ -1329,14 +1364,14 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
 			/* read header */
 			header_tmp = pm8001_read_32(msgHeader);
 			msgHeader_tmp = cpu_to_le32(header_tmp);
-			if (0 != (msgHeader_tmp & 0x80000000)) {
+			if (0 != (le32_to_cpu(msgHeader_tmp) & 0x80000000)) {
 				if (OPC_OUB_SKIP_ENTRY !=
-					(msgHeader_tmp & 0xfff)) {
+					(le32_to_cpu(msgHeader_tmp) & 0xfff)) {
 					*messagePtr1 =
 						((u8 *)msgHeader) +
 						sizeof(struct mpi_msg_hdr);
-					*pBC = (u8)((msgHeader_tmp >> 24) &
-						0x1f);
+					*pBC = (u8)((le32_to_cpu(msgHeader_tmp)
+						>> 24) & 0x1f);
 					PM8001_IO_DBG(pm8001_ha,
 						pm8001_printk(": CI=%d PI=%d "
 						"msgHeader=%x\n",
@@ -1347,8 +1382,8 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
 				} else {
 					circularQ->consumer_idx =
 						(circularQ->consumer_idx +
-						((msgHeader_tmp >> 24) & 0x1f))
-						% 256;
+						((le32_to_cpu(msgHeader_tmp)
+						>> 24) & 0x1f)) % 256;
 					msgHeader_tmp = 0;
 					pm8001_write_32(msgHeader, 0, 0);
 					/* update the CI of outbound queue */
@@ -1360,7 +1395,8 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
 			} else {
 				circularQ->consumer_idx =
 					(circularQ->consumer_idx +
-					((msgHeader_tmp >> 24) & 0x1f)) % 256;
+					((le32_to_cpu(msgHeader_tmp) >> 24) &
+					0x1f)) % 256;
 				msgHeader_tmp = 0;
 				pm8001_write_32(msgHeader, 0, 0);
 				/* update the CI of outbound queue */
@@ -1376,7 +1412,8 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
 			producer_index = pm8001_read_32(pi_virt);
 			circularQ->producer_index = cpu_to_le32(producer_index);
 		}
-	} while (circularQ->producer_index != circularQ->consumer_idx);
+	} while (le32_to_cpu(circularQ->producer_index) !=
+		circularQ->consumer_idx);
 	/* while we don't have any more not-yet-delivered message */
 	/* report empty */
 	return MPI_IO_STATUS_BUSY;
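
The queue hunks above make the index bookkeeping endian-clean: values mirrored from device memory stay in __le32 form and are converted with le32_to_cpu() at each comparison or arithmetic step. A userspace sketch of the same convention, using le32toh()/htole32() from glibc's <endian.h> as stand-ins for the kernel helpers:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    /* The producer index mirrors a little-endian device register, so
     * it is stored as-is and converted only at the point of use. */
    struct oq {
    	uint32_t producer_index_le;	/* little-endian, as the device wrote it */
    	uint32_t consumer_idx;		/* CPU order, driver-private */
    };

    static int queue_has_work(const struct oq *q)
    {
    	return le32toh(q->producer_index_le) != q->consumer_idx;
    }

    int main(void)
    {
    	struct oq q = { .producer_index_le = htole32(5), .consumer_idx = 3 };
    	printf("has work: %d\n", queue_has_work(&q));
    	return 0;
    }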
@@ -1388,24 +1425,191 @@ static void pm8001_work_fn(struct work_struct *work)
 	struct pm8001_device *pm8001_dev;
 	struct domain_device *dev;
 
+	/*
+	 * So far, all users of this stash an associated structure here.
+	 * If we get here, and this pointer is null, then the action
+	 * was cancelled. This nullification happens when the device
+	 * goes away.
+	 */
+	pm8001_dev = pw->data; /* Most stash device structure */
+	if ((pm8001_dev == NULL)
+	 || ((pw->handler != IO_XFER_ERROR_BREAK)
+	  && (pm8001_dev->dev_type == NO_DEVICE))) {
+		kfree(pw);
+		return;
+	}
+
 	switch (pw->handler) {
+	case IO_XFER_ERROR_BREAK:
+	{	/* This one stashes the sas_task instead */
+		struct sas_task *t = (struct sas_task *)pm8001_dev;
+		u32 tag;
+		struct pm8001_ccb_info *ccb;
+		struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
+		unsigned long flags, flags1;
+		struct task_status_struct *ts;
+		int i;
+
+		if (pm8001_query_task(t) == TMF_RESP_FUNC_SUCC)
+			break; /* Task still on lu */
+		spin_lock_irqsave(&pm8001_ha->lock, flags);
+
+		spin_lock_irqsave(&t->task_state_lock, flags1);
+		if (unlikely((t->task_state_flags & SAS_TASK_STATE_DONE))) {
+			spin_unlock_irqrestore(&t->task_state_lock, flags1);
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			break; /* Task got completed by another */
+		}
+		spin_unlock_irqrestore(&t->task_state_lock, flags1);
+
+		/* Search for a possible ccb that matches the task */
+		for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
+			ccb = &pm8001_ha->ccb_info[i];
+			tag = ccb->ccb_tag;
+			if ((tag != 0xFFFFFFFF) && (ccb->task == t))
+				break;
+		}
+		if (!ccb) {
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			break; /* Task got freed by another */
+		}
+		ts = &t->task_status;
+		ts->resp = SAS_TASK_COMPLETE;
+		/* Force the midlayer to retry */
+		ts->stat = SAS_QUEUE_FULL;
+		pm8001_dev = ccb->device;
+		if (pm8001_dev)
+			pm8001_dev->running_req--;
+		spin_lock_irqsave(&t->task_state_lock, flags1);
+		t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+		t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+		t->task_state_flags |= SAS_TASK_STATE_DONE;
+		if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
+			spin_unlock_irqrestore(&t->task_state_lock, flags1);
+			PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p"
+				" done with event 0x%x resp 0x%x stat 0x%x but"
+				" aborted by upper layer!\n",
+				t, pw->handler, ts->resp, ts->stat));
+			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+		} else {
+			spin_unlock_irqrestore(&t->task_state_lock, flags1);
+			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
+			mb();/* in order to force CPU ordering */
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			t->task_done(t);
+		}
+	}	break;
+	case IO_XFER_OPEN_RETRY_TIMEOUT:
+	{	/* This one stashes the sas_task instead */
+		struct sas_task *t = (struct sas_task *)pm8001_dev;
+		u32 tag;
+		struct pm8001_ccb_info *ccb;
+		struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
+		unsigned long flags, flags1;
+		int i, ret = 0;
+
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
+
+		ret = pm8001_query_task(t);
+
+		PM8001_IO_DBG(pm8001_ha,
+			switch (ret) {
+			case TMF_RESP_FUNC_SUCC:
+				pm8001_printk("...Task on lu\n");
+				break;
+
+			case TMF_RESP_FUNC_COMPLETE:
+				pm8001_printk("...Task NOT on lu\n");
+				break;
+
+			default:
+				pm8001_printk("...query task failed!!!\n");
+				break;
+			});
+
+		spin_lock_irqsave(&pm8001_ha->lock, flags);
+
+		spin_lock_irqsave(&t->task_state_lock, flags1);
+
+		if (unlikely((t->task_state_flags & SAS_TASK_STATE_DONE))) {
+			spin_unlock_irqrestore(&t->task_state_lock, flags1);
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			if (ret == TMF_RESP_FUNC_SUCC) /* task on lu */
+				(void)pm8001_abort_task(t);
+			break; /* Task got completed by another */
+		}
+
+		spin_unlock_irqrestore(&t->task_state_lock, flags1);
+
+		/* Search for a possible ccb that matches the task */
+		for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
+			ccb = &pm8001_ha->ccb_info[i];
+			tag = ccb->ccb_tag;
+			if ((tag != 0xFFFFFFFF) && (ccb->task == t))
+				break;
+		}
+		if (!ccb) {
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			if (ret == TMF_RESP_FUNC_SUCC) /* task on lu */
+				(void)pm8001_abort_task(t);
+			break; /* Task got freed by another */
+		}
+
+		pm8001_dev = ccb->device;
+		dev = pm8001_dev->sas_device;
+
+		switch (ret) {
+		case TMF_RESP_FUNC_SUCC: /* task on lu */
+			ccb->open_retry = 1; /* Snub completion */
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			ret = pm8001_abort_task(t);
+			ccb->open_retry = 0;
+			switch (ret) {
+			case TMF_RESP_FUNC_SUCC:
+			case TMF_RESP_FUNC_COMPLETE:
+				break;
+			default: /* device misbehavior */
+				ret = TMF_RESP_FUNC_FAILED;
+				PM8001_IO_DBG(pm8001_ha,
+					pm8001_printk("...Reset phy\n"));
+				pm8001_I_T_nexus_reset(dev);
+				break;
+			}
+			break;
+
+		case TMF_RESP_FUNC_COMPLETE: /* task not on lu */
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			/* Do we need to abort the task locally? */
+			break;
+
+		default: /* device misbehavior */
+			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			ret = TMF_RESP_FUNC_FAILED;
+			PM8001_IO_DBG(pm8001_ha,
+				pm8001_printk("...Reset phy\n"));
+			pm8001_I_T_nexus_reset(dev);
+		}
+
+		if (ret == TMF_RESP_FUNC_FAILED)
+			t = NULL;
+		pm8001_open_reject_retry(pm8001_ha, t, pm8001_dev);
+		PM8001_IO_DBG(pm8001_ha, pm8001_printk("...Complete\n"));
+	}	break;
 	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
-		pm8001_dev = pw->data;
 		dev = pm8001_dev->sas_device;
 		pm8001_I_T_nexus_reset(dev);
 		break;
 	case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
-		pm8001_dev = pw->data;
 		dev = pm8001_dev->sas_device;
 		pm8001_I_T_nexus_reset(dev);
 		break;
 	case IO_DS_IN_ERROR:
-		pm8001_dev = pw->data;
 		dev = pm8001_dev->sas_device;
 		pm8001_I_T_nexus_reset(dev);
 		break;
 	case IO_DS_NON_OPERATIONAL:
-		pm8001_dev = pw->data;
 		dev = pm8001_dev->sas_device;
 		pm8001_I_T_nexus_reset(dev);
 		break;
@@ -1460,6 +1664,11 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
 	status = le32_to_cpu(psspPayload->status);
 	tag = le32_to_cpu(psspPayload->tag);
 	ccb = &pm8001_ha->ccb_info[tag];
+	if ((status == IO_ABORTED) && ccb->open_retry) {
+		/* Being completed by another */
+		ccb->open_retry = 0;
+		return;
+	}
 	pm8001_dev = ccb->device;
 	param = le32_to_cpu(psspPayload->param);
 
@@ -1515,6 +1724,8 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
1515 pm8001_printk("IO_XFER_ERROR_BREAK\n")); 1724 pm8001_printk("IO_XFER_ERROR_BREAK\n"));
1516 ts->resp = SAS_TASK_COMPLETE; 1725 ts->resp = SAS_TASK_COMPLETE;
1517 ts->stat = SAS_OPEN_REJECT; 1726 ts->stat = SAS_OPEN_REJECT;
1727 /* Force the midlayer to retry */
1728 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1518 break; 1729 break;
1519 case IO_XFER_ERROR_PHY_NOT_READY: 1730 case IO_XFER_ERROR_PHY_NOT_READY:
1520 PM8001_IO_DBG(pm8001_ha, 1731 PM8001_IO_DBG(pm8001_ha,
@@ -1719,9 +1930,8 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
 	case IO_XFER_ERROR_BREAK:
 		PM8001_IO_DBG(pm8001_ha,
 			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
-		ts->resp = SAS_TASK_COMPLETE;
-		ts->stat = SAS_INTERRUPTED;
-		break;
+		pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK);
+		return;
 	case IO_XFER_ERROR_PHY_NOT_READY:
 		PM8001_IO_DBG(pm8001_ha,
 			pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
@@ -1800,10 +2010,8 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
 	case IO_XFER_OPEN_RETRY_TIMEOUT:
 		PM8001_IO_DBG(pm8001_ha,
 			pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
-		ts->resp = SAS_TASK_COMPLETE;
-		ts->stat = SAS_OPEN_REJECT;
-		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
-		break;
+		pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT);
+		return;
 	case IO_XFER_ERROR_UNEXPECTED_PHASE:
 		PM8001_IO_DBG(pm8001_ha,
 			pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
@@ -1877,7 +2085,6 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
 	struct sas_task *t;
 	struct pm8001_ccb_info *ccb;
-	unsigned long flags = 0;
 	u32 param;
 	u32 status;
 	u32 tag;
@@ -2016,9 +2223,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			ts->stat = SAS_QUEUE_FULL;
 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			mb();/*in order to force CPU ordering*/
-			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			spin_unlock_irq(&pm8001_ha->lock);
 			t->task_done(t);
-			spin_lock_irqsave(&pm8001_ha->lock, flags);
+			spin_lock_irq(&pm8001_ha->lock);
 			return;
 		}
 		break;
@@ -2036,9 +2243,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			ts->stat = SAS_QUEUE_FULL;
 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			mb();/*ditto*/
-			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			spin_unlock_irq(&pm8001_ha->lock);
 			t->task_done(t);
-			spin_lock_irqsave(&pm8001_ha->lock, flags);
+			spin_lock_irq(&pm8001_ha->lock);
 			return;
 		}
 		break;
@@ -2064,9 +2271,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			ts->stat = SAS_QUEUE_FULL;
 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			mb();/* ditto*/
-			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			spin_unlock_irq(&pm8001_ha->lock);
 			t->task_done(t);
-			spin_lock_irqsave(&pm8001_ha->lock, flags);
+			spin_lock_irq(&pm8001_ha->lock);
 			return;
 		}
 		break;
@@ -2131,9 +2338,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			ts->stat = SAS_QUEUE_FULL;
 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			mb();/*ditto*/
-			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			spin_unlock_irq(&pm8001_ha->lock);
 			t->task_done(t);
-			spin_lock_irqsave(&pm8001_ha->lock, flags);
+			spin_lock_irq(&pm8001_ha->lock);
 			return;
 		}
 		break;
@@ -2155,9 +2362,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			ts->stat = SAS_QUEUE_FULL;
 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			mb();/*ditto*/
-			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			spin_unlock_irq(&pm8001_ha->lock);
 			t->task_done(t);
-			spin_lock_irqsave(&pm8001_ha->lock, flags);
+			spin_lock_irq(&pm8001_ha->lock);
 			return;
 		}
 		break;
@@ -2175,31 +2382,31 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		ts->stat = SAS_DEV_NO_RESPONSE;
 		break;
 	}
-	spin_lock_irqsave(&t->task_state_lock, flags);
+	spin_lock_irq(&t->task_state_lock);
 	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
 	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
 	t->task_state_flags |= SAS_TASK_STATE_DONE;
 	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
-		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		spin_unlock_irq(&t->task_state_lock);
 		PM8001_FAIL_DBG(pm8001_ha,
 			pm8001_printk("task 0x%p done with io_status 0x%x"
 			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
 			t, status, ts->resp, ts->stat));
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 	} else if (t->uldd_task) {
-		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		spin_unlock_irq(&t->task_state_lock);
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 		mb();/* ditto */
-		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+		spin_unlock_irq(&pm8001_ha->lock);
 		t->task_done(t);
-		spin_lock_irqsave(&pm8001_ha->lock, flags);
+		spin_lock_irq(&pm8001_ha->lock);
 	} else if (!t->uldd_task) {
-		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		spin_unlock_irq(&t->task_state_lock);
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 		mb();/*ditto*/
-		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+		spin_unlock_irq(&pm8001_ha->lock);
 		t->task_done(t);
-		spin_lock_irqsave(&pm8001_ha->lock, flags);
+		spin_lock_irq(&pm8001_ha->lock);
 	}
 }
 
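
The lock conversion above also fixes a latent bug: the old code declared a local `unsigned long flags = 0` and passed it to spin_unlock_irqrestore() on paths that had never saved IRQ state, so "restoring" it would clobber the interrupt flag. The plain spin_lock_irq()/spin_unlock_irq() pair carries no flags at all. A toy model of the difference follows; irq_enabled and the four helpers are stand-ins, not kernel API.

    #include <stdio.h>

    static int irq_enabled = 1;

    /* irqsave: record the current state, then disable. */
    static unsigned long lock_irqsave(void)
    {
    	unsigned long flags = irq_enabled;
    	irq_enabled = 0;
    	return flags;
    }

    /* irqrestore: put back exactly what was saved. */
    static void unlock_irqrestore(unsigned long flags) { irq_enabled = flags; }

    /* The _irq pair simply disables and re-enables. */
    static void lock_irq(void)   { irq_enabled = 0; }
    static void unlock_irq(void) { irq_enabled = 1; }

    int main(void)
    {
    	/* Correct: restore what was actually saved. */
    	unsigned long flags = lock_irqsave();
    	unlock_irqrestore(flags);
    	printf("after irqsave pair: irq_enabled=%d\n", irq_enabled);

    	/* The old bug: "restoring" a never-saved local of 0 would
    	 * leave interrupts off; the _irq pair sidesteps flags. */
    	lock_irq();
    	unlock_irq();
    	printf("after irq pair: irq_enabled=%d\n", irq_enabled);
    	return 0;
    }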
@@ -2207,7 +2414,6 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
 static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
 {
 	struct sas_task *t;
-	unsigned long flags = 0;
 	struct task_status_struct *ts;
 	struct pm8001_ccb_info *ccb;
 	struct pm8001_device *pm8001_dev;
@@ -2287,9 +2493,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
 			ts->stat = SAS_QUEUE_FULL;
 			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 			mb();/*ditto*/
-			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+			spin_unlock_irq(&pm8001_ha->lock);
 			t->task_done(t);
-			spin_lock_irqsave(&pm8001_ha->lock, flags);
+			spin_lock_irq(&pm8001_ha->lock);
 			return;
 		}
 		break;
@@ -2387,31 +2593,31 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
 		ts->stat = SAS_OPEN_TO;
 		break;
 	}
-	spin_lock_irqsave(&t->task_state_lock, flags);
+	spin_lock_irq(&t->task_state_lock);
 	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
 	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
 	t->task_state_flags |= SAS_TASK_STATE_DONE;
 	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
-		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		spin_unlock_irq(&t->task_state_lock);
 		PM8001_FAIL_DBG(pm8001_ha,
 			pm8001_printk("task 0x%p done with io_status 0x%x"
 			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
 			t, event, ts->resp, ts->stat));
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 	} else if (t->uldd_task) {
-		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		spin_unlock_irq(&t->task_state_lock);
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 		mb();/* ditto */
-		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+		spin_unlock_irq(&pm8001_ha->lock);
 		t->task_done(t);
-		spin_lock_irqsave(&pm8001_ha->lock, flags);
+		spin_lock_irq(&pm8001_ha->lock);
 	} else if (!t->uldd_task) {
-		spin_unlock_irqrestore(&t->task_state_lock, flags);
+		spin_unlock_irq(&t->task_state_lock);
 		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 		mb();/*ditto*/
-		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
+		spin_unlock_irq(&pm8001_ha->lock);
 		t->task_done(t);
-		spin_lock_irqsave(&pm8001_ha->lock, flags);
+		spin_lock_irq(&pm8001_ha->lock);
 	}
 }
 
@@ -2857,7 +3063,7 @@ static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
 
 	memset((u8 *)&payload, 0, sizeof(payload));
 	circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
-	payload.tag = 1;
+	payload.tag = cpu_to_le32(1);
 	payload.sea_phyid_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
 		((phyId & 0x0F) << 4) | (port_id & 0x0F));
 	payload.param0 = cpu_to_le32(param0);
@@ -2929,9 +3135,9 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
 	phy->phy_type |= PORT_TYPE_SAS;
 	phy->identify.device_type = deviceType;
 	phy->phy_attached = 1;
-	if (phy->identify.device_type == SAS_END_DEV)
+	if (phy->identify.device_type == SAS_END_DEVICE)
 		phy->identify.target_port_protocols = SAS_PROTOCOL_SSP;
-	else if (phy->identify.device_type != NO_DEVICE)
+	else if (phy->identify.device_type != SAS_PHY_UNUSED)
 		phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
 	phy->sas_phy.oob_mode = SAS_OOB_MODE;
 	sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
@@ -3075,7 +3281,7 @@ static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		(struct dev_reg_resp *)(piomb + 4);
 
 	htag = le32_to_cpu(registerRespPayload->tag);
-	ccb = &pm8001_ha->ccb_info[registerRespPayload->tag];
+	ccb = &pm8001_ha->ccb_info[htag];
 	pm8001_dev = ccb->device;
 	status = le32_to_cpu(registerRespPayload->status);
 	device_id = le32_to_cpu(registerRespPayload->device_id);
@@ -3149,7 +3355,7 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 	struct fw_control_ex	fw_control_context;
 	struct fw_flash_Update_resp *ppayload =
 		(struct fw_flash_Update_resp *)(piomb + 4);
-	u32 tag = le32_to_cpu(ppayload->tag);
+	u32 tag = ppayload->tag;
 	struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
 	status = le32_to_cpu(ppayload->status);
 	memcpy(&fw_control_context,
@@ -3238,13 +3444,12 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 
 	struct task_abort_resp *pPayload =
 		(struct task_abort_resp *)(piomb + 4);
-	ccb = &pm8001_ha->ccb_info[pPayload->tag];
-	t = ccb->task;
-
 
 	status = le32_to_cpu(pPayload->status);
 	tag = le32_to_cpu(pPayload->tag);
 	scp = le32_to_cpu(pPayload->scp);
+	ccb = &pm8001_ha->ccb_info[tag];
+	t = ccb->task;
 	PM8001_IO_DBG(pm8001_ha,
 		pm8001_printk(" status = 0x%x\n", status));
 	if (t == NULL)
@@ -3270,7 +3475,7 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
 	t->task_state_flags |= SAS_TASK_STATE_DONE;
 	spin_unlock_irqrestore(&t->task_state_lock, flags);
-	pm8001_ccb_task_free(pm8001_ha, t, ccb, pPayload->tag);
+	pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 	mb();
 	t->task_done(t);
 	return 0;
@@ -3497,7 +3702,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
 static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
 	u32 pHeader = (u32)*(u32 *)piomb;
-	u8 opc = (u8)((le32_to_cpu(pHeader)) & 0xFFF);
+	u8 opc = (u8)(pHeader & 0xFFF);
 
 	PM8001_MSG_DBG(pm8001_ha, pm8001_printk("process_one_iomb:"));
 
@@ -3664,9 +3869,11 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha)
 {
 	struct outbound_queue_table *circularQ;
 	void *pMsg1 = NULL;
-	u8 bc = 0;
+	u8 uninitialized_var(bc);
 	u32 ret = MPI_IO_STATUS_FAIL;
+	unsigned long flags;
 
+	spin_lock_irqsave(&pm8001_ha->lock, flags);
 	circularQ = &pm8001_ha->outbnd_q_tbl[0];
 	do {
 		ret = mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
@@ -3677,16 +3884,16 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha)
 			mpi_msg_free_set(pm8001_ha, pMsg1, circularQ, bc);
 		}
 		if (MPI_IO_STATUS_BUSY == ret) {
-			u32 producer_idx;
 			/* Update the producer index from SPC */
-			producer_idx = pm8001_read_32(circularQ->pi_virt);
-			circularQ->producer_index = cpu_to_le32(producer_idx);
-			if (circularQ->producer_index ==
+			circularQ->producer_index =
+				cpu_to_le32(pm8001_read_32(circularQ->pi_virt));
+			if (le32_to_cpu(circularQ->producer_index) ==
 				circularQ->consumer_idx)
 				/* OQ is empty */
 				break;
 		}
 	} while (1);
+	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
 	return ret;
 }
 
@@ -3712,9 +3919,9 @@ pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd)
 	}
 }
 
-static void build_smp_cmd(u32 deviceID, u32 hTag, struct smp_req *psmp_cmd)
+static void build_smp_cmd(u32 deviceID, __le32 hTag, struct smp_req *psmp_cmd)
 {
-	psmp_cmd->tag = cpu_to_le32(hTag);
+	psmp_cmd->tag = hTag;
 	psmp_cmd->device_id = cpu_to_le32(deviceID);
 	psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1));
 }
@@ -3798,7 +4005,7 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
 	struct ssp_ini_io_start_req ssp_cmd;
 	u32 tag = ccb->ccb_tag;
 	int ret;
-	__le64 phys_addr;
+	u64 phys_addr;
 	struct inbound_queue_table *circularQ;
 	u32 opc = OPC_INB_SSPINIIOSTART;
 	memset(&ssp_cmd, 0, sizeof(ssp_cmd));
@@ -3819,15 +4026,15 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha,
3819 /* fill in PRD (scatter/gather) table, if any */ 4026 /* fill in PRD (scatter/gather) table, if any */
3820 if (task->num_scatter > 1) { 4027 if (task->num_scatter > 1) {
3821 pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd); 4028 pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
3822 phys_addr = cpu_to_le64(ccb->ccb_dma_handle + 4029 phys_addr = ccb->ccb_dma_handle +
3823 offsetof(struct pm8001_ccb_info, buf_prd[0])); 4030 offsetof(struct pm8001_ccb_info, buf_prd[0]);
3824 ssp_cmd.addr_low = lower_32_bits(phys_addr); 4031 ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(phys_addr));
3825 ssp_cmd.addr_high = upper_32_bits(phys_addr); 4032 ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(phys_addr));
3826 ssp_cmd.esgl = cpu_to_le32(1<<31); 4033 ssp_cmd.esgl = cpu_to_le32(1<<31);
3827 } else if (task->num_scatter == 1) { 4034 } else if (task->num_scatter == 1) {
3828 __le64 dma_addr = cpu_to_le64(sg_dma_address(task->scatter)); 4035 u64 dma_addr = sg_dma_address(task->scatter);
3829 ssp_cmd.addr_low = lower_32_bits(dma_addr); 4036 ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr));
3830 ssp_cmd.addr_high = upper_32_bits(dma_addr); 4037 ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(dma_addr));
3831 ssp_cmd.len = cpu_to_le32(task->total_xfer_len); 4038 ssp_cmd.len = cpu_to_le32(task->total_xfer_len);
3832 ssp_cmd.esgl = 0; 4039 ssp_cmd.esgl = 0;
3833 } else if (task->num_scatter == 0) { 4040 } else if (task->num_scatter == 0) {
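Keeping phys_addr as a CPU-order u64 matters because lower_32_bits()/upper_32_bits() are only meaningful on CPU-order values; the old code swapped the whole address to __le64 first, which on a big-endian host would split the wrong halves. The corrected pattern, condensed from the hunk above:

    u64 dma_addr = sg_dma_address(task->scatter);   /* CPU order */

    /* split while still in CPU order, then swap each 32-bit half */
    ssp_cmd.addr_low  = cpu_to_le32(lower_32_bits(dma_addr));
    ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(dma_addr));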
@@ -3850,7 +4057,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
3850 int ret; 4057 int ret;
3851 struct sata_start_req sata_cmd; 4058 struct sata_start_req sata_cmd;
3852 u32 hdr_tag, ncg_tag = 0; 4059 u32 hdr_tag, ncg_tag = 0;
3853 __le64 phys_addr; 4060 u64 phys_addr;
3854 u32 ATAP = 0x0; 4061 u32 ATAP = 0x0;
3855 u32 dir; 4062 u32 dir;
3856 struct inbound_queue_table *circularQ; 4063 struct inbound_queue_table *circularQ;
@@ -3889,13 +4096,13 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
3889 /* fill in PRD (scatter/gather) table, if any */ 4096 /* fill in PRD (scatter/gather) table, if any */
3890 if (task->num_scatter > 1) { 4097 if (task->num_scatter > 1) {
3891 pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd); 4098 pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd);
3892 phys_addr = cpu_to_le64(ccb->ccb_dma_handle + 4099 phys_addr = ccb->ccb_dma_handle +
3893 offsetof(struct pm8001_ccb_info, buf_prd[0])); 4100 offsetof(struct pm8001_ccb_info, buf_prd[0]);
3894 sata_cmd.addr_low = lower_32_bits(phys_addr); 4101 sata_cmd.addr_low = lower_32_bits(phys_addr);
3895 sata_cmd.addr_high = upper_32_bits(phys_addr); 4102 sata_cmd.addr_high = upper_32_bits(phys_addr);
3896 sata_cmd.esgl = cpu_to_le32(1 << 31); 4103 sata_cmd.esgl = cpu_to_le32(1 << 31);
3897 } else if (task->num_scatter == 1) { 4104 } else if (task->num_scatter == 1) {
3898 __le64 dma_addr = cpu_to_le64(sg_dma_address(task->scatter)); 4105 u64 dma_addr = sg_dma_address(task->scatter);
3899 sata_cmd.addr_low = lower_32_bits(dma_addr); 4106 sata_cmd.addr_low = lower_32_bits(dma_addr);
3900 sata_cmd.addr_high = upper_32_bits(dma_addr); 4107 sata_cmd.addr_high = upper_32_bits(dma_addr);
3901 sata_cmd.len = cpu_to_le32(task->total_xfer_len); 4108 sata_cmd.len = cpu_to_le32(task->total_xfer_len);
@@ -4039,7 +4246,7 @@ static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha,
4039 4246
4040 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4247 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4041 memset(&payload, 0, sizeof(payload)); 4248 memset(&payload, 0, sizeof(payload));
4042 payload.tag = 1; 4249 payload.tag = cpu_to_le32(1);
4043 payload.device_id = cpu_to_le32(device_id); 4250 payload.device_id = cpu_to_le32(device_id);
4044 PM8001_MSG_DBG(pm8001_ha, 4251 PM8001_MSG_DBG(pm8001_ha,
4045 pm8001_printk("unregister device device_id = %d\n", device_id)); 4252 pm8001_printk("unregister device device_id = %d\n", device_id));
@@ -4063,7 +4270,7 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
4063 u32 opc = OPC_INB_LOCAL_PHY_CONTROL; 4270 u32 opc = OPC_INB_LOCAL_PHY_CONTROL;
4064 memset(&payload, 0, sizeof(payload)); 4271 memset(&payload, 0, sizeof(payload));
4065 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 4272 circularQ = &pm8001_ha->inbnd_q_tbl[0];
4066 payload.tag = 1; 4273 payload.tag = cpu_to_le32(1);
4067 payload.phyop_phyid = 4274 payload.phyop_phyid =
4068 cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F)); 4275 cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F));
4069 ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); 4276 ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
@@ -4092,12 +4299,9 @@ static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha)
4092static irqreturn_t 4299static irqreturn_t
4093pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha) 4300pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha)
4094{ 4301{
4095 unsigned long flags;
4096 spin_lock_irqsave(&pm8001_ha->lock, flags);
4097 pm8001_chip_interrupt_disable(pm8001_ha); 4302 pm8001_chip_interrupt_disable(pm8001_ha);
4098 process_oq(pm8001_ha); 4303 process_oq(pm8001_ha);
4099 pm8001_chip_interrupt_enable(pm8001_ha); 4304 pm8001_chip_interrupt_enable(pm8001_ha);
4100 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
4101 return IRQ_HANDLED; 4305 return IRQ_HANDLED;
4102} 4306}
4103 4307
@@ -4360,8 +4564,10 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha,
4360 payload.cur_image_offset = cpu_to_le32(info->cur_image_offset); 4564 payload.cur_image_offset = cpu_to_le32(info->cur_image_offset);
4361 payload.total_image_len = cpu_to_le32(info->total_image_len); 4565 payload.total_image_len = cpu_to_le32(info->total_image_len);
4362 payload.len = info->sgl.im_len.len; 4566 payload.len = info->sgl.im_len.len;
4363 payload.sgl_addr_lo = lower_32_bits(info->sgl.addr); 4567 payload.sgl_addr_lo =
4364 payload.sgl_addr_hi = upper_32_bits(info->sgl.addr); 4568 cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr)));
4569 payload.sgl_addr_hi =
4570 cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr)));
4365 ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); 4571 ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
4366 return ret; 4572 return ret;
4367} 4573}
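The same principle applies in the flash-update path, but in reverse: the SGL stores its address as __le64, so it is first converted back to CPU order, and only then are the halves extracted and re-swapped for the firmware. A condensed sketch:

    u64 addr = le64_to_cpu(info->sgl.addr);         /* wire -> CPU order */

    payload.sgl_addr_lo = cpu_to_le32(lower_32_bits(addr));
    payload.sgl_addr_hi = cpu_to_le32(upper_32_bits(addr));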
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index 909132041c0..1a4611eb032 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -625,7 +625,7 @@ struct set_nvm_data_req {
625 __le32 tag; 625 __le32 tag;
626 __le32 len_ir_vpdd; 626 __le32 len_ir_vpdd;
627 __le32 vpd_offset; 627 __le32 vpd_offset;
628 u32 reserved[8]; 628 __le32 reserved[8];
629 __le32 resp_addr_lo; 629 __le32 resp_addr_lo;
630 __le32 resp_addr_hi; 630 __le32 resp_addr_hi;
631 __le32 resp_len; 631 __le32 resp_len;
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index c21a2163f9f..36efaa7c3a5 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -62,7 +62,6 @@ static struct scsi_host_template pm8001_sht = {
62 .queuecommand = sas_queuecommand, 62 .queuecommand = sas_queuecommand,
63 .target_alloc = sas_target_alloc, 63 .target_alloc = sas_target_alloc,
64 .slave_configure = sas_slave_configure, 64 .slave_configure = sas_slave_configure,
65 .slave_destroy = sas_slave_destroy,
66 .scan_finished = pm8001_scan_finished, 65 .scan_finished = pm8001_scan_finished,
67 .scan_start = pm8001_scan_start, 66 .scan_start = pm8001_scan_start,
68 .change_queue_depth = sas_change_queue_depth, 67 .change_queue_depth = sas_change_queue_depth,
@@ -76,7 +75,6 @@ static struct scsi_host_template pm8001_sht = {
76 .use_clustering = ENABLE_CLUSTERING, 75 .use_clustering = ENABLE_CLUSTERING,
77 .eh_device_reset_handler = sas_eh_device_reset_handler, 76 .eh_device_reset_handler = sas_eh_device_reset_handler,
78 .eh_bus_reset_handler = sas_eh_bus_reset_handler, 77 .eh_bus_reset_handler = sas_eh_bus_reset_handler,
79 .slave_alloc = sas_slave_alloc,
80 .target_destroy = sas_target_destroy, 78 .target_destroy = sas_target_destroy,
81 .ioctl = sas_ioctl, 79 .ioctl = sas_ioctl,
82 .shost_attrs = pm8001_host_attrs, 80 .shost_attrs = pm8001_host_attrs,
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index fb3dc997886..3b11edd4a50 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -166,6 +166,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
166 struct pm8001_hba_info *pm8001_ha = NULL; 166 struct pm8001_hba_info *pm8001_ha = NULL;
167 struct sas_phy_linkrates *rates; 167 struct sas_phy_linkrates *rates;
168 DECLARE_COMPLETION_ONSTACK(completion); 168 DECLARE_COMPLETION_ONSTACK(completion);
169 unsigned long flags;
169 pm8001_ha = sas_phy->ha->lldd_ha; 170 pm8001_ha = sas_phy->ha->lldd_ha;
170 pm8001_ha->phy[phy_id].enable_completion = &completion; 171 pm8001_ha->phy[phy_id].enable_completion = &completion;
171 switch (func) { 172 switch (func) {
@@ -209,8 +210,29 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
209 case PHY_FUNC_DISABLE: 210 case PHY_FUNC_DISABLE:
210 PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id); 211 PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
211 break; 212 break;
213 case PHY_FUNC_GET_EVENTS:
214 spin_lock_irqsave(&pm8001_ha->lock, flags);
215 if (-1 == pm8001_bar4_shift(pm8001_ha,
216 (phy_id < 4) ? 0x30000 : 0x40000)) {
217 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
218 return -EINVAL;
219 }
220 {
221 struct sas_phy *phy = sas_phy->phy;
222 uint32_t *qp = (uint32_t *)(((char *)
223 pm8001_ha->io_mem[2].memvirtaddr)
224 + 0x1034 + (0x4000 * (phy_id & 3)));
225
226 phy->invalid_dword_count = qp[0];
227 phy->running_disparity_error_count = qp[1];
228 phy->loss_of_dword_sync_count = qp[3];
229 phy->phy_reset_problem_count = qp[4];
230 }
231 pm8001_bar4_shift(pm8001_ha, 0);
232 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
233 return 0;
212 default: 234 default:
213 rc = -ENOSYS; 235 rc = -EOPNOTSUPP;
214 } 236 }
215 msleep(300); 237 msleep(300);
216 return rc; 238 return rc;
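The new PHY_FUNC_GET_EVENTS case backs the sysfs link-error counters: it slides the BAR4 window onto the per-phy register page, reads the four counters, and restores the window before releasing the lock. The offsets (0x30000/0x40000 per phy group, counters at 0x1034 + 0x4000 * (phy_id & 3)) come from the hunk above and are specific to this hardware. The invariant to preserve, sketched with a hypothetical page value:

    spin_lock_irqsave(&pm8001_ha->lock, flags);
    if (pm8001_bar4_shift(pm8001_ha, page) == -1) {
            spin_unlock_irqrestore(&pm8001_ha->lock, flags);
            return -EINVAL;     /* window never moved, nothing to undo */
    }
    /* ... read the counters through io_mem[2] ... */
    pm8001_bar4_shift(pm8001_ha, 0);    /* always restore the window */
    spin_unlock_irqrestore(&pm8001_ha->lock, flags);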
@@ -234,12 +256,14 @@ void pm8001_scan_start(struct Scsi_Host *shost)
234 256
235int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time) 257int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
236{ 258{
259 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
260
237 /* give the phy enabling interrupt event time to come in (1s 261 /* give the phy enabling interrupt event time to come in (1s
238 * is empirically about all it takes) */ 262 * is empirically about all it takes) */
239 if (time < HZ) 263 if (time < HZ)
240 return 0; 264 return 0;
241 /* Wait for discovery to finish */ 265 /* Wait for discovery to finish */
242 scsi_flush_work(shost); 266 sas_drain_work(ha);
243 return 1; 267 return 1;
244} 268}
245 269
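scsi_flush_work() only drains the Scsi_Host workqueue; with discovery running on the libsas workqueues in this series, scan_finished must drain those instead, which is what sas_drain_work() provides. The resulting idiom for a libsas LLDD (hypothetical driver name):

    static int xyz_scan_finished(struct Scsi_Host *shost, unsigned long time)
    {
            struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);

            if (time < HZ)          /* let phy-up events arrive first */
                    return 0;
            sas_drain_work(ha);     /* wait for libsas discovery to settle */
            return 1;
    }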
@@ -340,7 +364,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
340 struct pm8001_ccb_info *ccb; 364 struct pm8001_ccb_info *ccb;
341 u32 tag = 0xdeadbeef, rc, n_elem = 0; 365 u32 tag = 0xdeadbeef, rc, n_elem = 0;
342 u32 n = num; 366 u32 n = num;
343 unsigned long flags = 0, flags_libsas = 0; 367 unsigned long flags = 0;
344 368
345 if (!dev->port) { 369 if (!dev->port) {
346 struct task_status_struct *tsm = &t->task_status; 370 struct task_status_struct *tsm = &t->task_status;
@@ -364,11 +388,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
364 ts->stat = SAS_PHY_DOWN; 388 ts->stat = SAS_PHY_DOWN;
365 389
366 spin_unlock_irqrestore(&pm8001_ha->lock, flags); 390 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
367 spin_unlock_irqrestore(dev->sata_dev.ap->lock,
368 flags_libsas);
369 t->task_done(t); 391 t->task_done(t);
370 spin_lock_irqsave(dev->sata_dev.ap->lock,
371 flags_libsas);
372 spin_lock_irqsave(&pm8001_ha->lock, flags); 392 spin_lock_irqsave(&pm8001_ha->lock, flags);
373 if (n > 1) 393 if (n > 1)
374 t = list_entry(t->list.next, 394 t = list_entry(t->list.next,
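The deleted lines dropped and re-took dev->sata_dev.ap->lock around task_done(); after the libsas/libata rework in this series, completions are no longer invoked under the ATA port lock, so only the driver's own lock needs cycling:

    spin_unlock_irqrestore(&pm8001_ha->lock, flags);
    t->task_done(t);                /* completion may take its own locks */
    spin_lock_irqsave(&pm8001_ha->lock, flags);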
@@ -516,6 +536,7 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
516 task->lldd_task = NULL; 536 task->lldd_task = NULL;
517 ccb->task = NULL; 537 ccb->task = NULL;
518 ccb->ccb_tag = 0xFFFFFFFF; 538 ccb->ccb_tag = 0xFFFFFFFF;
539 ccb->open_retry = 0;
519 pm8001_ccb_free(pm8001_ha, ccb_idx); 540 pm8001_ccb_free(pm8001_ha, ccb_idx);
520} 541}
521 542
@@ -615,7 +636,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
615 wait_for_completion(&completion); 636 wait_for_completion(&completion);
616 if (dev->dev_type == SAS_END_DEV) 637 if (dev->dev_type == SAS_END_DEV)
617 msleep(50); 638 msleep(50);
618 pm8001_ha->flags |= PM8001F_RUN_TIME ; 639 pm8001_ha->flags = PM8001F_RUN_TIME;
619 return 0; 640 return 0;
620found_out: 641found_out:
621 spin_unlock_irqrestore(&pm8001_ha->lock, flags); 642 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
@@ -860,6 +881,77 @@ static int pm8001_issue_ssp_tmf(struct domain_device *dev,
860 tmf); 881 tmf);
861} 882}
862 883
884/* retry commands by ha, by task and/or by device */
885void pm8001_open_reject_retry(
886 struct pm8001_hba_info *pm8001_ha,
887 struct sas_task *task_to_close,
888 struct pm8001_device *device_to_close)
889{
890 int i;
891 unsigned long flags;
892
893 if (pm8001_ha == NULL)
894 return;
895
896 spin_lock_irqsave(&pm8001_ha->lock, flags);
897
898 for (i = 0; i < PM8001_MAX_CCB; i++) {
899 struct sas_task *task;
900 struct task_status_struct *ts;
901 struct pm8001_device *pm8001_dev;
902 unsigned long flags1;
903 u32 tag;
904 struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];
905
906 pm8001_dev = ccb->device;
907 if (!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE))
908 continue;
909 if (!device_to_close) {
910 uintptr_t d = (uintptr_t)pm8001_dev
911 - (uintptr_t)&pm8001_ha->devices;
912 if (((d % sizeof(*pm8001_dev)) != 0)
913 || ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES))
914 continue;
915 } else if (pm8001_dev != device_to_close)
916 continue;
917 tag = ccb->ccb_tag;
918 if (!tag || (tag == 0xFFFFFFFF))
919 continue;
920 task = ccb->task;
921 if (!task || !task->task_done)
922 continue;
923 if (task_to_close && (task != task_to_close))
924 continue;
925 ts = &task->task_status;
926 ts->resp = SAS_TASK_COMPLETE;
927 /* Force the midlayer to retry */
928 ts->stat = SAS_OPEN_REJECT;
929 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
930 if (pm8001_dev)
931 pm8001_dev->running_req--;
932 spin_lock_irqsave(&task->task_state_lock, flags1);
933 task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
934 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
935 task->task_state_flags |= SAS_TASK_STATE_DONE;
936 if (unlikely((task->task_state_flags
937 & SAS_TASK_STATE_ABORTED))) {
938 spin_unlock_irqrestore(&task->task_state_lock,
939 flags1);
940 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
941 } else {
942 spin_unlock_irqrestore(&task->task_state_lock,
943 flags1);
944 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
945 mb(); /* in order to force CPU ordering */
946 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
947 task->task_done(task);
948 spin_lock_irqsave(&pm8001_ha->lock, flags);
949 }
950 }
951
952 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
953}
954
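pm8001_open_reject_retry() forces matching outstanding commands to complete with SAS_OPEN_REJECT/SAS_OREJ_RSVD_RETRY, a status the midlayer treats as retryable rather than fatal. Its per-task choreography is the usual libsas LLDD completion pattern, condensed here (variables as in the function above):

    int aborted;

    ts->resp = SAS_TASK_COMPLETE;
    ts->stat = SAS_OPEN_REJECT;             /* midlayer will retry */
    ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;

    spin_lock_irqsave(&task->task_state_lock, flags1);
    task->task_state_flags &= ~(SAS_TASK_STATE_PENDING |
                                SAS_TASK_AT_INITIATOR);
    task->task_state_flags |= SAS_TASK_STATE_DONE;
    aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
    spin_unlock_irqrestore(&task->task_state_lock, flags1);

    pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
    if (!aborted) {
            /* drop the HBA lock: task_done() may reenter the driver */
            spin_unlock_irqrestore(&pm8001_ha->lock, flags);
            task->task_done(task);
            spin_lock_irqsave(&pm8001_ha->lock, flags);
    }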
863/** 955/**
864 * Standard mandates link reset for ATA (type 0) and hard reset for 956 * Standard mandates link reset for ATA (type 0) and hard reset for
 865 * SSP (type 1), only for RECOVERY 957 * SSP (type 1), only for RECOVERY
@@ -875,12 +967,14 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
875 967
876 pm8001_dev = dev->lldd_dev; 968 pm8001_dev = dev->lldd_dev;
877 pm8001_ha = pm8001_find_ha_by_dev(dev); 969 pm8001_ha = pm8001_find_ha_by_dev(dev);
878 phy = sas_find_local_phy(dev); 970 phy = sas_get_local_phy(dev);
879 971
880 if (dev_is_sata(dev)) { 972 if (dev_is_sata(dev)) {
881 DECLARE_COMPLETION_ONSTACK(completion_setstate); 973 DECLARE_COMPLETION_ONSTACK(completion_setstate);
882 if (scsi_is_sas_phy_local(phy)) 974 if (scsi_is_sas_phy_local(phy)) {
883 return 0; 975 rc = 0;
976 goto out;
977 }
884 rc = sas_phy_reset(phy, 1); 978 rc = sas_phy_reset(phy, 1);
885 msleep(2000); 979 msleep(2000);
 886 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev, 980 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
@@ -889,12 +983,14 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
889 rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, 983 rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
890 pm8001_dev, 0x01); 984 pm8001_dev, 0x01);
891 wait_for_completion(&completion_setstate); 985 wait_for_completion(&completion_setstate);
892 } else{ 986 } else {
893 rc = sas_phy_reset(phy, 1); 987 rc = sas_phy_reset(phy, 1);
894 msleep(2000); 988 msleep(2000);
895 } 989 }
896 PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n", 990 PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
897 pm8001_dev->device_id, rc)); 991 pm8001_dev->device_id, rc));
992 out:
993 sas_put_local_phy(phy);
898 return rc; 994 return rc;
899} 995}
900 996
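sas_find_local_phy() handed back a borrowed pointer that could disappear under the caller; sas_get_local_phy() returns a referenced sas_phy that must be balanced with sas_put_local_phy() on every exit path, which is why the early return above becomes a goto out. The pattern in isolation:

    struct sas_phy *phy = sas_get_local_phy(dev);   /* takes a reference */
    int rc = 0;

    if (scsi_is_sas_phy_local(phy))
            goto out;               /* still must drop the reference */
    rc = sas_phy_reset(phy, 1);
    out:
    sas_put_local_phy(phy);         /* balances sas_get_local_phy() */
    return rc;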
@@ -906,10 +1002,11 @@ int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
906 struct pm8001_device *pm8001_dev = dev->lldd_dev; 1002 struct pm8001_device *pm8001_dev = dev->lldd_dev;
907 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); 1003 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
908 if (dev_is_sata(dev)) { 1004 if (dev_is_sata(dev)) {
909 struct sas_phy *phy = sas_find_local_phy(dev); 1005 struct sas_phy *phy = sas_get_local_phy(dev);
 910 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev, 1006 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
911 dev, 1, 0); 1007 dev, 1, 0);
912 rc = sas_phy_reset(phy, 1); 1008 rc = sas_phy_reset(phy, 1);
1009 sas_put_local_phy(phy);
913 rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, 1010 rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
914 pm8001_dev, 0x01); 1011 pm8001_dev, 0x01);
915 msleep(2000); 1012 msleep(2000);
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 93959febe20..11008205aeb 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -235,6 +235,7 @@ struct pm8001_ccb_info {
235 struct pm8001_device *device; 235 struct pm8001_device *device;
236 struct pm8001_prd buf_prd[PM8001_MAX_DMA_SG]; 236 struct pm8001_prd buf_prd[PM8001_MAX_DMA_SG];
237 struct fw_control_ex *fw_control_context; 237 struct fw_control_ex *fw_control_context;
238 u8 open_retry;
238}; 239};
239 240
240struct mpi_mem { 241struct mpi_mem {
@@ -484,10 +485,15 @@ void pm8001_dev_gone(struct domain_device *dev);
484int pm8001_lu_reset(struct domain_device *dev, u8 *lun); 485int pm8001_lu_reset(struct domain_device *dev, u8 *lun);
485int pm8001_I_T_nexus_reset(struct domain_device *dev); 486int pm8001_I_T_nexus_reset(struct domain_device *dev);
486int pm8001_query_task(struct sas_task *task); 487int pm8001_query_task(struct sas_task *task);
488void pm8001_open_reject_retry(
489 struct pm8001_hba_info *pm8001_ha,
490 struct sas_task *task_to_close,
491 struct pm8001_device *device_to_close);
487int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr, 492int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
488 dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo, 493 dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo,
489 u32 mem_size, u32 align); 494 u32 mem_size, u32 align);
490 495
496int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue);
491 497
492/* ctl shared API */ 498/* ctl shared API */
493extern struct device_attribute *pm8001_host_attrs[]; 499extern struct device_attribute *pm8001_host_attrs[];
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 9f41b3b4358..5926f5a87ea 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -356,7 +356,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
356 else if (start == (ha->flt_region_boot * 4) || 356 else if (start == (ha->flt_region_boot * 4) ||
357 start == (ha->flt_region_fw * 4)) 357 start == (ha->flt_region_fw * 4))
358 valid = 1; 358 valid = 1;
359 else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha)) 359 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)
360 || IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
360 valid = 1; 361 valid = 1;
361 if (!valid) { 362 if (!valid) {
362 ql_log(ql_log_warn, vha, 0x7065, 363 ql_log(ql_log_warn, vha, 0x7065,
@@ -627,144 +628,6 @@ static struct bin_attribute sysfs_reset_attr = {
627}; 628};
628 629
629static ssize_t 630static ssize_t
630qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
631 struct bin_attribute *bin_attr,
632 char *buf, loff_t off, size_t count)
633{
634 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
635 struct device, kobj)));
636 struct qla_hw_data *ha = vha->hw;
637 uint16_t dev, adr, opt, len;
638 int rval;
639
640 ha->edc_data_len = 0;
641
642 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
643 return -EINVAL;
644
645 if (!ha->edc_data) {
646 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
647 &ha->edc_data_dma);
648 if (!ha->edc_data) {
649 ql_log(ql_log_warn, vha, 0x7073,
650 "Unable to allocate memory for EDC write.\n");
651 return -ENOMEM;
652 }
653 }
654
655 dev = le16_to_cpup((void *)&buf[0]);
656 adr = le16_to_cpup((void *)&buf[2]);
657 opt = le16_to_cpup((void *)&buf[4]);
658 len = le16_to_cpup((void *)&buf[6]);
659
660 if (!(opt & BIT_0))
661 if (len == 0 || len > DMA_POOL_SIZE || len > count - 8)
662 return -EINVAL;
663
664 memcpy(ha->edc_data, &buf[8], len);
665
666 rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data,
667 dev, adr, len, opt);
668 if (rval != QLA_SUCCESS) {
669 ql_log(ql_log_warn, vha, 0x7074,
670 "Unable to write EDC (%x) %02x:%04x:%02x:%02x:%02hhx\n",
671 rval, dev, adr, opt, len, buf[8]);
672 return -EIO;
673 }
674
675 return count;
676}
677
678static struct bin_attribute sysfs_edc_attr = {
679 .attr = {
680 .name = "edc",
681 .mode = S_IWUSR,
682 },
683 .size = 0,
684 .write = qla2x00_sysfs_write_edc,
685};
686
687static ssize_t
688qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
689 struct bin_attribute *bin_attr,
690 char *buf, loff_t off, size_t count)
691{
692 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
693 struct device, kobj)));
694 struct qla_hw_data *ha = vha->hw;
695 uint16_t dev, adr, opt, len;
696 int rval;
697
698 ha->edc_data_len = 0;
699
700 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
701 return -EINVAL;
702
703 if (!ha->edc_data) {
704 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
705 &ha->edc_data_dma);
706 if (!ha->edc_data) {
707 ql_log(ql_log_warn, vha, 0x708c,
708 "Unable to allocate memory for EDC status.\n");
709 return -ENOMEM;
710 }
711 }
712
713 dev = le16_to_cpup((void *)&buf[0]);
714 adr = le16_to_cpup((void *)&buf[2]);
715 opt = le16_to_cpup((void *)&buf[4]);
716 len = le16_to_cpup((void *)&buf[6]);
717
718 if (!(opt & BIT_0))
719 if (len == 0 || len > DMA_POOL_SIZE)
720 return -EINVAL;
721
722 memset(ha->edc_data, 0, len);
723 rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data,
724 dev, adr, len, opt);
725 if (rval != QLA_SUCCESS) {
726 ql_log(ql_log_info, vha, 0x7075,
727 "Unable to write EDC status (%x) %02x:%04x:%02x:%02x.\n",
728 rval, dev, adr, opt, len);
729 return -EIO;
730 }
731
732 ha->edc_data_len = len;
733
734 return count;
735}
736
737static ssize_t
738qla2x00_sysfs_read_edc_status(struct file *filp, struct kobject *kobj,
739 struct bin_attribute *bin_attr,
740 char *buf, loff_t off, size_t count)
741{
742 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
743 struct device, kobj)));
744 struct qla_hw_data *ha = vha->hw;
745
746 if (!capable(CAP_SYS_ADMIN) || off != 0 || count == 0)
747 return 0;
748
749 if (!ha->edc_data || ha->edc_data_len == 0 || ha->edc_data_len > count)
750 return -EINVAL;
751
752 memcpy(buf, ha->edc_data, ha->edc_data_len);
753
754 return ha->edc_data_len;
755}
756
757static struct bin_attribute sysfs_edc_status_attr = {
758 .attr = {
759 .name = "edc_status",
760 .mode = S_IRUSR | S_IWUSR,
761 },
762 .size = 0,
763 .write = qla2x00_sysfs_write_edc_status,
764 .read = qla2x00_sysfs_read_edc_status,
765};
766
767static ssize_t
768qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj, 631qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
769 struct bin_attribute *bin_attr, 632 struct bin_attribute *bin_attr,
770 char *buf, loff_t off, size_t count) 633 char *buf, loff_t off, size_t count)
@@ -879,8 +742,6 @@ static struct sysfs_entry {
879 { "vpd", &sysfs_vpd_attr, 1 }, 742 { "vpd", &sysfs_vpd_attr, 1 },
880 { "sfp", &sysfs_sfp_attr, 1 }, 743 { "sfp", &sysfs_sfp_attr, 1 },
881 { "reset", &sysfs_reset_attr, }, 744 { "reset", &sysfs_reset_attr, },
882 { "edc", &sysfs_edc_attr, 2 },
883 { "edc_status", &sysfs_edc_status_attr, 2 },
884 { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 }, 745 { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
885 { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 }, 746 { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
886 { NULL }, 747 { NULL },
@@ -898,7 +759,7 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
898 continue; 759 continue;
899 if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw)) 760 if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
900 continue; 761 continue;
901 if (iter->is4GBp_only == 3 && !(IS_QLA8XXX_TYPE(vha->hw))) 762 if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
902 continue; 763 continue;
903 764
904 ret = sysfs_create_bin_file(&host->shost_gendev.kobj, 765 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
@@ -926,7 +787,7 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
926 continue; 787 continue;
927 if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha)) 788 if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
928 continue; 789 continue;
929 if (iter->is4GBp_only == 3 && !!(IS_QLA8XXX_TYPE(vha->hw))) 790 if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
930 continue; 791 continue;
931 792
932 sysfs_remove_bin_file(&host->shost_gendev.kobj, 793 sysfs_remove_bin_file(&host->shost_gendev.kobj,
@@ -1231,7 +1092,7 @@ qla2x00_optrom_gold_fw_version_show(struct device *dev,
1231 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1092 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1232 struct qla_hw_data *ha = vha->hw; 1093 struct qla_hw_data *ha = vha->hw;
1233 1094
1234 if (!IS_QLA81XX(ha)) 1095 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
1235 return snprintf(buf, PAGE_SIZE, "\n"); 1096 return snprintf(buf, PAGE_SIZE, "\n");
1236 1097
1237 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n", 1098 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
@@ -1278,7 +1139,7 @@ qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1278 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1139 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1279 struct qla_hw_data *ha = vha->hw; 1140 struct qla_hw_data *ha = vha->hw;
1280 1141
1281 if (!IS_QLA81XX(ha)) 1142 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
1282 return snprintf(buf, PAGE_SIZE, "\n"); 1143 return snprintf(buf, PAGE_SIZE, "\n");
1283 1144
1284 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n", 1145 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
@@ -1293,7 +1154,7 @@ qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1293 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1154 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1294 struct qla_hw_data *ha = vha->hw; 1155 struct qla_hw_data *ha = vha->hw;
1295 1156
1296 if (!IS_QLA81XX(ha)) 1157 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
1297 return snprintf(buf, PAGE_SIZE, "\n"); 1158 return snprintf(buf, PAGE_SIZE, "\n");
1298 1159
1299 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", 1160 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
@@ -1316,7 +1177,7 @@ qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1316{ 1177{
1317 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1178 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1318 1179
1319 if (!IS_QLA8XXX_TYPE(vha->hw)) 1180 if (!IS_CNA_CAPABLE(vha->hw))
1320 return snprintf(buf, PAGE_SIZE, "\n"); 1181 return snprintf(buf, PAGE_SIZE, "\n");
1321 1182
1322 return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id); 1183 return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
@@ -1328,7 +1189,7 @@ qla2x00_vn_port_mac_address_show(struct device *dev,
1328{ 1189{
1329 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1190 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1330 1191
1331 if (!IS_QLA8XXX_TYPE(vha->hw)) 1192 if (!IS_CNA_CAPABLE(vha->hw))
1332 return snprintf(buf, PAGE_SIZE, "\n"); 1193 return snprintf(buf, PAGE_SIZE, "\n");
1333 1194
1334 return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n", 1195 return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
@@ -1364,7 +1225,7 @@ qla2x00_thermal_temp_show(struct device *dev,
1364 else if (!vha->hw->flags.eeh_busy) 1225 else if (!vha->hw->flags.eeh_busy)
1365 rval = qla2x00_get_thermal_temp(vha, &temp, &frac); 1226 rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
1366 if (rval != QLA_SUCCESS) 1227 if (rval != QLA_SUCCESS)
1367 temp = frac = 0; 1228 return snprintf(buf, PAGE_SIZE, "\n");
1368 1229
1369 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", temp, frac); 1230 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", temp, frac);
1370} 1231}
@@ -1493,6 +1354,9 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
1493 case PORT_SPEED_10GB: 1354 case PORT_SPEED_10GB:
1494 speed = FC_PORTSPEED_10GBIT; 1355 speed = FC_PORTSPEED_10GBIT;
1495 break; 1356 break;
1357 case PORT_SPEED_16GB:
1358 speed = FC_PORTSPEED_16GBIT;
1359 break;
1496 } 1360 }
1497 fc_host_speed(shost) = speed; 1361 fc_host_speed(shost) = speed;
1498} 1362}
@@ -1643,10 +1507,14 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
1643 * final cleanup of firmware resources (PCBs and XCBs). 1507 * final cleanup of firmware resources (PCBs and XCBs).
1644 */ 1508 */
1645 if (fcport->loop_id != FC_NO_LOOP_ID && 1509 if (fcport->loop_id != FC_NO_LOOP_ID &&
1646 !test_bit(UNLOADING, &fcport->vha->dpc_flags)) 1510 !test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
1647 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha, 1511 if (IS_FWI2_CAPABLE(fcport->vha->hw))
1648 fcport->loop_id, fcport->d_id.b.domain, 1512 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
1649 fcport->d_id.b.area, fcport->d_id.b.al_pa); 1513 fcport->loop_id, fcport->d_id.b.domain,
1514 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1515 else
1516 qla2x00_port_logout(fcport->vha, fcport);
1517 }
1650} 1518}
1651 1519
1652static int 1520static int
@@ -1889,6 +1757,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1889 break; 1757 break;
1890 } 1758 }
1891 } 1759 }
1760
1892 if (qos) { 1761 if (qos) {
1893 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0, 1762 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
1894 qos); 1763 qos);
@@ -2086,7 +1955,7 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
2086 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports; 1955 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
2087 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count; 1956 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
2088 1957
2089 if (IS_QLA8XXX_TYPE(ha)) 1958 if (IS_CNA_CAPABLE(ha))
2090 speed = FC_PORTSPEED_10GBIT; 1959 speed = FC_PORTSPEED_10GBIT;
2091 else if (IS_QLA25XX(ha)) 1960 else if (IS_QLA25XX(ha))
2092 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | 1961 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 2c4714279bc..f74cc0602f3 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -11,29 +11,36 @@
11#include <linux/delay.h> 11#include <linux/delay.h>
12 12
13/* BSG support for ELS/CT pass through */ 13/* BSG support for ELS/CT pass through */
14inline srb_t * 14void
15qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size) 15qla2x00_bsg_job_done(void *data, void *ptr, int res)
16{ 16{
17 srb_t *sp; 17 srb_t *sp = (srb_t *)ptr;
18 struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
19 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
20
21 bsg_job->reply->result = res;
22 bsg_job->job_done(bsg_job);
23 sp->free(vha, sp);
24}
25
26void
27qla2x00_bsg_sp_free(void *data, void *ptr)
28{
29 srb_t *sp = (srb_t *)ptr;
30 struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
31 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
18 struct qla_hw_data *ha = vha->hw; 32 struct qla_hw_data *ha = vha->hw;
19 struct srb_ctx *ctx;
20 33
21 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL); 34 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
22 if (!sp) 35 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
23 goto done;
24 ctx = kzalloc(size, GFP_KERNEL);
25 if (!ctx) {
26 mempool_free(sp, ha->srb_mempool);
27 sp = NULL;
28 goto done;
29 }
30 36
31 memset(sp, 0, sizeof(*sp)); 37 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
32 sp->fcport = fcport; 38 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
33 sp->ctx = ctx; 39
34 ctx->iocbs = 1; 40 if (sp->type == SRB_CT_CMD ||
35done: 41 sp->type == SRB_ELS_CMD_HST)
36 return sp; 42 kfree(sp->fcport);
43 mempool_free(sp, vha->hw->srb_mempool);
37} 44}
38 45
39int 46int
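The refactor retires the side-band srb_ctx allocation: the callbacks now live on the srb itself, with sp->done() reporting the result to the bsg layer and sp->free() unmapping the payload scatterlists and returning the srb to the mempool. From the completion path's point of view, assuming sp and a result code res are in hand:

    sp->done(vha, sp, res);     /* qla2x00_bsg_job_done(): posts the result
                                 * to the bsg layer, then calls sp->free()
                                 * (qla2x00_bsg_sp_free()) to unmap the s/g
                                 * lists and release the srb */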
@@ -101,8 +108,6 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
101 uint32_t len; 108 uint32_t len;
102 uint32_t oper; 109 uint32_t oper;
103 110
104 bsg_job->reply->reply_payload_rcv_len = 0;
105
106 if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) { 111 if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
107 ret = -EINVAL; 112 ret = -EINVAL;
108 goto exit_fcp_prio_cfg; 113 goto exit_fcp_prio_cfg;
@@ -217,6 +222,7 @@ exit_fcp_prio_cfg:
217 bsg_job->job_done(bsg_job); 222 bsg_job->job_done(bsg_job);
218 return ret; 223 return ret;
219} 224}
225
220static int 226static int
221qla2x00_process_els(struct fc_bsg_job *bsg_job) 227qla2x00_process_els(struct fc_bsg_job *bsg_job)
222{ 228{
@@ -230,7 +236,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
230 int req_sg_cnt, rsp_sg_cnt; 236 int req_sg_cnt, rsp_sg_cnt;
231 int rval = (DRIVER_ERROR << 16); 237 int rval = (DRIVER_ERROR << 16);
232 uint16_t nextlid = 0; 238 uint16_t nextlid = 0;
233 struct srb_ctx *els;
234 239
235 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) { 240 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
236 rport = bsg_job->rport; 241 rport = bsg_job->rport;
@@ -337,20 +342,21 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
337 } 342 }
338 343
339 /* Alloc SRB structure */ 344 /* Alloc SRB structure */
340 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx)); 345 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
341 if (!sp) { 346 if (!sp) {
342 rval = -ENOMEM; 347 rval = -ENOMEM;
343 goto done_unmap_sg; 348 goto done_unmap_sg;
344 } 349 }
345 350
346 els = sp->ctx; 351 sp->type =
347 els->type =
348 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ? 352 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
349 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST); 353 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
350 els->name = 354 sp->name =
351 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ? 355 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
352 "bsg_els_rpt" : "bsg_els_hst"); 356 "bsg_els_rpt" : "bsg_els_hst");
353 els->u.bsg_job = bsg_job; 357 sp->u.bsg_job = bsg_job;
358 sp->free = qla2x00_bsg_sp_free;
359 sp->done = qla2x00_bsg_job_done;
354 360
355 ql_dbg(ql_dbg_user, vha, 0x700a, 361 ql_dbg(ql_dbg_user, vha, 0x700a,
356 "bsg rqst type: %s els type: %x - loop-id=%x " 362 "bsg rqst type: %s els type: %x - loop-id=%x "
@@ -362,7 +368,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
362 if (rval != QLA_SUCCESS) { 368 if (rval != QLA_SUCCESS) {
363 ql_log(ql_log_warn, vha, 0x700e, 369 ql_log(ql_log_warn, vha, 0x700e,
364 "qla2x00_start_sp failed = %d\n", rval); 370 "qla2x00_start_sp failed = %d\n", rval);
365 kfree(sp->ctx);
366 mempool_free(sp, ha->srb_mempool); 371 mempool_free(sp, ha->srb_mempool);
367 rval = -EIO; 372 rval = -EIO;
368 goto done_unmap_sg; 373 goto done_unmap_sg;
@@ -409,7 +414,6 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
409 uint16_t loop_id; 414 uint16_t loop_id;
410 struct fc_port *fcport; 415 struct fc_port *fcport;
411 char *type = "FC_BSG_HST_CT"; 416 char *type = "FC_BSG_HST_CT";
412 struct srb_ctx *ct;
413 417
414 req_sg_cnt = 418 req_sg_cnt =
415 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 419 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
@@ -486,19 +490,20 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
486 fcport->loop_id = loop_id; 490 fcport->loop_id = loop_id;
487 491
488 /* Alloc SRB structure */ 492 /* Alloc SRB structure */
489 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx)); 493 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
490 if (!sp) { 494 if (!sp) {
491 ql_log(ql_log_warn, vha, 0x7015, 495 ql_log(ql_log_warn, vha, 0x7015,
492 "qla2x00_get_ctx_bsg_sp failed.\n"); 496 "qla2x00_get_sp failed.\n");
493 rval = -ENOMEM; 497 rval = -ENOMEM;
494 goto done_free_fcport; 498 goto done_free_fcport;
495 } 499 }
496 500
497 ct = sp->ctx; 501 sp->type = SRB_CT_CMD;
498 ct->type = SRB_CT_CMD; 502 sp->name = "bsg_ct";
499 ct->name = "bsg_ct"; 503 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
500 ct->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt); 504 sp->u.bsg_job = bsg_job;
501 ct->u.bsg_job = bsg_job; 505 sp->free = qla2x00_bsg_sp_free;
506 sp->done = qla2x00_bsg_job_done;
502 507
503 ql_dbg(ql_dbg_user, vha, 0x7016, 508 ql_dbg(ql_dbg_user, vha, 0x7016,
 504 "bsg rqst type: %s els type: %x - " 509 "bsg rqst type: %s els type: %x - "
@@ -511,7 +516,6 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
511 if (rval != QLA_SUCCESS) { 516 if (rval != QLA_SUCCESS) {
512 ql_log(ql_log_warn, vha, 0x7017, 517 ql_log(ql_log_warn, vha, 0x7017,
513 "qla2x00_start_sp failed=%d.\n", rval); 518 "qla2x00_start_sp failed=%d.\n", rval);
514 kfree(sp->ctx);
515 mempool_free(sp, ha->srb_mempool); 519 mempool_free(sp, ha->srb_mempool);
516 rval = -EIO; 520 rval = -EIO;
517 goto done_free_fcport; 521 goto done_free_fcport;
@@ -540,7 +544,7 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
540 int rval = 0; 544 int rval = 0;
541 struct qla_hw_data *ha = vha->hw; 545 struct qla_hw_data *ha = vha->hw;
542 546
543 if (!IS_QLA81XX(ha)) 547 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
544 goto done_set_internal; 548 goto done_set_internal;
545 549
546 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1); 550 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
@@ -582,7 +586,7 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
582 uint16_t new_config[4]; 586 uint16_t new_config[4];
583 struct qla_hw_data *ha = vha->hw; 587 struct qla_hw_data *ha = vha->hw;
584 588
585 if (!IS_QLA81XX(ha)) 589 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
586 goto done_reset_internal; 590 goto done_reset_internal;
587 591
588 memset(new_config, 0 , sizeof(new_config)); 592 memset(new_config, 0 , sizeof(new_config));
@@ -707,7 +711,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
707 711
708 if ((ha->current_topology == ISP_CFG_F || 712 if ((ha->current_topology == ISP_CFG_F ||
709 (atomic_read(&vha->loop_state) == LOOP_DOWN) || 713 (atomic_read(&vha->loop_state) == LOOP_DOWN) ||
710 (IS_QLA81XX(ha) && 714 ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) &&
711 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE 715 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
712 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && 716 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
713 elreq.options == EXTERNAL_LOOPBACK) { 717 elreq.options == EXTERNAL_LOOPBACK) {
@@ -717,13 +721,12 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
717 command_sent = INT_DEF_LB_ECHO_CMD; 721 command_sent = INT_DEF_LB_ECHO_CMD;
718 rval = qla2x00_echo_test(vha, &elreq, response); 722 rval = qla2x00_echo_test(vha, &elreq, response);
719 } else { 723 } else {
720 if (IS_QLA81XX(ha)) { 724 if (IS_QLA81XX(ha) || IS_QLA8031(ha)) {
721 memset(config, 0, sizeof(config)); 725 memset(config, 0, sizeof(config));
722 memset(new_config, 0, sizeof(new_config)); 726 memset(new_config, 0, sizeof(new_config));
723 if (qla81xx_get_port_config(vha, config)) { 727 if (qla81xx_get_port_config(vha, config)) {
724 ql_log(ql_log_warn, vha, 0x701f, 728 ql_log(ql_log_warn, vha, 0x701f,
725 "Get port config failed.\n"); 729 "Get port config failed.\n");
726 bsg_job->reply->reply_payload_rcv_len = 0;
727 bsg_job->reply->result = (DID_ERROR << 16); 730 bsg_job->reply->result = (DID_ERROR << 16);
728 rval = -EPERM; 731 rval = -EPERM;
729 goto done_free_dma_req; 732 goto done_free_dma_req;
@@ -737,8 +740,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
737 new_config)) { 740 new_config)) {
738 ql_log(ql_log_warn, vha, 0x7024, 741 ql_log(ql_log_warn, vha, 0x7024,
739 "Internal loopback failed.\n"); 742 "Internal loopback failed.\n");
740 bsg_job->reply->reply_payload_rcv_len =
741 0;
742 bsg_job->reply->result = 743 bsg_job->reply->result =
743 (DID_ERROR << 16); 744 (DID_ERROR << 16);
744 rval = -EPERM; 745 rval = -EPERM;
@@ -750,8 +751,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
750 */ 751 */
751 if (qla81xx_reset_internal_loopback(vha, 752 if (qla81xx_reset_internal_loopback(vha,
752 config, 1)) { 753 config, 1)) {
753 bsg_job->reply->reply_payload_rcv_len =
754 0;
755 bsg_job->reply->result = 754 bsg_job->reply->result =
756 (DID_ERROR << 16); 755 (DID_ERROR << 16);
757 rval = -EPERM; 756 rval = -EPERM;
@@ -788,7 +787,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
788 "MPI reset failed.\n"); 787 "MPI reset failed.\n");
789 } 788 }
790 789
791 bsg_job->reply->reply_payload_rcv_len = 0;
792 bsg_job->reply->result = (DID_ERROR << 16); 790 bsg_job->reply->result = (DID_ERROR << 16);
793 rval = -EIO; 791 rval = -EIO;
794 goto done_free_dma_req; 792 goto done_free_dma_req;
@@ -813,7 +811,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
813 fw_sts_ptr += sizeof(response); 811 fw_sts_ptr += sizeof(response);
814 *fw_sts_ptr = command_sent; 812 *fw_sts_ptr = command_sent;
815 rval = 0; 813 rval = 0;
816 bsg_job->reply->reply_payload_rcv_len = 0;
817 bsg_job->reply->result = (DID_ERROR << 16); 814 bsg_job->reply->result = (DID_ERROR << 16);
818 } else { 815 } else {
819 ql_dbg(ql_dbg_user, vha, 0x702d, 816 ql_dbg(ql_dbg_user, vha, 0x702d,
@@ -872,7 +869,7 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
872 if (rval) { 869 if (rval) {
873 ql_log(ql_log_warn, vha, 0x7030, 870 ql_log(ql_log_warn, vha, 0x7030,
874 "Vendor request 84xx reset failed.\n"); 871 "Vendor request 84xx reset failed.\n");
875 rval = bsg_job->reply->reply_payload_rcv_len = 0; 872 rval = 0;
876 bsg_job->reply->result = (DID_ERROR << 16); 873 bsg_job->reply->result = (DID_ERROR << 16);
877 874
878 } else { 875 } else {
@@ -971,9 +968,8 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
971 ql_log(ql_log_warn, vha, 0x7037, 968 ql_log(ql_log_warn, vha, 0x7037,
972 "Vendor request 84xx updatefw failed.\n"); 969 "Vendor request 84xx updatefw failed.\n");
973 970
974 rval = bsg_job->reply->reply_payload_rcv_len = 0; 971 rval = 0;
975 bsg_job->reply->result = (DID_ERROR << 16); 972 bsg_job->reply->result = (DID_ERROR << 16);
976
977 } else { 973 } else {
978 ql_dbg(ql_dbg_user, vha, 0x7038, 974 ql_dbg(ql_dbg_user, vha, 0x7038,
979 "Vendor request 84xx updatefw completed.\n"); 975 "Vendor request 84xx updatefw completed.\n");
@@ -1159,7 +1155,7 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1159 ql_log(ql_log_warn, vha, 0x7043, 1155 ql_log(ql_log_warn, vha, 0x7043,
1160 "Vendor request 84xx mgmt failed.\n"); 1156 "Vendor request 84xx mgmt failed.\n");
1161 1157
1162 rval = bsg_job->reply->reply_payload_rcv_len = 0; 1158 rval = 0;
1163 bsg_job->reply->result = (DID_ERROR << 16); 1159 bsg_job->reply->result = (DID_ERROR << 16);
1164 1160
1165 } else { 1161 } else {
@@ -1210,8 +1206,6 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
1210 uint16_t mb[MAILBOX_REGISTER_COUNT]; 1206 uint16_t mb[MAILBOX_REGISTER_COUNT];
1211 uint8_t *rsp_ptr = NULL; 1207 uint8_t *rsp_ptr = NULL;
1212 1208
1213 bsg_job->reply->reply_payload_rcv_len = 0;
1214
1215 if (!IS_IIDMA_CAPABLE(vha->hw)) { 1209 if (!IS_IIDMA_CAPABLE(vha->hw)) {
1216 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n"); 1210 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
1217 return -EINVAL; 1211 return -EINVAL;
@@ -1304,8 +1298,6 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
1304 int valid = 0; 1298 int valid = 0;
1305 struct qla_hw_data *ha = vha->hw; 1299 struct qla_hw_data *ha = vha->hw;
1306 1300
1307 bsg_job->reply->reply_payload_rcv_len = 0;
1308
1309 if (unlikely(pci_channel_offline(ha->pdev))) 1301 if (unlikely(pci_channel_offline(ha->pdev)))
1310 return -EINVAL; 1302 return -EINVAL;
1311 1303
@@ -1331,7 +1323,7 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
1331 start == (ha->flt_region_fw * 4)) 1323 start == (ha->flt_region_fw * 4))
1332 valid = 1; 1324 valid = 1;
1333 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || 1325 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
1334 IS_QLA8XXX_TYPE(ha)) 1326 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
1335 valid = 1; 1327 valid = 1;
1336 if (!valid) { 1328 if (!valid) {
1337 ql_log(ql_log_warn, vha, 0x7058, 1329 ql_log(ql_log_warn, vha, 0x7058,
@@ -1617,6 +1609,9 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
1617 struct Scsi_Host *host; 1609 struct Scsi_Host *host;
1618 scsi_qla_host_t *vha; 1610 scsi_qla_host_t *vha;
1619 1611
1612 /* In case no data transferred. */
1613 bsg_job->reply->reply_payload_rcv_len = 0;
1614
1620 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) { 1615 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
1621 rport = bsg_job->rport; 1616 rport = bsg_job->rport;
1622 fcport = *(fc_port_t **) rport->dd_data; 1617 fcport = *(fc_port_t **) rport->dd_data;
@@ -1655,6 +1650,7 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
1655 case FC_BSG_RPT_CT: 1650 case FC_BSG_RPT_CT:
1656 default: 1651 default:
1657 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n"); 1652 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
1653 bsg_job->reply->result = ret;
1658 break; 1654 break;
1659 } 1655 }
1660 return ret; 1656 return ret;
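All of the scattered "bsg_job->reply->reply_payload_rcv_len = 0;" assignments deleted above collapse into the single one added at the top of qla24xx_bsg_request(), the one dispatch point every vendor command passes through, so each handler starts from a known "no data returned" state. The shape of the dispatcher, with a hypothetical name:

    static int xyz_bsg_request(struct fc_bsg_job *bsg_job)
    {
            int ret = -EINVAL;

            /* initialize once, before dispatch; handlers override it
             * only when they actually return payload data */
            bsg_job->reply->reply_payload_rcv_len = 0;

            switch (bsg_job->request->msgcode) {
            /* ... per-command handlers set ret ... */
            }
            return ret;
    }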
@@ -1669,7 +1665,6 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
1669 int cnt, que; 1665 int cnt, que;
1670 unsigned long flags; 1666 unsigned long flags;
1671 struct req_que *req; 1667 struct req_que *req;
1672 struct srb_ctx *sp_bsg;
1673 1668
1674 /* find the bsg job from the active list of commands */ 1669 /* find the bsg job from the active list of commands */
1675 spin_lock_irqsave(&ha->hardware_lock, flags); 1670 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1681,11 +1676,9 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
1681 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 1676 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
1682 sp = req->outstanding_cmds[cnt]; 1677 sp = req->outstanding_cmds[cnt];
1683 if (sp) { 1678 if (sp) {
1684 sp_bsg = sp->ctx; 1679 if (((sp->type == SRB_CT_CMD) ||
1685 1680 (sp->type == SRB_ELS_CMD_HST))
1686 if (((sp_bsg->type == SRB_CT_CMD) || 1681 && (sp->u.bsg_job == bsg_job)) {
1687 (sp_bsg->type == SRB_ELS_CMD_HST))
1688 && (sp_bsg->u.bsg_job == bsg_job)) {
1689 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1682 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1690 if (ha->isp_ops->abort_command(sp)) { 1683 if (ha->isp_ops->abort_command(sp)) {
1691 ql_log(ql_log_warn, vha, 0x7089, 1684 ql_log(ql_log_warn, vha, 0x7089,
@@ -1715,7 +1708,6 @@ done:
1715 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1708 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1716 if (bsg_job->request->msgcode == FC_BSG_HST_CT) 1709 if (bsg_job->request->msgcode == FC_BSG_HST_CT)
1717 kfree(sp->fcport); 1710 kfree(sp->fcport);
1718 kfree(sp->ctx);
1719 mempool_free(sp, ha->srb_mempool); 1711 mempool_free(sp, ha->srb_mempool);
1720 return 0; 1712 return 0;
1721} 1713}
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 45cbf0ba624..897731b93df 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,23 +11,27 @@
11 * ---------------------------------------------------------------------- 11 * ----------------------------------------------------------------------
12 * | Level | Last Value Used | Holes | 12 * | Level | Last Value Used | Holes |
13 * ---------------------------------------------------------------------- 13 * ----------------------------------------------------------------------
14 * | Module Init and Probe | 0x0116 | 0xfa | 14 * | Module Init and Probe | 0x0120 | 0x4b,0xba,0xfa |
15 * | Mailbox commands | 0x112b | | 15 * | Mailbox commands | 0x113e | 0x112c-0x112e |
16 * | Device Discovery | 0x2084 | | 16 * | | | 0x113a |
17 * | Queue Command and IO tracing | 0x302f | 0x3008,0x302d, | 17 * | Device Discovery | 0x2086 | 0x2020-0x2022 |
18 * | | | 0x302e | 18 * | Queue Command and IO tracing | 0x302f | 0x3006,0x3008 |
19 * | | | 0x302d-0x302e |
19 * | DPC Thread | 0x401c | | 20 * | DPC Thread | 0x401c | |
20 * | Async Events | 0x5057 | 0x5052 | 21 * | Async Events | 0x505d | 0x502b-0x502f |
21 * | Timer Routines | 0x6011 | 0x600e,0x600f | 22 * | | | 0x5047,0x5052 |
22 * | User Space Interactions | 0x709e | 0x7018,0x702e | 23 * | Timer Routines | 0x6011 | 0x600e-0x600f |
23 * | | | 0x7039,0x7045 | 24 * | User Space Interactions | 0x709f | 0x7018,0x702e, |
25 * | | | 0x7039,0x7045, |
26 * | | | 0x7073-0x7075, |
27 * | | | 0x708c |
24 * | Task Management | 0x803c | 0x8025-0x8026 | 28 * | Task Management | 0x803c | 0x8025-0x8026 |
25 * | | | 0x800b,0x8039 | 29 * | | | 0x800b,0x8039 |
26 * | AER/EEH | 0x900f | | 30 * | AER/EEH | 0x900f | |
27 * | Virtual Port | 0xa007 | | 31 * | Virtual Port | 0xa007 | |
28 * | ISP82XX Specific | 0xb052 | | 32 * | ISP82XX Specific | 0xb054 | 0xb053 |
29 * | MultiQ | 0xc00b | | 33 * | MultiQ | 0xc00c | |
30 * | Misc | 0xd00b | | 34 * | Misc | 0xd010 | |
31 * ---------------------------------------------------------------------- 35 * ----------------------------------------------------------------------
32 */ 36 */
33 37
@@ -85,7 +89,7 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
85 WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED); 89 WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
86 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 90 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
87 91
88 dwords = GID_LIST_SIZE / 4; 92 dwords = qla2x00_gid_list_size(ha) / 4;
89 for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS; 93 for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
90 cnt += dwords, addr += dwords) { 94 cnt += dwords, addr += dwords) {
91 if (cnt + dwords > ram_dwords) 95 if (cnt + dwords > ram_dwords)
@@ -260,7 +264,7 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
260 WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED); 264 WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
261 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 265 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
262 266
263 words = GID_LIST_SIZE / 2; 267 words = qla2x00_gid_list_size(ha) / 2;
264 for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS; 268 for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
265 cnt += words, addr += words) { 269 cnt += words, addr += words) {
266 if (cnt + words > ram_words) 270 if (cnt + words > ram_words)
@@ -375,6 +379,77 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
375} 379}
376 380
377static inline void * 381static inline void *
382qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
383{
384 struct qla2xxx_mqueue_chain *q;
385 struct qla2xxx_mqueue_header *qh;
386 struct req_que *req;
387 struct rsp_que *rsp;
388 int que;
389
390 if (!ha->mqenable)
391 return ptr;
392
393 /* Request queues */
394 for (que = 1; que < ha->max_req_queues; que++) {
395 req = ha->req_q_map[que];
396 if (!req)
397 break;
398
399 /* Add chain. */
400 q = ptr;
401 *last_chain = &q->type;
402 q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
403 q->chain_size = htonl(
404 sizeof(struct qla2xxx_mqueue_chain) +
405 sizeof(struct qla2xxx_mqueue_header) +
406 (req->length * sizeof(request_t)));
407 ptr += sizeof(struct qla2xxx_mqueue_chain);
408
409 /* Add header. */
410 qh = ptr;
411 qh->queue = __constant_htonl(TYPE_REQUEST_QUEUE);
412 qh->number = htonl(que);
413 qh->size = htonl(req->length * sizeof(request_t));
414 ptr += sizeof(struct qla2xxx_mqueue_header);
415
416 /* Add data. */
417 memcpy(ptr, req->ring, req->length * sizeof(request_t));
418 ptr += req->length * sizeof(request_t);
419 }
420
421 /* Response queues */
422 for (que = 1; que < ha->max_rsp_queues; que++) {
423 rsp = ha->rsp_q_map[que];
424 if (!rsp)
425 break;
426
427 /* Add chain. */
428 q = ptr;
429 *last_chain = &q->type;
430 q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
431 q->chain_size = htonl(
432 sizeof(struct qla2xxx_mqueue_chain) +
433 sizeof(struct qla2xxx_mqueue_header) +
434 (rsp->length * sizeof(response_t)));
435 ptr += sizeof(struct qla2xxx_mqueue_chain);
436
437 /* Add header. */
438 qh = ptr;
439 qh->queue = __constant_htonl(TYPE_RESPONSE_QUEUE);
440 qh->number = htonl(que);
441 qh->size = htonl(rsp->length * sizeof(response_t));
442 ptr += sizeof(struct qla2xxx_mqueue_header);
443
444 /* Add data. */
445 memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));
446 ptr += rsp->length * sizeof(response_t);
447 }
448
449 return ptr;
450}
451
452static inline void *
378qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) 453qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
379{ 454{
380 uint32_t cnt, que_idx; 455 uint32_t cnt, que_idx;
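qla25xx_copy_mqueues() appends one self-describing record per extra request/response ring to the firmware dump: a qla2xxx_mqueue_chain announcing the record size, a qla2xxx_mqueue_header naming the queue type and number, then the raw ring contents. Condensed from the request-queue loop above, with q, qh and data standing for the three successive positions in the dump buffer; dump fields are big-endian, hence htonl():

    /* [ chain: type, chain_size ][ header: queue, number, size ][ ring ] */
    q->type       = htonl(DUMP_CHAIN_QUEUE);
    q->chain_size = htonl(sizeof(*q) + sizeof(*qh) +
                          req->length * sizeof(request_t));
    qh->queue  = htonl(TYPE_REQUEST_QUEUE);
    qh->number = htonl(que);
    qh->size   = htonl(req->length * sizeof(request_t));
    memcpy(data, req->ring, req->length * sizeof(request_t));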
@@ -382,7 +457,7 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
382 struct qla2xxx_mq_chain *mq = ptr; 457 struct qla2xxx_mq_chain *mq = ptr;
383 struct device_reg_25xxmq __iomem *reg; 458 struct device_reg_25xxmq __iomem *reg;
384 459
385 if (!ha->mqenable) 460 if (!ha->mqenable || IS_QLA83XX(ha))
386 return ptr; 461 return ptr;
387 462
388 mq = ptr; 463 mq = ptr;
@@ -1322,12 +1397,16 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1322 nxt = qla24xx_copy_eft(ha, nxt); 1397 nxt = qla24xx_copy_eft(ha, nxt);
1323 1398
1324 /* Chain entries -- started with MQ. */ 1399 /* Chain entries -- started with MQ. */
1325 qla25xx_copy_fce(ha, nxt_chain, &last_chain); 1400 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1401 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1326 if (last_chain) { 1402 if (last_chain) {
1327 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); 1403 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
1328 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); 1404 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
1329 } 1405 }
1330 1406
1407 /* Adjust valid length. */
1408 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1409
1331qla25xx_fw_dump_failed_0: 1410qla25xx_fw_dump_failed_0:
1332 qla2xxx_dump_post_process(base_vha, rval); 1411 qla2xxx_dump_post_process(base_vha, rval);
1333 1412
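Because the chain section is now variable-length (FCE plus however many queue dumps exist), the valid dump size is recomputed from the final write pointer rather than taken from the precomputed worst case; nxt_chain points one past the last byte written, so plain pointer subtraction yields the byte count:

    /* bytes actually written = end pointer - start of the dump buffer */
    ha->fw_dump_len = nxt_chain - (void *)ha->fw_dump;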
@@ -1636,12 +1715,16 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1636 nxt = qla24xx_copy_eft(ha, nxt); 1715 nxt = qla24xx_copy_eft(ha, nxt);
1637 1716
1638 /* Chain entries -- started with MQ. */ 1717 /* Chain entries -- started with MQ. */
1639 qla25xx_copy_fce(ha, nxt_chain, &last_chain); 1718 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1719 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1640 if (last_chain) { 1720 if (last_chain) {
1641 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); 1721 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
1642 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); 1722 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
1643 } 1723 }
1644 1724
1725 /* Adjust valid length. */
1726 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1727
1645qla81xx_fw_dump_failed_0: 1728qla81xx_fw_dump_failed_0:
1646 qla2xxx_dump_post_process(base_vha, rval); 1729 qla2xxx_dump_post_process(base_vha, rval);
1647 1730
@@ -1650,6 +1733,507 @@ qla81xx_fw_dump_failed:
1650 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1733 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1651} 1734}
1652 1735
1736void
1737qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1738{
1739 int rval;
1740 uint32_t cnt, reg_data;
1741 uint32_t risc_address;
1742 struct qla_hw_data *ha = vha->hw;
1743 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1744 uint32_t __iomem *dmp_reg;
1745 uint32_t *iter_reg;
1746 uint16_t __iomem *mbx_reg;
1747 unsigned long flags;
1748 struct qla83xx_fw_dump *fw;
1749 uint32_t ext_mem_cnt;
1750 void *nxt, *nxt_chain;
1751 uint32_t *last_chain = NULL;
1752 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1753
1754 risc_address = ext_mem_cnt = 0;
1755 flags = 0;
1756
1757 if (!hardware_locked)
1758 spin_lock_irqsave(&ha->hardware_lock, flags);
1759
1760 if (!ha->fw_dump) {
1761 ql_log(ql_log_warn, vha, 0xd00c,
1762 "No buffer available for dump!!!\n");
1763 goto qla83xx_fw_dump_failed;
1764 }
1765
1766 if (ha->fw_dumped) {
1767 ql_log(ql_log_warn, vha, 0xd00d,
1768 "Firmware has been previously dumped (%p) -- ignoring "
1769 "request...\n", ha->fw_dump);
1770 goto qla83xx_fw_dump_failed;
1771 }
1772 fw = &ha->fw_dump->isp.isp83;
1773 qla2xxx_prep_dump(ha, ha->fw_dump);
1774
1775 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1776
1777 /* Pause RISC. */
1778 rval = qla24xx_pause_risc(reg);
1779 if (rval != QLA_SUCCESS)
1780 goto qla83xx_fw_dump_failed_0;
1781
1782 WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
1783 dmp_reg = &reg->iobase_window;
1784 reg_data = RD_REG_DWORD(dmp_reg);
1785 WRT_REG_DWORD(dmp_reg, 0);
1786
1787 dmp_reg = &reg->unused_4_1[0];
1788 reg_data = RD_REG_DWORD(dmp_reg);
1789 WRT_REG_DWORD(dmp_reg, 0);
1790
1791 WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
1792 dmp_reg = &reg->unused_4_1[2];
1793 reg_data = RD_REG_DWORD(dmp_reg);
1794 WRT_REG_DWORD(dmp_reg, 0);
1795
1796 /* select PCR and disable ecc checking and correction */
1797 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1798 RD_REG_DWORD(&reg->iobase_addr);
1799 WRT_REG_DWORD(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */
1800
1801 /* Host/Risc registers. */
1802 iter_reg = fw->host_risc_reg;
1803 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1804 iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1805 qla24xx_read_window(reg, 0x7040, 16, iter_reg);
1806
1807 /* PCIe registers. */
1808 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1809 RD_REG_DWORD(&reg->iobase_addr);
1810 WRT_REG_DWORD(&reg->iobase_window, 0x01);
1811 dmp_reg = &reg->iobase_c4;
1812 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
1813 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
1814 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
1815 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
1816
1817 WRT_REG_DWORD(&reg->iobase_window, 0x00);
1818 RD_REG_DWORD(&reg->iobase_window);
1819
1820 /* Host interface registers. */
1821 dmp_reg = &reg->flash_addr;
1822 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
1823 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1824
1825 /* Disable interrupts. */
1826 WRT_REG_DWORD(&reg->ictrl, 0);
1827 RD_REG_DWORD(&reg->ictrl);
1828
1829 /* Shadow registers. */
1830 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1831 RD_REG_DWORD(&reg->iobase_addr);
1832 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1833 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1834
1835 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1836 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1837
1838 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1839 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1840
1841 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1842 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1843
1844 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1845 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1846
1847 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1848 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1849
1850 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1851 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1852
1853 WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
1854 fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1855
1856 WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
1857 fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1858
1859 WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
1860 fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1861
1862 WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
1863 fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1864
1865 /* RISC I/O register. */
1866 WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
1867 fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
1868
1869 /* Mailbox registers. */
1870 mbx_reg = &reg->mailbox0;
1871 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
1872 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
1873
1874 /* Transfer sequence registers. */
1875 iter_reg = fw->xseq_gp_reg;
1876 iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg);
1877 iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg);
1878 iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg);
1879 iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg);
1880 iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg);
1881 iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg);
1882 iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg);
1883 iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg);
1884 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1885 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1886 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1887 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1888 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1889 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1890 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1891 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1892
1893 iter_reg = fw->xseq_0_reg;
1894 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1895 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1896 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1897
1898 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1899
1900 qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg);
1901
1902 /* Receive sequence registers. */
1903 iter_reg = fw->rseq_gp_reg;
1904 iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg);
1905 iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg);
1906 iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg);
1907 iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg);
1908 iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg);
1909 iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg);
1910 iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg);
1911 iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg);
1912 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1913 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1914 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1915 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1916 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1917 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1918 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1919 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1920
1921 iter_reg = fw->rseq_0_reg;
1922 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1923 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1924
1925 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1926 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1927 qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg);
1928
1929 /* Auxiliary sequence registers. */
1930 iter_reg = fw->aseq_gp_reg;
1931 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1932 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1933 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1934 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1935 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1936 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1937 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1938 iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1939 iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg);
1940 iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg);
1941 iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg);
1942 iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg);
1943 iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg);
1944 iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg);
1945 iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg);
1946 qla24xx_read_window(reg, 0xB170, 16, iter_reg);
1947
1948 iter_reg = fw->aseq_0_reg;
1949 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1950 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1951
1952 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1953 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1954 qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg);
1955
1956 /* Command DMA registers. */
1957 iter_reg = fw->cmd_dma_reg;
1958 iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg);
1959 iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg);
1960 iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg);
1961 qla24xx_read_window(reg, 0x71F0, 16, iter_reg);
1962
1963 /* Queues. */
1964 iter_reg = fw->req0_dma_reg;
1965 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1966 dmp_reg = &reg->iobase_q;
1967 for (cnt = 0; cnt < 7; cnt++)
1968 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1969
1970 iter_reg = fw->resp0_dma_reg;
1971 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1972 dmp_reg = &reg->iobase_q;
1973 for (cnt = 0; cnt < 7; cnt++)
1974 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1975
1976 iter_reg = fw->req1_dma_reg;
1977 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1978 dmp_reg = &reg->iobase_q;
1979 for (cnt = 0; cnt < 7; cnt++)
1980 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1981
1982 /* Transmit DMA registers. */
1983 iter_reg = fw->xmt0_dma_reg;
1984 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1985 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1986
1987 iter_reg = fw->xmt1_dma_reg;
1988 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1989 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1990
1991 iter_reg = fw->xmt2_dma_reg;
1992 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1993 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1994
1995 iter_reg = fw->xmt3_dma_reg;
1996 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1997 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1998
1999 iter_reg = fw->xmt4_dma_reg;
2000 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
2001 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
2002
2003 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
2004
2005 /* Receive DMA registers. */
2006 iter_reg = fw->rcvt0_data_dma_reg;
2007 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
2008 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
2009
2010 iter_reg = fw->rcvt1_data_dma_reg;
2011 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
2012 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
2013
2014 /* RISC registers. */
2015 iter_reg = fw->risc_gp_reg;
2016 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
2017 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
2018 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
2019 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
2020 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
2021 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
2022 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
2023 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
2024
2025 /* Local memory controller registers. */
2026 iter_reg = fw->lmc_reg;
2027 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
2028 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
2029 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
2030 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
2031 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
2032 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
2033 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
2034 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
2035
2036 /* Fibre Protocol Module registers. */
2037 iter_reg = fw->fpm_hdw_reg;
2038 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
2039 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
2040 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
2041 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
2042 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
2043 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
2044 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
2045 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
2046 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
2047 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
2048 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
2049 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
2050 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
2051 iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
2052 iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg);
2053 qla24xx_read_window(reg, 0x40F0, 16, iter_reg);
2054
2055 /* RQ0 Array registers. */
2056 iter_reg = fw->rq0_array_reg;
2057 iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg);
2058 iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg);
2059 iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg);
2060 iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg);
2061 iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg);
2062 iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg);
2063 iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg);
2064 iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg);
2065 iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg);
2066 iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg);
2067 iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg);
2068 iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg);
2069 iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg);
2070 iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg);
2071 iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg);
2072 qla24xx_read_window(reg, 0x5CF0, 16, iter_reg);
2073
2074 /* RQ1 Array registers. */
2075 iter_reg = fw->rq1_array_reg;
2076 iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg);
2077 iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg);
2078 iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg);
2079 iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg);
2080 iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg);
2081 iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg);
2082 iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg);
2083 iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg);
2084 iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg);
2085 iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg);
2086 iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg);
2087 iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg);
2088 iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg);
2089 iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg);
2090 iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg);
2091 qla24xx_read_window(reg, 0x5DF0, 16, iter_reg);
2092
2093 /* RP0 Array registers. */
2094 iter_reg = fw->rp0_array_reg;
2095 iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg);
2096 iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg);
2097 iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg);
2098 iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg);
2099 iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg);
2100 iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg);
2101 iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg);
2102 iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg);
2103 iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg);
2104 iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg);
2105 iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg);
2106 iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg);
2107 iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg);
2108 iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg);
2109 iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg);
2110 qla24xx_read_window(reg, 0x5EF0, 16, iter_reg);
2111
2112 /* RP1 Array registers. */
2113 iter_reg = fw->rp1_array_reg;
2114 iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg);
2115 iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg);
2116 iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg);
2117 iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg);
2118 iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg);
2119 iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg);
2120 iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg);
2121 iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg);
2122 iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg);
2123 iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg);
2124 iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg);
2125 iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg);
2126 iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg);
2127 iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg);
2128 iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg);
2129 qla24xx_read_window(reg, 0x5FF0, 16, iter_reg);
2130
2131 iter_reg = fw->at0_array_reg;
2132 iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg);
2133 iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg);
2134 iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg);
2135 iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg);
2136 iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg);
2137 iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg);
2138 iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg);
2139 qla24xx_read_window(reg, 0x70F0, 16, iter_reg);
2140
2141 /* I/O Queue Control registers. */
2142 qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg);
2143
2144 /* Frame Buffer registers. */
2145 iter_reg = fw->fb_hdw_reg;
2146 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
2147 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
2148 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
2149 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
2150 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
2151 iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg);
2152 iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg);
2153 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
2154 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
2155 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
2156 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
2157 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
2158 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
2159 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
2160 iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg);
2161 iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg);
2162 iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg);
2163 iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg);
2164 iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg);
2165 iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg);
2166 iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg);
2167 iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg);
2168 iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg);
2169 iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg);
2170 iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg);
2171 iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg);
2172 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
2173
2174 /* Multi queue registers */
2175 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
2176 &last_chain);
2177
2178 rval = qla24xx_soft_reset(ha);
2179 if (rval != QLA_SUCCESS) {
2180 ql_log(ql_log_warn, vha, 0xd00e,
2181 "SOFT RESET FAILED, forcing continuation of dump!!!\n");
2182 rval = QLA_SUCCESS;
2183
2184 ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n");
2185
2186 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
2187 RD_REG_DWORD(&reg->hccr);
2188
2189 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
2190 RD_REG_DWORD(&reg->hccr);
2191
2192 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
2193 RD_REG_DWORD(&reg->hccr);
2194
2195 for (cnt = 30000; cnt && (RD_REG_WORD(&reg->mailbox0)); cnt--)
2196 udelay(5);
2197
2198 if (!cnt) {
2199 nxt = fw->code_ram;
2200 nxt += sizeof(fw->code_ram),
2201 nxt += (ha->fw_memory_size - 0x100000 + 1);
2202 goto copy_queue;
2203 } else
2204 ql_log(ql_log_warn, vha, 0xd010,
2205 "bigger hammer success?\n");
2206 }
2207
2208 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
2209 &nxt);
2210 if (rval != QLA_SUCCESS)
2211 goto qla83xx_fw_dump_failed_0;
2212
2213copy_queue:
2214 nxt = qla2xxx_copy_queues(ha, nxt);
2215
2216 nxt = qla24xx_copy_eft(ha, nxt);
2217
2218 /* Chain entries -- started with MQ. */
2219 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
2220 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
2221 if (last_chain) {
2222 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
2223 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
2224 }
2225
2226 /* Adjust valid length. */
2227 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
2228
2229qla83xx_fw_dump_failed_0:
2230 qla2xxx_dump_post_process(base_vha, rval);
2231
2232qla83xx_fw_dump_failed:
2233 if (!hardware_locked)
2234 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2235}
2236
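For orientation, here is a minimal sketch (not part of the patch) of how a dump post-processing tool might walk the chain entries that qla25xx_copy_fce()/qla25xx_copy_mqueues() append after the fixed-size template. It assumes the qla2xxx_mqueue_chain layout and DUMP_CHAIN_* values from qla_dbg.h further down, and that chain_size covers the whole entry, as qla25xx_copy_mqueues() sets it; the function name is hypothetical.

#include <stdint.h>
#include <arpa/inet.h>	/* ntohl() -- fields are written big-endian */

static void walk_dump_chain(const uint8_t *p)
{
	for (;;) {
		const struct qla2xxx_mqueue_chain *q = (const void *)p;
		uint32_t type = ntohl(q->type);
		uint32_t size = ntohl(q->chain_size);

		/* strip the last-entry marker before matching the variant */
		switch (type & ~DUMP_CHAIN_LAST) {
		case DUMP_CHAIN_FCE:
		case DUMP_CHAIN_MQ:
		case DUMP_CHAIN_QUEUE:
			/* decode the entry payload here */
			break;
		}
		if (type & DUMP_CHAIN_LAST)
			break;
		p += size;	/* chain_size spans header + payload */
	}
}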
1653 2237 /****************************************************************************/
1654 2238 /* Driver Debug Functions. */
1655 2239 /****************************************************************************/
@@ -1782,13 +2366,13 @@ ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
1782 2366 	vaf.va = &va;
1783 2367
1784 2368 	switch (level) {
1785 	case 0: /* FATAL LOG */
2369 	case ql_log_fatal: /* FATAL LOG */
1786 2370 	pr_crit("%s%pV", pbuf, &vaf);
1787 2371 	break;
1788 	case 1:
2372 	case ql_log_warn:
1789 2373 	pr_err("%s%pV", pbuf, &vaf);
1790 2374 	break;
1791 	case 2:
2375 	case ql_log_info:
1792 2376 	pr_warn("%s%pV", pbuf, &vaf);
1793 2377 	break;
1794 2378 	default:
@@ -1837,13 +2421,13 @@ ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
1837 2421 	vaf.va = &va;
1838 2422
1839 2423 	switch (level) {
1840 	case 0: /* FATAL LOG */
2424 	case ql_log_fatal: /* FATAL LOG */
1841 2425 	pr_crit("%s%pV", pbuf, &vaf);
1842 2426 	break;
1843 	case 1:
2427 	case ql_log_warn:
1844 2428 	pr_err("%s%pV", pbuf, &vaf);
1845 2429 	break;
1846 	case 2:
2430 	case ql_log_info:
1847 2431 	pr_warn("%s%pV", pbuf, &vaf);
1848 2432 	break;
1849 2433 	default:
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 5f1b6d9c3dc..2157bdf1569 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -165,6 +165,54 @@ struct qla81xx_fw_dump {
165 165 	uint32_t ext_mem[1];
166 166 };
167 167
168struct qla83xx_fw_dump {
169 uint32_t host_status;
170 uint32_t host_risc_reg[48];
171 uint32_t pcie_regs[4];
172 uint32_t host_reg[32];
173 uint32_t shadow_reg[11];
174 uint32_t risc_io_reg;
175 uint16_t mailbox_reg[32];
176 uint32_t xseq_gp_reg[256];
177 uint32_t xseq_0_reg[48];
178 uint32_t xseq_1_reg[16];
179 uint32_t xseq_2_reg[16];
180 uint32_t rseq_gp_reg[256];
181 uint32_t rseq_0_reg[32];
182 uint32_t rseq_1_reg[16];
183 uint32_t rseq_2_reg[16];
184 uint32_t rseq_3_reg[16];
185 uint32_t aseq_gp_reg[256];
186 uint32_t aseq_0_reg[32];
187 uint32_t aseq_1_reg[16];
188 uint32_t aseq_2_reg[16];
189 uint32_t aseq_3_reg[16];
190 uint32_t cmd_dma_reg[64];
191 uint32_t req0_dma_reg[15];
192 uint32_t resp0_dma_reg[15];
193 uint32_t req1_dma_reg[15];
194 uint32_t xmt0_dma_reg[32];
195 uint32_t xmt1_dma_reg[32];
196 uint32_t xmt2_dma_reg[32];
197 uint32_t xmt3_dma_reg[32];
198 uint32_t xmt4_dma_reg[32];
199 uint32_t xmt_data_dma_reg[16];
200 uint32_t rcvt0_data_dma_reg[32];
201 uint32_t rcvt1_data_dma_reg[32];
202 uint32_t risc_gp_reg[128];
203 uint32_t lmc_reg[128];
204 uint32_t fpm_hdw_reg[256];
205 uint32_t rq0_array_reg[256];
206 uint32_t rq1_array_reg[256];
207 uint32_t rp0_array_reg[256];
208 uint32_t rp1_array_reg[256];
209 uint32_t queue_control_reg[16];
210 uint32_t fb_hdw_reg[432];
211 uint32_t at0_array_reg[128];
212 uint32_t code_ram[0x2400];
213 uint32_t ext_mem[1];
214};
215
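The register-array sizes above track the window reads in qla83xx_fw_dump(): each qla24xx_read_window(..., 16, ...) call captures 16 dwords, so, for example, the sixteen 0xBE00-0xBF70 windows fill xseq_gp_reg[256]. A hypothetical compile-time check of that invariant (not in the patch) might read:

static inline void qla83xx_dump_size_check(void)	/* hypothetical */
{
	/* 16 window reads x 16 dwords each = 256 entries */
	BUILD_BUG_ON(sizeof(((struct qla83xx_fw_dump *)0)->xseq_gp_reg) !=
	    16 * 16 * sizeof(uint32_t));
}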
168 216 #define EFT_NUM_BUFFERS 4
169 217 #define EFT_BYTES_PER_BUFFER 0x4000
170 218 #define EFT_SIZE ((EFT_BYTES_PER_BUFFER) * (EFT_NUM_BUFFERS))
@@ -192,9 +240,23 @@ struct qla2xxx_mq_chain {
192 240 	uint32_t qregs[4 * QLA_MQ_SIZE];
193 241 };
194 242
243struct qla2xxx_mqueue_header {
244 uint32_t queue;
245#define TYPE_REQUEST_QUEUE 0x1
246#define TYPE_RESPONSE_QUEUE 0x2
247 uint32_t number;
248 uint32_t size;
249};
250
251struct qla2xxx_mqueue_chain {
252 uint32_t type;
253 uint32_t chain_size;
254};
255
195 256 #define DUMP_CHAIN_VARIANT 0x80000000
196 257 #define DUMP_CHAIN_FCE 0x7FFFFAF0
197 258 #define DUMP_CHAIN_MQ 0x7FFFFAF1
259 #define DUMP_CHAIN_QUEUE 0x7FFFFAF2
198 260 #define DUMP_CHAIN_LAST 0x80000000
199 261
200 262 struct qla2xxx_fw_dump {
@@ -228,6 +290,7 @@ struct qla2xxx_fw_dump {
228 290 	struct qla24xx_fw_dump isp24;
229 291 	struct qla25xx_fw_dump isp25;
230 292 	struct qla81xx_fw_dump isp81;
293 	struct qla83xx_fw_dump isp83;
231 294 	} isp;
232 295 };
233 296
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index af1003f9de1..a2443031dbe 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -125,17 +125,17 @@
125 125  * Fibre Channel device definitions.
126 126  */
127 127 #define WWN_SIZE 8 /* Size of WWPN, WWN & WWNN */
128 #define MAX_FIBRE_DEVICES 512
128 #define MAX_FIBRE_DEVICES_2100 512
129 #define MAX_FIBRE_DEVICES_2400 2048
130 #define MAX_FIBRE_DEVICES_LOOP 128
131 #define MAX_FIBRE_DEVICES_MAX MAX_FIBRE_DEVICES_2400
129 132 #define MAX_FIBRE_LUNS 0xFFFF
130 #define MAX_RSCN_COUNT 32
131 133 #define MAX_HOST_COUNT 16
132 134
133 135 /*
134 136  * Host adapter default definitions.
135 137  */
136 138 #define MAX_BUSES 1 /* We only have one bus today */
137 #define MAX_TARGETS_2100 MAX_FIBRE_DEVICES
138 #define MAX_TARGETS_2200 MAX_FIBRE_DEVICES
139 139 #define MIN_LUNS 8
140 140 #define MAX_LUNS MAX_FIBRE_LUNS
141 141 #define MAX_CMDS_PER_LUN 255
@@ -202,20 +202,12 @@ struct sd_dif_tuple {
202 202 /*
203 203  * SCSI Request Block
204 204  */
205 typedef struct srb {
205 struct srb_cmd {
206 	atomic_t ref_count;
207 	struct fc_port *fcport;
208 	uint32_t handle;
209
210 206 	struct scsi_cmnd *cmd; /* Linux SCSI command pkt */
211
212 	uint16_t flags;
213
214 207 	uint32_t request_sense_length;
215 208 	uint8_t *request_sense_ptr;
216
217 209 	void *ctx;
218 } srb_t;
210 };
219 211
220 212 /*
221 213  * SRB flag definitions
@@ -254,10 +246,7 @@ struct srb_iocb {
254 246 	} u;
255 247
256 248 	struct timer_list timer;
257
249 	void (*timeout)(void *);
258 	void (*done)(srb_t *);
259 	void (*free)(srb_t *);
260 	void (*timeout)(srb_t *);
261 250 };
262 251
263 252 /* Values for srb_ctx type */
@@ -268,16 +257,37 @@ struct srb_iocb {
268 257 #define SRB_CT_CMD 5
269 258 #define SRB_ADISC_CMD 6
270 259 #define SRB_TM_CMD 7
260 #define SRB_SCSI_CMD 8
271 261
272 struct srb_ctx {
262 typedef struct srb {
263 	atomic_t ref_count;
264 	struct fc_port *fcport;
265 	uint32_t handle;
266 	uint16_t flags;
273 267 	uint16_t type;
274 268 	char *name;
275 269 	int iocbs;
276 270 	union {
277 	struct srb_iocb *iocb_cmd;
271 	struct srb_iocb iocb_cmd;
278 272 	struct fc_bsg_job *bsg_job;
273 	struct srb_cmd scmd;
279 274 	} u;
280 };
275 	void (*done)(void *, void *, int);
276 	void (*free)(void *, void *);
277 } srb_t;
278
279 #define GET_CMD_SP(sp) (sp->u.scmd.cmd)
280 #define SET_CMD_SP(sp, cmd) (sp->u.scmd.cmd = cmd)
281 #define GET_CMD_CTX_SP(sp) (sp->u.scmd.ctx)
282
283 #define GET_CMD_SENSE_LEN(sp) \
284 	(sp->u.scmd.request_sense_length)
285 #define SET_CMD_SENSE_LEN(sp, len) \
286 	(sp->u.scmd.request_sense_length = len)
287 #define GET_CMD_SENSE_PTR(sp) \
288 	(sp->u.scmd.request_sense_ptr)
289 #define SET_CMD_SENSE_PTR(sp, ptr) \
290 	(sp->u.scmd.request_sense_ptr = ptr)
281 291
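Illustrative only: with the consolidated srb_t, per-command state moves into sp->u.scmd and call sites go through the accessor macros above instead of touching srb members directly. A hypothetical helper, just to show the intended usage:

static void example_save_sense(srb_t *sp, uint8_t *sense, uint32_t len)
{
	/* stash sense data in the SCSI-command member of the union */
	SET_CMD_SENSE_PTR(sp, sense);
	SET_CMD_SENSE_LEN(sp, len);
}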
282 292 struct msg_echo_lb {
283 293 	dma_addr_t send_dma;
@@ -653,8 +663,10 @@ typedef struct {
653 663 #define MBC_DIAGNOSTIC_LOOP_BACK 0x45 /* Diagnostic loop back. */
654 664 #define MBC_ONLINE_SELF_TEST 0x46 /* Online self-test. */
655 665 #define MBC_ENHANCED_GET_PORT_DATABASE 0x47 /* Get port database + login */
666 #define MBC_CONFIGURE_VF 0x4b /* Configure VFs */
656 667 #define MBC_RESET_LINK_STATUS 0x52 /* Reset Link Error Status */
657 668 #define MBC_IOCB_COMMAND_A64 0x54 /* Execute IOCB command (64) */
669 #define MBC_PORT_LOGOUT 0x56 /* Port Logout request */
658 670 #define MBC_SEND_RNID_ELS 0x57 /* Send RNID ELS request */
659 671 #define MBC_SET_RNID_PARAMS 0x59 /* Set RNID parameters */
660 672 #define MBC_GET_RNID_PARAMS 0x5a /* Data Rate */
@@ -1709,6 +1721,7 @@ typedef struct fc_port {
1709 1721
1710 1722 	uint16_t vp_idx;
1711 1723 	uint8_t fc4_type;
1724 	uint8_t scan_state;
1712 1725 } fc_port_t;
1713 1726
1714 1727 /*
@@ -1761,7 +1774,6 @@ static const char * const port_state_str[] = {
1761 1774
1762 1775 #define GID_PT_CMD 0x1A1
1763 1776 #define GID_PT_REQ_SIZE (16 + 4)
1764 #define GID_PT_RSP_SIZE (16 + (MAX_FIBRE_DEVICES * 4))
1765 1777
1766 1778 #define GPN_ID_CMD 0x112
1767 1779 #define GPN_ID_REQ_SIZE (16 + 4)
@@ -2051,7 +2063,9 @@ struct ct_sns_rsp {
2051 2063 	} ga_nxt;
2052 2064
2053 2065 	struct {
2054 	struct ct_sns_gid_pt_data entries[MAX_FIBRE_DEVICES];
2066 	/* Assume the largest number of targets for the union */
2067 	struct ct_sns_gid_pt_data
2068 		entries[MAX_FIBRE_DEVICES_MAX];
2055 2069 	} gid_pt;
2056 2070
2057 2071 	struct {
@@ -2112,7 +2126,11 @@ struct ct_sns_pkt {
2112 2126
2113 2127 #define GID_PT_SNS_SCMD_LEN 6
2114 2128 #define GID_PT_SNS_CMD_SIZE 28
2115 #define GID_PT_SNS_DATA_SIZE (MAX_FIBRE_DEVICES * 4 + 16)
2129 /*
2130  * Assume MAX_FIBRE_DEVICES_2100 as these defines are only used with older
2131  * adapters.
2132  */
2133 #define GID_PT_SNS_DATA_SIZE (MAX_FIBRE_DEVICES_2100 * 4 + 16)
2116 2134
2117 2135 #define GPN_ID_SNS_SCMD_LEN 6
2118 2136 #define GPN_ID_SNS_CMD_SIZE 28
@@ -2160,7 +2178,6 @@ struct gid_list_info {
2160 2178 	uint16_t loop_id; /* ISP23XX -- 6 bytes. */
2161 2179 	uint16_t reserved_1; /* ISP24XX -- 8 bytes. */
2162 2180 };
2163 #define GID_LIST_SIZE (sizeof(struct gid_list_info) * MAX_FIBRE_DEVICES)
2164 2181
2165 2182 /* NPIV */
2166 2183 typedef struct vport_info {
@@ -2261,6 +2278,7 @@ struct isp_operations {
2261 2278 #define QLA_MIDX_DEFAULT 0
2262 2279 #define QLA_MIDX_RSP_Q 1
2263 2280 #define QLA_PCI_MSIX_CONTROL 0xa2
2281 #define QLA_83XX_PCI_MSIX_CONTROL 0x92
2264 2282
2265 2283 struct scsi_qla_host;
2266 2284
@@ -2341,7 +2359,7 @@ struct qla_statistics {
2341 2359 #define QLA_MQ_SIZE 32
2342 2360 #define QLA_MAX_QUEUES 256
2343 2361 #define ISP_QUE_REG(ha, id) \
2344 	((ha->mqenable) ? \
2362 	((ha->mqenable || IS_QLA83XX(ha)) ? \
2345 2363 	((void *)(ha->mqiobase) +\
2346 2364 	(QLA_QUE_PAGE * id)) :\
2347 2365 	((void *)(ha->iobase)))
@@ -2461,6 +2479,7 @@ struct qla_hw_data {
2461 2479 #define MIN_IOBASE_LEN 0x100
2462 2480 /* Multi queue data structs */
2463 2481 	device_reg_t __iomem *mqiobase;
2482 	device_reg_t __iomem *msixbase;
2464 2483 	uint16_t msix_count;
2465 2484 	uint8_t mqenable;
2466 2485 	struct req_que **req_q_map;
@@ -2485,6 +2504,7 @@ struct qla_hw_data {
2485 2504 	atomic_t loop_down_timer; /* loop down timer */
2486 2505 	uint8_t link_down_timeout; /* link down timeout */
2487 2506 	uint16_t max_loop_id;
2507 	uint16_t max_fibre_devices; /* Maximum number of targets */
2488 2508
2489 2509 	uint16_t fb_rev;
2490 2510 	uint16_t min_external_loopid; /* First external loop Id */
@@ -2494,6 +2514,7 @@ struct qla_hw_data {
2494 2514 #define PORT_SPEED_2GB 0x01
2495 2515 #define PORT_SPEED_4GB 0x03
2496 2516 #define PORT_SPEED_8GB 0x04
2517 #define PORT_SPEED_16GB 0x05
2497 2518 #define PORT_SPEED_10GB 0x13
2498 2519 	uint16_t link_data_rate; /* F/W operating speed */
2499 2520
@@ -2515,6 +2536,8 @@ struct qla_hw_data {
2515 2536 #define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532
2516 2537 #define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432
2517 2538 #define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001
2539 #define PCI_DEVICE_ID_QLOGIC_ISP8031 0x8031
2540 #define PCI_DEVICE_ID_QLOGIC_ISP2031 0x2031
2518 2541 	uint32_t device_type;
2519 2542 #define DT_ISP2100 BIT_0
2520 2543 #define DT_ISP2200 BIT_1
@@ -2531,7 +2554,9 @@ struct qla_hw_data {
2531 2554 #define DT_ISP8432 BIT_12
2532 2555 #define DT_ISP8001 BIT_13
2533 2556 #define DT_ISP8021 BIT_14
2534 #define DT_ISP_LAST (DT_ISP8021 << 1)
2557 #define DT_ISP2031 BIT_15
2558 #define DT_ISP8031 BIT_16
2559 #define DT_ISP_LAST (DT_ISP8031 << 1)
2535 2560
2536 2561 #define DT_T10_PI BIT_25
2537 2562 #define DT_IIDMA BIT_26
@@ -2555,26 +2580,30 @@ struct qla_hw_data {
2555 2580 #define IS_QLA2532(ha) (DT_MASK(ha) & DT_ISP2532)
2556 2581 #define IS_QLA8432(ha) (DT_MASK(ha) & DT_ISP8432)
2557 2582 #define IS_QLA8001(ha) (DT_MASK(ha) & DT_ISP8001)
2583 #define IS_QLA81XX(ha) (IS_QLA8001(ha))
2558 2584 #define IS_QLA82XX(ha) (DT_MASK(ha) & DT_ISP8021)
2585 #define IS_QLA2031(ha) (DT_MASK(ha) & DT_ISP2031)
2586 #define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031)
2559 2587
2560 2588 #define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
2561 2589 	IS_QLA6312(ha) || IS_QLA6322(ha))
2562 2590 #define IS_QLA24XX(ha) (IS_QLA2422(ha) || IS_QLA2432(ha))
2563 2591 #define IS_QLA54XX(ha) (IS_QLA5422(ha) || IS_QLA5432(ha))
2564 2592 #define IS_QLA25XX(ha) (IS_QLA2532(ha))
2593 #define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha))
2565 2594 #define IS_QLA84XX(ha) (IS_QLA8432(ha))
2566 2595 #define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
2567 2596 	IS_QLA84XX(ha))
2568 #define IS_QLA81XX(ha) (IS_QLA8001(ha))
2569 #define IS_QLA8XXX_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha))
2597 #define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
2598 	IS_QLA8031(ha))
2570 2599 #define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
2571 2600 	IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
2572 	IS_QLA82XX(ha))
2601 	IS_QLA82XX(ha) || IS_QLA83XX(ha))
2573 #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha))
2602 #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
2574 #define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && \
2603 #define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
2575 	(ha)->flags.msix_enabled)
2604 	IS_QLA83XX(ha)) && (ha)->flags.msix_enabled)
2576 #define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha))
2605 #define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
2577 #define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha))
2606 #define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
2578 2607 #define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
2579 2608
2580 2609 #define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI)
@@ -2583,6 +2612,8 @@ struct qla_hw_data {
2583 2612 #define IS_ZIO_SUPPORTED(ha) ((ha)->device_type & DT_ZIO_SUPPORTED)
2584 2613 #define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001)
2585 2614 #define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
2615 #define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED)
2616 #define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha))
2586 2617
2587 2618 	/* HBA serial number */
2588 2619 	uint8_t serial0;
@@ -2621,10 +2652,6 @@ struct qla_hw_data {
2621 2652 	void *sfp_data;
2622 2653 	dma_addr_t sfp_data_dma;
2623 2654
2624 	uint8_t *edc_data;
2625 	dma_addr_t edc_data_dma;
2626 	uint16_t edc_data_len;
2627
2628 2655 #define XGMAC_DATA_SIZE 4096
2629 2656 	void *xgmac_data;
2630 2657 	dma_addr_t xgmac_data_dma;
@@ -2653,6 +2680,8 @@ struct qla_hw_data {
2653 2680 	void *async_pd;
2654 2681 	dma_addr_t async_pd_dma;
2655 2682
2683 	void *swl;
2684
2656 2685 	/* These are used by mailbox operations. */
2657 2686 	volatile uint16_t mailbox_out[MAILBOX_REGISTER_COUNT];
2658 2687
@@ -2674,6 +2703,8 @@ struct qla_hw_data {
2674 2703 	uint16_t fw_minor_version;
2675 2704 	uint16_t fw_subminor_version;
2676 2705 	uint16_t fw_attributes;
2706 	uint16_t fw_attributes_h;
2707 	uint16_t fw_attributes_ext[2];
2677 2708 	uint32_t fw_memory_size;
2678 2709 	uint32_t fw_transfer_size;
2679 2710 	uint32_t fw_srisc_address;
@@ -2851,7 +2882,6 @@ typedef struct scsi_qla_host {
2851 2882 	volatile struct {
2852 2883 		uint32_t init_done :1;
2853 2884 		uint32_t online :1;
2854 		uint32_t rscn_queue_overflow :1;
2855 2885 		uint32_t reset_active :1;
2856 2886
2857 2887 		uint32_t management_server_logged_in :1;
@@ -2905,11 +2935,6 @@ typedef struct scsi_qla_host {
2905 2935
2906 2936
2907 2937
2908 	/* RSCN queue. */
2909 	uint32_t rscn_queue[MAX_RSCN_COUNT];
2910 	uint8_t rscn_in_ptr;
2911 	uint8_t rscn_out_ptr;
2912
2913 2938 	/* Timeout timers. */
2914 2939 	uint8_t loop_down_abort_time; /* port down timer */
2915 2940 	atomic_t loop_down_timer; /* loop down timer */
@@ -3005,7 +3030,6 @@ typedef struct scsi_qla_host {
3005 3030 #define QLA_ABORTED 0x105
3006 3031 #define QLA_SUSPENDED 0x106
3007 3032 #define QLA_BUSY 0x107
3008 #define QLA_RSCNS_HANDLED 0x108
3009 3033 #define QLA_ALREADY_REGISTERED 0x109
3010 3034
3011 3035 #define NVRAM_DELAY() udelay(10)
@@ -3021,6 +3045,7 @@ typedef struct scsi_qla_host {
3021 3045 #define OPTROM_SIZE_25XX 0x200000
3022 3046 #define OPTROM_SIZE_81XX 0x400000
3023 3047 #define OPTROM_SIZE_82XX 0x800000
3048 #define OPTROM_SIZE_83XX 0x1000000
3024 3049
3025 3050 #define OPTROM_BURST_SIZE 0x1000
3026 3051 #define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4)
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 0b4c2b794c6..499c74e39ee 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -114,7 +114,7 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
114 114 {
115 115 	struct qla_hw_data *ha = vha->hw;
116 116
117 	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
117 	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
118 118 		goto out;
119 119 	if (!ha->fce)
120 120 		goto out;
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index aa69486dc06..6d7d7758c79 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1327,6 +1327,11 @@ struct qla_flt_header {
1327 1327 #define FLT_REG_GOLD_FW 0x2f
1328 1328 #define FLT_REG_FCP_PRIO_0 0x87
1329 1329 #define FLT_REG_FCP_PRIO_1 0x88
1330 #define FLT_REG_FCOE_FW 0xA4
1331 #define FLT_REG_FCOE_VPD_0 0xA9
1332 #define FLT_REG_FCOE_NVRAM_0 0xAA
1333 #define FLT_REG_FCOE_VPD_1 0xAB
1334 #define FLT_REG_FCOE_NVRAM_1 0xAC
1330 1335
1331 1336 struct qla_flt_region {
1332 1337 	uint32_t code;
@@ -1494,6 +1499,11 @@ struct access_chip_rsp_84xx {
1494 1499 #define MBC_GET_XGMAC_STATS 0x7a
1495 1500 #define MBC_GET_DCBX_PARAMS 0x51
1496 1501
1502 /*
1503  * ISP83xx mailbox commands
1504  */
1505 #define MBC_WRITE_REMOTE_REG 0x0001 /* Write remote register */
1506
1497 1507 /* Flash access control option field bit definitions */
1498 1508 #define FAC_OPT_FORCE_SEMAPHORE BIT_15
1499 1509 #define FAC_OPT_REQUESTOR_ID BIT_14
@@ -1875,4 +1885,7 @@ struct qla_fcp_prio_cfg {
1875 1885 #define FA_NPIV_CONF0_ADDR_81 0xD1000
1876 1886 #define FA_NPIV_CONF1_ADDR_81 0xD2000
1877 1887
1888 /* 83XX Flash locations -- occupies second 8MB region. */
1889 #define FA_FLASH_LAYOUT_ADDR_83 0xFC400
1890
1878 1891 #endif
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 408679be8fd..9f065804bd1 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -71,8 +71,6 @@ extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
71 71 	uint16_t *);
72 72 extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *,
73 73 	uint16_t *);
74 extern void qla2x00_async_tm_cmd_done(struct scsi_qla_host *, fc_port_t *,
75 	struct srb_iocb *);
76 74 extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
77 75 extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
78 76
@@ -156,8 +154,7 @@ extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *);
156 154 extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *);
157 155 extern scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *);
158 156
159 extern void qla2x00_sp_compl(struct qla_hw_data *, srb_t *);
157 extern void qla2x00_sp_free_dma(void *, void *);
160
161 158 extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *);
162 159
163 160 extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int);
@@ -205,8 +202,7 @@ extern int
205 202 qla2x00_execute_fw(scsi_qla_host_t *, uint32_t);
206 203
207 204 extern int
208 qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *, uint16_t *, uint16_t *,
209 	uint16_t *, uint32_t *, uint8_t *, uint32_t *, uint8_t *);
205 qla2x00_get_fw_version(scsi_qla_host_t *);
210 206
211 207 extern int
212 208 qla2x00_get_fw_options(scsi_qla_host_t *, uint16_t *);
@@ -371,6 +367,9 @@ qla81xx_get_port_config(scsi_qla_host_t *, uint16_t *);
371 367 extern int
372 368 qla81xx_set_port_config(scsi_qla_host_t *, uint16_t *);
373 369
370 extern int
371 qla2x00_port_logout(scsi_qla_host_t *, struct fc_port *);
372
374 373 /*
375 374  * Global Function Prototypes in qla_isr.c source file.
376 375  */
@@ -409,8 +408,10 @@ extern void qla2x00_beacon_blink(struct scsi_qla_host *);
409 408 extern int qla24xx_beacon_on(struct scsi_qla_host *);
410 409 extern int qla24xx_beacon_off(struct scsi_qla_host *);
411 410 extern void qla24xx_beacon_blink(struct scsi_qla_host *);
411 extern void qla83xx_beacon_blink(struct scsi_qla_host *);
412 412 extern int qla82xx_beacon_on(struct scsi_qla_host *);
413 413 extern int qla82xx_beacon_off(struct scsi_qla_host *);
414 extern int qla83xx_write_remote_reg(struct scsi_qla_host *, uint32_t, uint32_t);
414 415
415 416 extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *,
416 417 	uint32_t, uint32_t);
@@ -541,6 +542,10 @@ extern int qla82xx_restart_isp(scsi_qla_host_t *);
541 542
542 543 /* IOCB related functions */
543 544 extern int qla82xx_start_scsi(srb_t *);
545 extern void qla2x00_sp_free(void *, void *);
546 extern void qla2x00_sp_timeout(unsigned long);
547 extern void qla2x00_bsg_job_done(void *, void *, int);
548 extern void qla2x00_bsg_sp_free(void *, void *);
544 549
545 550 /* Interrupt related */
546 551 extern irqreturn_t qla82xx_intr_handler(int, void *);
@@ -576,6 +581,8 @@ extern void qla82xx_start_iocbs(scsi_qla_host_t *);
576 581 extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
577 582 extern int qla82xx_check_md_needed(scsi_qla_host_t *);
578 583 extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
584 extern int qla81xx_set_led_config(scsi_qla_host_t *, uint16_t *);
585 extern int qla81xx_get_led_config(scsi_qla_host_t *, uint16_t *);
579 586 extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
580 587 extern char *qdev_state(uint32_t);
581 588 extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
@@ -589,6 +596,9 @@ extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *,
589 596 extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t,
590 597 	uint16_t *, uint16_t *);
591 598
599 /* 83xx related functions */
600 extern void qla83xx_fw_dump(scsi_qla_host_t *, int);
601
592 602 /* Minidump related functions */
593 603 extern int qla82xx_md_get_template_size(scsi_qla_host_t *);
594 604 extern int qla82xx_md_get_template(scsi_qla_host_t *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 4aea4ae2330..3128f80441f 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -240,6 +240,12 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
240 240 	return (rval);
241 241 }
242 242
243static inline int
244qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha)
245{
246 return vha->hw->max_fibre_devices * 4 + 16;
247}
248
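As a worked example of the arithmetic above (device counts per the qla_def.h changes earlier in this patch), the GID_PT response is a 16-byte CT header plus one 4-byte port-ID entry per device:

/*
 * illustrative values only:
 *   ISP2100-class: 512 devices  -> 512 * 4 + 16  = 2064 bytes
 *   ISP24xx-class: 2048 devices -> 2048 * 4 + 16 = 8208 bytes
 */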
243 249 /**
244 250  * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command.
245 251  * @ha: HA context
@@ -261,20 +267,21 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
261 267
262 268 	struct ct_sns_gid_pt_data *gid_data;
263 269 	struct qla_hw_data *ha = vha->hw;
270 	uint16_t gid_pt_rsp_size;
264 271
265 272 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
266 273 		return qla2x00_sns_gid_pt(vha, list);
267 274
268 275 	gid_data = NULL;
269
276 	gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha);
270 277 	/* Issue GID_PT */
271 278 	/* Prepare common MS IOCB */
272 279 	ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GID_PT_REQ_SIZE,
273 	    GID_PT_RSP_SIZE);
280 	    gid_pt_rsp_size);
274 281
275 282 	/* Prepare CT request */
276 283 	ct_req = qla2x00_prep_ct_req(&ha->ct_sns->p.req, GID_PT_CMD,
277 	    GID_PT_RSP_SIZE);
284 	    gid_pt_rsp_size);
278 285 	ct_rsp = &ha->ct_sns->p.rsp;
279 286
280 287 	/* Prepare CT arguments -- port_type */
@@ -292,7 +299,7 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
292 299 		rval = QLA_FUNCTION_FAILED;
293 300 	} else {
294 301 		/* Set port IDs in switch info list. */
295 		for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
302 		for (i = 0; i < ha->max_fibre_devices; i++) {
296 303 			gid_data = &ct_rsp->rsp.gid_pt.entries[i];
297 304 			list[i].d_id.b.domain = gid_data->port_id[0];
298 305 			list[i].d_id.b.area = gid_data->port_id[1];
@@ -313,7 +320,7 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
313 320 		 * single call. Return a failed status, and let GA_NXT handle
314 321 		 * the overload.
315 322 		 */
316 		if (i == MAX_FIBRE_DEVICES)
323 		if (i == ha->max_fibre_devices)
317 324 			rval = QLA_FUNCTION_FAILED;
318 325 	}
319 326
@@ -330,7 +337,7 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
330 337 int
331 338 qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
332 339 {
333 	int rval;
340 	int rval = QLA_SUCCESS;
334 341 	uint16_t i;
335 342
336 343 	ms_iocb_entry_t *ms_pkt;
@@ -341,7 +348,7 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
341 348 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
342 349 		return qla2x00_sns_gpn_id(vha, list);
343 350
344 	for (i = 0; i < MAX_FIBRE_DEVICES; i++) {
351 	for (i = 0; i < ha->max_fibre_devices; i++) {
345 352 		/* Issue GPN_ID */
346 353 		/* Prepare common MS IOCB */
347 354 		ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GPN_ID_REQ_SIZE,
@@ -364,9 +371,11 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
364 371 			/*EMPTY*/
365 372 			ql_dbg(ql_dbg_disc, vha, 0x2056,
366 373 			    "GPN_ID issue IOCB failed (%d).\n", rval);
374 			break;
367 375 		} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
368 376 		    "GPN_ID") != QLA_SUCCESS) {
369 377 			rval = QLA_FUNCTION_FAILED;
378 			break;
370 379 		} else {
371 380 			/* Save portname */
372 381 			memcpy(list[i].port_name,
@@ -391,7 +400,7 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
391 400 int
392 401 qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
393 402 {
394 	int rval;
403 	int rval = QLA_SUCCESS;
395 404 	uint16_t i;
396 405 	struct qla_hw_data *ha = vha->hw;
397 406 	ms_iocb_entry_t *ms_pkt;
@@ -401,7 +410,7 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
401 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 410 if (IS_QLA2100(ha) || IS_QLA2200(ha))
402 return qla2x00_sns_gnn_id(vha, list); 411 return qla2x00_sns_gnn_id(vha, list);
403 412
404 for (i = 0; i < MAX_FIBRE_DEVICES; i++) { 413 for (i = 0; i < ha->max_fibre_devices; i++) {
405 /* Issue GNN_ID */ 414 /* Issue GNN_ID */
406 /* Prepare common MS IOCB */ 415 /* Prepare common MS IOCB */
407 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GNN_ID_REQ_SIZE, 416 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GNN_ID_REQ_SIZE,
@@ -424,9 +433,11 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
424 /*EMPTY*/ 433 /*EMPTY*/
425 ql_dbg(ql_dbg_disc, vha, 0x2057, 434 ql_dbg(ql_dbg_disc, vha, 0x2057,
426 "GNN_ID issue IOCB failed (%d).\n", rval); 435 "GNN_ID issue IOCB failed (%d).\n", rval);
436 break;
427 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 437 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
428 "GNN_ID") != QLA_SUCCESS) { 438 "GNN_ID") != QLA_SUCCESS) {
429 rval = QLA_FUNCTION_FAILED; 439 rval = QLA_FUNCTION_FAILED;
440 break;
430 } else { 441 } else {
431 /* Save nodename */ 442 /* Save nodename */
432 memcpy(list[i].node_name, 443 memcpy(list[i].node_name,
@@ -735,7 +746,7 @@ qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
735static int 746static int
736qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) 747qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
737{ 748{
738 int rval; 749 int rval = QLA_SUCCESS;
739 struct qla_hw_data *ha = vha->hw; 750 struct qla_hw_data *ha = vha->hw;
740 struct sns_cmd_pkt *sns_cmd; 751 struct sns_cmd_pkt *sns_cmd;
741 752
@@ -814,11 +825,14 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
814 uint16_t i; 825 uint16_t i;
815 uint8_t *entry; 826 uint8_t *entry;
816 struct sns_cmd_pkt *sns_cmd; 827 struct sns_cmd_pkt *sns_cmd;
828 uint16_t gid_pt_sns_data_size;
829
830 gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha);
817 831
818 /* Issue GID_PT. */ 832 /* Issue GID_PT. */
819 /* Prepare SNS command request. */ 833 /* Prepare SNS command request. */
820 sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN, 834 sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
821 GID_PT_SNS_DATA_SIZE); 835 gid_pt_sns_data_size);
822 836
823 /* Prepare SNS command arguments -- port_type. */ 837 /* Prepare SNS command arguments -- port_type. */
824 sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE; 838 sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
@@ -839,7 +853,7 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
839 rval = QLA_FUNCTION_FAILED; 853 rval = QLA_FUNCTION_FAILED;
840 } else { 854 } else {
841 /* Set port IDs in switch info list. */ 855 /* Set port IDs in switch info list. */
842 for (i = 0; i < MAX_FIBRE_DEVICES; i++) { 856 for (i = 0; i < ha->max_fibre_devices; i++) {
843 entry = &sns_cmd->p.gid_data[(i * 4) + 16]; 857 entry = &sns_cmd->p.gid_data[(i * 4) + 16];
844 list[i].d_id.b.domain = entry[1]; 858 list[i].d_id.b.domain = entry[1];
845 list[i].d_id.b.area = entry[2]; 859 list[i].d_id.b.area = entry[2];
@@ -858,7 +872,7 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
858 * single call. Return a failed status, and let GA_NXT handle 872 * single call. Return a failed status, and let GA_NXT handle
859 * the overload. 873 * the overload.
860 */ 874 */
861 if (i == MAX_FIBRE_DEVICES) 875 if (i == ha->max_fibre_devices)
862 rval = QLA_FUNCTION_FAILED; 876 rval = QLA_FUNCTION_FAILED;
863 } 877 }
864 878
@@ -877,12 +891,12 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
877static int 891static int
878qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) 892qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
879{ 893{
880 int rval; 894 int rval = QLA_SUCCESS;
881 struct qla_hw_data *ha = vha->hw; 895 struct qla_hw_data *ha = vha->hw;
882 uint16_t i; 896 uint16_t i;
883 struct sns_cmd_pkt *sns_cmd; 897 struct sns_cmd_pkt *sns_cmd;
884 898
885 for (i = 0; i < MAX_FIBRE_DEVICES; i++) { 899 for (i = 0; i < ha->max_fibre_devices; i++) {
886 /* Issue GPN_ID */ 900 /* Issue GPN_ID */
887 /* Prepare SNS command request. */ 901 /* Prepare SNS command request. */
888 sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD, 902 sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
@@ -933,12 +947,12 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
933static int 947static int
934qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) 948qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
935{ 949{
936 int rval; 950 int rval = QLA_SUCCESS;
937 struct qla_hw_data *ha = vha->hw; 951 struct qla_hw_data *ha = vha->hw;
938 uint16_t i; 952 uint16_t i;
939 struct sns_cmd_pkt *sns_cmd; 953 struct sns_cmd_pkt *sns_cmd;
940 954
941 for (i = 0; i < MAX_FIBRE_DEVICES; i++) { 955 for (i = 0; i < ha->max_fibre_devices; i++) {
942 /* Issue GNN_ID */ 956 /* Issue GNN_ID */
943 /* Prepare SNS command request. */ 957 /* Prepare SNS command request. */
944 sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD, 958 sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
@@ -1107,20 +1121,26 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
1107static int 1121static int
1108qla2x00_mgmt_svr_login(scsi_qla_host_t *vha) 1122qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1109{ 1123{
1110 int ret; 1124 int ret, rval;
1111 uint16_t mb[MAILBOX_REGISTER_COUNT]; 1125 uint16_t mb[MAILBOX_REGISTER_COUNT];
1112 struct qla_hw_data *ha = vha->hw; 1126 struct qla_hw_data *ha = vha->hw;
1113 ret = QLA_SUCCESS; 1127 ret = QLA_SUCCESS;
1114 if (vha->flags.management_server_logged_in) 1128 if (vha->flags.management_server_logged_in)
1115 return ret; 1129 return ret;
1116 1130
1117 ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa, 1131 rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
1118 mb, BIT_1|BIT_0); 1132 0xfa, mb, BIT_1|BIT_0);
1119 if (mb[0] != MBS_COMMAND_COMPLETE) { 1133 if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
1120 ql_dbg(ql_dbg_disc, vha, 0x2024, 1134 if (rval == QLA_MEMORY_ALLOC_FAILED)
1121 "Failed management_server login: loopid=%x mb[0]=%x " 1135 ql_dbg(ql_dbg_disc, vha, 0x2085,
1122 "mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n", 1136 "Failed management_server login: loopid=%x "
1123 vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6], mb[7]); 1137 "rval=%d\n", vha->mgmt_svr_loop_id, rval);
1138 else
1139 ql_dbg(ql_dbg_disc, vha, 0x2024,
1140 "Failed management_server login: loopid=%x "
1141 "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
1142 vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6],
1143 mb[7]);
1124 ret = QLA_FUNCTION_FAILED; 1144 ret = QLA_FUNCTION_FAILED;
1125 } else 1145 } else
1126 vha->flags.management_server_logged_in = 1; 1146 vha->flags.management_server_logged_in = 1;
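
The hunk above makes qla2x00_mgmt_svr_login() check the return code of the fabric_login mailbox call itself before trusting the mailbox status word, so a transport-level failure (e.g. QLA_MEMORY_ALLOC_FAILED) is reported distinctly from a protocol-level rejection. A minimal sketch of the two-level check, using only identifiers that appear in the hunk:

    /* Sketch: rval says whether the mailbox command could be issued at
     * all; mb[0] says what the firmware/fabric actually answered. */
    rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id,
        0xff, 0xff, 0xfa, mb, BIT_1 | BIT_0);
    if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE)
            ret = QLA_FUNCTION_FAILED;  /* log rval or mb[] as appropriate */
    else
            vha->flags.management_server_logged_in = 1;
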
@@ -1547,7 +1567,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1547 eiter = (struct ct_fdmi_port_attr *) (entries + size); 1567 eiter = (struct ct_fdmi_port_attr *) (entries + size);
1548 eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED); 1568 eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
1549 eiter->len = __constant_cpu_to_be16(4 + 4); 1569 eiter->len = __constant_cpu_to_be16(4 + 4);
1550 if (IS_QLA8XXX_TYPE(ha)) 1570 if (IS_CNA_CAPABLE(ha))
1551 eiter->a.sup_speed = __constant_cpu_to_be32( 1571 eiter->a.sup_speed = __constant_cpu_to_be32(
1552 FDMI_PORT_SPEED_10GB); 1572 FDMI_PORT_SPEED_10GB);
1553 else if (IS_QLA25XX(ha)) 1573 else if (IS_QLA25XX(ha))
@@ -1594,6 +1614,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1594 eiter->a.cur_speed = 1614 eiter->a.cur_speed =
1595 __constant_cpu_to_be32(FDMI_PORT_SPEED_10GB); 1615 __constant_cpu_to_be32(FDMI_PORT_SPEED_10GB);
1596 break; 1616 break;
1617 case PORT_SPEED_16GB:
1618 eiter->a.cur_speed =
1619 __constant_cpu_to_be32(FDMI_PORT_SPEED_16GB);
1620 break;
1597 default: 1621 default:
1598 eiter->a.cur_speed = 1622 eiter->a.cur_speed =
1599 __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN); 1623 __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
@@ -1724,7 +1748,7 @@ qla2x00_fdmi_register(scsi_qla_host_t *vha)
1724int 1748int
1725qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list) 1749qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
1726{ 1750{
1727 int rval; 1751 int rval = QLA_SUCCESS;
1728 uint16_t i; 1752 uint16_t i;
1729 struct qla_hw_data *ha = vha->hw; 1753 struct qla_hw_data *ha = vha->hw;
1730 ms_iocb_entry_t *ms_pkt; 1754 ms_iocb_entry_t *ms_pkt;
@@ -1734,7 +1758,7 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
1734 if (!IS_IIDMA_CAPABLE(ha)) 1758 if (!IS_IIDMA_CAPABLE(ha))
1735 return QLA_FUNCTION_FAILED; 1759 return QLA_FUNCTION_FAILED;
1736 1760
1737 for (i = 0; i < MAX_FIBRE_DEVICES; i++) { 1761 for (i = 0; i < ha->max_fibre_devices; i++) {
1738 /* Issue GFPN_ID */ 1762 /* Issue GFPN_ID */
1739 /* Prepare common MS IOCB */ 1763 /* Prepare common MS IOCB */
1740 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GFPN_ID_REQ_SIZE, 1764 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, GFPN_ID_REQ_SIZE,
@@ -1757,9 +1781,11 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
1757 /*EMPTY*/ 1781 /*EMPTY*/
1758 ql_dbg(ql_dbg_disc, vha, 0x2023, 1782 ql_dbg(ql_dbg_disc, vha, 0x2023,
1759 "GFPN_ID issue IOCB failed (%d).\n", rval); 1783 "GFPN_ID issue IOCB failed (%d).\n", rval);
1784 break;
1760 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, 1785 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
1761 "GFPN_ID") != QLA_SUCCESS) { 1786 "GFPN_ID") != QLA_SUCCESS) {
1762 rval = QLA_FUNCTION_FAILED; 1787 rval = QLA_FUNCTION_FAILED;
1788 break;
1763 } else { 1789 } else {
1764 /* Save fabric portname */ 1790 /* Save fabric portname */
1765 memcpy(list[i].fabric_port_name, 1791 memcpy(list[i].fabric_port_name,
@@ -1846,7 +1872,7 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
1846 if (rval) 1872 if (rval)
1847 return rval; 1873 return rval;
1848 1874
1849 for (i = 0; i < MAX_FIBRE_DEVICES; i++) { 1875 for (i = 0; i < ha->max_fibre_devices; i++) {
1850 /* Issue GFPN_ID */ 1876 /* Issue GFPN_ID */
1851 /* Prepare common MS IOCB */ 1877 /* Prepare common MS IOCB */
1852 ms_pkt = qla24xx_prep_ms_fm_iocb(vha, GPSC_REQ_SIZE, 1878 ms_pkt = qla24xx_prep_ms_fm_iocb(vha, GPSC_REQ_SIZE,
@@ -1947,7 +1973,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
1947 struct qla_hw_data *ha = vha->hw; 1973 struct qla_hw_data *ha = vha->hw;
1948 uint8_t fcp_scsi_features = 0; 1974 uint8_t fcp_scsi_features = 0;
1949 1975
1950 for (i = 0; i < MAX_FIBRE_DEVICES; i++) { 1976 for (i = 0; i < ha->max_fibre_devices; i++) {
1951 /* Set default FC4 Type as UNKNOWN so the default is to 1977 /* Set default FC4 Type as UNKNOWN so the default is to
1952 * Process this port */ 1978 * Process this port */
1953 list[i].fc4_type = FC4_TYPE_UNKNOWN; 1979 list[i].fc4_type = FC4_TYPE_UNKNOWN;
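
Two patterns run through all of the qla_gs.c hunks above: the compile-time MAX_FIBRE_DEVICES bound is replaced by the per-adapter ha->max_fibre_devices, so the name-server scan is sized at probe time, and the GPN_ID/GNN_ID/GFPN_ID loops now break out on the first failed IOCB instead of issuing the remaining queries against a dead link. A condensed sketch of the revised loop shape; issue_query() and check_status() are placeholders for the prep_ms_iocb / issue-IOCB / qla2x00_chk_ms_status sequence, and vha/ha/list come from the enclosing function:

    int rval = QLA_SUCCESS;
    uint16_t i;

    for (i = 0; i < ha->max_fibre_devices; i++) {   /* runtime bound */
            rval = issue_query(vha, &list[i]);
            if (rval != QLA_SUCCESS)
                    break;                  /* IOCB/link failure: stop scan */
            if (check_status(vha, &list[i]) != QLA_SUCCESS) {
                    rval = QLA_FUNCTION_FAILED;
                    break;
            }
            if (list[i].d_id.b.rsvd_1 != 0)
                    break;                  /* last port entry reached */
    }
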
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 1fa067e053d..b9465643396 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -29,7 +29,6 @@ static int qla2x00_configure_loop(scsi_qla_host_t *);
29static int qla2x00_configure_local_loop(scsi_qla_host_t *); 29static int qla2x00_configure_local_loop(scsi_qla_host_t *);
30static int qla2x00_configure_fabric(scsi_qla_host_t *); 30static int qla2x00_configure_fabric(scsi_qla_host_t *);
31static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *); 31static int qla2x00_find_all_fabric_devs(scsi_qla_host_t *, struct list_head *);
32static int qla2x00_device_resync(scsi_qla_host_t *);
33static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *, 32static int qla2x00_fabric_dev_login(scsi_qla_host_t *, fc_port_t *,
34 uint16_t *); 33 uint16_t *);
35 34
@@ -41,11 +40,10 @@ static int qla25xx_init_queues(struct qla_hw_data *);
41 40
42/* SRB Extensions ---------------------------------------------------------- */ 41/* SRB Extensions ---------------------------------------------------------- */
43 42
44static void 43void
45qla2x00_ctx_sp_timeout(unsigned long __data) 44qla2x00_sp_timeout(unsigned long __data)
46{ 45{
47 srb_t *sp = (srb_t *)__data; 46 srb_t *sp = (srb_t *)__data;
48 struct srb_ctx *ctx;
49 struct srb_iocb *iocb; 47 struct srb_iocb *iocb;
50 fc_port_t *fcport = sp->fcport; 48 fc_port_t *fcport = sp->fcport;
51 struct qla_hw_data *ha = fcport->vha->hw; 49 struct qla_hw_data *ha = fcport->vha->hw;
@@ -55,79 +53,25 @@ qla2x00_ctx_sp_timeout(unsigned long __data)
55 spin_lock_irqsave(&ha->hardware_lock, flags); 53 spin_lock_irqsave(&ha->hardware_lock, flags);
56 req = ha->req_q_map[0]; 54 req = ha->req_q_map[0];
57 req->outstanding_cmds[sp->handle] = NULL; 55 req->outstanding_cmds[sp->handle] = NULL;
58 ctx = sp->ctx; 56 iocb = &sp->u.iocb_cmd;
59 iocb = ctx->u.iocb_cmd;
60 iocb->timeout(sp); 57 iocb->timeout(sp);
61 iocb->free(sp); 58 sp->free(fcport->vha, sp);
62 spin_unlock_irqrestore(&ha->hardware_lock, flags); 59 spin_unlock_irqrestore(&ha->hardware_lock, flags);
63} 60}
64 61
65static void 62void
66qla2x00_ctx_sp_free(srb_t *sp) 63qla2x00_sp_free(void *data, void *ptr)
67{ 64{
68 struct srb_ctx *ctx = sp->ctx; 65 srb_t *sp = (srb_t *)ptr;
69 struct srb_iocb *iocb = ctx->u.iocb_cmd; 66 struct srb_iocb *iocb = &sp->u.iocb_cmd;
70 struct scsi_qla_host *vha = sp->fcport->vha; 67 struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
71 68
72 del_timer(&iocb->timer); 69 del_timer(&iocb->timer);
73 kfree(iocb); 70 mempool_free(sp, vha->hw->srb_mempool);
74 kfree(ctx);
75 mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
76 71
77 QLA_VHA_MARK_NOT_BUSY(vha); 72 QLA_VHA_MARK_NOT_BUSY(vha);
78} 73}
79 74
80inline srb_t *
81qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
82 unsigned long tmo)
83{
84 srb_t *sp = NULL;
85 struct qla_hw_data *ha = vha->hw;
86 struct srb_ctx *ctx;
87 struct srb_iocb *iocb;
88 uint8_t bail;
89
90 QLA_VHA_MARK_BUSY(vha, bail);
91 if (bail)
92 return NULL;
93
94 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
95 if (!sp)
96 goto done;
97 ctx = kzalloc(size, GFP_KERNEL);
98 if (!ctx) {
99 mempool_free(sp, ha->srb_mempool);
100 sp = NULL;
101 goto done;
102 }
103 iocb = kzalloc(sizeof(struct srb_iocb), GFP_KERNEL);
104 if (!iocb) {
105 mempool_free(sp, ha->srb_mempool);
106 sp = NULL;
107 kfree(ctx);
108 goto done;
109 }
110
111 memset(sp, 0, sizeof(*sp));
112 sp->fcport = fcport;
113 sp->ctx = ctx;
114 ctx->iocbs = 1;
115 ctx->u.iocb_cmd = iocb;
116 iocb->free = qla2x00_ctx_sp_free;
117
118 init_timer(&iocb->timer);
119 if (!tmo)
120 goto done;
121 iocb->timer.expires = jiffies + tmo * HZ;
122 iocb->timer.data = (unsigned long)sp;
123 iocb->timer.function = qla2x00_ctx_sp_timeout;
124 add_timer(&iocb->timer);
125done:
126 if (!sp)
127 QLA_VHA_MARK_NOT_BUSY(vha);
128 return sp;
129}
130
131/* Asynchronous Login/Logout Routines -------------------------------------- */ 75/* Asynchronous Login/Logout Routines -------------------------------------- */
132 76
133static inline unsigned long 77static inline unsigned long
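
The hunk above is the heart of the srb rework: the separately kzalloc'd struct srb_ctx and struct srb_iocb are folded into the srb itself, so one mempool allocation carries the command type, name, embedded IOCB state, and the done/free callbacks. The shape below is inferred from the accesses in these hunks (sp->type, sp->name, sp->u.iocb_cmd, sp->done, sp->free); the authoritative definition lives in qla_def.h, which is not part of this excerpt:

    /* Inferred shape of the consolidated srb; see qla_def.h for the
     * real layout. */
    typedef struct srb {
            fc_port_t *fcport;
            uint32_t handle;
            uint8_t type;              /* SRB_LOGIN_CMD, SRB_TM_CMD, ... */
            const char *name;          /* "login", "logout", "adisc", "tmf" */
            int iocbs;
            union {
                    struct srb_iocb iocb_cmd;  /* timer + u.logio/u.tmf state */
            } u;
            void (*done)(void *vha, void *sp, int res);
            void (*free)(void *vha, void *sp);
    } srb_t;
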
@@ -149,19 +93,19 @@ qla2x00_get_async_timeout(struct scsi_qla_host *vha)
149} 93}
150 94
151static void 95static void
152qla2x00_async_iocb_timeout(srb_t *sp) 96qla2x00_async_iocb_timeout(void *data)
153{ 97{
98 srb_t *sp = (srb_t *)data;
154 fc_port_t *fcport = sp->fcport; 99 fc_port_t *fcport = sp->fcport;
155 struct srb_ctx *ctx = sp->ctx;
156 100
157 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071, 101 ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
158 "Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n", 102 "Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n",
159 ctx->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, 103 sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
160 fcport->d_id.b.al_pa); 104 fcport->d_id.b.al_pa);
161 105
162 fcport->flags &= ~FCF_ASYNC_SENT; 106 fcport->flags &= ~FCF_ASYNC_SENT;
163 if (ctx->type == SRB_LOGIN_CMD) { 107 if (sp->type == SRB_LOGIN_CMD) {
164 struct srb_iocb *lio = ctx->u.iocb_cmd; 108 struct srb_iocb *lio = &sp->u.iocb_cmd;
165 qla2x00_post_async_logout_work(fcport->vha, fcport, NULL); 109 qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
166 /* Retry as needed. */ 110 /* Retry as needed. */
167 lio->u.logio.data[0] = MBS_COMMAND_ERROR; 111 lio->u.logio.data[0] = MBS_COMMAND_ERROR;
@@ -173,14 +117,16 @@ qla2x00_async_iocb_timeout(srb_t *sp)
173} 117}
174 118
175static void 119static void
176qla2x00_async_login_ctx_done(srb_t *sp) 120qla2x00_async_login_sp_done(void *data, void *ptr, int res)
177{ 121{
178 struct srb_ctx *ctx = sp->ctx; 122 srb_t *sp = (srb_t *)ptr;
179 struct srb_iocb *lio = ctx->u.iocb_cmd; 123 struct srb_iocb *lio = &sp->u.iocb_cmd;
180 124 struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
181 qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport, 125
182 lio->u.logio.data); 126 if (!test_bit(UNLOADING, &vha->dpc_flags))
183 lio->free(sp); 127 qla2x00_post_async_login_done_work(sp->fcport->vha, sp->fcport,
128 lio->u.logio.data);
129 sp->free(sp->fcport->vha, sp);
184} 130}
185 131
186int 132int
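
All four done callbacks rewritten in this file (login, logout, adisc, tmf) follow the same template: queue follow-up work only when the host is not being torn down, and always release the sp through sp->free(). A template sketch, with post_done_work() standing in for the per-command qla2x00_post_async_*_done_work() helper:

    static void example_sp_done(void *data, void *ptr, int res)
    {
            srb_t *sp = ptr;
            struct srb_iocb *lio = &sp->u.iocb_cmd;
            scsi_qla_host_t *vha = data;

            /* Skip queuing deferred work during driver unload. */
            if (!test_bit(UNLOADING, &vha->dpc_flags))
                    post_done_work(vha, sp->fcport, lio->u.logio.data);
            sp->free(sp->fcport->vha, sp);  /* always freed exactly once */
    }
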
@@ -188,22 +134,21 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
188 uint16_t *data) 134 uint16_t *data)
189{ 135{
190 srb_t *sp; 136 srb_t *sp;
191 struct srb_ctx *ctx;
192 struct srb_iocb *lio; 137 struct srb_iocb *lio;
193 int rval; 138 int rval;
194 139
195 rval = QLA_FUNCTION_FAILED; 140 rval = QLA_FUNCTION_FAILED;
196 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx), 141 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
197 qla2x00_get_async_timeout(vha) + 2);
198 if (!sp) 142 if (!sp)
199 goto done; 143 goto done;
200 144
201 ctx = sp->ctx; 145 sp->type = SRB_LOGIN_CMD;
202 ctx->type = SRB_LOGIN_CMD; 146 sp->name = "login";
203 ctx->name = "login"; 147 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
204 lio = ctx->u.iocb_cmd; 148
149 lio = &sp->u.iocb_cmd;
205 lio->timeout = qla2x00_async_iocb_timeout; 150 lio->timeout = qla2x00_async_iocb_timeout;
206 lio->done = qla2x00_async_login_ctx_done; 151 sp->done = qla2x00_async_login_sp_done;
207 lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI; 152 lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI;
208 if (data[1] & QLA_LOGIO_LOGIN_RETRIED) 153 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
209 lio->u.logio.flags |= SRB_LOGIN_RETRIED; 154 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
@@ -219,42 +164,43 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
219 return rval; 164 return rval;
220 165
221done_free_sp: 166done_free_sp:
222 lio->free(sp); 167 sp->free(fcport->vha, sp);
223done: 168done:
224 return rval; 169 return rval;
225} 170}
226 171
227static void 172static void
228qla2x00_async_logout_ctx_done(srb_t *sp) 173qla2x00_async_logout_sp_done(void *data, void *ptr, int res)
229{ 174{
230 struct srb_ctx *ctx = sp->ctx; 175 srb_t *sp = (srb_t *)ptr;
231 struct srb_iocb *lio = ctx->u.iocb_cmd; 176 struct srb_iocb *lio = &sp->u.iocb_cmd;
232 177 struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
233 qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport, 178
234 lio->u.logio.data); 179 if (!test_bit(UNLOADING, &vha->dpc_flags))
235 lio->free(sp); 180 qla2x00_post_async_logout_done_work(sp->fcport->vha, sp->fcport,
181 lio->u.logio.data);
182 sp->free(sp->fcport->vha, sp);
236} 183}
237 184
238int 185int
239qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) 186qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
240{ 187{
241 srb_t *sp; 188 srb_t *sp;
242 struct srb_ctx *ctx;
243 struct srb_iocb *lio; 189 struct srb_iocb *lio;
244 int rval; 190 int rval;
245 191
246 rval = QLA_FUNCTION_FAILED; 192 rval = QLA_FUNCTION_FAILED;
247 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx), 193 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
248 qla2x00_get_async_timeout(vha) + 2);
249 if (!sp) 194 if (!sp)
250 goto done; 195 goto done;
251 196
252 ctx = sp->ctx; 197 sp->type = SRB_LOGOUT_CMD;
253 ctx->type = SRB_LOGOUT_CMD; 198 sp->name = "logout";
254 ctx->name = "logout"; 199 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
255 lio = ctx->u.iocb_cmd; 200
201 lio = &sp->u.iocb_cmd;
256 lio->timeout = qla2x00_async_iocb_timeout; 202 lio->timeout = qla2x00_async_iocb_timeout;
257 lio->done = qla2x00_async_logout_ctx_done; 203 sp->done = qla2x00_async_logout_sp_done;
258 rval = qla2x00_start_sp(sp); 204 rval = qla2x00_start_sp(sp);
259 if (rval != QLA_SUCCESS) 205 if (rval != QLA_SUCCESS)
260 goto done_free_sp; 206 goto done_free_sp;
@@ -266,20 +212,22 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
266 return rval; 212 return rval;
267 213
268done_free_sp: 214done_free_sp:
269 lio->free(sp); 215 sp->free(fcport->vha, sp);
270done: 216done:
271 return rval; 217 return rval;
272} 218}
273 219
274static void 220static void
275qla2x00_async_adisc_ctx_done(srb_t *sp) 221qla2x00_async_adisc_sp_done(void *data, void *ptr, int res)
276{ 222{
277 struct srb_ctx *ctx = sp->ctx; 223 srb_t *sp = (srb_t *)ptr;
278 struct srb_iocb *lio = ctx->u.iocb_cmd; 224 struct srb_iocb *lio = &sp->u.iocb_cmd;
279 225 struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
280 qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport, 226
281 lio->u.logio.data); 227 if (!test_bit(UNLOADING, &vha->dpc_flags))
282 lio->free(sp); 228 qla2x00_post_async_adisc_done_work(sp->fcport->vha, sp->fcport,
229 lio->u.logio.data);
230 sp->free(sp->fcport->vha, sp);
283} 231}
284 232
285int 233int
@@ -287,22 +235,21 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
287 uint16_t *data) 235 uint16_t *data)
288{ 236{
289 srb_t *sp; 237 srb_t *sp;
290 struct srb_ctx *ctx;
291 struct srb_iocb *lio; 238 struct srb_iocb *lio;
292 int rval; 239 int rval;
293 240
294 rval = QLA_FUNCTION_FAILED; 241 rval = QLA_FUNCTION_FAILED;
295 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx), 242 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
296 qla2x00_get_async_timeout(vha) + 2);
297 if (!sp) 243 if (!sp)
298 goto done; 244 goto done;
299 245
300 ctx = sp->ctx; 246 sp->type = SRB_ADISC_CMD;
301 ctx->type = SRB_ADISC_CMD; 247 sp->name = "adisc";
302 ctx->name = "adisc"; 248 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
303 lio = ctx->u.iocb_cmd; 249
250 lio = &sp->u.iocb_cmd;
304 lio->timeout = qla2x00_async_iocb_timeout; 251 lio->timeout = qla2x00_async_iocb_timeout;
305 lio->done = qla2x00_async_adisc_ctx_done; 252 sp->done = qla2x00_async_adisc_sp_done;
306 if (data[1] & QLA_LOGIO_LOGIN_RETRIED) 253 if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
307 lio->u.logio.flags |= SRB_LOGIN_RETRIED; 254 lio->u.logio.flags |= SRB_LOGIN_RETRIED;
308 rval = qla2x00_start_sp(sp); 255 rval = qla2x00_start_sp(sp);
@@ -316,46 +263,62 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
316 return rval; 263 return rval;
317 264
318done_free_sp: 265done_free_sp:
319 lio->free(sp); 266 sp->free(fcport->vha, sp);
320done: 267done:
321 return rval; 268 return rval;
322} 269}
323 270
324static void 271static void
325qla2x00_async_tm_cmd_ctx_done(srb_t *sp) 272qla2x00_async_tm_cmd_done(void *data, void *ptr, int res)
326{ 273{
327 struct srb_ctx *ctx = sp->ctx; 274 srb_t *sp = (srb_t *)ptr;
328 struct srb_iocb *iocb = (struct srb_iocb *)ctx->u.iocb_cmd; 275 struct srb_iocb *iocb = &sp->u.iocb_cmd;
276 struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
277 uint32_t flags;
278 uint16_t lun;
279 int rval;
280
281 if (!test_bit(UNLOADING, &vha->dpc_flags)) {
282 flags = iocb->u.tmf.flags;
283 lun = (uint16_t)iocb->u.tmf.lun;
284
285 /* Issue Marker IOCB */
286 rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
287 vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun,
288 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
329 289
330 qla2x00_async_tm_cmd_done(sp->fcport->vha, sp->fcport, iocb); 290 if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
331 iocb->free(sp); 291 ql_dbg(ql_dbg_taskm, vha, 0x8030,
292 "TM IOCB failed (%x).\n", rval);
293 }
294 }
295 sp->free(sp->fcport->vha, sp);
332} 296}
333 297
334int 298int
335qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, 299qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t tm_flags, uint32_t lun,
336 uint32_t tag) 300 uint32_t tag)
337{ 301{
338 struct scsi_qla_host *vha = fcport->vha; 302 struct scsi_qla_host *vha = fcport->vha;
339 srb_t *sp; 303 srb_t *sp;
340 struct srb_ctx *ctx;
341 struct srb_iocb *tcf; 304 struct srb_iocb *tcf;
342 int rval; 305 int rval;
343 306
344 rval = QLA_FUNCTION_FAILED; 307 rval = QLA_FUNCTION_FAILED;
345 sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_ctx), 308 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
346 qla2x00_get_async_timeout(vha) + 2);
347 if (!sp) 309 if (!sp)
348 goto done; 310 goto done;
349 311
350 ctx = sp->ctx; 312 sp->type = SRB_TM_CMD;
351 ctx->type = SRB_TM_CMD; 313 sp->name = "tmf";
352 ctx->name = "tmf"; 314 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
353 tcf = ctx->u.iocb_cmd; 315
354 tcf->u.tmf.flags = flags; 316 tcf = &sp->u.iocb_cmd;
317 tcf->u.tmf.flags = tm_flags;
355 tcf->u.tmf.lun = lun; 318 tcf->u.tmf.lun = lun;
356 tcf->u.tmf.data = tag; 319 tcf->u.tmf.data = tag;
357 tcf->timeout = qla2x00_async_iocb_timeout; 320 tcf->timeout = qla2x00_async_iocb_timeout;
358 tcf->done = qla2x00_async_tm_cmd_ctx_done; 321 sp->done = qla2x00_async_tm_cmd_done;
359 322
360 rval = qla2x00_start_sp(sp); 323 rval = qla2x00_start_sp(sp);
361 if (rval != QLA_SUCCESS) 324 if (rval != QLA_SUCCESS)
@@ -368,7 +331,7 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
368 return rval; 331 return rval;
369 332
370done_free_sp: 333done_free_sp:
371 tcf->free(sp); 334 sp->free(fcport->vha, sp);
372done: 335done:
373 return rval; 336 return rval;
374} 337}
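
With this rework, qla2x00_async_tm_cmd() only builds and starts the sp; the marker IOCB that used to be issued by the standalone qla2x00_async_tm_cmd_done() helper (deleted in a hunk further below) is now sent from the completion callback. A caller's view, as a sketch (fcport, lun and tag supplied by the caller):

    int rc = qla2x00_async_tm_cmd(fcport, TCF_LUN_RESET, lun, tag);
    if (rc != QLA_SUCCESS)
            return rc;      /* sp never started or start failed */
    /* On completion, the done callback issues qla2x00_marker() with
     * MK_SYNC_ID_LUN (LUN reset) or MK_SYNC_ID, then frees the sp. */
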
@@ -387,6 +350,13 @@ qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
387 * requests. 350 * requests.
388 */ 351 */
389 rval = qla2x00_get_port_database(vha, fcport, 0); 352 rval = qla2x00_get_port_database(vha, fcport, 0);
353 if (rval == QLA_NOT_LOGGED_IN) {
354 fcport->flags &= ~FCF_ASYNC_SENT;
355 fcport->flags |= FCF_LOGIN_NEEDED;
356 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
357 break;
358 }
359
390 if (rval != QLA_SUCCESS) { 360 if (rval != QLA_SUCCESS) {
391 qla2x00_post_async_logout_work(vha, fcport, NULL); 361 qla2x00_post_async_logout_work(vha, fcport, NULL);
392 qla2x00_post_async_login_work(vha, fcport, NULL); 362 qla2x00_post_async_login_work(vha, fcport, NULL);
@@ -452,30 +422,6 @@ qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
452 return; 422 return;
453} 423}
454 424
455void
456qla2x00_async_tm_cmd_done(struct scsi_qla_host *vha, fc_port_t *fcport,
457 struct srb_iocb *iocb)
458{
459 int rval;
460 uint32_t flags;
461 uint16_t lun;
462
463 flags = iocb->u.tmf.flags;
464 lun = (uint16_t)iocb->u.tmf.lun;
465
466 /* Issue Marker IOCB */
467 rval = qla2x00_marker(vha, vha->hw->req_q_map[0],
468 vha->hw->rsp_q_map[0], fcport->loop_id, lun,
469 flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID);
470
471 if ((rval != QLA_SUCCESS) || iocb->u.tmf.data) {
472 ql_dbg(ql_dbg_taskm, vha, 0x8030,
473 "TM IOCB failed (%x).\n", rval);
474 }
475
476 return;
477}
478
479/****************************************************************************/ 425/****************************************************************************/
480/* QLogic ISP2x00 Hardware Support Functions. */ 426/* QLogic ISP2x00 Hardware Support Functions. */
481/****************************************************************************/ 427/****************************************************************************/
@@ -969,6 +915,9 @@ qla81xx_reset_mpi(scsi_qla_host_t *vha)
969{ 915{
970 uint16_t mb[4] = {0x1010, 0, 1, 0}; 916 uint16_t mb[4] = {0x1010, 0, 1, 0};
971 917
918 if (!IS_QLA81XX(vha->hw))
919 return QLA_SUCCESS;
920
972 return qla81xx_write_mpi_register(vha, mb); 921 return qla81xx_write_mpi_register(vha, mb);
973} 922}
974 923
@@ -1262,7 +1211,9 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1262 mem_size = (ha->fw_memory_size - 0x11000 + 1) * 1211 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
1263 sizeof(uint16_t); 1212 sizeof(uint16_t);
1264 } else if (IS_FWI2_CAPABLE(ha)) { 1213 } else if (IS_FWI2_CAPABLE(ha)) {
1265 if (IS_QLA81XX(ha)) 1214 if (IS_QLA83XX(ha))
1215 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
1216 else if (IS_QLA81XX(ha))
1266 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem); 1217 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
1267 else if (IS_QLA25XX(ha)) 1218 else if (IS_QLA25XX(ha))
1268 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem); 1219 fixed_size = offsetof(struct qla25xx_fw_dump, ext_mem);
@@ -1270,10 +1221,20 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1270 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem); 1221 fixed_size = offsetof(struct qla24xx_fw_dump, ext_mem);
1271 mem_size = (ha->fw_memory_size - 0x100000 + 1) * 1222 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
1272 sizeof(uint32_t); 1223 sizeof(uint32_t);
1273 if (ha->mqenable) 1224 if (ha->mqenable) {
1274 mq_size = sizeof(struct qla2xxx_mq_chain); 1225 if (!IS_QLA83XX(ha))
1226 mq_size = sizeof(struct qla2xxx_mq_chain);
1227 /*
1228 * Allocate maximum buffer size for all queues.
1229 * Resizing must be done at end-of-dump processing.
1230 */
1231 mq_size += ha->max_req_queues *
1232 (req->length * sizeof(request_t));
1233 mq_size += ha->max_rsp_queues *
1234 (rsp->length * sizeof(response_t));
1235 }
1275 /* Allocate memory for Fibre Channel Event Buffer. */ 1236 /* Allocate memory for Fibre Channel Event Buffer. */
1276 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)) 1237 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
1277 goto try_eft; 1238 goto try_eft;
1278 1239
1279 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, 1240 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
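
For ISP83xx parts the firmware dump now reserves worst-case space for every request and response queue up front, with trimming deferred to end-of-dump processing. A worked sizing example under assumed values (2 request queues of 2048 entries, 2 response queues of 512 entries, and 64-byte request_t/response_t entries, which is the usual IOCB size; the real values come from the structure definitions):

    /* req side: 2 * 2048 * 64 = 256 KiB
     * rsp side: 2 *  512 * 64 =  64 KiB
     * so roughly 320 KiB is reserved and trimmed after the dump. */
    mq_size += ha->max_req_queues * (req->length * sizeof(request_t));
    mq_size += ha->max_rsp_queues * (rsp->length * sizeof(response_t));
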
@@ -1484,17 +1445,8 @@ enable_82xx_npiv:
1484 fw_major_version = ha->fw_major_version; 1445 fw_major_version = ha->fw_major_version;
1485 if (IS_QLA82XX(ha)) 1446 if (IS_QLA82XX(ha))
1486 qla82xx_check_md_needed(vha); 1447 qla82xx_check_md_needed(vha);
1487 else { 1448 else
1488 rval = qla2x00_get_fw_version(vha, 1449 rval = qla2x00_get_fw_version(vha);
1489 &ha->fw_major_version,
1490 &ha->fw_minor_version,
1491 &ha->fw_subminor_version,
1492 &ha->fw_attributes,
1493 &ha->fw_memory_size,
1494 ha->mpi_version,
1495 &ha->mpi_capabilities,
1496 ha->phy_version);
1497 }
1498 if (rval != QLA_SUCCESS) 1450 if (rval != QLA_SUCCESS)
1499 goto failed; 1451 goto failed;
1500 ha->flags.npiv_supported = 0; 1452 ha->flags.npiv_supported = 0;
@@ -1535,6 +1487,9 @@ enable_82xx_npiv:
1535 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1487 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1536 } 1488 }
1537 1489
1490 if (IS_QLA83XX(ha))
1491 goto skip_fac_check;
1492
1538 if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) { 1493 if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
1539 uint32_t size; 1494 uint32_t size;
1540 1495
@@ -1547,6 +1502,11 @@ enable_82xx_npiv:
1547 "Unsupported FAC firmware (%d.%02d.%02d).\n", 1502 "Unsupported FAC firmware (%d.%02d.%02d).\n",
1548 ha->fw_major_version, ha->fw_minor_version, 1503 ha->fw_major_version, ha->fw_minor_version,
1549 ha->fw_subminor_version); 1504 ha->fw_subminor_version);
1505skip_fac_check:
1506 if (IS_QLA83XX(ha)) {
1507 ha->flags.fac_supported = 0;
1508 rval = QLA_SUCCESS;
1509 }
1550 } 1510 }
1551 } 1511 }
1552failed: 1512failed:
@@ -1725,7 +1685,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1725 struct req_que *req = ha->req_q_map[0]; 1685 struct req_que *req = ha->req_q_map[0];
1726 struct rsp_que *rsp = ha->rsp_q_map[0]; 1686 struct rsp_que *rsp = ha->rsp_q_map[0];
1727 1687
1728/* Setup ring parameters in initialization control block. */ 1688 /* Setup ring parameters in initialization control block. */
1729 icb = (struct init_cb_24xx *)ha->init_cb; 1689 icb = (struct init_cb_24xx *)ha->init_cb;
1730 icb->request_q_outpointer = __constant_cpu_to_le16(0); 1690 icb->request_q_outpointer = __constant_cpu_to_le16(0);
1731 icb->response_q_inpointer = __constant_cpu_to_le16(0); 1691 icb->response_q_inpointer = __constant_cpu_to_le16(0);
@@ -1736,7 +1696,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1736 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma)); 1696 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1737 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma)); 1697 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1738 1698
1739 if (ha->mqenable) { 1699 if (ha->mqenable || IS_QLA83XX(ha)) {
1740 icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS); 1700 icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
1741 icb->rid = __constant_cpu_to_le16(rid); 1701 icb->rid = __constant_cpu_to_le16(rid);
1742 if (ha->flags.msix_enabled) { 1702 if (ha->flags.msix_enabled) {
@@ -1756,7 +1716,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1756 __constant_cpu_to_le32(BIT_18); 1716 __constant_cpu_to_le32(BIT_18);
1757 1717
1758 /* Use Disable MSIX Handshake mode for capable adapters */ 1718 /* Use Disable MSIX Handshake mode for capable adapters */
1759 if (IS_MSIX_NACK_CAPABLE(ha)) { 1719 if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
1720 (ha->flags.msix_enabled)) {
1760 icb->firmware_options_2 &= 1721 icb->firmware_options_2 &=
1761 __constant_cpu_to_le32(~BIT_22); 1722 __constant_cpu_to_le32(~BIT_22);
1762 ha->flags.disable_msix_handshake = 1; 1723 ha->flags.disable_msix_handshake = 1;
@@ -1800,7 +1761,6 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1800 struct qla_hw_data *ha = vha->hw; 1761 struct qla_hw_data *ha = vha->hw;
1801 struct req_que *req; 1762 struct req_que *req;
1802 struct rsp_que *rsp; 1763 struct rsp_que *rsp;
1803 struct scsi_qla_host *vp;
1804 struct mid_init_cb_24xx *mid_init_cb = 1764 struct mid_init_cb_24xx *mid_init_cb =
1805 (struct mid_init_cb_24xx *) ha->init_cb; 1765 (struct mid_init_cb_24xx *) ha->init_cb;
1806 1766
@@ -1831,11 +1791,6 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1831 } 1791 }
1832 1792
1833 spin_lock(&ha->vport_slock); 1793 spin_lock(&ha->vport_slock);
1834 /* Clear RSCN queue. */
1835 list_for_each_entry(vp, &ha->vp_list, list) {
1836 vp->rscn_in_ptr = 0;
1837 vp->rscn_out_ptr = 0;
1838 }
1839 1794
1840 spin_unlock(&ha->vport_slock); 1795 spin_unlock(&ha->vport_slock);
1841 1796
@@ -2028,7 +1983,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
2028 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap); 1983 &loop_id, &al_pa, &area, &domain, &topo, &sw_cap);
2029 if (rval != QLA_SUCCESS) { 1984 if (rval != QLA_SUCCESS) {
2030 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) || 1985 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
2031 IS_QLA8XXX_TYPE(ha) || 1986 IS_CNA_CAPABLE(ha) ||
2032 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) { 1987 (rval == QLA_COMMAND_ERROR && loop_id == 0x7)) {
2033 ql_dbg(ql_dbg_disc, vha, 0x2008, 1988 ql_dbg(ql_dbg_disc, vha, 0x2008,
2034 "Loop is in a transition state.\n"); 1989 "Loop is in a transition state.\n");
@@ -2120,7 +2075,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
2120 uint16_t index; 2075 uint16_t index;
2121 struct qla_hw_data *ha = vha->hw; 2076 struct qla_hw_data *ha = vha->hw;
2122 int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && 2077 int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
2123 !IS_QLA8XXX_TYPE(ha); 2078 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
2124 2079
2125 if (memcmp(model, BINZERO, len) != 0) { 2080 if (memcmp(model, BINZERO, len) != 0) {
2126 strncpy(ha->model_number, model, len); 2081 strncpy(ha->model_number, model, len);
@@ -2596,13 +2551,11 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
2596 if (ha->current_topology == ISP_CFG_FL && 2551 if (ha->current_topology == ISP_CFG_FL &&
2597 (test_bit(LOCAL_LOOP_UPDATE, &flags))) { 2552 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
2598 2553
2599 vha->flags.rscn_queue_overflow = 1;
2600 set_bit(RSCN_UPDATE, &flags); 2554 set_bit(RSCN_UPDATE, &flags);
2601 2555
2602 } else if (ha->current_topology == ISP_CFG_F && 2556 } else if (ha->current_topology == ISP_CFG_F &&
2603 (test_bit(LOCAL_LOOP_UPDATE, &flags))) { 2557 (test_bit(LOCAL_LOOP_UPDATE, &flags))) {
2604 2558
2605 vha->flags.rscn_queue_overflow = 1;
2606 set_bit(RSCN_UPDATE, &flags); 2559 set_bit(RSCN_UPDATE, &flags);
2607 clear_bit(LOCAL_LOOP_UPDATE, &flags); 2560 clear_bit(LOCAL_LOOP_UPDATE, &flags);
2608 2561
@@ -2612,7 +2565,6 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
2612 } else if (!vha->flags.online || 2565 } else if (!vha->flags.online ||
2613 (test_bit(ABORT_ISP_ACTIVE, &flags))) { 2566 (test_bit(ABORT_ISP_ACTIVE, &flags))) {
2614 2567
2615 vha->flags.rscn_queue_overflow = 1;
2616 set_bit(RSCN_UPDATE, &flags); 2568 set_bit(RSCN_UPDATE, &flags);
2617 set_bit(LOCAL_LOOP_UPDATE, &flags); 2569 set_bit(LOCAL_LOOP_UPDATE, &flags);
2618 } 2570 }
@@ -2622,8 +2574,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
2622 ql_dbg(ql_dbg_disc, vha, 0x2015, 2574 ql_dbg(ql_dbg_disc, vha, 0x2015,
2623 "Loop resync needed, failing.\n"); 2575 "Loop resync needed, failing.\n");
2624 rval = QLA_FUNCTION_FAILED; 2576 rval = QLA_FUNCTION_FAILED;
2625 } 2577 } else
2626 else
2627 rval = qla2x00_configure_local_loop(vha); 2578 rval = qla2x00_configure_local_loop(vha);
2628 } 2579 }
2629 2580
@@ -2662,8 +2613,6 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
2662 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 2613 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
2663 if (test_bit(RSCN_UPDATE, &save_flags)) { 2614 if (test_bit(RSCN_UPDATE, &save_flags)) {
2664 set_bit(RSCN_UPDATE, &vha->dpc_flags); 2615 set_bit(RSCN_UPDATE, &vha->dpc_flags);
2665 if (!IS_ALOGIO_CAPABLE(ha))
2666 vha->flags.rscn_queue_overflow = 1;
2667 } 2616 }
2668 } 2617 }
2669 2618
@@ -2699,7 +2648,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2699 2648
2700 found_devs = 0; 2649 found_devs = 0;
2701 new_fcport = NULL; 2650 new_fcport = NULL;
2702 entries = MAX_FIBRE_DEVICES; 2651 entries = MAX_FIBRE_DEVICES_LOOP;
2703 2652
2704 ql_dbg(ql_dbg_disc, vha, 0x2016, 2653 ql_dbg(ql_dbg_disc, vha, 0x2016,
2705 "Getting FCAL position map.\n"); 2654 "Getting FCAL position map.\n");
@@ -2707,7 +2656,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2707 qla2x00_get_fcal_position_map(vha, NULL); 2656 qla2x00_get_fcal_position_map(vha, NULL);
2708 2657
2709 /* Get list of logged in devices. */ 2658 /* Get list of logged in devices. */
2710 memset(ha->gid_list, 0, GID_LIST_SIZE); 2659 memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
2711 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma, 2660 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
2712 &entries); 2661 &entries);
2713 if (rval != QLA_SUCCESS) 2662 if (rval != QLA_SUCCESS)
@@ -2971,7 +2920,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2971static int 2920static int
2972qla2x00_configure_fabric(scsi_qla_host_t *vha) 2921qla2x00_configure_fabric(scsi_qla_host_t *vha)
2973{ 2922{
2974 int rval, rval2; 2923 int rval;
2975 fc_port_t *fcport, *fcptemp; 2924 fc_port_t *fcport, *fcptemp;
2976 uint16_t next_loopid; 2925 uint16_t next_loopid;
2977 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2926 uint16_t mb[MAILBOX_REGISTER_COUNT];
@@ -2995,12 +2944,6 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
2995 } 2944 }
2996 vha->device_flags |= SWITCH_FOUND; 2945 vha->device_flags |= SWITCH_FOUND;
2997 2946
2998 /* Mark devices that need re-synchronization. */
2999 rval2 = qla2x00_device_resync(vha);
3000 if (rval2 == QLA_RSCNS_HANDLED) {
3001 /* No point doing the scan, just continue. */
3002 return (QLA_SUCCESS);
3003 }
3004 do { 2947 do {
3005 /* FDMI support. */ 2948 /* FDMI support. */
3006 if (ql2xfdmienable && 2949 if (ql2xfdmienable &&
@@ -3012,8 +2955,12 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3012 loop_id = NPH_SNS; 2955 loop_id = NPH_SNS;
3013 else 2956 else
3014 loop_id = SIMPLE_NAME_SERVER; 2957 loop_id = SIMPLE_NAME_SERVER;
3015 ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff, 2958 rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
3016 0xfc, mb, BIT_1 | BIT_0); 2959 0xfc, mb, BIT_1|BIT_0);
2960 if (rval != QLA_SUCCESS) {
2961 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
2962 return rval;
2963 }
3017 if (mb[0] != MBS_COMMAND_COMPLETE) { 2964 if (mb[0] != MBS_COMMAND_COMPLETE) {
3018 ql_dbg(ql_dbg_disc, vha, 0x2042, 2965 ql_dbg(ql_dbg_disc, vha, 0x2042,
3019 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x " 2966 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x "
@@ -3044,6 +2991,13 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3044 } 2991 }
3045 } 2992 }
3046 2993
2994#define QLA_FCPORT_SCAN 1
2995#define QLA_FCPORT_FOUND 2
2996
2997 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2998 fcport->scan_state = QLA_FCPORT_SCAN;
2999 }
3000
3047 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports); 3001 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
3048 if (rval != QLA_SUCCESS) 3002 if (rval != QLA_SUCCESS)
3049 break; 3003 break;
@@ -3059,7 +3013,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3059 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) 3013 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
3060 continue; 3014 continue;
3061 3015
3062 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) { 3016 if (fcport->scan_state == QLA_FCPORT_SCAN &&
3017 atomic_read(&fcport->state) == FCS_ONLINE) {
3063 qla2x00_mark_device_lost(vha, fcport, 3018 qla2x00_mark_device_lost(vha, fcport,
3064 ql2xplogiabsentdevice, 0); 3019 ql2xplogiabsentdevice, 0);
3065 if (fcport->loop_id != FC_NO_LOOP_ID && 3020 if (fcport->loop_id != FC_NO_LOOP_ID &&
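
The QLA_FCPORT_SCAN/QLA_FCPORT_FOUND pair introduced above implements a mark-and-sweep over the port database: every fcport is marked SCAN before the fabric walk, qla2x00_find_all_fabric_devs() flips matched ports to FOUND (see the -3332 hunk further below), and the sweep in the hunk immediately above treats any port still marked SCAN yet online as lost. Combined sketch of the three hunks:

    list_for_each_entry(fcport, &vha->vp_fcports, list)
            fcport->scan_state = QLA_FCPORT_SCAN;           /* mark */

    rval = qla2x00_find_all_fabric_devs(vha, &new_fcports); /* sets FOUND */
    if (rval != QLA_SUCCESS)
            return rval;

    list_for_each_entry(fcport, &vha->vp_fcports, list) {   /* sweep */
            if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
                    continue;
            if (fcport->scan_state == QLA_FCPORT_SCAN &&
                atomic_read(&fcport->state) == FCS_ONLINE)
                    qla2x00_mark_device_lost(vha, fcport,
                        ql2xplogiabsentdevice, 0);
    }
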
@@ -3184,20 +3139,21 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3184 rval = QLA_SUCCESS; 3139 rval = QLA_SUCCESS;
3185 3140
3186 /* Try GID_PT to get device list, else GAN. */ 3141 /* Try GID_PT to get device list, else GAN. */
3187 swl = kcalloc(MAX_FIBRE_DEVICES, sizeof(sw_info_t), GFP_KERNEL); 3142 if (!ha->swl)
3143 ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
3144 GFP_KERNEL);
3145 swl = ha->swl;
3188 if (!swl) { 3146 if (!swl) {
3189 /*EMPTY*/ 3147 /*EMPTY*/
3190 ql_dbg(ql_dbg_disc, vha, 0x2054, 3148 ql_dbg(ql_dbg_disc, vha, 0x2054,
3191 "GID_PT allocations failed, fallback on GA_NXT.\n"); 3149 "GID_PT allocations failed, fallback on GA_NXT.\n");
3192 } else { 3150 } else {
3151 memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
3193 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) { 3152 if (qla2x00_gid_pt(vha, swl) != QLA_SUCCESS) {
3194 kfree(swl);
3195 swl = NULL; 3153 swl = NULL;
3196 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) { 3154 } else if (qla2x00_gpn_id(vha, swl) != QLA_SUCCESS) {
3197 kfree(swl);
3198 swl = NULL; 3155 swl = NULL;
3199 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) { 3156 } else if (qla2x00_gnn_id(vha, swl) != QLA_SUCCESS) {
3200 kfree(swl);
3201 swl = NULL; 3157 swl = NULL;
3202 } else if (ql2xiidmaenable && 3158 } else if (ql2xiidmaenable &&
3203 qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) { 3159 qla2x00_gfpn_id(vha, swl) == QLA_SUCCESS) {
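
Caching the switch-info scratch buffer in ha->swl replaces a kcalloc/kfree pair on every fabric rescan: the buffer is sized once from ha->max_fibre_devices and re-zeroed per scan, which is also why the kfree() calls disappear from the error paths in this and the following hunks. Sketch of the reuse pattern:

    /* Allocate once, zero per scan. */
    if (!ha->swl)
            ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
                GFP_KERNEL);
    swl = ha->swl;
    if (!swl) {
            /* allocation failed: fall back to per-port GA_NXT queries */
    } else {
            memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
    }

The cached buffer is presumably released once in the adapter teardown path, which is outside this excerpt.
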
@@ -3215,7 +3171,6 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3215 if (new_fcport == NULL) { 3171 if (new_fcport == NULL) {
3216 ql_log(ql_log_warn, vha, 0x205e, 3172 ql_log(ql_log_warn, vha, 0x205e,
3217 "Failed to allocate memory for fcport.\n"); 3173 "Failed to allocate memory for fcport.\n");
3218 kfree(swl);
3219 return (QLA_MEMORY_ALLOC_FAILED); 3174 return (QLA_MEMORY_ALLOC_FAILED);
3220 } 3175 }
3221 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); 3176 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
@@ -3332,6 +3287,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3332 WWN_SIZE)) 3287 WWN_SIZE))
3333 continue; 3288 continue;
3334 3289
3290 fcport->scan_state = QLA_FCPORT_FOUND;
3291
3335 found++; 3292 found++;
3336 3293
3337 /* Update port state. */ 3294 /* Update port state. */
@@ -3368,6 +3325,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3368 fcport->flags |= FCF_LOGIN_NEEDED; 3325 fcport->flags |= FCF_LOGIN_NEEDED;
3369 if (fcport->loop_id != FC_NO_LOOP_ID && 3326 if (fcport->loop_id != FC_NO_LOOP_ID &&
3370 (fcport->flags & FCF_FCP2_DEVICE) == 0 && 3327 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
3328 (fcport->flags & FCF_ASYNC_SENT) == 0 &&
3371 fcport->port_type != FCT_INITIATOR && 3329 fcport->port_type != FCT_INITIATOR &&
3372 fcport->port_type != FCT_BROADCAST) { 3330 fcport->port_type != FCT_BROADCAST) {
3373 ha->isp_ops->fabric_logout(vha, fcport->loop_id, 3331 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
@@ -3390,14 +3348,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3390 if (new_fcport == NULL) { 3348 if (new_fcport == NULL) {
3391 ql_log(ql_log_warn, vha, 0x2066, 3349 ql_log(ql_log_warn, vha, 0x2066,
3392 "Memory allocation failed for fcport.\n"); 3350 "Memory allocation failed for fcport.\n");
3393 kfree(swl);
3394 return (QLA_MEMORY_ALLOC_FAILED); 3351 return (QLA_MEMORY_ALLOC_FAILED);
3395 } 3352 }
3396 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED); 3353 new_fcport->flags |= (FCF_FABRIC_DEVICE | FCF_LOGIN_NEEDED);
3397 new_fcport->d_id.b24 = nxt_d_id.b24; 3354 new_fcport->d_id.b24 = nxt_d_id.b24;
3398 } 3355 }
3399 3356
3400 kfree(swl);
3401 kfree(new_fcport); 3357 kfree(new_fcport);
3402 3358
3403 return (rval); 3359 return (rval);
@@ -3470,6 +3426,9 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
3470 3426
3471 /* If not in use then it is free to use. */ 3427 /* If not in use then it is free to use. */
3472 if (!found) { 3428 if (!found) {
3429 ql_dbg(ql_dbg_disc, dev->vha, 0x2086,
3430 "Assigning new loopid=%x, portid=%x.\n",
3431 dev->loop_id, dev->d_id.b24);
3473 break; 3432 break;
3474 } 3433 }
3475 3434
@@ -3488,110 +3447,6 @@ qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev)
3488} 3447}
3489 3448
3490/* 3449/*
3491 * qla2x00_device_resync
3492 * Marks devices in the database that needs resynchronization.
3493 *
3494 * Input:
3495 * ha = adapter block pointer.
3496 *
3497 * Context:
3498 * Kernel context.
3499 */
3500static int
3501qla2x00_device_resync(scsi_qla_host_t *vha)
3502{
3503 int rval;
3504 uint32_t mask;
3505 fc_port_t *fcport;
3506 uint32_t rscn_entry;
3507 uint8_t rscn_out_iter;
3508 uint8_t format;
3509 port_id_t d_id = {};
3510
3511 rval = QLA_RSCNS_HANDLED;
3512
3513 while (vha->rscn_out_ptr != vha->rscn_in_ptr ||
3514 vha->flags.rscn_queue_overflow) {
3515
3516 rscn_entry = vha->rscn_queue[vha->rscn_out_ptr];
3517 format = MSB(MSW(rscn_entry));
3518 d_id.b.domain = LSB(MSW(rscn_entry));
3519 d_id.b.area = MSB(LSW(rscn_entry));
3520 d_id.b.al_pa = LSB(LSW(rscn_entry));
3521
3522 ql_dbg(ql_dbg_disc, vha, 0x2020,
3523 "RSCN queue entry[%d] = [%02x/%02x%02x%02x].\n",
3524 vha->rscn_out_ptr, format, d_id.b.domain, d_id.b.area,
3525 d_id.b.al_pa);
3526
3527 vha->rscn_out_ptr++;
3528 if (vha->rscn_out_ptr == MAX_RSCN_COUNT)
3529 vha->rscn_out_ptr = 0;
3530
3531 /* Skip duplicate entries. */
3532 for (rscn_out_iter = vha->rscn_out_ptr;
3533 !vha->flags.rscn_queue_overflow &&
3534 rscn_out_iter != vha->rscn_in_ptr;
3535 rscn_out_iter = (rscn_out_iter ==
3536 (MAX_RSCN_COUNT - 1)) ? 0: rscn_out_iter + 1) {
3537
3538 if (rscn_entry != vha->rscn_queue[rscn_out_iter])
3539 break;
3540
3541 ql_dbg(ql_dbg_disc, vha, 0x2021,
3542 "Skipping duplicate RSCN queue entry found at "
3543 "[%d].\n", rscn_out_iter);
3544
3545 vha->rscn_out_ptr = rscn_out_iter;
3546 }
3547
3548 /* Queue overflow, set switch default case. */
3549 if (vha->flags.rscn_queue_overflow) {
3550 ql_dbg(ql_dbg_disc, vha, 0x2022,
3551 "device_resync: rscn overflow.\n");
3552
3553 format = 3;
3554 vha->flags.rscn_queue_overflow = 0;
3555 }
3556
3557 switch (format) {
3558 case 0:
3559 mask = 0xffffff;
3560 break;
3561 case 1:
3562 mask = 0xffff00;
3563 break;
3564 case 2:
3565 mask = 0xff0000;
3566 break;
3567 default:
3568 mask = 0x0;
3569 d_id.b24 = 0;
3570 vha->rscn_out_ptr = vha->rscn_in_ptr;
3571 break;
3572 }
3573
3574 rval = QLA_SUCCESS;
3575
3576 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3577 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
3578 (fcport->d_id.b24 & mask) != d_id.b24 ||
3579 fcport->port_type == FCT_BROADCAST)
3580 continue;
3581
3582 if (atomic_read(&fcport->state) == FCS_ONLINE) {
3583 if (format != 3 ||
3584 fcport->port_type != FCT_INITIATOR) {
3585 qla2x00_mark_device_lost(vha, fcport,
3586 0, 0);
3587 }
3588 }
3589 }
3590 }
3591 return (rval);
3592}
3593
3594/*
3595 * qla2x00_fabric_dev_login 3450 * qla2x00_fabric_dev_login
3596 * Login fabric target device and update FC port database. 3451 * Login fabric target device and update FC port database.
3597 * 3452 *
@@ -3644,6 +3499,9 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3644 } else { 3499 } else {
3645 qla2x00_update_fcport(vha, fcport); 3500 qla2x00_update_fcport(vha, fcport);
3646 } 3501 }
3502 } else {
3503 /* Retry Login. */
3504 qla2x00_mark_device_lost(vha, fcport, 1, 0);
3647 } 3505 }
3648 3506
3649 return (rval); 3507 return (rval);
@@ -3684,9 +3542,12 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3684 fcport->d_id.b.area, fcport->d_id.b.al_pa); 3542 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3685 3543
3686 /* Login fcport on switch. */ 3544 /* Login fcport on switch. */
3687 ha->isp_ops->fabric_login(vha, fcport->loop_id, 3545 rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
3688 fcport->d_id.b.domain, fcport->d_id.b.area, 3546 fcport->d_id.b.domain, fcport->d_id.b.area,
3689 fcport->d_id.b.al_pa, mb, BIT_0); 3547 fcport->d_id.b.al_pa, mb, BIT_0);
3548 if (rval != QLA_SUCCESS) {
3549 return rval;
3550 }
3690 if (mb[0] == MBS_PORT_ID_USED) { 3551 if (mb[0] == MBS_PORT_ID_USED) {
3691 /* 3552 /*
3692 * Device has another loop ID. The firmware team 3553 * Device has another loop ID. The firmware team
@@ -4100,15 +3961,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
4100 ha->isp_abort_cnt = 0; 3961 ha->isp_abort_cnt = 0;
4101 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 3962 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
4102 3963
4103 if (IS_QLA81XX(ha)) 3964 if (IS_QLA81XX(ha) || IS_QLA8031(ha))
4104 qla2x00_get_fw_version(vha, 3965 qla2x00_get_fw_version(vha);
4105 &ha->fw_major_version,
4106 &ha->fw_minor_version,
4107 &ha->fw_subminor_version,
4108 &ha->fw_attributes, &ha->fw_memory_size,
4109 ha->mpi_version, &ha->mpi_capabilities,
4110 ha->phy_version);
4111
4112 if (ha->fce) { 3966 if (ha->fce) {
4113 ha->flags.fce_enabled = 1; 3967 ha->flags.fce_enabled = 1;
4114 memset(ha->fce, 0, 3968 memset(ha->fce, 0,
@@ -4974,7 +4828,6 @@ try_blob_fw:
4974 4828
4975 ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n"); 4829 ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n");
4976 ha->flags.running_gold_fw = 1; 4830 ha->flags.running_gold_fw = 1;
4977
4978 return rval; 4831 return rval;
4979} 4832}
4980 4833
@@ -5009,6 +4862,7 @@ int
5009qla24xx_configure_vhba(scsi_qla_host_t *vha) 4862qla24xx_configure_vhba(scsi_qla_host_t *vha)
5010{ 4863{
5011 int rval = QLA_SUCCESS; 4864 int rval = QLA_SUCCESS;
4865 int rval2;
5012 uint16_t mb[MAILBOX_REGISTER_COUNT]; 4866 uint16_t mb[MAILBOX_REGISTER_COUNT];
5013 struct qla_hw_data *ha = vha->hw; 4867 struct qla_hw_data *ha = vha->hw;
5014 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 4868 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
@@ -5033,12 +4887,18 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
5033 vha->flags.management_server_logged_in = 0; 4887 vha->flags.management_server_logged_in = 0;
5034 4888
5035 /* Login to SNS first */ 4889 /* Login to SNS first */
5036 ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb, BIT_1); 4890 rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
5037 if (mb[0] != MBS_COMMAND_COMPLETE) { 4891 BIT_1);
5038 ql_dbg(ql_dbg_init, vha, 0x0103, 4892 if (rval2 != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
5039 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x " 4893 if (rval2 == QLA_MEMORY_ALLOC_FAILED)
5040 "mb[6]=%x mb[7]=%x.\n", 4894 ql_dbg(ql_dbg_init, vha, 0x0120,
5041 NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]); 4895 "Failed SNS login: loop_id=%x, rval2=%d\n",
4896 NPH_SNS, rval2);
4897 else
4898 ql_dbg(ql_dbg_init, vha, 0x0103,
4899 "Failed SNS login: loop_id=%x mb[0]=%x mb[1]=%x "
4900 "mb[2]=%x mb[6]=%x mb[7]=%x.\n",
4901 NPH_SNS, mb[0], mb[1], mb[2], mb[6], mb[7]);
5042 return (QLA_FUNCTION_FAILED); 4902 return (QLA_FUNCTION_FAILED);
5043 } 4903 }
5044 4904
@@ -5214,10 +5074,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5214 nv->reset_delay = 5; 5074 nv->reset_delay = 5;
5215 nv->max_luns_per_target = __constant_cpu_to_le16(128); 5075 nv->max_luns_per_target = __constant_cpu_to_le16(128);
5216 nv->port_down_retry_count = __constant_cpu_to_le16(30); 5076 nv->port_down_retry_count = __constant_cpu_to_le16(30);
5217 nv->link_down_timeout = __constant_cpu_to_le16(30); 5077 nv->link_down_timeout = __constant_cpu_to_le16(180);
5218 nv->enode_mac[0] = 0x00; 5078 nv->enode_mac[0] = 0x00;
5219 nv->enode_mac[1] = 0x02; 5079 nv->enode_mac[1] = 0xC0;
5220 nv->enode_mac[2] = 0x03; 5080 nv->enode_mac[2] = 0xDD;
5221 nv->enode_mac[3] = 0x04; 5081 nv->enode_mac[3] = 0x04;
5222 nv->enode_mac[4] = 0x05; 5082 nv->enode_mac[4] = 0x05;
5223 nv->enode_mac[5] = 0x06 + ha->port_no; 5083 nv->enode_mac[5] = 0x06 + ha->port_no;
@@ -5248,9 +5108,9 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5248 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac)); 5108 memcpy(icb->enode_mac, nv->enode_mac, sizeof(icb->enode_mac));
5249 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */ 5109 /* Some boards (with valid NVRAMs) still have NULL enode_mac!! */
5250 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) { 5110 if (!memcmp(icb->enode_mac, "\0\0\0\0\0\0", sizeof(icb->enode_mac))) {
5251 icb->enode_mac[0] = 0x01; 5111 icb->enode_mac[0] = 0x00;
5252 icb->enode_mac[1] = 0x02; 5112 icb->enode_mac[1] = 0xC0;
5253 icb->enode_mac[2] = 0x03; 5113 icb->enode_mac[2] = 0xDD;
5254 icb->enode_mac[3] = 0x04; 5114 icb->enode_mac[3] = 0x04;
5255 icb->enode_mac[4] = 0x05; 5115 icb->enode_mac[4] = 0x05;
5256 icb->enode_mac[5] = 0x06 + ha->port_no; 5116 icb->enode_mac[5] = 0x06 + ha->port_no;
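
Two NVRAM defaults change here: the default link-down timeout rises from 30 to 180 seconds, and the fallback FCoE enode MAC now begins with 00:C0:DD, QLogic's registered IEEE OUI, instead of the earlier 00:02:03/01:02:03 placeholder prefixes. The fallback address is assembled per port, as in:

    /* QLogic OUI + per-port suffix, e.g. port 0 -> 00:c0:dd:04:05:06,
     * port 1 -> 00:c0:dd:04:05:07, and so on. */
    uint8_t mac[6] = { 0x00, 0xC0, 0xDD, 0x04, 0x05, 0x06 + ha->port_no };
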
@@ -5353,6 +5213,10 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5353 if (ql2xloginretrycount) 5213 if (ql2xloginretrycount)
5354 ha->login_retry_count = ql2xloginretrycount; 5214 ha->login_retry_count = ql2xloginretrycount;
5355 5215
5216 /* if not running MSI-X we need handshaking on interrupts */
5217 if (!vha->hw->flags.msix_enabled && IS_QLA83XX(ha))
5218 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22);
5219
5356 /* Enable ZIO. */ 5220 /* Enable ZIO. */
5357 if (!vha->flags.init_done) { 5221 if (!vha->flags.init_done) {
5358 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) & 5222 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 7cc4f36cd53..6e457643c63 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -72,16 +72,19 @@ static inline void
72qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp) 72qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
73{ 73{
74 struct dsd_dma *dsd_ptr, *tdsd_ptr; 74 struct dsd_dma *dsd_ptr, *tdsd_ptr;
75 struct crc_context *ctx;
76
77 ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);
75 78
76 /* clean up allocated prev pool */ 79 /* clean up allocated prev pool */
77 list_for_each_entry_safe(dsd_ptr, tdsd_ptr, 80 list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
78 &((struct crc_context *)sp->ctx)->dsd_list, list) { 81 &ctx->dsd_list, list) {
79 dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr, 82 dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
80 dsd_ptr->dsd_list_dma); 83 dsd_ptr->dsd_list_dma);
81 list_del(&dsd_ptr->list); 84 list_del(&dsd_ptr->list);
82 kfree(dsd_ptr); 85 kfree(dsd_ptr);
83 } 86 }
84 INIT_LIST_HEAD(&((struct crc_context *)sp->ctx)->dsd_list); 87 INIT_LIST_HEAD(&ctx->dsd_list);
85} 88}
86 89
87static inline void 90static inline void
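
qla2x00_clean_dsd_pool() and qla2x00_hba_err_chk_enabled() now reach the SCSI command and its CRC context through accessor macros rather than the old sp->ctx/sp->cmd pointers. The macros are defined in qla_def.h, which is not shown here; their assumed shape, given the union introduced by the srb rework:

    /* Assumed accessor shape (authoritative definitions in qla_def.h):
     * the SCSI-command path of the unified srb keeps the scsi_cmnd
     * pointer and the protection context in sp->u.scmd. */
    #define GET_CMD_SP(sp)      ((sp)->u.scmd.cmd)
    #define GET_CMD_CTX_SP(sp)  ((sp)->u.scmd.ctx)
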
@@ -113,8 +116,7 @@ qla2x00_hba_err_chk_enabled(srb_t *sp)
113 return 0; 116 return 0;
114 * 117 *
115 */ 118 */
116 119 switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
117 switch (scsi_get_prot_op(sp->cmd)) {
118 case SCSI_PROT_READ_STRIP: 120 case SCSI_PROT_READ_STRIP:
119 case SCSI_PROT_WRITE_INSERT: 121 case SCSI_PROT_WRITE_INSERT:
120 if (ql2xenablehba_err_chk >= 1) 122 if (ql2xenablehba_err_chk >= 1)
@@ -144,3 +146,44 @@ qla2x00_reset_active(scsi_qla_host_t *vha)
144 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || 146 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
145 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); 147 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
146} 148}
149
150static inline srb_t *
151qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
152{
153 srb_t *sp = NULL;
154 struct qla_hw_data *ha = vha->hw;
155 uint8_t bail;
156
157 QLA_VHA_MARK_BUSY(vha, bail);
158 if (unlikely(bail))
159 return NULL;
160
161 sp = mempool_alloc(ha->srb_mempool, flag);
162 if (!sp)
163 goto done;
164
165 memset(sp, 0, sizeof(*sp));
166 sp->fcport = fcport;
167 sp->iocbs = 1;
168done:
169 if (!sp)
170 QLA_VHA_MARK_NOT_BUSY(vha);
171 return sp;
172}
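
qla2x00_get_sp() pairs the mempool allocation with a busy reference on the vha (QLA_VHA_MARK_BUSY), so a host in the middle of teardown or reset can refuse new SRBs; note that the reference is dropped again if the allocation fails. A hedged sketch of a caller, with plumbing names taken from the surrounding hunks:

    srb_t *sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
    if (!sp)
            return QLA_FUNCTION_FAILED;     /* busy ref already released */

    sp->type = SRB_LOGIN_CMD;               /* dispatched via qla2x00_start_sp() */
    sp->name = "login";
    qla2x00_init_timer(sp, 10);             /* tmo is in seconds, scaled below */
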
173
174static inline void
175qla2x00_init_timer(srb_t *sp, unsigned long tmo)
176{
177 init_timer(&sp->u.iocb_cmd.timer);
178 sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
179 sp->u.iocb_cmd.timer.data = (unsigned long)sp;
180 sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
181 add_timer(&sp->u.iocb_cmd.timer);
182 sp->free = qla2x00_sp_free;
183}
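
qla2x00_init_timer() uses the open-coded init_timer()/add_timer() idiom of this era (long before timer_setup()): the srb rides in timer.data, and qla2x00_sp_timeout fires on expiry. A hedged sketch of the handler shape this implies; the real implementation lives in qla_os.c:

    void qla2x00_sp_timeout(unsigned long data)
    {
            srb_t *sp = (srb_t *)data;
            struct srb_iocb *iocb = &sp->u.iocb_cmd;

            if (iocb->timeout)
                    iocb->timeout(sp);      /* type-specific expiry, e.g. async abort */
    }
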
184
185static inline int
186qla2x00_gid_list_size(struct qla_hw_data *ha)
187{
188 return sizeof(struct gid_list_info) * ha->max_fibre_devices;
189}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 55a96761b5a..eac95092449 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -22,18 +22,19 @@ static inline uint16_t
22qla2x00_get_cmd_direction(srb_t *sp) 22qla2x00_get_cmd_direction(srb_t *sp)
23{ 23{
24 uint16_t cflags; 24 uint16_t cflags;
25 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
25 26
26 cflags = 0; 27 cflags = 0;
27 28
28 /* Set transfer direction */ 29 /* Set transfer direction */
29 if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) { 30 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
30 cflags = CF_WRITE; 31 cflags = CF_WRITE;
31 sp->fcport->vha->hw->qla_stats.output_bytes += 32 sp->fcport->vha->hw->qla_stats.output_bytes +=
32 scsi_bufflen(sp->cmd); 33 scsi_bufflen(cmd);
33 } else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) { 34 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
34 cflags = CF_READ; 35 cflags = CF_READ;
35 sp->fcport->vha->hw->qla_stats.input_bytes += 36 sp->fcport->vha->hw->qla_stats.input_bytes +=
36 scsi_bufflen(sp->cmd); 37 scsi_bufflen(cmd);
37 } 38 }
38 return (cflags); 39 return (cflags);
39} 40}
@@ -143,12 +144,13 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
143static inline int 144static inline int
144qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts) 145qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
145{ 146{
146 uint8_t guard = scsi_host_get_guard(sp->cmd->device->host); 147 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
148 uint8_t guard = scsi_host_get_guard(cmd->device->host);
147 149
148 /* We only support T10 DIF right now */ 150 /* We only support T10 DIF right now */
149 if (guard != SHOST_DIX_GUARD_CRC) { 151 if (guard != SHOST_DIX_GUARD_CRC) {
150 ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007, 152 ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
151 "Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd); 153 "Unsupported guard: %d for cmd=%p.\n", guard, cmd);
152 return 0; 154 return 0;
153 } 155 }
154 156
@@ -156,7 +158,7 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
156 *fw_prot_opts = 0; 158 *fw_prot_opts = 0;
157 159
158 /* Translate SCSI opcode to a protection opcode */ 160 /* Translate SCSI opcode to a protection opcode */
159 switch (scsi_get_prot_op(sp->cmd)) { 161 switch (scsi_get_prot_op(cmd)) {
160 case SCSI_PROT_READ_STRIP: 162 case SCSI_PROT_READ_STRIP:
161 *fw_prot_opts |= PO_MODE_DIF_REMOVE; 163 *fw_prot_opts |= PO_MODE_DIF_REMOVE;
162 break; 164 break;
@@ -180,7 +182,7 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
180 break; 182 break;
181 } 183 }
182 184
183 return scsi_prot_sg_count(sp->cmd); 185 return scsi_prot_sg_count(cmd);
184} 186}
185 187
186/* 188/*
@@ -201,7 +203,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
201 struct scatterlist *sg; 203 struct scatterlist *sg;
202 int i; 204 int i;
203 205
204 cmd = sp->cmd; 206 cmd = GET_CMD_SP(sp);
205 207
206 /* Update entry type to indicate Command Type 2 IOCB */ 208 /* Update entry type to indicate Command Type 2 IOCB */
207 *((uint32_t *)(&cmd_pkt->entry_type)) = 209 *((uint32_t *)(&cmd_pkt->entry_type)) =
@@ -259,7 +261,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
259 struct scatterlist *sg; 261 struct scatterlist *sg;
260 int i; 262 int i;
261 263
262 cmd = sp->cmd; 264 cmd = GET_CMD_SP(sp);
263 265
264 /* Update entry type to indicate Command Type 3 IOCB */ 266 /* Update entry type to indicate Command Type 3 IOCB */
265 *((uint32_t *)(&cmd_pkt->entry_type)) = 267 *((uint32_t *)(&cmd_pkt->entry_type)) =
@@ -333,7 +335,7 @@ qla2x00_start_scsi(srb_t *sp)
333 vha = sp->fcport->vha; 335 vha = sp->fcport->vha;
334 ha = vha->hw; 336 ha = vha->hw;
335 reg = &ha->iobase->isp; 337 reg = &ha->iobase->isp;
336 cmd = sp->cmd; 338 cmd = GET_CMD_SP(sp);
337 req = ha->req_q_map[0]; 339 req = ha->req_q_map[0];
338 rsp = ha->rsp_q_map[0]; 340 rsp = ha->rsp_q_map[0];
339 /* So we know we haven't pci_map'ed anything yet */ 341 /* So we know we haven't pci_map'ed anything yet */
@@ -391,7 +393,7 @@ qla2x00_start_scsi(srb_t *sp)
391 req->current_outstanding_cmd = handle; 393 req->current_outstanding_cmd = handle;
392 req->outstanding_cmds[handle] = sp; 394 req->outstanding_cmds[handle] = sp;
393 sp->handle = handle; 395 sp->handle = handle;
394 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 396 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
395 req->cnt -= req_cnt; 397 req->cnt -= req_cnt;
396 398
397 cmd_pkt = (cmd_entry_t *)req->ring_ptr; 399 cmd_pkt = (cmd_entry_t *)req->ring_ptr;
@@ -403,7 +405,7 @@ qla2x00_start_scsi(srb_t *sp)
403 405
404 /* Set target ID and LUN number*/ 406 /* Set target ID and LUN number*/
405 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id); 407 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
406 cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun); 408 cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
407 409
408 /* Update tagged queuing modifier */ 410 /* Update tagged queuing modifier */
409 if (scsi_populate_tag_msg(cmd, tag)) { 411 if (scsi_populate_tag_msg(cmd, tag)) {
@@ -473,7 +475,6 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
473{ 475{
474 struct qla_hw_data *ha = vha->hw; 476 struct qla_hw_data *ha = vha->hw;
475 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); 477 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
476 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
477 478
478 if (IS_QLA82XX(ha)) { 479 if (IS_QLA82XX(ha)) {
479 qla82xx_start_iocbs(vha); 480 qla82xx_start_iocbs(vha);
@@ -487,9 +488,9 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
487 req->ring_ptr++; 488 req->ring_ptr++;
488 489
489 /* Set chip new ring index. */ 490 /* Set chip new ring index. */
490 if (ha->mqenable) { 491 if (ha->mqenable || IS_QLA83XX(ha)) {
491 WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index); 492 WRT_REG_DWORD(req->req_q_in, req->ring_index);
492 RD_REG_DWORD(&ioreg->hccr); 493 RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
493 } else if (IS_FWI2_CAPABLE(ha)) { 494 } else if (IS_FWI2_CAPABLE(ha)) {
494 WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index); 495 WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
495 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in); 496 RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
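
Two things change in the doorbell path above: ISP83xx now takes the multiqueue branch even with a single queue, and the in-pointer is written through req->req_q_in rather than a hard-coded register, with the relaxed read of hccr merely flushing the posted PCI write. A hedged sketch of the queue-setup side this implies, assuming req_q_in is cached when the queue is initialized (the exact spot is not shown in this patch):

    /* At queue-init time, point req->req_q_in at the right register block. */
    if (ha->mqenable || IS_QLA83XX(ha))
            req->req_q_in = &reg->isp25mq.req_q_in;
    else
            req->req_q_in = &reg->isp24.req_q_in;
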
@@ -609,7 +610,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
609 struct dsd_dma *dsd_ptr; 610 struct dsd_dma *dsd_ptr;
610 struct ct6_dsd *ctx; 611 struct ct6_dsd *ctx;
611 612
612 cmd = sp->cmd; 613 cmd = GET_CMD_SP(sp);
613 614
614 /* Update entry type to indicate Command Type 3 IOCB */ 615 /* Update entry type to indicate Command Type 3 IOCB */
615 *((uint32_t *)(&cmd_pkt->entry_type)) = 616 *((uint32_t *)(&cmd_pkt->entry_type)) =
@@ -636,7 +637,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
636 } 637 }
637 638
638 cur_seg = scsi_sglist(cmd); 639 cur_seg = scsi_sglist(cmd);
639 ctx = sp->ctx; 640 ctx = GET_CMD_CTX_SP(sp);
640 641
641 while (tot_dsds) { 642 while (tot_dsds) {
642 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ? 643 avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
@@ -725,7 +726,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
725 int i; 726 int i;
726 struct req_que *req; 727 struct req_que *req;
727 728
728 cmd = sp->cmd; 729 cmd = GET_CMD_SP(sp);
729 730
730 /* Update entry type to indicate Command Type 3 IOCB */ 731 /* Update entry type to indicate Command Type 3 IOCB */
731 *((uint32_t *)(&cmd_pkt->entry_type)) = 732 *((uint32_t *)(&cmd_pkt->entry_type)) =
@@ -745,12 +746,12 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
745 cmd_pkt->task_mgmt_flags = 746 cmd_pkt->task_mgmt_flags =
746 __constant_cpu_to_le16(TMF_WRITE_DATA); 747 __constant_cpu_to_le16(TMF_WRITE_DATA);
747 sp->fcport->vha->hw->qla_stats.output_bytes += 748 sp->fcport->vha->hw->qla_stats.output_bytes +=
748 scsi_bufflen(sp->cmd); 749 scsi_bufflen(cmd);
749 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 750 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
750 cmd_pkt->task_mgmt_flags = 751 cmd_pkt->task_mgmt_flags =
751 __constant_cpu_to_le16(TMF_READ_DATA); 752 __constant_cpu_to_le16(TMF_READ_DATA);
752 sp->fcport->vha->hw->qla_stats.input_bytes += 753 sp->fcport->vha->hw->qla_stats.input_bytes +=
753 scsi_bufflen(sp->cmd); 754 scsi_bufflen(cmd);
754 } 755 }
755 756
756 /* One DSD is available in the Command Type 3 IOCB */ 757 /* One DSD is available in the Command Type 3 IOCB */
@@ -797,7 +798,7 @@ static inline void
797qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt, 798qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
798 unsigned int protcnt) 799 unsigned int protcnt)
799{ 800{
800 struct scsi_cmnd *cmd = sp->cmd; 801 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
801 scsi_qla_host_t *vha = shost_priv(cmd->device->host); 802 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
802 803
803 switch (scsi_get_prot_type(cmd)) { 804 switch (scsi_get_prot_type(cmd)) {
@@ -952,16 +953,16 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
952 struct qla2_sgx sgx; 953 struct qla2_sgx sgx;
953 dma_addr_t sle_dma; 954 dma_addr_t sle_dma;
954 uint32_t sle_dma_len, tot_prot_dma_len = 0; 955 uint32_t sle_dma_len, tot_prot_dma_len = 0;
955 struct scsi_cmnd *cmd = sp->cmd; 956 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
956 957
957 prot_int = cmd->device->sector_size; 958 prot_int = cmd->device->sector_size;
958 959
959 memset(&sgx, 0, sizeof(struct qla2_sgx)); 960 memset(&sgx, 0, sizeof(struct qla2_sgx));
960 sgx.tot_bytes = scsi_bufflen(sp->cmd); 961 sgx.tot_bytes = scsi_bufflen(cmd);
961 sgx.cur_sg = scsi_sglist(sp->cmd); 962 sgx.cur_sg = scsi_sglist(cmd);
962 sgx.sp = sp; 963 sgx.sp = sp;
963 964
964 sg_prot = scsi_prot_sglist(sp->cmd); 965 sg_prot = scsi_prot_sglist(cmd);
965 966
966 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) { 967 while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
967 968
@@ -995,7 +996,7 @@ alloc_and_fill:
995 } 996 }
996 997
997 list_add_tail(&dsd_ptr->list, 998 list_add_tail(&dsd_ptr->list,
998 &((struct crc_context *)sp->ctx)->dsd_list); 999 &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
999 1000
1000 sp->flags |= SRB_CRC_CTX_DSD_VALID; 1001 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1001 1002
@@ -1044,11 +1045,12 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1044 uint32_t *cur_dsd = dsd; 1045 uint32_t *cur_dsd = dsd;
1045 int i; 1046 int i;
1046 uint16_t used_dsds = tot_dsds; 1047 uint16_t used_dsds = tot_dsds;
1047 scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host); 1048 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1049 scsi_qla_host_t *vha = shost_priv(cmd->device->host);
1048 1050
1049 uint8_t *cp; 1051 uint8_t *cp;
1050 1052
1051 scsi_for_each_sg(sp->cmd, sg, tot_dsds, i) { 1053 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
1052 dma_addr_t sle_dma; 1054 dma_addr_t sle_dma;
1053 1055
1054 /* Allocate additional continuation packets? */ 1056 /* Allocate additional continuation packets? */
@@ -1078,7 +1080,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1078 } 1080 }
1079 1081
1080 list_add_tail(&dsd_ptr->list, 1082 list_add_tail(&dsd_ptr->list,
1081 &((struct crc_context *)sp->ctx)->dsd_list); 1083 &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
1082 1084
1083 sp->flags |= SRB_CRC_CTX_DSD_VALID; 1085 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1084 1086
@@ -1091,17 +1093,16 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1091 sle_dma = sg_dma_address(sg); 1093 sle_dma = sg_dma_address(sg);
1092 ql_dbg(ql_dbg_io, vha, 0x300a, 1094 ql_dbg(ql_dbg_io, vha, 0x300a,
1093 "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n", 1095 "sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
1094 i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg), 1096 i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg), cmd);
1095 sp->cmd);
1096 *cur_dsd++ = cpu_to_le32(LSD(sle_dma)); 1097 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1097 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 1098 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1098 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 1099 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1099 avail_dsds--; 1100 avail_dsds--;
1100 1101
1101 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { 1102 if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
1102 cp = page_address(sg_page(sg)) + sg->offset; 1103 cp = page_address(sg_page(sg)) + sg->offset;
1103 ql_dbg(ql_dbg_io, vha, 0x300b, 1104 ql_dbg(ql_dbg_io, vha, 0x300b,
1104 "User data buffer=%p for cmd=%p.\n", cp, sp->cmd); 1105 "User data buffer=%p for cmd=%p.\n", cp, cmd);
1105 } 1106 }
1106 } 1107 }
1107 /* Null termination */ 1108 /* Null termination */
@@ -1128,8 +1129,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1128 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 1129 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
1129 uint8_t *cp; 1130 uint8_t *cp;
1130 1131
1131 1132 cmd = GET_CMD_SP(sp);
1132 cmd = sp->cmd;
1133 scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) { 1133 scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
1134 dma_addr_t sle_dma; 1134 dma_addr_t sle_dma;
1135 1135
@@ -1160,7 +1160,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1160 } 1160 }
1161 1161
1162 list_add_tail(&dsd_ptr->list, 1162 list_add_tail(&dsd_ptr->list,
1163 &((struct crc_context *)sp->ctx)->dsd_list); 1163 &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
1164 1164
1165 sp->flags |= SRB_CRC_CTX_DSD_VALID; 1165 sp->flags |= SRB_CRC_CTX_DSD_VALID;
1166 1166
@@ -1171,7 +1171,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1171 cur_dsd = (uint32_t *)next_dsd; 1171 cur_dsd = (uint32_t *)next_dsd;
1172 } 1172 }
1173 sle_dma = sg_dma_address(sg); 1173 sle_dma = sg_dma_address(sg);
1174 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { 1174 if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
1175 ql_dbg(ql_dbg_io, vha, 0x3027, 1175 ql_dbg(ql_dbg_io, vha, 0x3027,
1176 "%s(): %p, sg_entry %d - " 1176 "%s(): %p, sg_entry %d - "
1177 "addr=0x%x0x%x, len=%d.\n", 1177 "addr=0x%x0x%x, len=%d.\n",
@@ -1182,7 +1182,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1182 *cur_dsd++ = cpu_to_le32(MSD(sle_dma)); 1182 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1183 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg)); 1183 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1184 1184
1185 if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) { 1185 if (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_PASS) {
1186 cp = page_address(sg_page(sg)) + sg->offset; 1186 cp = page_address(sg_page(sg)) + sg->offset;
1187 ql_dbg(ql_dbg_io, vha, 0x3028, 1187 ql_dbg(ql_dbg_io, vha, 0x3028,
1188 "%s(): Protection Data buffer = %p.\n", __func__, 1188 "%s(): Protection Data buffer = %p.\n", __func__,
@@ -1228,7 +1228,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1228 dma_addr_t crc_ctx_dma; 1228 dma_addr_t crc_ctx_dma;
1229 char tag[2]; 1229 char tag[2];
1230 1230
1231 cmd = sp->cmd; 1231 cmd = GET_CMD_SP(sp);
1232 1232
1233 sgc = 0; 1233 sgc = 0;
1234 /* Update entry type to indicate Command Type CRC_2 IOCB */ 1234 /* Update entry type to indicate Command Type CRC_2 IOCB */
@@ -1256,15 +1256,15 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1256 __constant_cpu_to_le16(CF_READ_DATA); 1256 __constant_cpu_to_le16(CF_READ_DATA);
1257 } 1257 }
1258 1258
1259 if ((scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_INSERT) || 1259 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1260 (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_STRIP) || 1260 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1261 (scsi_get_prot_op(sp->cmd) == SCSI_PROT_READ_STRIP) || 1261 (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1262 (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_INSERT)) 1262 (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1263 bundling = 0; 1263 bundling = 0;
1264 1264
1265 /* Allocate CRC context from global pool */ 1265 /* Allocate CRC context from global pool */
1266 crc_ctx_pkt = sp->ctx = dma_pool_alloc(ha->dl_dma_pool, 1266 crc_ctx_pkt = sp->u.scmd.ctx =
1267 GFP_ATOMIC, &crc_ctx_dma); 1267 dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1268 1268
1269 if (!crc_ctx_pkt) 1269 if (!crc_ctx_pkt)
1270 goto crc_queuing_error; 1270 goto crc_queuing_error;
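
The CRC context for a T10-DIF command is carved out of a DMA-coherent pool per command, GFP_ATOMIC because this runs under the hardware lock in the I/O path; the only real change here is that the pointer lands in sp->u.scmd.ctx instead of the old catch-all sp->ctx. In isolation:

    struct crc_context *crc_ctx_pkt;
    dma_addr_t crc_ctx_dma;

    crc_ctx_pkt = dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
    if (!crc_ctx_pkt)
            goto crc_queuing_error;
    sp->u.scmd.ctx = crc_ctx_pkt;
    INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list); /* drained later by qla2x00_clean_dsd_pool() */
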
@@ -1310,7 +1310,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1310 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 1310 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1311 fcp_cmnd->additional_cdb_len |= 2; 1311 fcp_cmnd->additional_cdb_len |= 2;
1312 1312
1313 int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun); 1313 int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1314 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); 1314 memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1315 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len); 1315 cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1316 cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32( 1316 cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
@@ -1345,7 +1345,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1345 blk_size = cmd->device->sector_size; 1345 blk_size = cmd->device->sector_size;
1346 dif_bytes = (data_bytes / blk_size) * 8; 1346 dif_bytes = (data_bytes / blk_size) * 8;
1347 1347
1348 switch (scsi_get_prot_op(sp->cmd)) { 1348 switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1349 case SCSI_PROT_READ_INSERT: 1349 case SCSI_PROT_READ_INSERT:
1350 case SCSI_PROT_WRITE_STRIP: 1350 case SCSI_PROT_WRITE_STRIP:
1351 total_bytes = data_bytes; 1351 total_bytes = data_bytes;
@@ -1445,7 +1445,7 @@ qla24xx_start_scsi(srb_t *sp)
1445 uint16_t tot_dsds; 1445 uint16_t tot_dsds;
1446 struct req_que *req = NULL; 1446 struct req_que *req = NULL;
1447 struct rsp_que *rsp = NULL; 1447 struct rsp_que *rsp = NULL;
1448 struct scsi_cmnd *cmd = sp->cmd; 1448 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1449 struct scsi_qla_host *vha = sp->fcport->vha; 1449 struct scsi_qla_host *vha = sp->fcport->vha;
1450 struct qla_hw_data *ha = vha->hw; 1450 struct qla_hw_data *ha = vha->hw;
1451 char tag[2]; 1451 char tag[2];
@@ -1510,7 +1510,7 @@ qla24xx_start_scsi(srb_t *sp)
1510 req->current_outstanding_cmd = handle; 1510 req->current_outstanding_cmd = handle;
1511 req->outstanding_cmds[handle] = sp; 1511 req->outstanding_cmds[handle] = sp;
1512 sp->handle = handle; 1512 sp->handle = handle;
1513 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 1513 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1514 req->cnt -= req_cnt; 1514 req->cnt -= req_cnt;
1515 1515
1516 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; 1516 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
@@ -1529,7 +1529,7 @@ qla24xx_start_scsi(srb_t *sp)
1529 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 1529 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1530 cmd_pkt->vp_index = sp->fcport->vp_idx; 1530 cmd_pkt->vp_index = sp->fcport->vp_idx;
1531 1531
1532 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); 1532 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1533 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 1533 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1534 1534
1535 /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */ 1535 /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
@@ -1611,7 +1611,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
1611 uint16_t fw_prot_opts = 0; 1611 uint16_t fw_prot_opts = 0;
1612 struct req_que *req = NULL; 1612 struct req_que *req = NULL;
1613 struct rsp_que *rsp = NULL; 1613 struct rsp_que *rsp = NULL;
1614 struct scsi_cmnd *cmd = sp->cmd; 1614 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1615 struct scsi_qla_host *vha = sp->fcport->vha; 1615 struct scsi_qla_host *vha = sp->fcport->vha;
1616 struct qla_hw_data *ha = vha->hw; 1616 struct qla_hw_data *ha = vha->hw;
1617 struct cmd_type_crc_2 *cmd_pkt; 1617 struct cmd_type_crc_2 *cmd_pkt;
@@ -1728,7 +1728,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
1728 req->current_outstanding_cmd = handle; 1728 req->current_outstanding_cmd = handle;
1729 req->outstanding_cmds[handle] = sp; 1729 req->outstanding_cmds[handle] = sp;
1730 sp->handle = handle; 1730 sp->handle = handle;
1731 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 1731 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1732 req->cnt -= req_cnt; 1732 req->cnt -= req_cnt;
1733 1733
1734 /* Fill-in common area */ 1734 /* Fill-in common area */
@@ -1744,7 +1744,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
1744 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 1744 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1745 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 1745 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1746 1746
1747 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); 1747 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1748 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 1748 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1749 1749
1750 /* Total Data and protection segment(s) */ 1750 /* Total Data and protection segment(s) */
@@ -1797,7 +1797,7 @@ queuing_error:
1797 1797
1798static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp) 1798static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1799{ 1799{
1800 struct scsi_cmnd *cmd = sp->cmd; 1800 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1801 struct qla_hw_data *ha = sp->fcport->vha->hw; 1801 struct qla_hw_data *ha = sp->fcport->vha->hw;
1802 int affinity = cmd->request->cpu; 1802 int affinity = cmd->request->cpu;
1803 1803
@@ -1818,7 +1818,6 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1818 uint32_t index, handle; 1818 uint32_t index, handle;
1819 request_t *pkt; 1819 request_t *pkt;
1820 uint16_t cnt, req_cnt; 1820 uint16_t cnt, req_cnt;
1821 struct srb_ctx *ctx;
1822 1821
1823 pkt = NULL; 1822 pkt = NULL;
1824 req_cnt = 1; 1823 req_cnt = 1;
@@ -1848,15 +1847,13 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1848 sp->handle = handle; 1847 sp->handle = handle;
1849 1848
1850 /* Adjust entry-counts as needed. */ 1849 /* Adjust entry-counts as needed. */
1851 if (sp->ctx) { 1850 if (sp->type != SRB_SCSI_CMD)
1852 ctx = sp->ctx; 1851 req_cnt = sp->iocbs;
1853 req_cnt = ctx->iocbs;
1854 }
1855 1852
1856skip_cmd_array: 1853skip_cmd_array:
1857 /* Check for room on request queue. */ 1854 /* Check for room on request queue. */
1858 if (req->cnt < req_cnt) { 1855 if (req->cnt < req_cnt) {
1859 if (ha->mqenable) 1856 if (ha->mqenable || IS_QLA83XX(ha))
1860 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out); 1857 cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1861 else if (IS_QLA82XX(ha)) 1858 else if (IS_QLA82XX(ha))
1862 cnt = RD_REG_DWORD(&reg->isp82.req_q_out); 1859 cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
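
With srb_ctx gone, non-SCSI SRBs pre-declare their entry count in sp->iocbs (qla2x00_get_sp() defaults it to 1), and the free-space check reads the out-pointer for whichever queue flavor is active, with ISP83xx again folded into the MQ branch. The arithmetic that follows the read is the usual circular-ring computation:

    /* Recompute free entries from the hardware out-pointer 'cnt'. */
    if (req->ring_index < cnt)
            req->cnt = cnt - req->ring_index;
    else
            req->cnt = req->length - (req->ring_index - cnt);
    if (req->cnt < req_cnt)
            goto queuing_error;     /* still not enough room */
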
@@ -1889,8 +1886,7 @@ queuing_error:
1889static void 1886static void
1890qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio) 1887qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1891{ 1888{
1892 struct srb_ctx *ctx = sp->ctx; 1889 struct srb_iocb *lio = &sp->u.iocb_cmd;
1893 struct srb_iocb *lio = ctx->u.iocb_cmd;
1894 1890
1895 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1891 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1896 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); 1892 logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
@@ -1909,8 +1905,7 @@ static void
1909qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) 1905qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1910{ 1906{
1911 struct qla_hw_data *ha = sp->fcport->vha->hw; 1907 struct qla_hw_data *ha = sp->fcport->vha->hw;
1912 struct srb_ctx *ctx = sp->ctx; 1908 struct srb_iocb *lio = &sp->u.iocb_cmd;
1913 struct srb_iocb *lio = ctx->u.iocb_cmd;
1914 uint16_t opts; 1909 uint16_t opts;
1915 1910
1916 mbx->entry_type = MBX_IOCB_TYPE; 1911 mbx->entry_type = MBX_IOCB_TYPE;
@@ -1999,8 +1994,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1999 struct fc_port *fcport = sp->fcport; 1994 struct fc_port *fcport = sp->fcport;
2000 scsi_qla_host_t *vha = fcport->vha; 1995 scsi_qla_host_t *vha = fcport->vha;
2001 struct qla_hw_data *ha = vha->hw; 1996 struct qla_hw_data *ha = vha->hw;
2002 struct srb_ctx *ctx = sp->ctx; 1997 struct srb_iocb *iocb = &sp->u.iocb_cmd;
2003 struct srb_iocb *iocb = ctx->u.iocb_cmd;
2004 struct req_que *req = vha->req; 1998 struct req_que *req = vha->req;
2005 1999
2006 flags = iocb->u.tmf.flags; 2000 flags = iocb->u.tmf.flags;
@@ -2027,7 +2021,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2027static void 2021static void
2028qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) 2022qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2029{ 2023{
2030 struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job; 2024 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2031 2025
2032 els_iocb->entry_type = ELS_IOCB_TYPE; 2026 els_iocb->entry_type = ELS_IOCB_TYPE;
2033 els_iocb->entry_count = 1; 2027 els_iocb->entry_count = 1;
@@ -2041,7 +2035,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2041 els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt); 2035 els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2042 2036
2043 els_iocb->opcode = 2037 els_iocb->opcode =
2044 (((struct srb_ctx *)sp->ctx)->type == SRB_ELS_CMD_RPT) ? 2038 sp->type == SRB_ELS_CMD_RPT ?
2045 bsg_job->request->rqst_data.r_els.els_code : 2039 bsg_job->request->rqst_data.r_els.els_code :
2046 bsg_job->request->rqst_data.h_els.command_code; 2040 bsg_job->request->rqst_data.h_els.command_code;
2047 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; 2041 els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -2078,7 +2072,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2078 uint16_t tot_dsds; 2072 uint16_t tot_dsds;
2079 scsi_qla_host_t *vha = sp->fcport->vha; 2073 scsi_qla_host_t *vha = sp->fcport->vha;
2080 struct qla_hw_data *ha = vha->hw; 2074 struct qla_hw_data *ha = vha->hw;
2081 struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job; 2075 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2082 int loop_iterartion = 0; 2076 int loop_iterartion = 0;
2083 int cont_iocb_prsnt = 0; 2077 int cont_iocb_prsnt = 0;
2084 int entry_count = 1; 2078 int entry_count = 1;
@@ -2155,7 +2149,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2155 uint16_t tot_dsds; 2149 uint16_t tot_dsds;
2156 scsi_qla_host_t *vha = sp->fcport->vha; 2150 scsi_qla_host_t *vha = sp->fcport->vha;
2157 struct qla_hw_data *ha = vha->hw; 2151 struct qla_hw_data *ha = vha->hw;
2158 struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job; 2152 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2159 int loop_iterartion = 0; 2153 int loop_iterartion = 0;
2160 int cont_iocb_prsnt = 0; 2154 int cont_iocb_prsnt = 0;
2161 int entry_count = 1; 2155 int entry_count = 1;
@@ -2245,12 +2239,12 @@ qla82xx_start_scsi(srb_t *sp)
2245 struct qla_hw_data *ha = vha->hw; 2239 struct qla_hw_data *ha = vha->hw;
2246 struct req_que *req = NULL; 2240 struct req_que *req = NULL;
2247 struct rsp_que *rsp = NULL; 2241 struct rsp_que *rsp = NULL;
2248 char tag[2]; 2242 char tag[2];
2249 2243
2250 /* Setup device pointers. */ 2244 /* Setup device pointers. */
2251 ret = 0; 2245 ret = 0;
2252 reg = &ha->iobase->isp82; 2246 reg = &ha->iobase->isp82;
2253 cmd = sp->cmd; 2247 cmd = GET_CMD_SP(sp);
2254 req = vha->req; 2248 req = vha->req;
2255 rsp = ha->rsp_q_map[0]; 2249 rsp = ha->rsp_q_map[0];
2256 2250
@@ -2354,12 +2348,14 @@ sufficient_dsds:
2354 if (req->cnt < (req_cnt + 2)) 2348 if (req->cnt < (req_cnt + 2))
2355 goto queuing_error; 2349 goto queuing_error;
2356 2350
2357 ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); 2351 ctx = sp->u.scmd.ctx =
2358 if (!sp->ctx) { 2352 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2353 if (!ctx) {
2359 ql_log(ql_log_fatal, vha, 0x3010, 2354 ql_log(ql_log_fatal, vha, 0x3010,
2360 "Failed to allocate ctx for cmd=%p.\n", cmd); 2355 "Failed to allocate ctx for cmd=%p.\n", cmd);
2361 goto queuing_error; 2356 goto queuing_error;
2362 } 2357 }
2358
2363 memset(ctx, 0, sizeof(struct ct6_dsd)); 2359 memset(ctx, 0, sizeof(struct ct6_dsd));
2364 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool, 2360 ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2365 GFP_ATOMIC, &ctx->fcp_cmnd_dma); 2361 GFP_ATOMIC, &ctx->fcp_cmnd_dma);
@@ -2410,12 +2406,12 @@ sufficient_dsds:
2410 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds)) 2406 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2411 goto queuing_error_fcp_cmnd; 2407 goto queuing_error_fcp_cmnd;
2412 2408
2413 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); 2409 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2414 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 2410 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2415 2411
2416 /* build FCP_CMND IU */ 2412 /* build FCP_CMND IU */
2417 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); 2413 memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2418 int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun); 2414 int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
2419 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len; 2415 ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2420 2416
2421 if (cmd->sc_data_direction == DMA_TO_DEVICE) 2417 if (cmd->sc_data_direction == DMA_TO_DEVICE)
@@ -2495,9 +2491,9 @@ sufficient_dsds:
2495 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 2491 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2496 cmd_pkt->vp_index = sp->fcport->vp_idx; 2492 cmd_pkt->vp_index = sp->fcport->vp_idx;
2497 2493
2498 int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); 2494 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2499 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, 2495 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2500 sizeof(cmd_pkt->lun)); 2496 sizeof(cmd_pkt->lun));
2501 2497
2502 /* 2498 /*
2503 * Update tagged queuing modifier -- default is TSK_SIMPLE (0). 2499 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
@@ -2538,7 +2534,7 @@ sufficient_dsds:
2538 req->current_outstanding_cmd = handle; 2534 req->current_outstanding_cmd = handle;
2539 req->outstanding_cmds[handle] = sp; 2535 req->outstanding_cmds[handle] = sp;
2540 sp->handle = handle; 2536 sp->handle = handle;
2541 sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle; 2537 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2542 req->cnt -= req_cnt; 2538 req->cnt -= req_cnt;
2543 wmb(); 2539 wmb();
2544 2540
@@ -2584,9 +2580,9 @@ queuing_error:
2584 if (tot_dsds) 2580 if (tot_dsds)
2585 scsi_dma_unmap(cmd); 2581 scsi_dma_unmap(cmd);
2586 2582
2587 if (sp->ctx) { 2583 if (sp->u.scmd.ctx) {
2588 mempool_free(sp->ctx, ha->ctx_mempool); 2584 mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
2589 sp->ctx = NULL; 2585 sp->u.scmd.ctx = NULL;
2590 } 2586 }
2591 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2587 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2592 2588
@@ -2599,7 +2595,6 @@ qla2x00_start_sp(srb_t *sp)
2599 int rval; 2595 int rval;
2600 struct qla_hw_data *ha = sp->fcport->vha->hw; 2596 struct qla_hw_data *ha = sp->fcport->vha->hw;
2601 void *pkt; 2597 void *pkt;
2602 struct srb_ctx *ctx = sp->ctx;
2603 unsigned long flags; 2598 unsigned long flags;
2604 2599
2605 rval = QLA_FUNCTION_FAILED; 2600 rval = QLA_FUNCTION_FAILED;
@@ -2612,7 +2607,7 @@ qla2x00_start_sp(srb_t *sp)
2612 } 2607 }
2613 2608
2614 rval = QLA_SUCCESS; 2609 rval = QLA_SUCCESS;
2615 switch (ctx->type) { 2610 switch (sp->type) {
2616 case SRB_LOGIN_CMD: 2611 case SRB_LOGIN_CMD:
2617 IS_FWI2_CAPABLE(ha) ? 2612 IS_FWI2_CAPABLE(ha) ?
2618 qla24xx_login_iocb(sp, pkt) : 2613 qla24xx_login_iocb(sp, pkt) :
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 349843ea32f..f79844ce712 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -44,8 +44,8 @@ qla2100_intr_handler(int irq, void *dev_id)
44 44
45 rsp = (struct rsp_que *) dev_id; 45 rsp = (struct rsp_que *) dev_id;
46 if (!rsp) { 46 if (!rsp) {
47 printk(KERN_INFO 47 ql_log(ql_log_info, NULL, 0x505d,
48 "%s(): NULL response queue pointer.\n", __func__); 48 "%s: NULL response queue pointer.\n", __func__);
49 return (IRQ_NONE); 49 return (IRQ_NONE);
50 } 50 }
51 51
@@ -141,8 +141,8 @@ qla2300_intr_handler(int irq, void *dev_id)
141 141
142 rsp = (struct rsp_que *) dev_id; 142 rsp = (struct rsp_que *) dev_id;
143 if (!rsp) { 143 if (!rsp) {
144 printk(KERN_INFO 144 ql_log(ql_log_info, NULL, 0x5058,
145 "%s(): NULL response queue pointer.\n", __func__); 145 "%s: NULL response queue pointer.\n", __func__);
146 return (IRQ_NONE); 146 return (IRQ_NONE);
147 } 147 }
148 148
@@ -289,7 +289,7 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
289 mb[cnt] = RD_REG_WORD(wptr); 289 mb[cnt] = RD_REG_WORD(wptr);
290 290
291 ql_dbg(ql_dbg_async, vha, 0x5021, 291 ql_dbg(ql_dbg_async, vha, 0x5021,
292 "Inter-Driver Commucation %s -- " 292 "Inter-Driver Communication %s -- "
293 "%04x %04x %04x %04x %04x %04x %04x.\n", 293 "%04x %04x %04x %04x %04x %04x %04x.\n",
294 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], 294 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
295 mb[4], mb[5], mb[6]); 295 mb[4], mb[5], mb[6]);
@@ -318,7 +318,7 @@ void
318qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) 318qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
319{ 319{
320#define LS_UNKNOWN 2 320#define LS_UNKNOWN 2
321 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" }; 321 static char *link_speeds[] = { "1", "2", "?", "4", "8", "16", "10" };
322 char *link_speed; 322 char *link_speed;
323 uint16_t handle_cnt; 323 uint16_t handle_cnt;
324 uint16_t cnt, mbx; 324 uint16_t cnt, mbx;
@@ -328,12 +328,11 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
328 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 328 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
329 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; 329 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
330 uint32_t rscn_entry, host_pid; 330 uint32_t rscn_entry, host_pid;
331 uint8_t rscn_queue_index;
332 unsigned long flags; 331 unsigned long flags;
333 332
334 /* Setup to process RIO completion. */ 333 /* Setup to process RIO completion. */
335 handle_cnt = 0; 334 handle_cnt = 0;
336 if (IS_QLA8XXX_TYPE(ha)) 335 if (IS_CNA_CAPABLE(ha))
337 goto skip_rio; 336 goto skip_rio;
338 switch (mb[0]) { 337 switch (mb[0]) {
339 case MBA_SCSI_COMPLETION: 338 case MBA_SCSI_COMPLETION:
@@ -405,7 +404,8 @@ skip_rio:
405 break; 404 break;
406 405
407 case MBA_SYSTEM_ERR: /* System Error */ 406 case MBA_SYSTEM_ERR: /* System Error */
408 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0; 407 mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
408 RD_REG_WORD(&reg24->mailbox7) : 0;
409 ql_log(ql_log_warn, vha, 0x5003, 409 ql_log(ql_log_warn, vha, 0x5003,
410 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh " 410 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
411 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); 411 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
@@ -418,6 +418,7 @@ skip_rio:
418 "Unrecoverable Hardware Error: adapter " 418 "Unrecoverable Hardware Error: adapter "
419 "marked OFFLINE!\n"); 419 "marked OFFLINE!\n");
420 vha->flags.online = 0; 420 vha->flags.online = 0;
421 vha->device_flags |= DFLG_DEV_FAILED;
421 } else { 422 } else {
422 /* Check to see if MPI timeout occurred */ 423 /* Check to see if MPI timeout occurred */
423 if ((mbx & MBX_3) && (ha->flags.port0)) 424 if ((mbx & MBX_3) && (ha->flags.port0))
@@ -431,6 +432,7 @@ skip_rio:
431 "Unrecoverable Hardware Error: adapter marked " 432 "Unrecoverable Hardware Error: adapter marked "
432 "OFFLINE!\n"); 433 "OFFLINE!\n");
433 vha->flags.online = 0; 434 vha->flags.online = 0;
435 vha->device_flags |= DFLG_DEV_FAILED;
434 } else 436 } else
435 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 437 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
436 break; 438 break;
@@ -482,10 +484,10 @@ skip_rio:
482 ha->link_data_rate = PORT_SPEED_1GB; 484 ha->link_data_rate = PORT_SPEED_1GB;
483 } else { 485 } else {
484 link_speed = link_speeds[LS_UNKNOWN]; 486 link_speed = link_speeds[LS_UNKNOWN];
485 if (mb[1] < 5) 487 if (mb[1] < 6)
486 link_speed = link_speeds[mb[1]]; 488 link_speed = link_speeds[mb[1]];
487 else if (mb[1] == 0x13) 489 else if (mb[1] == 0x13)
488 link_speed = link_speeds[5]; 490 link_speed = link_speeds[6];
489 ha->link_data_rate = mb[1]; 491 ha->link_data_rate = mb[1];
490 } 492 }
491 493
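
The table edit above is easy to misread: index 5 used to hold the 10-gigabit string, with firmware code 0x13 (10 GbE on CNAs) mapped onto it. Inserting "16" for 16Gb FC widens the direct-lookup bound to mb[1] < 6 and parks the 0x13 special case at index 6. Laid out:

    /* mb[1] (firmware rate code) -> reported link speed
     *  0 -> "1"   1 -> "2"   2 -> "?"   3 -> "4"
     *  4 -> "8"   5 -> "16"  (new 16Gb FC entry)
     *  0x13 -> "10" (CNA 10 GbE, now at index 6)
     */
    if (mb[1] < 6)
            link_speed = link_speeds[mb[1]];
    else if (mb[1] == 0x13)
            link_speed = link_speeds[6];
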
@@ -497,7 +499,8 @@ skip_rio:
497 break; 499 break;
498 500
499 case MBA_LOOP_DOWN: /* Loop Down Event */ 501 case MBA_LOOP_DOWN: /* Loop Down Event */
500 mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0; 502 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
503 ? RD_REG_WORD(&reg24->mailbox4) : 0;
501 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx; 504 mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
502 ql_dbg(ql_dbg_async, vha, 0x500b, 505 ql_dbg(ql_dbg_async, vha, 0x500b,
503 "LOOP DOWN detected (%x %x %x %x).\n", 506 "LOOP DOWN detected (%x %x %x %x).\n",
@@ -547,7 +550,7 @@ skip_rio:
547 if (IS_QLA2100(ha)) 550 if (IS_QLA2100(ha))
548 break; 551 break;
549 552
550 if (IS_QLA8XXX_TYPE(ha)) { 553 if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
551 ql_dbg(ql_dbg_async, vha, 0x500d, 554 ql_dbg(ql_dbg_async, vha, 0x500d,
552 "DCBX Completed -- %04x %04x %04x.\n", 555 "DCBX Completed -- %04x %04x %04x.\n",
553 mb[1], mb[2], mb[3]); 556 mb[1], mb[2], mb[3]);
@@ -681,8 +684,6 @@ skip_rio:
681 684
682 qla2x00_mark_all_devices_lost(vha, 1); 685 qla2x00_mark_all_devices_lost(vha, 1);
683 686
684 vha->flags.rscn_queue_overflow = 1;
685
686 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 687 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
687 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 688 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
688 break; 689 break;
@@ -711,15 +712,6 @@ skip_rio:
711 712
712 /* Ignore reserved bits from RSCN-payload. */ 713 /* Ignore reserved bits from RSCN-payload. */
713 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; 714 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
714 rscn_queue_index = vha->rscn_in_ptr + 1;
715 if (rscn_queue_index == MAX_RSCN_COUNT)
716 rscn_queue_index = 0;
717 if (rscn_queue_index != vha->rscn_out_ptr) {
718 vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
719 vha->rscn_in_ptr = rscn_queue_index;
720 } else {
721 vha->flags.rscn_queue_overflow = 1;
722 }
723 715
724 atomic_set(&vha->loop_down_timer, 0); 716 atomic_set(&vha->loop_down_timer, 0);
725 vha->flags.management_server_logged_in = 0; 717 vha->flags.management_server_logged_in = 0;
@@ -809,6 +801,10 @@ skip_rio:
809 case MBA_IDC_TIME_EXT: 801 case MBA_IDC_TIME_EXT:
810 qla81xx_idc_event(vha, mb[0], mb[1]); 802 qla81xx_idc_event(vha, mb[0], mb[1]);
811 break; 803 break;
804 default:
805 ql_dbg(ql_dbg_async, vha, 0x5057,
806 "Unknown AEN:%04x %04x %04x %04x\n",
807 mb[0], mb[1], mb[2], mb[3]);
812 } 808 }
813 809
814 if (!vha->vp_idx && ha->num_vhosts) 810 if (!vha->vp_idx && ha->num_vhosts)
@@ -845,8 +841,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
845 req->outstanding_cmds[index] = NULL; 841 req->outstanding_cmds[index] = NULL;
846 842
847 /* Save ISP completion status */ 843 /* Save ISP completion status */
848 sp->cmd->result = DID_OK << 16; 844 sp->done(ha, sp, DID_OK << 16);
849 qla2x00_sp_compl(ha, sp);
850 } else { 845 } else {
851 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); 846 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
852 847
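
This is the pattern repeated through the rest of the ISR: instead of poking cmd->result and calling qla2x00_sp_compl() by name, completion goes through a per-SRB sp->done(ha, sp, res) hook installed when the SRB is built. A hedged sketch of what the SCSI flavor of that hook presumably does (the real one lives in qla_os.c):

    static void qla2x00_sp_compl(void *data, void *ptr, int res)
    {
            srb_t *sp = ptr;
            struct scsi_cmnd *cmd = GET_CMD_SP(sp);

            cmd->result = res;              /* e.g. DID_OK << 16 */
            sp->free(sp->fcport->vha, sp);  /* unmap and release the srb */
            cmd->scsi_done(cmd);
    }
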
@@ -903,7 +898,6 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
903 fc_port_t *fcport; 898 fc_port_t *fcport;
904 srb_t *sp; 899 srb_t *sp;
905 struct srb_iocb *lio; 900 struct srb_iocb *lio;
906 struct srb_ctx *ctx;
907 uint16_t *data; 901 uint16_t *data;
908 uint16_t status; 902 uint16_t status;
909 903
@@ -911,9 +905,8 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
911 if (!sp) 905 if (!sp)
912 return; 906 return;
913 907
914 ctx = sp->ctx; 908 lio = &sp->u.iocb_cmd;
915 lio = ctx->u.iocb_cmd; 909 type = sp->name;
916 type = ctx->name;
917 fcport = sp->fcport; 910 fcport = sp->fcport;
918 data = lio->u.logio.data; 911 data = lio->u.logio.data;
919 912
@@ -937,7 +930,7 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
937 } 930 }
938 931
939 status = le16_to_cpu(mbx->status); 932 status = le16_to_cpu(mbx->status);
940 if (status == 0x30 && ctx->type == SRB_LOGIN_CMD && 933 if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
941 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) 934 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
942 status = 0; 935 status = 0;
943 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { 936 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
@@ -948,7 +941,7 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
948 le16_to_cpu(mbx->mb1)); 941 le16_to_cpu(mbx->mb1));
949 942
950 data[0] = MBS_COMMAND_COMPLETE; 943 data[0] = MBS_COMMAND_COMPLETE;
951 if (ctx->type == SRB_LOGIN_CMD) { 944 if (sp->type == SRB_LOGIN_CMD) {
952 fcport->port_type = FCT_TARGET; 945 fcport->port_type = FCT_TARGET;
953 if (le16_to_cpu(mbx->mb1) & BIT_0) 946 if (le16_to_cpu(mbx->mb1) & BIT_0)
954 fcport->port_type = FCT_INITIATOR; 947 fcport->port_type = FCT_INITIATOR;
@@ -979,7 +972,7 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
979 le16_to_cpu(mbx->mb7)); 972 le16_to_cpu(mbx->mb7));
980 973
981logio_done: 974logio_done:
982 lio->done(sp); 975 sp->done(vha, sp, 0);
983} 976}
984 977
985static void 978static void
@@ -988,29 +981,18 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
988{ 981{
989 const char func[] = "CT_IOCB"; 982 const char func[] = "CT_IOCB";
990 const char *type; 983 const char *type;
991 struct qla_hw_data *ha = vha->hw;
992 srb_t *sp; 984 srb_t *sp;
993 struct srb_ctx *sp_bsg;
994 struct fc_bsg_job *bsg_job; 985 struct fc_bsg_job *bsg_job;
995 uint16_t comp_status; 986 uint16_t comp_status;
987 int res;
996 988
997 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 989 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
998 if (!sp) 990 if (!sp)
999 return; 991 return;
1000 992
1001 sp_bsg = sp->ctx; 993 bsg_job = sp->u.bsg_job;
1002 bsg_job = sp_bsg->u.bsg_job;
1003 994
1004 type = NULL; 995 type = "ct pass-through";
1005 switch (sp_bsg->type) {
1006 case SRB_CT_CMD:
1007 type = "ct pass-through";
1008 break;
1009 default:
1010 ql_log(ql_log_warn, vha, 0x5047,
1011 "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
1012 return;
1013 }
1014 996
1015 comp_status = le16_to_cpu(pkt->comp_status); 997 comp_status = le16_to_cpu(pkt->comp_status);
1016 998
@@ -1022,7 +1004,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1022 1004
1023 if (comp_status != CS_COMPLETE) { 1005 if (comp_status != CS_COMPLETE) {
1024 if (comp_status == CS_DATA_UNDERRUN) { 1006 if (comp_status == CS_DATA_UNDERRUN) {
1025 bsg_job->reply->result = DID_OK << 16; 1007 res = DID_OK << 16;
1026 bsg_job->reply->reply_payload_rcv_len = 1008 bsg_job->reply->reply_payload_rcv_len =
1027 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); 1009 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
1028 1010
@@ -1035,30 +1017,19 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1035 ql_log(ql_log_warn, vha, 0x5049, 1017 ql_log(ql_log_warn, vha, 0x5049,
1036 "CT pass-through-%s error " 1018 "CT pass-through-%s error "
1037 "comp_status-status=0x%x.\n", type, comp_status); 1019 "comp_status-status=0x%x.\n", type, comp_status);
1038 bsg_job->reply->result = DID_ERROR << 16; 1020 res = DID_ERROR << 16;
1039 bsg_job->reply->reply_payload_rcv_len = 0; 1021 bsg_job->reply->reply_payload_rcv_len = 0;
1040 } 1022 }
1041 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, 1023 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
1042 (uint8_t *)pkt, sizeof(*pkt)); 1024 (uint8_t *)pkt, sizeof(*pkt));
1043 } else { 1025 } else {
1044 bsg_job->reply->result = DID_OK << 16; 1026 res = DID_OK << 16;
1045 bsg_job->reply->reply_payload_rcv_len = 1027 bsg_job->reply->reply_payload_rcv_len =
1046 bsg_job->reply_payload.payload_len; 1028 bsg_job->reply_payload.payload_len;
1047 bsg_job->reply_len = 0; 1029 bsg_job->reply_len = 0;
1048 } 1030 }
1049 1031
1050 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, 1032 sp->done(vha, sp, res);
1051 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1052
1053 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1054 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1055
1056 if (sp_bsg->type == SRB_ELS_CMD_HST || sp_bsg->type == SRB_CT_CMD)
1057 kfree(sp->fcport);
1058
1059 kfree(sp->ctx);
1060 mempool_free(sp, ha->srb_mempool);
1061 bsg_job->job_done(bsg_job);
1062} 1033}
1063 1034
1064static void 1035static void
@@ -1067,22 +1038,20 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1067{ 1038{
1068 const char func[] = "ELS_CT_IOCB"; 1039 const char func[] = "ELS_CT_IOCB";
1069 const char *type; 1040 const char *type;
1070 struct qla_hw_data *ha = vha->hw;
1071 srb_t *sp; 1041 srb_t *sp;
1072 struct srb_ctx *sp_bsg;
1073 struct fc_bsg_job *bsg_job; 1042 struct fc_bsg_job *bsg_job;
1074 uint16_t comp_status; 1043 uint16_t comp_status;
1075 uint32_t fw_status[3]; 1044 uint32_t fw_status[3];
1076 uint8_t* fw_sts_ptr; 1045 uint8_t* fw_sts_ptr;
1046 int res;
1077 1047
1078 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1048 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1079 if (!sp) 1049 if (!sp)
1080 return; 1050 return;
1081 sp_bsg = sp->ctx; 1051 bsg_job = sp->u.bsg_job;
1082 bsg_job = sp_bsg->u.bsg_job;
1083 1052
1084 type = NULL; 1053 type = NULL;
1085 switch (sp_bsg->type) { 1054 switch (sp->type) {
1086 case SRB_ELS_CMD_RPT: 1055 case SRB_ELS_CMD_RPT:
1087 case SRB_ELS_CMD_HST: 1056 case SRB_ELS_CMD_HST:
1088 type = "els"; 1057 type = "els";
@@ -1091,8 +1060,8 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1091 type = "ct pass-through"; 1060 type = "ct pass-through";
1092 break; 1061 break;
1093 default: 1062 default:
1094 ql_log(ql_log_warn, vha, 0x503e, 1063 ql_dbg(ql_dbg_user, vha, 0x503e,
1095 "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type); 1064 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
1096 return; 1065 return;
1097 } 1066 }
1098 1067
@@ -1108,11 +1077,11 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1108 1077
1109 if (comp_status != CS_COMPLETE) { 1078 if (comp_status != CS_COMPLETE) {
1110 if (comp_status == CS_DATA_UNDERRUN) { 1079 if (comp_status == CS_DATA_UNDERRUN) {
1111 bsg_job->reply->result = DID_OK << 16; 1080 res = DID_OK << 16;
1112 bsg_job->reply->reply_payload_rcv_len = 1081 bsg_job->reply->reply_payload_rcv_len =
1113 le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count); 1082 le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
1114 1083
1115 ql_log(ql_log_info, vha, 0x503f, 1084 ql_dbg(ql_dbg_user, vha, 0x503f,
1116 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 1085 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
1117 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", 1086 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
1118 type, sp->handle, comp_status, fw_status[1], fw_status[2], 1087 type, sp->handle, comp_status, fw_status[1], fw_status[2],
@@ -1122,7 +1091,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1122 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1091 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1123 } 1092 }
1124 else { 1093 else {
1125 ql_log(ql_log_info, vha, 0x5040, 1094 ql_dbg(ql_dbg_user, vha, 0x5040,
1126 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 1095 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
1127 "error subcode 1=0x%x error subcode 2=0x%x.\n", 1096 "error subcode 1=0x%x error subcode 2=0x%x.\n",
1128 type, sp->handle, comp_status, 1097 type, sp->handle, comp_status,
@@ -1130,32 +1099,21 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1130 pkt)->error_subcode_1), 1099 pkt)->error_subcode_1),
1131 le16_to_cpu(((struct els_sts_entry_24xx *) 1100 le16_to_cpu(((struct els_sts_entry_24xx *)
1132 pkt)->error_subcode_2)); 1101 pkt)->error_subcode_2));
1133 bsg_job->reply->result = DID_ERROR << 16; 1102 res = DID_ERROR << 16;
1134 bsg_job->reply->reply_payload_rcv_len = 0; 1103 bsg_job->reply->reply_payload_rcv_len = 0;
1135 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1104 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1136 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1105 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
1137 } 1106 }
1138 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5056, 1107 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
1139 (uint8_t *)pkt, sizeof(*pkt)); 1108 (uint8_t *)pkt, sizeof(*pkt));
1140 } 1109 }
1141 else { 1110 else {
1142 bsg_job->reply->result = DID_OK << 16; 1111 res = DID_OK << 16;
1143 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; 1112 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1144 bsg_job->reply_len = 0; 1113 bsg_job->reply_len = 0;
1145 } 1114 }
1146 1115
1147 dma_unmap_sg(&ha->pdev->dev, 1116 sp->done(vha, sp, res);
1148 bsg_job->request_payload.sg_list,
1149 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1150 dma_unmap_sg(&ha->pdev->dev,
1151 bsg_job->reply_payload.sg_list,
1152 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1153 if ((sp_bsg->type == SRB_ELS_CMD_HST) ||
1154 (sp_bsg->type == SRB_CT_CMD))
1155 kfree(sp->fcport);
1156 kfree(sp->ctx);
1157 mempool_free(sp, ha->srb_mempool);
1158 bsg_job->job_done(bsg_job);
1159} 1117}
1160 1118
1161static void 1119static void
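
Both BSG completion paths above shrink the same way: the duplicated dma_unmap_sg()/kfree()/mempool_free()/job_done() tail is replaced by a computed res handed to sp->done(). A hedged sketch of the shared completion this implies, with the hook name assumed from the series (payload unmapping and the fcport cleanup would sit behind sp->free):

    static void qla2x00_bsg_job_done(void *data, void *ptr, int res)
    {
            srb_t *sp = ptr;
            struct fc_bsg_job *bsg_job = sp->u.bsg_job;

            bsg_job->reply->result = res;
            bsg_job->job_done(bsg_job);
            sp->free(sp->fcport->vha, sp);
    }
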
@@ -1167,7 +1125,6 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1167 fc_port_t *fcport; 1125 fc_port_t *fcport;
1168 srb_t *sp; 1126 srb_t *sp;
1169 struct srb_iocb *lio; 1127 struct srb_iocb *lio;
1170 struct srb_ctx *ctx;
1171 uint16_t *data; 1128 uint16_t *data;
1172 uint32_t iop[2]; 1129 uint32_t iop[2];
1173 1130
@@ -1175,9 +1132,8 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1175 if (!sp) 1132 if (!sp)
1176 return; 1133 return;
1177 1134
1178 ctx = sp->ctx; 1135 lio = &sp->u.iocb_cmd;
1179 lio = ctx->u.iocb_cmd; 1136 type = sp->name;
1180 type = ctx->name;
1181 fcport = sp->fcport; 1137 fcport = sp->fcport;
1182 data = lio->u.logio.data; 1138 data = lio->u.logio.data;
1183 1139
@@ -1185,7 +1141,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1185 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 1141 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1186 QLA_LOGIO_LOGIN_RETRIED : 0; 1142 QLA_LOGIO_LOGIN_RETRIED : 0;
1187 if (logio->entry_status) { 1143 if (logio->entry_status) {
1188 ql_log(ql_log_warn, vha, 0x5034, 1144 ql_log(ql_log_warn, fcport->vha, 0x5034,
1189 "Async-%s error entry - hdl=%x" 1145 "Async-%s error entry - hdl=%x"
1190 "portid=%02x%02x%02x entry-status=%x.\n", 1146 "portid=%02x%02x%02x entry-status=%x.\n",
1191 type, sp->handle, fcport->d_id.b.domain, 1147 type, sp->handle, fcport->d_id.b.domain,
@@ -1198,14 +1154,14 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1198 } 1154 }
1199 1155
1200 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { 1156 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
1201 ql_dbg(ql_dbg_async, vha, 0x5036, 1157 ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
1202 "Async-%s complete - hdl=%x portid=%02x%02x%02x " 1158 "Async-%s complete - hdl=%x portid=%02x%02x%02x "
1203 "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain, 1159 "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
1204 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1160 fcport->d_id.b.area, fcport->d_id.b.al_pa,
1205 le32_to_cpu(logio->io_parameter[0])); 1161 le32_to_cpu(logio->io_parameter[0]));
1206 1162
1207 data[0] = MBS_COMMAND_COMPLETE; 1163 data[0] = MBS_COMMAND_COMPLETE;
1208 if (ctx->type != SRB_LOGIN_CMD) 1164 if (sp->type != SRB_LOGIN_CMD)
1209 goto logio_done; 1165 goto logio_done;
1210 1166
1211 iop[0] = le32_to_cpu(logio->io_parameter[0]); 1167 iop[0] = le32_to_cpu(logio->io_parameter[0]);
@@ -1239,7 +1195,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1239 break; 1195 break;
1240 } 1196 }
1241 1197
1242 ql_dbg(ql_dbg_async, vha, 0x5037, 1198 ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
1243 "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x " 1199 "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
1244 "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain, 1200 "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
1245 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1201 fcport->d_id.b.area, fcport->d_id.b.al_pa,
@@ -1248,7 +1204,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1248 le32_to_cpu(logio->io_parameter[1])); 1204 le32_to_cpu(logio->io_parameter[1]));
1249 1205
1250logio_done: 1206logio_done:
1251 lio->done(sp); 1207 sp->done(vha, sp, 0);
1252} 1208}
1253 1209
1254static void 1210static void
@@ -1260,7 +1216,6 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1260 fc_port_t *fcport; 1216 fc_port_t *fcport;
1261 srb_t *sp; 1217 srb_t *sp;
1262 struct srb_iocb *iocb; 1218 struct srb_iocb *iocb;
1263 struct srb_ctx *ctx;
1264 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 1219 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1265 int error = 1; 1220 int error = 1;
1266 1221
@@ -1268,30 +1223,29 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1268 if (!sp) 1223 if (!sp)
1269 return; 1224 return;
1270 1225
1271 ctx = sp->ctx; 1226 iocb = &sp->u.iocb_cmd;
1272 iocb = ctx->u.iocb_cmd; 1227 type = sp->name;
1273 type = ctx->name;
1274 fcport = sp->fcport; 1228 fcport = sp->fcport;
1275 1229
1276 if (sts->entry_status) { 1230 if (sts->entry_status) {
1277 ql_log(ql_log_warn, vha, 0x5038, 1231 ql_log(ql_log_warn, fcport->vha, 0x5038,
1278 "Async-%s error - hdl=%x entry-status(%x).\n", 1232 "Async-%s error - hdl=%x entry-status(%x).\n",
1279 type, sp->handle, sts->entry_status); 1233 type, sp->handle, sts->entry_status);
1280 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1234 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1281 ql_log(ql_log_warn, vha, 0x5039, 1235 ql_log(ql_log_warn, fcport->vha, 0x5039,
1282 "Async-%s error - hdl=%x completion status(%x).\n", 1236 "Async-%s error - hdl=%x completion status(%x).\n",
1283 type, sp->handle, sts->comp_status); 1237 type, sp->handle, sts->comp_status);
1284 } else if (!(le16_to_cpu(sts->scsi_status) & 1238 } else if (!(le16_to_cpu(sts->scsi_status) &
1285 SS_RESPONSE_INFO_LEN_VALID)) { 1239 SS_RESPONSE_INFO_LEN_VALID)) {
1286 ql_log(ql_log_warn, vha, 0x503a, 1240 ql_log(ql_log_warn, fcport->vha, 0x503a,
1287 "Async-%s error - hdl=%x no response info(%x).\n", 1241 "Async-%s error - hdl=%x no response info(%x).\n",
1288 type, sp->handle, sts->scsi_status); 1242 type, sp->handle, sts->scsi_status);
1289 } else if (le32_to_cpu(sts->rsp_data_len) < 4) { 1243 } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
1290 ql_log(ql_log_warn, vha, 0x503b, 1244 ql_log(ql_log_warn, fcport->vha, 0x503b,
1291 "Async-%s error - hdl=%x not enough response(%d).\n", 1245 "Async-%s error - hdl=%x not enough response(%d).\n",
1292 type, sp->handle, sts->rsp_data_len); 1246 type, sp->handle, sts->rsp_data_len);
1293 } else if (sts->data[3]) { 1247 } else if (sts->data[3]) {
1294 ql_log(ql_log_warn, vha, 0x503c, 1248 ql_log(ql_log_warn, fcport->vha, 0x503c,
1295 "Async-%s error - hdl=%x response(%x).\n", 1249 "Async-%s error - hdl=%x response(%x).\n",
1296 type, sp->handle, sts->data[3]); 1250 type, sp->handle, sts->data[3]);
1297 } else { 1251 } else {
@@ -1304,7 +1258,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1304 (uint8_t *)sts, sizeof(*sts)); 1258 (uint8_t *)sts, sizeof(*sts));
1305 } 1259 }
1306 1260
1307 iocb->done(sp); 1261 sp->done(vha, sp, 0);
1308} 1262}
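
The two hunks above are part of a larger srb_t consolidation: the per-type context (srb_ctx) and its type-specific lio->done()/iocb->done() hooks give way to a single sp->done(vha, sp, res) callback stored on the request itself. A minimal standalone C sketch of that pattern — types and names are invented here, not the driver's:

#include <stdio.h>

struct host;                                /* opaque HBA context */

struct request {
        const char *name;                   /* e.g. "login", "tm" */
        unsigned int handle;
        /* one completion hook for every request type */
        void (*done)(struct host *h, struct request *rq, int res);
};

static void login_done(struct host *h, struct request *rq, int res)
{
        (void)h;
        printf("Async-%s complete - hdl=%x res=%d\n", rq->name, rq->handle, res);
}

static void complete_entry(struct host *h, struct request *rq, int res)
{
        /* the interrupt handler no longer cares what kind of request it is */
        rq->done(h, rq, res);
}

int main(void)
{
        struct request rq = { .name = "login", .handle = 0x12, .done = login_done };

        complete_entry(NULL, &rq, 0);
        return 0;
}
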
1309 1263
1310/** 1264/**
@@ -1390,25 +1344,32 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
1390 1344
1391static inline void 1345static inline void
1392qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, 1346qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1393 uint32_t sense_len, struct rsp_que *rsp) 1347 uint32_t sense_len, struct rsp_que *rsp, int res)
1394{ 1348{
1395 struct scsi_qla_host *vha = sp->fcport->vha; 1349 struct scsi_qla_host *vha = sp->fcport->vha;
1396 struct scsi_cmnd *cp = sp->cmd; 1350 struct scsi_cmnd *cp = GET_CMD_SP(sp);
1351 uint32_t track_sense_len;
1397 1352
1398 if (sense_len >= SCSI_SENSE_BUFFERSIZE) 1353 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
1399 sense_len = SCSI_SENSE_BUFFERSIZE; 1354 sense_len = SCSI_SENSE_BUFFERSIZE;
1400 1355
1401 sp->request_sense_length = sense_len; 1356 SET_CMD_SENSE_LEN(sp, sense_len);
1402 sp->request_sense_ptr = cp->sense_buffer; 1357 SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
1403 if (sp->request_sense_length > par_sense_len) 1358 track_sense_len = sense_len;
1359
1360 if (sense_len > par_sense_len)
1404 sense_len = par_sense_len; 1361 sense_len = par_sense_len;
1405 1362
1406 memcpy(cp->sense_buffer, sense_data, sense_len); 1363 memcpy(cp->sense_buffer, sense_data, sense_len);
1407 1364
1408 sp->request_sense_ptr += sense_len; 1365 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
1409 sp->request_sense_length -= sense_len; 1366 track_sense_len -= sense_len;
1410 if (sp->request_sense_length != 0) 1367 SET_CMD_SENSE_LEN(sp, track_sense_len);
1368
1369 if (track_sense_len != 0) {
1411 rsp->status_srb = sp; 1370 rsp->status_srb = sp;
1371 cp->result = res;
1372 }
1412 1373
1413 if (sense_len) { 1374 if (sense_len) {
1414 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c, 1375 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
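
The SET_CMD_SENSE_PTR/SET_CMD_SENSE_LEN accessors above track sense data that arrives split across multiple IOCBs. A small self-contained sketch of that bookkeeping, assuming an invented 24-byte per-packet payload (the driver's real packet layout differs):

#include <stdio.h>
#include <string.h>

#define CONT_PAYLOAD 24                     /* assumed per-packet capacity */

struct cmd {
        unsigned char sense[64];
        unsigned char *sense_ptr;           /* next write position */
        unsigned int sense_len;             /* bytes still expected */
};

/* consume one continuation packet's worth of sense data */
static void sense_cont(struct cmd *c, const unsigned char *pkt)
{
        unsigned int sz = c->sense_len > CONT_PAYLOAD ? CONT_PAYLOAD : c->sense_len;

        memcpy(c->sense_ptr, pkt, sz);
        c->sense_ptr += sz;
        c->sense_len -= sz;
        if (c->sense_len == 0)
                printf("sense complete, command may finish now\n");
}

int main(void)
{
        unsigned char pkt[CONT_PAYLOAD] = { 0x70 };
        struct cmd c = { .sense_len = 40 };

        c.sense_ptr = c.sense;
        sense_cont(&c, pkt);                /* 24 bytes copied, 16 remain */
        sense_cont(&c, pkt);                /* final 16, completion fires */
        return 0;
}
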
@@ -1436,7 +1397,7 @@ static inline int
1436qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 1397qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1437{ 1398{
1438 struct scsi_qla_host *vha = sp->fcport->vha; 1399 struct scsi_qla_host *vha = sp->fcport->vha;
1439 struct scsi_cmnd *cmd = sp->cmd; 1400 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1440 uint8_t *ap = &sts24->data[12]; 1401 uint8_t *ap = &sts24->data[12];
1441 uint8_t *ep = &sts24->data[20]; 1402 uint8_t *ep = &sts24->data[20];
1442 uint32_t e_ref_tag, a_ref_tag; 1403 uint32_t e_ref_tag, a_ref_tag;
@@ -1580,6 +1541,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1580 uint16_t que; 1541 uint16_t que;
1581 struct req_que *req; 1542 struct req_que *req;
1582 int logit = 1; 1543 int logit = 1;
1544 int res = 0;
1583 1545
1584 sts = (sts_entry_t *) pkt; 1546 sts = (sts_entry_t *) pkt;
1585 sts24 = (struct sts_entry_24xx *) pkt; 1547 sts24 = (struct sts_entry_24xx *) pkt;
@@ -1619,7 +1581,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1619 qla2xxx_wake_dpc(vha); 1581 qla2xxx_wake_dpc(vha);
1620 return; 1582 return;
1621 } 1583 }
1622 cp = sp->cmd; 1584 cp = GET_CMD_SP(sp);
1623 if (cp == NULL) { 1585 if (cp == NULL) {
1624 ql_dbg(ql_dbg_io, vha, 0x3018, 1586 ql_dbg(ql_dbg_io, vha, 0x3018,
1625 "Command already returned (0x%x/%p).\n", 1587 "Command already returned (0x%x/%p).\n",
@@ -1668,11 +1630,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1668 par_sense_len -= rsp_info_len; 1630 par_sense_len -= rsp_info_len;
1669 } 1631 }
1670 if (rsp_info_len > 3 && rsp_info[3]) { 1632 if (rsp_info_len > 3 && rsp_info[3]) {
1671 ql_dbg(ql_dbg_io, vha, 0x3019, 1633 ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
1672 "FCP I/O protocol failure (0x%x/0x%x).\n", 1634 "FCP I/O protocol failure (0x%x/0x%x).\n",
1673 rsp_info_len, rsp_info[3]); 1635 rsp_info_len, rsp_info[3]);
1674 1636
1675 cp->result = DID_BUS_BUSY << 16; 1637 res = DID_BUS_BUSY << 16;
1676 goto out; 1638 goto out;
1677 } 1639 }
1678 } 1640 }
@@ -1689,7 +1651,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1689 case CS_COMPLETE: 1651 case CS_COMPLETE:
1690 case CS_QUEUE_FULL: 1652 case CS_QUEUE_FULL:
1691 if (scsi_status == 0) { 1653 if (scsi_status == 0) {
1692 cp->result = DID_OK << 16; 1654 res = DID_OK << 16;
1693 break; 1655 break;
1694 } 1656 }
1695 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) { 1657 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
@@ -1699,19 +1661,19 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1699 if (!lscsi_status && 1661 if (!lscsi_status &&
1700 ((unsigned)(scsi_bufflen(cp) - resid) < 1662 ((unsigned)(scsi_bufflen(cp) - resid) <
1701 cp->underflow)) { 1663 cp->underflow)) {
1702 ql_dbg(ql_dbg_io, vha, 0x301a, 1664 ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
1703 "Mid-layer underflow " 1665 "Mid-layer underflow "
1704 "detected (0x%x of 0x%x bytes).\n", 1666 "detected (0x%x of 0x%x bytes).\n",
1705 resid, scsi_bufflen(cp)); 1667 resid, scsi_bufflen(cp));
1706 1668
1707 cp->result = DID_ERROR << 16; 1669 res = DID_ERROR << 16;
1708 break; 1670 break;
1709 } 1671 }
1710 } 1672 }
1711 cp->result = DID_OK << 16 | lscsi_status; 1673 res = DID_OK << 16 | lscsi_status;
1712 1674
1713 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1675 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1714 ql_dbg(ql_dbg_io, vha, 0x301b, 1676 ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
1715 "QUEUE FULL detected.\n"); 1677 "QUEUE FULL detected.\n");
1716 break; 1678 break;
1717 } 1679 }
@@ -1724,7 +1686,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1724 break; 1686 break;
1725 1687
1726 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, 1688 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
1727 rsp); 1689 rsp, res);
1728 break; 1690 break;
1729 1691
1730 case CS_DATA_UNDERRUN: 1692 case CS_DATA_UNDERRUN:
@@ -1733,36 +1695,36 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1733 scsi_set_resid(cp, resid); 1695 scsi_set_resid(cp, resid);
1734 if (scsi_status & SS_RESIDUAL_UNDER) { 1696 if (scsi_status & SS_RESIDUAL_UNDER) {
1735 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { 1697 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
1736 ql_dbg(ql_dbg_io, vha, 0x301d, 1698 ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
1737 "Dropped frame(s) detected " 1699 "Dropped frame(s) detected "
1738 "(0x%x of 0x%x bytes).\n", 1700 "(0x%x of 0x%x bytes).\n",
1739 resid, scsi_bufflen(cp)); 1701 resid, scsi_bufflen(cp));
1740 1702
1741 cp->result = DID_ERROR << 16 | lscsi_status; 1703 res = DID_ERROR << 16 | lscsi_status;
1742 goto check_scsi_status; 1704 goto check_scsi_status;
1743 } 1705 }
1744 1706
1745 if (!lscsi_status && 1707 if (!lscsi_status &&
1746 ((unsigned)(scsi_bufflen(cp) - resid) < 1708 ((unsigned)(scsi_bufflen(cp) - resid) <
1747 cp->underflow)) { 1709 cp->underflow)) {
1748 ql_dbg(ql_dbg_io, vha, 0x301e, 1710 ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
1749 "Mid-layer underflow " 1711 "Mid-layer underflow "
1750 "detected (0x%x of 0x%x bytes).\n", 1712 "detected (0x%x of 0x%x bytes).\n",
1751 resid, scsi_bufflen(cp)); 1713 resid, scsi_bufflen(cp));
1752 1714
1753 cp->result = DID_ERROR << 16; 1715 res = DID_ERROR << 16;
1754 break; 1716 break;
1755 } 1717 }
1756 } else { 1718 } else {
1757 ql_dbg(ql_dbg_io, vha, 0x301f, 1719 ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
1758 "Dropped frame(s) detected (0x%x " 1720 "Dropped frame(s) detected (0x%x "
1759 "of 0x%x bytes).\n", resid, scsi_bufflen(cp)); 1721 "of 0x%x bytes).\n", resid, scsi_bufflen(cp));
1760 1722
1761 cp->result = DID_ERROR << 16 | lscsi_status; 1723 res = DID_ERROR << 16 | lscsi_status;
1762 goto check_scsi_status; 1724 goto check_scsi_status;
1763 } 1725 }
1764 1726
1765 cp->result = DID_OK << 16 | lscsi_status; 1727 res = DID_OK << 16 | lscsi_status;
1766 logit = 0; 1728 logit = 0;
1767 1729
1768check_scsi_status: 1730check_scsi_status:
@@ -1772,7 +1734,7 @@ check_scsi_status:
1772 */ 1734 */
1773 if (lscsi_status != 0) { 1735 if (lscsi_status != 0) {
1774 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1736 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1775 ql_dbg(ql_dbg_io, vha, 0x3020, 1737 ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
1776 "QUEUE FULL detected.\n"); 1738 "QUEUE FULL detected.\n");
1777 logit = 1; 1739 logit = 1;
1778 break; 1740 break;
@@ -1785,7 +1747,7 @@ check_scsi_status:
1785 break; 1747 break;
1786 1748
1787 qla2x00_handle_sense(sp, sense_data, par_sense_len, 1749 qla2x00_handle_sense(sp, sense_data, par_sense_len,
1788 sense_len, rsp); 1750 sense_len, rsp, res);
1789 } 1751 }
1790 break; 1752 break;
1791 1753
@@ -1802,7 +1764,7 @@ check_scsi_status:
1802 * while we try to recover so instruct the mid layer 1764 * while we try to recover so instruct the mid layer
1803 * to requeue until the class decides how to handle this. 1765 * to requeue until the class decides how to handle this.
1804 */ 1766 */
1805 cp->result = DID_TRANSPORT_DISRUPTED << 16; 1767 res = DID_TRANSPORT_DISRUPTED << 16;
1806 1768
1807 if (comp_status == CS_TIMEOUT) { 1769 if (comp_status == CS_TIMEOUT) {
1808 if (IS_FWI2_CAPABLE(ha)) 1770 if (IS_FWI2_CAPABLE(ha))
@@ -1812,7 +1774,7 @@ check_scsi_status:
1812 break; 1774 break;
1813 } 1775 }
1814 1776
1815 ql_dbg(ql_dbg_io, vha, 0x3021, 1777 ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
1816 "Port down status: port-state=0x%x.\n", 1778 "Port down status: port-state=0x%x.\n",
1817 atomic_read(&fcport->state)); 1779 atomic_read(&fcport->state));
1818 1780
@@ -1821,25 +1783,25 @@ check_scsi_status:
1821 break; 1783 break;
1822 1784
1823 case CS_ABORTED: 1785 case CS_ABORTED:
1824 cp->result = DID_RESET << 16; 1786 res = DID_RESET << 16;
1825 break; 1787 break;
1826 1788
1827 case CS_DIF_ERROR: 1789 case CS_DIF_ERROR:
1828 logit = qla2x00_handle_dif_error(sp, sts24); 1790 logit = qla2x00_handle_dif_error(sp, sts24);
1829 break; 1791 break;
1830 default: 1792 default:
1831 cp->result = DID_ERROR << 16; 1793 res = DID_ERROR << 16;
1832 break; 1794 break;
1833 } 1795 }
1834 1796
1835out: 1797out:
1836 if (logit) 1798 if (logit)
1837 ql_dbg(ql_dbg_io, vha, 0x3022, 1799 ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
1838 "FCP command status: 0x%x-0x%x (0x%x) " 1800 "FCP command status: 0x%x-0x%x (0x%x) "
1839 "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x " 1801 "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
1840 "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x " 1802 "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
1841 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n", 1803 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
1842 comp_status, scsi_status, cp->result, vha->host_no, 1804 comp_status, scsi_status, res, vha->host_no,
1843 cp->device->id, cp->device->lun, fcport->d_id.b.domain, 1805 cp->device->id, cp->device->lun, fcport->d_id.b.domain,
1844 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id, 1806 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
1845 cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3], 1807 cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
@@ -1848,7 +1810,7 @@ out:
1848 resid_len, fw_resid_len); 1810 resid_len, fw_resid_len);
1849 1811
1850 if (rsp->status_srb == NULL) 1812 if (rsp->status_srb == NULL)
1851 qla2x00_sp_compl(ha, sp); 1813 sp->done(ha, sp, res);
1852} 1814}
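
The theme of this function's conversion is visible above: every branch now writes a local res instead of poking cp->result directly, and the single completion call at the end receives it — unless sense continuation data is still pending, in which case completion is deferred. A toy sketch with invented status codes:

#include <stdio.h>

enum { CS_COMPLETE, CS_ABORTED, CS_OTHER };

/* compute the result locally; nothing is published until the end */
static int map_status(int comp_status)
{
        switch (comp_status) {
        case CS_COMPLETE:
                return 0;                   /* DID_OK-style success */
        case CS_ABORTED:
                return 2;                   /* reset-style error */
        default:
                return 1;                   /* generic error */
        }
}

int main(void)
{
        int pending_sense = 0;              /* rsp->status_srb analogue */
        int res = map_status(CS_ABORTED);

        if (!pending_sense)
                printf("done(res=%d) called exactly once\n", res);
        return 0;
}
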
1853 1815
1854/** 1816/**
@@ -1861,84 +1823,52 @@ out:
1861static void 1823static void
1862qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) 1824qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
1863{ 1825{
1864 uint8_t sense_sz = 0; 1826 uint8_t sense_sz = 0;
1865 struct qla_hw_data *ha = rsp->hw; 1827 struct qla_hw_data *ha = rsp->hw;
1866 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); 1828 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
1867 srb_t *sp = rsp->status_srb; 1829 srb_t *sp = rsp->status_srb;
1868 struct scsi_cmnd *cp; 1830 struct scsi_cmnd *cp;
1831 uint32_t sense_len;
1832 uint8_t *sense_ptr;
1869 1833
1870 if (sp != NULL && sp->request_sense_length != 0) { 1834 if (!sp || !GET_CMD_SENSE_LEN(sp))
1871 cp = sp->cmd; 1835 return;
1872 if (cp == NULL) {
1873 ql_log(ql_log_warn, vha, 0x3025,
1874 "cmd is NULL: already returned to OS (sp=%p).\n",
1875 sp);
1876 1836
1877 rsp->status_srb = NULL; 1837 sense_len = GET_CMD_SENSE_LEN(sp);
1878 return; 1838 sense_ptr = GET_CMD_SENSE_PTR(sp);
1879 }
1880 1839
1881 if (sp->request_sense_length > sizeof(pkt->data)) { 1840 cp = GET_CMD_SP(sp);
1882 sense_sz = sizeof(pkt->data); 1841 if (cp == NULL) {
1883 } else { 1842 ql_log(ql_log_warn, vha, 0x3025,
1884 sense_sz = sp->request_sense_length; 1843 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
1885 }
1886 1844
1887 /* Move sense data. */ 1845 rsp->status_srb = NULL;
1888 if (IS_FWI2_CAPABLE(ha)) 1846 return;
1889 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1890 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1891 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
1892 sp->request_sense_ptr, sense_sz);
1893
1894 sp->request_sense_ptr += sense_sz;
1895 sp->request_sense_length -= sense_sz;
1896
1897 /* Place command on done queue. */
1898 if (sp->request_sense_length == 0) {
1899 rsp->status_srb = NULL;
1900 qla2x00_sp_compl(ha, sp);
1901 }
1902 } 1847 }
1903}
1904 1848
1905static int 1849 if (sense_len > sizeof(pkt->data))
1906qla2x00_free_sp_ctx(scsi_qla_host_t *vha, srb_t *sp) 1850 sense_sz = sizeof(pkt->data);
1907{ 1851 else
1908 struct qla_hw_data *ha = vha->hw; 1852 sense_sz = sense_len;
1909 struct srb_ctx *ctx;
1910 1853
1911 if (!sp->ctx) 1854 /* Move sense data. */
1912 return 1; 1855 if (IS_FWI2_CAPABLE(ha))
1856 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1857 memcpy(sense_ptr, pkt->data, sense_sz);
1858 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
1859 sense_ptr, sense_sz);
1913 1860
1914 ctx = sp->ctx; 1861 sense_len -= sense_sz;
1862 sense_ptr += sense_sz;
1915 1863
1916 if (ctx->type == SRB_LOGIN_CMD || 1864 SET_CMD_SENSE_PTR(sp, sense_ptr);
1917 ctx->type == SRB_LOGOUT_CMD || 1865 SET_CMD_SENSE_LEN(sp, sense_len);
1918 ctx->type == SRB_TM_CMD) { 1866
1919 ctx->u.iocb_cmd->done(sp); 1867 /* Place command on done queue. */
1920 return 0; 1868 if (sense_len == 0) {
1921 } else if (ctx->type == SRB_ADISC_CMD) { 1869 rsp->status_srb = NULL;
1922 ctx->u.iocb_cmd->free(sp); 1870 sp->done(ha, sp, cp->result);
1923 return 0;
1924 } else {
1925 struct fc_bsg_job *bsg_job;
1926
1927 bsg_job = ctx->u.bsg_job;
1928 if (ctx->type == SRB_ELS_CMD_HST ||
1929 ctx->type == SRB_CT_CMD)
1930 kfree(sp->fcport);
1931
1932 bsg_job->reply->reply_data.ctels_reply.status =
1933 FC_CTELS_STATUS_OK;
1934 bsg_job->reply->result = DID_ERROR << 16;
1935 bsg_job->reply->reply_payload_rcv_len = 0;
1936 kfree(sp->ctx);
1937 mempool_free(sp, ha->srb_mempool);
1938 bsg_job->job_done(bsg_job);
1939 return 0;
1940 } 1871 }
1941 return 1;
1942} 1872}
1943 1873
1944/** 1874/**
@@ -1953,53 +1883,34 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
1953 struct qla_hw_data *ha = vha->hw; 1883 struct qla_hw_data *ha = vha->hw;
1954 const char func[] = "ERROR-IOCB"; 1884 const char func[] = "ERROR-IOCB";
1955 uint16_t que = MSW(pkt->handle); 1885 uint16_t que = MSW(pkt->handle);
1956 struct req_que *req = ha->req_q_map[que]; 1886 struct req_que *req = NULL;
1957 1887 int res = DID_ERROR << 16;
1958 if (pkt->entry_status & RF_INV_E_ORDER) 1888
1959 ql_dbg(ql_dbg_async, vha, 0x502a, 1889 ql_dbg(ql_dbg_async, vha, 0x502a,
1960 "Invalid Entry Order.\n"); 1890 "type of error status in response: 0x%x\n", pkt->entry_status);
1961 else if (pkt->entry_status & RF_INV_E_COUNT) 1891
1962 ql_dbg(ql_dbg_async, vha, 0x502b, 1892 if (que >= ha->max_req_queues || !ha->req_q_map[que])
1963 "Invalid Entry Count.\n"); 1893 goto fatal;
1964 else if (pkt->entry_status & RF_INV_E_PARAM) 1894
1965 ql_dbg(ql_dbg_async, vha, 0x502c, 1895 req = ha->req_q_map[que];
1966 "Invalid Entry Parameter.\n"); 1896
1967 else if (pkt->entry_status & RF_INV_E_TYPE) 1897 if (pkt->entry_status & RF_BUSY)
1968 ql_dbg(ql_dbg_async, vha, 0x502d, 1898 res = DID_BUS_BUSY << 16;
1969 "Invalid Entry Type.\n");
1970 else if (pkt->entry_status & RF_BUSY)
1971 ql_dbg(ql_dbg_async, vha, 0x502e,
1972 "Busy.\n");
1973 else
1974 ql_dbg(ql_dbg_async, vha, 0x502f,
1975 "UNKNOWN flag error.\n");
1976 1899
1977 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1900 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1978 if (sp) { 1901 if (sp) {
1979 if (qla2x00_free_sp_ctx(vha, sp)) { 1902 sp->done(ha, sp, res);
1980 if (pkt->entry_status & 1903 return;
1981 (RF_INV_E_ORDER | RF_INV_E_COUNT |
1982 RF_INV_E_PARAM | RF_INV_E_TYPE)) {
1983 sp->cmd->result = DID_ERROR << 16;
1984 } else if (pkt->entry_status & RF_BUSY) {
1985 sp->cmd->result = DID_BUS_BUSY << 16;
1986 } else {
1987 sp->cmd->result = DID_ERROR << 16;
1988 }
1989 qla2x00_sp_compl(ha, sp);
1990 }
1991 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1992 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
1993 || pkt->entry_type == COMMAND_TYPE_6) {
1994 ql_log(ql_log_warn, vha, 0x5030,
1995 "Error entry - invalid handle.\n");
1996
1997 if (IS_QLA82XX(ha))
1998 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1999 else
2000 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2001 qla2xxx_wake_dpc(vha);
2002 } 1904 }
1905fatal:
1906 ql_log(ql_log_warn, vha, 0x5030,
1907 "Error entry - invalid handle/queue.\n");
1908
1909 if (IS_QLA82XX(ha))
1910 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1911 else
1912 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1913 qla2xxx_wake_dpc(vha);
2003} 1914}
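
The reworked error-entry path above validates the queue index decoded from the IOCB handle before using it, and routes unmapped queues to the fatal/ISP-abort path instead of dereferencing a bad pointer. An illustrative bounds-check sketch — the array size and MSW split are stand-ins:

#include <stdio.h>
#include <stdint.h>

#define MAX_REQ_QUEUES 4

struct req_que { int id; };

static struct req_que *req_q_map[MAX_REQ_QUEUES];

static struct req_que *lookup_queue(uint32_t handle)
{
        uint16_t que = handle >> 16;        /* MSW(handle) */

        if (que >= MAX_REQ_QUEUES || !req_q_map[que])
                return NULL;                /* caller takes the fatal path */
        return req_q_map[que];
}

int main(void)
{
        struct req_que q0 = { .id = 0 };

        req_q_map[0] = &q0;
        printf("queue 0: %p\n", (void *)lookup_queue(0x00000001));
        printf("queue 9: %p\n", (void *)lookup_queue(0x00090001)); /* NULL */
        return 0;
}
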
2004 1915
2005/** 1916/**
@@ -2127,7 +2038,7 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
2127 struct qla_hw_data *ha = vha->hw; 2038 struct qla_hw_data *ha = vha->hw;
2128 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2039 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2129 2040
2130 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)) 2041 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
2131 return; 2042 return;
2132 2043
2133 rval = QLA_SUCCESS; 2044 rval = QLA_SUCCESS;
@@ -2168,7 +2079,7 @@ done:
2168} 2079}
2169 2080
2170/** 2081/**
2171 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. 2082 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
2172 * @irq: 2083 * @irq:
2173 * @dev_id: SCSI driver HA context 2084 * @dev_id: SCSI driver HA context
2174 * 2085 *
@@ -2192,8 +2103,8 @@ qla24xx_intr_handler(int irq, void *dev_id)
2192 2103
2193 rsp = (struct rsp_que *) dev_id; 2104 rsp = (struct rsp_que *) dev_id;
2194 if (!rsp) { 2105 if (!rsp) {
2195 printk(KERN_INFO 2106 ql_log(ql_log_info, NULL, 0x5059,
2196 "%s(): NULL response queue pointer.\n", __func__); 2107 "%s: NULL response queue pointer.\n", __func__);
2197 return IRQ_NONE; 2108 return IRQ_NONE;
2198 } 2109 }
2199 2110
@@ -2276,8 +2187,8 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
2276 2187
2277 rsp = (struct rsp_que *) dev_id; 2188 rsp = (struct rsp_que *) dev_id;
2278 if (!rsp) { 2189 if (!rsp) {
2279 printk(KERN_INFO 2190 ql_log(ql_log_info, NULL, 0x505a,
2280 "%s(): NULL response queue pointer.\n", __func__); 2191 "%s: NULL response queue pointer.\n", __func__);
2281 return IRQ_NONE; 2192 return IRQ_NONE;
2282 } 2193 }
2283 ha = rsp->hw; 2194 ha = rsp->hw;
@@ -2306,8 +2217,8 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
2306 2217
2307 rsp = (struct rsp_que *) dev_id; 2218 rsp = (struct rsp_que *) dev_id;
2308 if (!rsp) { 2219 if (!rsp) {
2309 printk(KERN_INFO 2220 ql_log(ql_log_info, NULL, 0x505b,
2310 "%s(): NULL response queue pointer.\n", __func__); 2221 "%s: NULL response queue pointer.\n", __func__);
2311 return IRQ_NONE; 2222 return IRQ_NONE;
2312 } 2223 }
2313 ha = rsp->hw; 2224 ha = rsp->hw;
@@ -2340,8 +2251,8 @@ qla24xx_msix_default(int irq, void *dev_id)
2340 2251
2341 rsp = (struct rsp_que *) dev_id; 2252 rsp = (struct rsp_que *) dev_id;
2342 if (!rsp) { 2253 if (!rsp) {
2343 printk(KERN_INFO 2254 ql_log(ql_log_info, NULL, 0x505c,
2344 "%s(): NULL response queue pointer.\n", __func__); 2255 "%s: NULL response queue pointer.\n", __func__);
2345 return IRQ_NONE; 2256 return IRQ_NONE;
2346 } 2257 }
2347 ha = rsp->hw; 2258 ha = rsp->hw;
@@ -2530,8 +2441,14 @@ msix_failed:
2530 } 2441 }
2531 2442
2532 /* Enable MSI-X vector for response queue update for queue 0 */ 2443 /* Enable MSI-X vector for response queue update for queue 0 */
2533 if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) 2444 if (IS_QLA83XX(ha)) {
2534 ha->mqenable = 1; 2445 if (ha->msixbase && ha->mqiobase &&
2446 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2447 ha->mqenable = 1;
2448 } else
2449 if (ha->mqiobase
2450 && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2451 ha->mqenable = 1;
2535 ql_dbg(ql_dbg_multiq, vha, 0xc005, 2452 ql_dbg(ql_dbg_multiq, vha, 0xc005,
2536 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 2453 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2537 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 2454 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
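
The hunk above gates multiqueue differently per chip: ISP83xx additionally requires the MSI-X register window (msixbase) before mqenable is set, while older parts only need the MQ window plus more than one queue. A compact sketch of that decision, with invented field names:

#include <stdio.h>
#include <stdbool.h>

struct hw {
        bool is_83xx;
        void *msixbase, *mqiobase;
        int max_req_queues, max_rsp_queues;
};

static bool mq_enable(const struct hw *ha)
{
        bool multiq = ha->max_req_queues > 1 || ha->max_rsp_queues > 1;

        if (ha->is_83xx)
                return ha->msixbase && ha->mqiobase && multiq;
        return ha->mqiobase && multiq;
}

int main(void)
{
        struct hw ha = { .is_83xx = true, .mqiobase = (void *)1,
                         .max_rsp_queues = 2 };

        printf("mqenable=%d\n", mq_enable(&ha)); /* 0: no msixbase on 83xx */
        return 0;
}
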
@@ -2552,8 +2469,8 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2552 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2469 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2553 2470
2554 /* If possible, enable MSI-X. */ 2471 /* If possible, enable MSI-X. */
2555 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && 2472 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2556 !IS_QLA8432(ha) && !IS_QLA8XXX_TYPE(ha)) 2473 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
2557 goto skip_msi; 2474 goto skip_msi;
2558 2475
2559 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 2476 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
@@ -2615,7 +2532,7 @@ clear_risc_ints:
2615 * FIXME: Noted that 8014s were being dropped during NK testing. 2532 * FIXME: Noted that 8014s were being dropped during NK testing.
2616 * Timing deltas during MSI-X/INTa transitions? 2533 * Timing deltas during MSI-X/INTa transitions?
2617 */ 2534 */
2618 if (IS_QLA81XX(ha) || IS_QLA82XX(ha)) 2535 if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA83XX(ha))
2619 goto fail; 2536 goto fail;
2620 spin_lock_irq(&ha->hardware_lock); 2537 spin_lock_irq(&ha->hardware_lock);
2621 if (IS_FWI2_CAPABLE(ha)) { 2538 if (IS_FWI2_CAPABLE(ha)) {
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 08f1d01bdc1..b4a23394a7b 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -46,17 +46,17 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
46 struct qla_hw_data *ha = vha->hw; 46 struct qla_hw_data *ha = vha->hw;
47 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 47 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
48 48
49 ql_dbg(ql_dbg_mbx, base_vha, 0x1000, "Entered %s.\n", __func__); 49 ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__);
50 50
51 if (ha->pdev->error_state > pci_channel_io_frozen) { 51 if (ha->pdev->error_state > pci_channel_io_frozen) {
52 ql_log(ql_log_warn, base_vha, 0x1001, 52 ql_log(ql_log_warn, vha, 0x1001,
53 "error_state is greater than pci_channel_io_frozen, " 53 "error_state is greater than pci_channel_io_frozen, "
54 "exiting.\n"); 54 "exiting.\n");
55 return QLA_FUNCTION_TIMEOUT; 55 return QLA_FUNCTION_TIMEOUT;
56 } 56 }
57 57
58 if (vha->device_flags & DFLG_DEV_FAILED) { 58 if (vha->device_flags & DFLG_DEV_FAILED) {
59 ql_log(ql_log_warn, base_vha, 0x1002, 59 ql_log(ql_log_warn, vha, 0x1002,
60 "Device in failed state, exiting.\n"); 60 "Device in failed state, exiting.\n");
61 return QLA_FUNCTION_TIMEOUT; 61 return QLA_FUNCTION_TIMEOUT;
62 } 62 }
@@ -69,7 +69,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
69 69
70 70
71 if (ha->flags.pci_channel_io_perm_failure) { 71 if (ha->flags.pci_channel_io_perm_failure) {
72 ql_log(ql_log_warn, base_vha, 0x1003, 72 ql_log(ql_log_warn, vha, 0x1003,
73 "Perm failure on EEH timeout MBX, exiting.\n"); 73 "Perm failure on EEH timeout MBX, exiting.\n");
74 return QLA_FUNCTION_TIMEOUT; 74 return QLA_FUNCTION_TIMEOUT;
75 } 75 }
@@ -77,7 +77,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
77 if (ha->flags.isp82xx_fw_hung) { 77 if (ha->flags.isp82xx_fw_hung) {
78 /* Setting Link-Down error */ 78 /* Setting Link-Down error */
79 mcp->mb[0] = MBS_LINK_DOWN_ERROR; 79 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
80 ql_log(ql_log_warn, base_vha, 0x1004, 80 ql_log(ql_log_warn, vha, 0x1004,
81 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); 81 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
82 return QLA_FUNCTION_TIMEOUT; 82 return QLA_FUNCTION_TIMEOUT;
83 } 83 }
@@ -89,8 +89,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
89 */ 89 */
90 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) { 90 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
91 /* Timeout occurred. Return error. */ 91 /* Timeout occurred. Return error. */
92 ql_log(ql_log_warn, base_vha, 0x1005, 92 ql_log(ql_log_warn, vha, 0x1005,
93 "Cmd access timeout, Exiting.\n"); 93 "Cmd access timeout, cmd=0x%x, Exiting.\n",
94 mcp->mb[0]);
94 return QLA_FUNCTION_TIMEOUT; 95 return QLA_FUNCTION_TIMEOUT;
95 } 96 }
96 97
@@ -98,7 +99,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
98 /* Save mailbox command for debug */ 99 /* Save mailbox command for debug */
99 ha->mcp = mcp; 100 ha->mcp = mcp;
100 101
101 ql_dbg(ql_dbg_mbx, base_vha, 0x1006, 102 ql_dbg(ql_dbg_mbx, vha, 0x1006,
102 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]); 103 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
103 104
104 spin_lock_irqsave(&ha->hardware_lock, flags); 105 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -127,28 +128,28 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
127 iptr++; 128 iptr++;
128 } 129 }
129 130
130 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1111, 131 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111,
131 "Loaded MBX registers (displayed in bytes) =.\n"); 132 "Loaded MBX registers (displayed in bytes) =.\n");
132 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1112, 133 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1112,
133 (uint8_t *)mcp->mb, 16); 134 (uint8_t *)mcp->mb, 16);
134 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1113, 135 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1113,
135 ".\n"); 136 ".\n");
136 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1114, 137 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1114,
137 ((uint8_t *)mcp->mb + 0x10), 16); 138 ((uint8_t *)mcp->mb + 0x10), 16);
138 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1115, 139 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1115,
139 ".\n"); 140 ".\n");
140 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1116, 141 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1116,
141 ((uint8_t *)mcp->mb + 0x20), 8); 142 ((uint8_t *)mcp->mb + 0x20), 8);
142 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1117, 143 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
143 "I/O Address = %p.\n", optr); 144 "I/O Address = %p.\n", optr);
144 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x100e); 145 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x100e);
145 146
146 /* Issue set host interrupt command to send cmd out. */ 147 /* Issue set host interrupt command to send cmd out. */
147 ha->flags.mbox_int = 0; 148 ha->flags.mbox_int = 0;
148 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 149 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
149 150
150 /* Unlock mbx registers and wait for interrupt */ 151 /* Unlock mbx registers and wait for interrupt */
151 ql_dbg(ql_dbg_mbx, base_vha, 0x100f, 152 ql_dbg(ql_dbg_mbx, vha, 0x100f,
152 "Going to unlock irq & waiting for interrupts. " 153 "Going to unlock irq & waiting for interrupts. "
153 "jiffies=%lx.\n", jiffies); 154 "jiffies=%lx.\n", jiffies);
154 155
@@ -163,7 +164,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
163 spin_unlock_irqrestore(&ha->hardware_lock, 164 spin_unlock_irqrestore(&ha->hardware_lock,
164 flags); 165 flags);
165 ha->flags.mbox_busy = 0; 166 ha->flags.mbox_busy = 0;
166 ql_dbg(ql_dbg_mbx, base_vha, 0x1010, 167 ql_dbg(ql_dbg_mbx, vha, 0x1010,
167 "Pending mailbox timeout, exiting.\n"); 168 "Pending mailbox timeout, exiting.\n");
168 rval = QLA_FUNCTION_TIMEOUT; 169 rval = QLA_FUNCTION_TIMEOUT;
169 goto premature_exit; 170 goto premature_exit;
@@ -180,7 +181,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
180 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); 181 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
181 182
182 } else { 183 } else {
183 ql_dbg(ql_dbg_mbx, base_vha, 0x1011, 184 ql_dbg(ql_dbg_mbx, vha, 0x1011,
184 "Cmd=%x Polling Mode.\n", command); 185 "Cmd=%x Polling Mode.\n", command);
185 186
186 if (IS_QLA82XX(ha)) { 187 if (IS_QLA82XX(ha)) {
@@ -189,7 +190,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
189 spin_unlock_irqrestore(&ha->hardware_lock, 190 spin_unlock_irqrestore(&ha->hardware_lock,
190 flags); 191 flags);
191 ha->flags.mbox_busy = 0; 192 ha->flags.mbox_busy = 0;
192 ql_dbg(ql_dbg_mbx, base_vha, 0x1012, 193 ql_dbg(ql_dbg_mbx, vha, 0x1012,
193 "Pending mailbox timeout, exiting.\n"); 194 "Pending mailbox timeout, exiting.\n");
194 rval = QLA_FUNCTION_TIMEOUT; 195 rval = QLA_FUNCTION_TIMEOUT;
195 goto premature_exit; 196 goto premature_exit;
@@ -214,7 +215,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
214 command == MBC_LOAD_RISC_RAM_EXTENDED)) 215 command == MBC_LOAD_RISC_RAM_EXTENDED))
215 msleep(10); 216 msleep(10);
216 } /* while */ 217 } /* while */
217 ql_dbg(ql_dbg_mbx, base_vha, 0x1013, 218 ql_dbg(ql_dbg_mbx, vha, 0x1013,
218 "Waited %d sec.\n", 219 "Waited %d sec.\n",
219 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)); 220 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
220 } 221 }
@@ -223,7 +224,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
223 if (ha->flags.mbox_int) { 224 if (ha->flags.mbox_int) {
224 uint16_t *iptr2; 225 uint16_t *iptr2;
225 226
226 ql_dbg(ql_dbg_mbx, base_vha, 0x1014, 227 ql_dbg(ql_dbg_mbx, vha, 0x1014,
227 "Cmd=%x completed.\n", command); 228 "Cmd=%x completed.\n", command);
228 229
229 /* Got interrupt. Clear the flag. */ 230 /* Got interrupt. Clear the flag. */
@@ -236,7 +237,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
236 mcp->mb[0] = MBS_LINK_DOWN_ERROR; 237 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
237 ha->mcp = NULL; 238 ha->mcp = NULL;
238 rval = QLA_FUNCTION_FAILED; 239 rval = QLA_FUNCTION_FAILED;
239 ql_log(ql_log_warn, base_vha, 0x1015, 240 ql_log(ql_log_warn, vha, 0x1015,
240 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung); 241 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
241 goto premature_exit; 242 goto premature_exit;
242 } 243 }
@@ -268,13 +269,19 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
268 mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0); 269 mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0);
269 ictrl = RD_REG_WORD(&reg->isp.ictrl); 270 ictrl = RD_REG_WORD(&reg->isp.ictrl);
270 } 271 }
271 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1119, 272 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
272 "MBX Command timeout for cmd %x.\n", command); 273 "MBX Command timeout for cmd %x.\n", command);
273 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111a, 274 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111a,
274 "iocontrol=%x jiffies=%lx.\n", ictrl, jiffies); 275 "iocontrol=%x jiffies=%lx.\n", ictrl, jiffies);
275 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x111b, 276 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111b,
276 "mb[0] = 0x%x.\n", mb0); 277 "mb[0] = 0x%x.\n", mb0);
277 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, base_vha, 0x1019); 278 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
279
280 /*
281 * Attempt to capture a firmware dump for further analysis
282 * of the current firmware state
283 */
284 ha->isp_ops->fw_dump(vha, 0);
278 285
279 rval = QLA_FUNCTION_TIMEOUT; 286 rval = QLA_FUNCTION_TIMEOUT;
280 } 287 }
@@ -285,7 +292,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
285 ha->mcp = NULL; 292 ha->mcp = NULL;
286 293
287 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) { 294 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
288 ql_dbg(ql_dbg_mbx, base_vha, 0x101a, 295 ql_dbg(ql_dbg_mbx, vha, 0x101a,
289 "Checking for additional resp interrupt.\n"); 296 "Checking for additional resp interrupt.\n");
290 297
291 /* polling mode for non isp_abort commands. */ 298 /* polling mode for non isp_abort commands. */
@@ -297,7 +304,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
297 if (!io_lock_on || (mcp->flags & IOCTL_CMD) || 304 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
298 ha->flags.eeh_busy) { 305 ha->flags.eeh_busy) {
299 /* not in dpc. schedule it for dpc to take over. */ 306 /* not in dpc. schedule it for dpc to take over. */
300 ql_dbg(ql_dbg_mbx, base_vha, 0x101b, 307 ql_dbg(ql_dbg_mbx, vha, 0x101b,
301 "Timeout, schedule isp_abort_needed.\n"); 308 "Timeout, schedule isp_abort_needed.\n");
302 309
303 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && 310 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
@@ -313,15 +320,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
313 CRB_NIU_XG_PAUSE_CTL_P1); 320 CRB_NIU_XG_PAUSE_CTL_P1);
314 } 321 }
315 ql_log(ql_log_info, base_vha, 0x101c, 322 ql_log(ql_log_info, base_vha, 0x101c,
316 "Mailbox cmd timeout occured. " 323 "Mailbox cmd timeout occured, cmd=0x%x, "
317 "Scheduling ISP abort eeh_busy=0x%x.\n", 324 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
318 ha->flags.eeh_busy); 325 "abort.\n", command, mcp->mb[0],
326 ha->flags.eeh_busy);
319 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 327 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
320 qla2xxx_wake_dpc(vha); 328 qla2xxx_wake_dpc(vha);
321 } 329 }
322 } else if (!abort_active) { 330 } else if (!abort_active) {
323 /* call abort directly since we are in the DPC thread */ 331 /* call abort directly since we are in the DPC thread */
324 ql_dbg(ql_dbg_mbx, base_vha, 0x101d, 332 ql_dbg(ql_dbg_mbx, vha, 0x101d,
325 "Timeout, calling abort_isp.\n"); 333 "Timeout, calling abort_isp.\n");
326 334
327 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) && 335 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
@@ -337,9 +345,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
337 CRB_NIU_XG_PAUSE_CTL_P1); 345 CRB_NIU_XG_PAUSE_CTL_P1);
338 } 346 }
339 ql_log(ql_log_info, base_vha, 0x101e, 347 ql_log(ql_log_info, base_vha, 0x101e,
340 "Mailbox cmd timeout occured. " 348 "Mailbox cmd timeout occured, cmd=0x%x, "
341 "Scheduling ISP abort.\n"); 349 "mb[0]=0x%x. Scheduling ISP abort ",
342 350 command, mcp->mb[0]);
343 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); 351 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
344 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 352 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
345 /* Allow next mbx cmd to come in. */ 353 /* Allow next mbx cmd to come in. */
@@ -350,7 +358,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
350 &vha->dpc_flags); 358 &vha->dpc_flags);
351 } 359 }
352 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); 360 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
353 ql_dbg(ql_dbg_mbx, base_vha, 0x101f, 361 ql_dbg(ql_dbg_mbx, vha, 0x101f,
354 "Finished abort_isp.\n"); 362 "Finished abort_isp.\n");
355 goto mbx_done; 363 goto mbx_done;
356 } 364 }
@@ -364,8 +372,8 @@ premature_exit:
364mbx_done: 372mbx_done:
365 if (rval) { 373 if (rval) {
366 ql_dbg(ql_dbg_mbx, base_vha, 0x1020, 374 ql_dbg(ql_dbg_mbx, base_vha, 0x1020,
367 "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, cmd=%x ****.\n", 375 "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
368 mcp->mb[0], mcp->mb[1], mcp->mb[2], command); 376 mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
369 } else { 377 } else {
370 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__); 378 ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__);
371 } 379 }
@@ -455,7 +463,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
455 mcp->mb[1] = MSW(risc_addr); 463 mcp->mb[1] = MSW(risc_addr);
456 mcp->mb[2] = LSW(risc_addr); 464 mcp->mb[2] = LSW(risc_addr);
457 mcp->mb[3] = 0; 465 mcp->mb[3] = 0;
458 if (IS_QLA81XX(ha)) { 466 if (IS_QLA81XX(ha) || IS_QLA83XX(ha)) {
459 struct nvram_81xx *nv = ha->nvram; 467 struct nvram_81xx *nv = ha->nvram;
460 mcp->mb[4] = (nv->enhanced_features & 468 mcp->mb[4] = (nv->enhanced_features &
461 EXTENDED_BB_CREDITS); 469 EXTENDED_BB_CREDITS);
@@ -508,21 +516,22 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
508 * Kernel context. 516 * Kernel context.
509 */ 517 */
510int 518int
511qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor, 519qla2x00_get_fw_version(scsi_qla_host_t *vha)
512 uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi,
513 uint32_t *mpi_caps, uint8_t *phy)
514{ 520{
515 int rval; 521 int rval;
516 mbx_cmd_t mc; 522 mbx_cmd_t mc;
517 mbx_cmd_t *mcp = &mc; 523 mbx_cmd_t *mcp = &mc;
524 struct qla_hw_data *ha = vha->hw;
518 525
519 ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__); 526 ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__);
520 527
521 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; 528 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
522 mcp->out_mb = MBX_0; 529 mcp->out_mb = MBX_0;
523 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 530 mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
524 if (IS_QLA81XX(vha->hw)) 531 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha))
525 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8; 532 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
533 if (IS_QLA83XX(vha->hw))
534 mcp->in_mb |= MBX_17|MBX_16|MBX_15;
526 mcp->flags = 0; 535 mcp->flags = 0;
527 mcp->tov = MBX_TOV_SECONDS; 536 mcp->tov = MBX_TOV_SECONDS;
528 rval = qla2x00_mailbox_command(vha, mcp); 537 rval = qla2x00_mailbox_command(vha, mcp);
@@ -530,23 +539,37 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
530 goto failed; 539 goto failed;
531 540
532 /* Return mailbox data. */ 541 /* Return mailbox data. */
533 *major = mcp->mb[1]; 542 ha->fw_major_version = mcp->mb[1];
534 *minor = mcp->mb[2]; 543 ha->fw_minor_version = mcp->mb[2];
535 *subminor = mcp->mb[3]; 544 ha->fw_subminor_version = mcp->mb[3];
536 *attributes = mcp->mb[6]; 545 ha->fw_attributes = mcp->mb[6];
537 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw)) 546 if (IS_QLA2100(vha->hw) || IS_QLA2200(vha->hw))
538 *memory = 0x1FFFF; /* Defaults to 128KB. */ 547 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
539 else 548 else
540 *memory = (mcp->mb[5] << 16) | mcp->mb[4]; 549 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
541 if (IS_QLA81XX(vha->hw)) { 550 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw)) {
542 mpi[0] = mcp->mb[10] & 0xff; 551 ha->mpi_version[0] = mcp->mb[10] & 0xff;
543 mpi[1] = mcp->mb[11] >> 8; 552 ha->mpi_version[1] = mcp->mb[11] >> 8;
544 mpi[2] = mcp->mb[11] & 0xff; 553 ha->mpi_version[2] = mcp->mb[11] & 0xff;
545 *mpi_caps = (mcp->mb[12] << 16) | mcp->mb[13]; 554 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
546 phy[0] = mcp->mb[8] & 0xff; 555 ha->phy_version[0] = mcp->mb[8] & 0xff;
547 phy[1] = mcp->mb[9] >> 8; 556 ha->phy_version[1] = mcp->mb[9] >> 8;
548 phy[2] = mcp->mb[9] & 0xff; 557 ha->phy_version[2] = mcp->mb[9] & 0xff;
558 }
559 if (IS_QLA83XX(ha)) {
560 if (mcp->mb[6] & BIT_15) {
561 ha->fw_attributes_h = mcp->mb[15];
562 ha->fw_attributes_ext[0] = mcp->mb[16];
563 ha->fw_attributes_ext[1] = mcp->mb[17];
564 ql_dbg(ql_dbg_mbx, vha, 0x1139,
565 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
566 __func__, mcp->mb[15], mcp->mb[6]);
567 } else
568 ql_dbg(ql_dbg_mbx, vha, 0x112f,
569 "%s: FwAttributes [Upper] invalid, MB6:%04x\n",
570 __func__, mcp->mb[6]);
549 } 571 }
572
550failed: 573failed:
551 if (rval != QLA_SUCCESS) { 574 if (rval != QLA_SUCCESS) {
552 /*EMPTY*/ 575 /*EMPTY*/
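
qla2x00_get_fw_version() above drops its eight out-parameters and writes directly into the qla_hw_data fields. A minimal sketch of the same refactor shape, with abbreviated field names:

#include <stdio.h>
#include <stdint.h>

struct hw_data {
        uint16_t fw_major, fw_minor, fw_subminor, fw_attributes;
        uint32_t fw_memory_size;
};

/* mb[] stands in for the returned mailbox registers */
static void get_fw_version(struct hw_data *ha, const uint16_t *mb)
{
        ha->fw_major = mb[1];
        ha->fw_minor = mb[2];
        ha->fw_subminor = mb[3];
        ha->fw_attributes = mb[6];
        ha->fw_memory_size = ((uint32_t)mb[5] << 16) | mb[4];
}

int main(void)
{
        uint16_t mb[8] = { 0, 8, 3, 18, 0xffff, 0x0001, 0x40, 0 };
        struct hw_data ha;

        get_fw_version(&ha, mb);
        printf("fw %u.%u.%u attrs=0x%x mem=0x%x\n", ha.fw_major, ha.fw_minor,
               ha.fw_subminor, ha.fw_attributes, ha.fw_memory_size);
        return 0;
}
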
@@ -859,6 +882,7 @@ qla2x00_abort_command(srb_t *sp)
859 scsi_qla_host_t *vha = fcport->vha; 882 scsi_qla_host_t *vha = fcport->vha;
860 struct qla_hw_data *ha = vha->hw; 883 struct qla_hw_data *ha = vha->hw;
861 struct req_que *req = vha->req; 884 struct req_que *req = vha->req;
885 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
862 886
863 ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__); 887 ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__);
864 888
@@ -881,7 +905,7 @@ qla2x00_abort_command(srb_t *sp)
881 mcp->mb[1] = fcport->loop_id << 8; 905 mcp->mb[1] = fcport->loop_id << 8;
882 mcp->mb[2] = (uint16_t)handle; 906 mcp->mb[2] = (uint16_t)handle;
883 mcp->mb[3] = (uint16_t)(handle >> 16); 907 mcp->mb[3] = (uint16_t)(handle >> 16);
884 mcp->mb[6] = (uint16_t)sp->cmd->device->lun; 908 mcp->mb[6] = (uint16_t)cmd->device->lun;
885 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 909 mcp->out_mb = MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
886 mcp->in_mb = MBX_0; 910 mcp->in_mb = MBX_0;
887 mcp->tov = MBX_TOV_SECONDS; 911 mcp->tov = MBX_TOV_SECONDS;
@@ -1028,7 +1052,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1028 mcp->mb[9] = vha->vp_idx; 1052 mcp->mb[9] = vha->vp_idx;
1029 mcp->out_mb = MBX_9|MBX_0; 1053 mcp->out_mb = MBX_9|MBX_0;
1030 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 1054 mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1031 if (IS_QLA8XXX_TYPE(vha->hw)) 1055 if (IS_CNA_CAPABLE(vha->hw))
1032 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10; 1056 mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
1033 mcp->tov = MBX_TOV_SECONDS; 1057 mcp->tov = MBX_TOV_SECONDS;
1034 mcp->flags = 0; 1058 mcp->flags = 0;
@@ -1052,7 +1076,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1052 } else { 1076 } else {
1053 ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__); 1077 ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__);
1054 1078
1055 if (IS_QLA8XXX_TYPE(vha->hw)) { 1079 if (IS_CNA_CAPABLE(vha->hw)) {
1056 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff; 1080 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
1057 vha->fcoe_fcf_idx = mcp->mb[10]; 1081 vha->fcoe_fcf_idx = mcp->mb[10];
1058 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8; 1082 vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
@@ -1163,7 +1187,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1163 mcp->mb[6] = MSW(MSD(ha->init_cb_dma)); 1187 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1164 mcp->mb[7] = LSW(MSD(ha->init_cb_dma)); 1188 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1165 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 1189 mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
1166 if (IS_QLA81XX(ha) && ha->ex_init_cb->ex_version) { 1190 if ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) && ha->ex_init_cb->ex_version) {
1167 mcp->mb[1] = BIT_0; 1191 mcp->mb[1] = BIT_0;
1168 mcp->mb[10] = MSW(ha->ex_init_cb_dma); 1192 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1169 mcp->mb[11] = LSW(ha->ex_init_cb_dma); 1193 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
@@ -1172,7 +1196,11 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1172 mcp->mb[14] = sizeof(*ha->ex_init_cb); 1196 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1173 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10; 1197 mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
1174 } 1198 }
1175 mcp->in_mb = MBX_0; 1199 /* 1 and 2 should normally be captured. */
1200 mcp->in_mb = MBX_2|MBX_1|MBX_0;
1201 if (IS_QLA83XX(ha))
1202 /* mb3 is additional info about the installed SFP. */
1203 mcp->in_mb |= MBX_3;
1176 mcp->buf_size = size; 1204 mcp->buf_size = size;
1177 mcp->flags = MBX_DMA_OUT; 1205 mcp->flags = MBX_DMA_OUT;
1178 mcp->tov = MBX_TOV_SECONDS; 1206 mcp->tov = MBX_TOV_SECONDS;
@@ -1181,7 +1209,8 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1181 if (rval != QLA_SUCCESS) { 1209 if (rval != QLA_SUCCESS) {
1182 /*EMPTY*/ 1210 /*EMPTY*/
1183 ql_dbg(ql_dbg_mbx, vha, 0x104d, 1211 ql_dbg(ql_dbg_mbx, vha, 0x104d,
1184 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 1212 "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
1213 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1185 } else { 1214 } else {
1186 /*EMPTY*/ 1215 /*EMPTY*/
1187 ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__); 1216 ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__);
@@ -1260,6 +1289,7 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1260 goto gpd_error_out; 1289 goto gpd_error_out;
1261 1290
1262 if (IS_FWI2_CAPABLE(ha)) { 1291 if (IS_FWI2_CAPABLE(ha)) {
1292 uint64_t zero = 0;
1263 pd24 = (struct port_database_24xx *) pd; 1293 pd24 = (struct port_database_24xx *) pd;
1264 1294
1265 /* Check for logged in state. */ 1295 /* Check for logged in state. */
@@ -1273,6 +1303,14 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1273 goto gpd_error_out; 1303 goto gpd_error_out;
1274 } 1304 }
1275 1305
1306 if (fcport->loop_id == FC_NO_LOOP_ID ||
1307 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1308 memcmp(fcport->port_name, pd24->port_name, 8))) {
1309 /* We lost the device mid way. */
1310 rval = QLA_NOT_LOGGED_IN;
1311 goto gpd_error_out;
1312 }
1313
1276 /* Names are little-endian. */ 1314 /* Names are little-endian. */
1277 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE); 1315 memcpy(fcport->node_name, pd24->node_name, WWN_SIZE);
1278 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE); 1316 memcpy(fcport->port_name, pd24->port_name, WWN_SIZE);
@@ -1289,6 +1327,8 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1289 else 1327 else
1290 fcport->port_type = FCT_TARGET; 1328 fcport->port_type = FCT_TARGET;
1291 } else { 1329 } else {
1330 uint64_t zero = 0;
1331
1292 /* Check for logged in state. */ 1332 /* Check for logged in state. */
1293 if (pd->master_state != PD_STATE_PORT_LOGGED_IN && 1333 if (pd->master_state != PD_STATE_PORT_LOGGED_IN &&
1294 pd->slave_state != PD_STATE_PORT_LOGGED_IN) { 1334 pd->slave_state != PD_STATE_PORT_LOGGED_IN) {
@@ -1301,6 +1341,14 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1301 goto gpd_error_out; 1341 goto gpd_error_out;
1302 } 1342 }
1303 1343
1344 if (fcport->loop_id == FC_NO_LOOP_ID ||
1345 (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
1346 memcmp(fcport->port_name, pd->port_name, 8))) {
1347 /* We lost the device mid way. */
1348 rval = QLA_NOT_LOGGED_IN;
1349 goto gpd_error_out;
1350 }
1351
1304 /* Names are little-endian. */ 1352 /* Names are little-endian. */
1305 memcpy(fcport->node_name, pd->node_name, WWN_SIZE); 1353 memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
1306 memcpy(fcport->port_name, pd->port_name, WWN_SIZE); 1354 memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
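
The new check above catches a device lost mid-discovery: a non-zero cached port name that no longer matches the port database entry fails the lookup with QLA_NOT_LOGGED_IN rather than quietly adopting the newcomer's identity. A self-contained sketch of the comparison (WWN_SIZE is 8, as in the driver):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define WWN_SIZE 8

static int port_still_ours(const uint8_t *cached, const uint8_t *reported)
{
        uint64_t zero = 0;

        if (memcmp(cached, &zero, WWN_SIZE) &&      /* name already known... */
            memcmp(cached, reported, WWN_SIZE))     /* ...but changed */
                return 0;                           /* QLA_NOT_LOGGED_IN */
        return 1;
}

int main(void)
{
        uint8_t old_wwpn[WWN_SIZE] = { 0x21, 0x00, 0x00, 0xe0, 0x8b, 1, 2, 3 };
        uint8_t new_wwpn[WWN_SIZE] = { 0x21, 0x00, 0x00, 0xe0, 0x8b, 9, 9, 9 };

        printf("match=%d\n", port_still_ours(old_wwpn, old_wwpn)); /* 1 */
        printf("match=%d\n", port_still_ours(old_wwpn, new_wwpn)); /* 0 */
        return 0;
}
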
@@ -1481,7 +1529,7 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
1481 1529
1482 ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__); 1530 ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__);
1483 1531
1484 if (IS_QLA8XXX_TYPE(vha->hw)) { 1532 if (IS_CNA_CAPABLE(vha->hw)) {
1485 /* Logout across all FCFs. */ 1533 /* Logout across all FCFs. */
1486 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 1534 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
1487 mcp->mb[1] = BIT_1; 1535 mcp->mb[1] = BIT_1;
@@ -1622,7 +1670,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1622 lg->port_id[1] = area; 1670 lg->port_id[1] = area;
1623 lg->port_id[2] = domain; 1671 lg->port_id[2] = domain;
1624 lg->vp_index = vha->vp_idx; 1672 lg->vp_index = vha->vp_idx;
1625 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0); 1673 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
1674 (ha->r_a_tov / 10 * 2) + 2);
1626 if (rval != QLA_SUCCESS) { 1675 if (rval != QLA_SUCCESS) {
1627 ql_dbg(ql_dbg_mbx, vha, 0x1063, 1676 ql_dbg(ql_dbg_mbx, vha, 0x1063,
1628 "Failed to issue login IOCB (%x).\n", rval); 1677 "Failed to issue login IOCB (%x).\n", rval);
@@ -1885,8 +1934,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1885 lg->port_id[1] = area; 1934 lg->port_id[1] = area;
1886 lg->port_id[2] = domain; 1935 lg->port_id[2] = domain;
1887 lg->vp_index = vha->vp_idx; 1936 lg->vp_index = vha->vp_idx;
1888 1937 rval = qla2x00_issue_iocb_timeout(vha, lg, lg_dma, 0,
1889 rval = qla2x00_issue_iocb(vha, lg, lg_dma, 0); 1938 (ha->r_a_tov / 10 * 2) + 2);
1890 if (rval != QLA_SUCCESS) { 1939 if (rval != QLA_SUCCESS) {
1891 ql_dbg(ql_dbg_mbx, vha, 0x106f, 1940 ql_dbg(ql_dbg_mbx, vha, 0x106f,
1892 "Failed to issue logout IOCB (%x).\n", rval); 1941 "Failed to issue logout IOCB (%x).\n", rval);
@@ -2094,7 +2143,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2094 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; 2143 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2095 mcp->out_mb = MBX_0; 2144 mcp->out_mb = MBX_0;
2096 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; 2145 mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
2097 if (IS_QLA81XX(vha->hw)) 2146 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
2098 mcp->in_mb |= MBX_12; 2147 mcp->in_mb |= MBX_12;
2099 mcp->tov = MBX_TOV_SECONDS; 2148 mcp->tov = MBX_TOV_SECONDS;
2100 mcp->flags = 0; 2149 mcp->flags = 0;
@@ -2121,7 +2170,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2121 *orig_iocb_cnt = mcp->mb[10]; 2170 *orig_iocb_cnt = mcp->mb[10];
2122 if (vha->hw->flags.npiv_supported && max_npiv_vports) 2171 if (vha->hw->flags.npiv_supported && max_npiv_vports)
2123 *max_npiv_vports = mcp->mb[11]; 2172 *max_npiv_vports = mcp->mb[11];
2124 if (IS_QLA81XX(vha->hw) && max_fcfs) 2173 if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) && max_fcfs)
2125 *max_fcfs = mcp->mb[12]; 2174 *max_fcfs = mcp->mb[12];
2126 } 2175 }
2127 2176
@@ -2686,7 +2735,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2686 2735
2687 ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__); 2736 ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__);
2688 2737
2689 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw)) 2738 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
2739 !IS_QLA83XX(vha->hw))
2690 return QLA_FUNCTION_FAILED; 2740 return QLA_FUNCTION_FAILED;
2691 2741
2692 if (unlikely(pci_channel_offline(vha->hw->pdev))) 2742 if (unlikely(pci_channel_offline(vha->hw->pdev)))
@@ -2828,7 +2878,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2828 mcp->mb[0] = MBC_PORT_PARAMS; 2878 mcp->mb[0] = MBC_PORT_PARAMS;
2829 mcp->mb[1] = loop_id; 2879 mcp->mb[1] = loop_id;
2830 mcp->mb[2] = BIT_0; 2880 mcp->mb[2] = BIT_0;
2831 if (IS_QLA8XXX_TYPE(vha->hw)) 2881 if (IS_CNA_CAPABLE(vha->hw))
2832 mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0); 2882 mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
2833 else 2883 else
2834 mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0); 2884 mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
@@ -3298,6 +3348,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3298 mcp->mb[12] = req->qos; 3348 mcp->mb[12] = req->qos;
3299 mcp->mb[11] = req->vp_idx; 3349 mcp->mb[11] = req->vp_idx;
3300 mcp->mb[13] = req->rid; 3350 mcp->mb[13] = req->rid;
3351 if (IS_QLA83XX(ha))
3352 mcp->mb[15] = 0;
3301 3353
3302 reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) + 3354 reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
3303 QLA_QUE_PAGE * req->id); 3355 QLA_QUE_PAGE * req->id);
@@ -3311,12 +3363,21 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3311 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3363 MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3312 mcp->in_mb = MBX_0; 3364 mcp->in_mb = MBX_0;
3313 mcp->flags = MBX_DMA_OUT; 3365 mcp->flags = MBX_DMA_OUT;
3314 mcp->tov = 60; 3366 mcp->tov = MBX_TOV_SECONDS * 2;
3367
3368 if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
3369 mcp->in_mb |= MBX_1;
3370 if (IS_QLA83XX(ha)) {
3371 mcp->out_mb |= MBX_15;
3372 /* debug q create issue in SR-IOV */
3373 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
3374 }
3315 3375
3316 spin_lock_irqsave(&ha->hardware_lock, flags); 3376 spin_lock_irqsave(&ha->hardware_lock, flags);
3317 if (!(req->options & BIT_0)) { 3377 if (!(req->options & BIT_0)) {
3318 WRT_REG_DWORD(&reg->req_q_in, 0); 3378 WRT_REG_DWORD(&reg->req_q_in, 0);
3319 WRT_REG_DWORD(&reg->req_q_out, 0); 3379 if (!IS_QLA83XX(ha))
3380 WRT_REG_DWORD(&reg->req_q_out, 0);
3320 } 3381 }
3321 req->req_q_in = &reg->req_q_in; 3382 req->req_q_in = &reg->req_q_in;
3322 req->req_q_out = &reg->req_q_out; 3383 req->req_q_out = &reg->req_q_out;
@@ -3354,6 +3415,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3354 mcp->mb[5] = rsp->length; 3415 mcp->mb[5] = rsp->length;
3355 mcp->mb[14] = rsp->msix->entry; 3416 mcp->mb[14] = rsp->msix->entry;
3356 mcp->mb[13] = rsp->rid; 3417 mcp->mb[13] = rsp->rid;
3418 if (IS_QLA83XX(ha))
3419 mcp->mb[15] = 0;
3357 3420
3358 reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) + 3421 reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
3359 QLA_QUE_PAGE * rsp->id); 3422 QLA_QUE_PAGE * rsp->id);
@@ -3367,12 +3430,23 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3367 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; 3430 |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
3368 mcp->in_mb = MBX_0; 3431 mcp->in_mb = MBX_0;
3369 mcp->flags = MBX_DMA_OUT; 3432 mcp->flags = MBX_DMA_OUT;
3370 mcp->tov = 60; 3433 mcp->tov = MBX_TOV_SECONDS * 2;
3434
3435 if (IS_QLA81XX(ha)) {
3436 mcp->out_mb |= MBX_12|MBX_11|MBX_10;
3437 mcp->in_mb |= MBX_1;
3438 } else if (IS_QLA83XX(ha)) {
3439 mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
3440 mcp->in_mb |= MBX_1;
3441 /* debug q create issue in SR-IOV */
3442 mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
3443 }
3371 3444
3372 spin_lock_irqsave(&ha->hardware_lock, flags); 3445 spin_lock_irqsave(&ha->hardware_lock, flags);
3373 if (!(rsp->options & BIT_0)) { 3446 if (!(rsp->options & BIT_0)) {
3374 WRT_REG_DWORD(&reg->rsp_q_out, 0); 3447 WRT_REG_DWORD(&reg->rsp_q_out, 0);
3375 WRT_REG_DWORD(&reg->rsp_q_in, 0); 3448 if (!IS_QLA83XX(ha))
3449 WRT_REG_DWORD(&reg->rsp_q_in, 0);
3376 } 3450 }
3377 3451
3378 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3452 spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3424,7 +3498,7 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
3424 3498
3425 ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__); 3499 ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__);
3426 3500
3427 if (!IS_QLA81XX(vha->hw)) 3501 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
3428 return QLA_FUNCTION_FAILED; 3502 return QLA_FUNCTION_FAILED;
3429 3503
3430 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 3504 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
@@ -3454,7 +3528,7 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
3454 mbx_cmd_t mc; 3528 mbx_cmd_t mc;
3455 mbx_cmd_t *mcp = &mc; 3529 mbx_cmd_t *mcp = &mc;
3456 3530
3457 if (!IS_QLA81XX(vha->hw)) 3531 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
3458 return QLA_FUNCTION_FAILED; 3532 return QLA_FUNCTION_FAILED;
3459 3533
3460 ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__); 3534 ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__);
@@ -3486,7 +3560,7 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
3486 mbx_cmd_t mc; 3560 mbx_cmd_t mc;
3487 mbx_cmd_t *mcp = &mc; 3561 mbx_cmd_t *mcp = &mc;
3488 3562
3489 if (!IS_QLA81XX(vha->hw)) 3563 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
3490 return QLA_FUNCTION_FAILED; 3564 return QLA_FUNCTION_FAILED;
3491 3565
3492 ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__); 3566 ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__);
@@ -3641,7 +3715,7 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
3641 3715
3642 ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__); 3716 ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__);
3643 3717
3644 if (!IS_QLA8XXX_TYPE(vha->hw)) 3718 if (!IS_CNA_CAPABLE(vha->hw))
3645 return QLA_FUNCTION_FAILED; 3719 return QLA_FUNCTION_FAILED;
3646 3720
3647 mcp->mb[0] = MBC_GET_XGMAC_STATS; 3721 mcp->mb[0] = MBC_GET_XGMAC_STATS;
@@ -3680,7 +3754,7 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
3680 3754
3681 ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__); 3755 ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__);
3682 3756
3683 if (!IS_QLA8XXX_TYPE(vha->hw)) 3757 if (!IS_CNA_CAPABLE(vha->hw))
3684 return QLA_FUNCTION_FAILED; 3758 return QLA_FUNCTION_FAILED;
3685 3759
3686 mcp->mb[0] = MBC_GET_DCBX_PARAMS; 3760 mcp->mb[0] = MBC_GET_DCBX_PARAMS;
@@ -3775,7 +3849,7 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3775 3849
3776 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15| 3850 mcp->out_mb = MBX_21|MBX_20|MBX_19|MBX_18|MBX_17|MBX_16|MBX_15|
3777 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 3851 MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
3778 if (IS_QLA8XXX_TYPE(vha->hw)) 3852 if (IS_CNA_CAPABLE(vha->hw))
3779 mcp->out_mb |= MBX_2; 3853 mcp->out_mb |= MBX_2;
3780 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0; 3854 mcp->in_mb = MBX_19|MBX_18|MBX_3|MBX_2|MBX_1|MBX_0;
3781 3855
@@ -3813,7 +3887,7 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3813 memset(mcp->mb, 0 , sizeof(mcp->mb)); 3887 memset(mcp->mb, 0 , sizeof(mcp->mb));
3814 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; 3888 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
3815 mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */ 3889 mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */
3816 if (IS_QLA8XXX_TYPE(ha)) { 3890 if (IS_CNA_CAPABLE(ha)) {
3817 mcp->mb[1] |= BIT_15; 3891 mcp->mb[1] |= BIT_15;
3818 mcp->mb[2] = vha->fcoe_fcf_idx; 3892 mcp->mb[2] = vha->fcoe_fcf_idx;
3819 } 3893 }
@@ -3831,13 +3905,14 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3831 3905
3832 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15| 3906 mcp->out_mb = MBX_21|MBX_20|MBX_17|MBX_16|MBX_15|
3833 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0; 3907 MBX_14|MBX_10|MBX_7|MBX_6|MBX_1|MBX_0;
3834 if (IS_QLA8XXX_TYPE(ha)) 3908 if (IS_CNA_CAPABLE(ha))
3835 mcp->out_mb |= MBX_2; 3909 mcp->out_mb |= MBX_2;
3836 3910
3837 mcp->in_mb = MBX_0; 3911 mcp->in_mb = MBX_0;
3838 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha)) 3912 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
3913 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
3839 mcp->in_mb |= MBX_1; 3914 mcp->in_mb |= MBX_1;
3840 if (IS_QLA8XXX_TYPE(ha)) 3915 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
3841 mcp->in_mb |= MBX_3; 3916 mcp->in_mb |= MBX_3;
3842 3917
3843 mcp->tov = MBX_TOV_SECONDS; 3918 mcp->tov = MBX_TOV_SECONDS;
@@ -3976,6 +4051,7 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
3976 4051
3977 return rval; 4052 return rval;
3978} 4053}
4054
3979int 4055int
3980qla2x00_get_data_rate(scsi_qla_host_t *vha) 4056qla2x00_get_data_rate(scsi_qla_host_t *vha)
3981{ 4057{
@@ -3993,6 +4069,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
3993 mcp->mb[1] = 0; 4069 mcp->mb[1] = 0;
3994 mcp->out_mb = MBX_1|MBX_0; 4070 mcp->out_mb = MBX_1|MBX_0;
3995 mcp->in_mb = MBX_2|MBX_1|MBX_0; 4071 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4072 if (IS_QLA83XX(ha))
4073 mcp->in_mb |= MBX_3;
3996 mcp->tov = MBX_TOV_SECONDS; 4074 mcp->tov = MBX_TOV_SECONDS;
3997 mcp->flags = 0; 4075 mcp->flags = 0;
3998 rval = qla2x00_mailbox_command(vha, mcp); 4076 rval = qla2x00_mailbox_command(vha, mcp);
@@ -4018,7 +4096,7 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4018 4096
4019 ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__); 4097 ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__);
4020 4098
4021 if (!IS_QLA81XX(ha)) 4099 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
4022 return QLA_FUNCTION_FAILED; 4100 return QLA_FUNCTION_FAILED;
4023 mcp->mb[0] = MBC_GET_PORT_CONFIG; 4101 mcp->mb[0] = MBC_GET_PORT_CONFIG;
4024 mcp->out_mb = MBX_0; 4102 mcp->out_mb = MBX_0;
@@ -4299,6 +4377,90 @@ qla82xx_md_get_template(scsi_qla_host_t *vha)
4299} 4377}
4300 4378
4301int 4379int
4380qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
4381{
4382 int rval;
4383 struct qla_hw_data *ha = vha->hw;
4384 mbx_cmd_t mc;
4385 mbx_cmd_t *mcp = &mc;
4386
4387 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
4388 return QLA_FUNCTION_FAILED;
4389
4390 ql_dbg(ql_dbg_mbx, vha, 0x1133, "Entered %s.\n", __func__);
4391
4392 memset(mcp, 0, sizeof(mbx_cmd_t));
4393 mcp->mb[0] = MBC_SET_LED_CONFIG;
4394 mcp->mb[1] = led_cfg[0];
4395 mcp->mb[2] = led_cfg[1];
4396 if (IS_QLA8031(ha)) {
4397 mcp->mb[3] = led_cfg[2];
4398 mcp->mb[4] = led_cfg[3];
4399 mcp->mb[5] = led_cfg[4];
4400 mcp->mb[6] = led_cfg[5];
4401 }
4402
4403 mcp->out_mb = MBX_2|MBX_1|MBX_0;
4404 if (IS_QLA8031(ha))
4405 mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
4406 mcp->in_mb = MBX_0;
4407 mcp->tov = 30;
4408 mcp->flags = 0;
4409
4410 rval = qla2x00_mailbox_command(vha, mcp);
4411 if (rval != QLA_SUCCESS) {
4412 ql_dbg(ql_dbg_mbx, vha, 0x1134,
4413 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4414 } else {
4415 ql_dbg(ql_dbg_mbx, vha, 0x1135, "Done %s.\n", __func__);
4416 }
4417
4418 return rval;
4419}
4420
4421int
4422qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
4423{
4424 int rval;
4425 struct qla_hw_data *ha = vha->hw;
4426 mbx_cmd_t mc;
4427 mbx_cmd_t *mcp = &mc;
4428
4429 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
4430 return QLA_FUNCTION_FAILED;
4431
4432 ql_dbg(ql_dbg_mbx, vha, 0x1136, "Entered %s.\n", __func__);
4433
4434 memset(mcp, 0, sizeof(mbx_cmd_t));
4435 mcp->mb[0] = MBC_GET_LED_CONFIG;
4436
4437 mcp->out_mb = MBX_0;
4438 mcp->in_mb = MBX_2|MBX_1|MBX_0;
4439 if (IS_QLA8031(ha))
4440 mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3;
4441 mcp->tov = 30;
4442 mcp->flags = 0;
4443
4444 rval = qla2x00_mailbox_command(vha, mcp);
4445 if (rval != QLA_SUCCESS) {
4446 ql_dbg(ql_dbg_mbx, vha, 0x1137,
4447 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4448 } else {
4449 led_cfg[0] = mcp->mb[1];
4450 led_cfg[1] = mcp->mb[2];
4451 if (IS_QLA8031(ha)) {
4452 led_cfg[2] = mcp->mb[3];
4453 led_cfg[3] = mcp->mb[4];
4454 led_cfg[4] = mcp->mb[5];
4455 led_cfg[5] = mcp->mb[6];
4456 }
4457 ql_dbg(ql_dbg_mbx, vha, 0x1138, "Done %s.\n", __func__);
4458 }
4459
4460 return rval;
4461}
4462
4463int
4302qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable) 4464qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
4303{ 4465{
4304 int rval; 4466 int rval;
@@ -4321,7 +4483,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
4321 4483
4322 mcp->out_mb = MBX_7|MBX_0; 4484 mcp->out_mb = MBX_7|MBX_0;
4323 mcp->in_mb = MBX_0; 4485 mcp->in_mb = MBX_0;
4324 mcp->tov = 30; 4486 mcp->tov = MBX_TOV_SECONDS;
4325 mcp->flags = 0; 4487 mcp->flags = 0;
4326 4488
4327 rval = qla2x00_mailbox_command(vha, mcp); 4489 rval = qla2x00_mailbox_command(vha, mcp);
@@ -4335,3 +4497,75 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
4335 4497
4336 return rval; 4498 return rval;
4337} 4499}
4500
4501int
4502qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
4503{
4504 int rval;
4505 struct qla_hw_data *ha = vha->hw;
4506 mbx_cmd_t mc;
4507 mbx_cmd_t *mcp = &mc;
4508
4509 if (!IS_QLA83XX(ha))
4510 return QLA_FUNCTION_FAILED;
4511
4512 ql_dbg(ql_dbg_mbx, vha, 0x1130, "Entered %s.\n", __func__);
4513
4514 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
4515 mcp->mb[1] = LSW(reg);
4516 mcp->mb[2] = MSW(reg);
4517 mcp->mb[3] = LSW(data);
4518 mcp->mb[4] = MSW(data);
4519 mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4520
4521 mcp->in_mb = MBX_1|MBX_0;
4522 mcp->tov = MBX_TOV_SECONDS;
4523 mcp->flags = 0;
4524 rval = qla2x00_mailbox_command(vha, mcp);
4525
4526 if (rval != QLA_SUCCESS) {
4527 ql_dbg(ql_dbg_mbx, vha, 0x1131,
4528 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4529 } else {
4530 ql_dbg(ql_dbg_mbx, vha, 0x1132,
4531 "Done %s.\n", __func__);
4532 }
4533
4534 return rval;
4535}
4536
4537int
4538qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
4539{
4540 int rval;
4541 struct qla_hw_data *ha = vha->hw;
4542 mbx_cmd_t mc;
4543 mbx_cmd_t *mcp = &mc;
4544
4545 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
4546 ql_dbg(ql_dbg_mbx, vha, 0x113b,
4547 "Implicit LOGO Unsupported.\n");
4548 return QLA_FUNCTION_FAILED;
4549 }
4550
4551
 4552 	ql_dbg(ql_dbg_mbx, vha, 0x113c, "Entering %s.\n", __func__);
4553
4554 /* Perform Implicit LOGO. */
4555 mcp->mb[0] = MBC_PORT_LOGOUT;
4556 mcp->mb[1] = fcport->loop_id;
4557 mcp->mb[10] = BIT_15;
4558 mcp->out_mb = MBX_10|MBX_1|MBX_0;
4559 mcp->in_mb = MBX_0;
4560 mcp->tov = MBX_TOV_SECONDS;
4561 mcp->flags = 0;
4562 rval = qla2x00_mailbox_command(vha, mcp);
4563 if (rval != QLA_SUCCESS)
4564 ql_dbg(ql_dbg_mbx, vha, 0x113d,
4565 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4566 else
4567 ql_dbg(ql_dbg_mbx, vha, 0x113e, "Done %s.\n", __func__);
4568
4569 return rval;
4570}
4571
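Note: the helpers added above (qla83xx_write_remote_reg, qla2x00_port_logout) follow the driver's stock mailbox idiom: fill mcp->mb[] with the opcode and operands, set out_mb/in_mb to the bitmask of registers actually transferred, pick a timeout, and hand everything to qla2x00_mailbox_command(). A minimal userspace sketch of that flow follows; the struct layout, the 0x0123 opcode, and fake_mailbox_command() are illustrative stand-ins, not the driver's real definitions.

/*
 * Userspace model of the qla2xxx mailbox-command pattern.
 * Everything here is a hypothetical stand-in for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define MBX_0 (1u << 0)
#define MBX_1 (1u << 1)
#define MBX_2 (1u << 2)
#define MBX_3 (1u << 3)
#define MBX_4 (1u << 4)

#define LSW(x) ((uint16_t)(x))
#define MSW(x) ((uint16_t)((uint32_t)(x) >> 16))

struct mbx_cmd {
	uint16_t mb[32];	/* mailbox register file, in and out */
	uint32_t out_mb;	/* bitmask of registers written to the chip */
	uint32_t in_mb;		/* bitmask of registers read back */
	int tov;		/* command timeout, seconds */
};

/* Stand-in for qla2x00_mailbox_command(): echo success in mb[0]. */
static int fake_mailbox_command(struct mbx_cmd *mcp)
{
	printf("mb[0]=0x%x out_mb=0x%x in_mb=0x%x tov=%d\n",
	    mcp->mb[0], (unsigned)mcp->out_mb, (unsigned)mcp->in_mb,
	    mcp->tov);
	mcp->mb[0] = 0x4000;	/* MBS_COMMAND_COMPLETE */
	return 0;
}

int main(void)
{
	struct mbx_cmd mc = { { 0 }, 0, 0, 0 };
	uint32_t reg = 0x00201320;	/* hypothetical remote register */
	uint32_t data = 0x40002000;

	mc.mb[0] = 0x0123;		/* hypothetical write-remote opcode */
	mc.mb[1] = LSW(reg);		/* split 32-bit operands into */
	mc.mb[2] = MSW(reg);		/* 16-bit mailbox registers */
	mc.mb[3] = LSW(data);
	mc.mb[4] = MSW(data);
	mc.out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
	mc.in_mb = MBX_1|MBX_0;
	mc.tov = 30;

	return fake_mailbox_command(&mc);
}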
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index f488cc69fc7..aa062a1b0ca 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -479,7 +479,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
479 host->max_channel = MAX_BUSES - 1; 479 host->max_channel = MAX_BUSES - 1;
480 host->max_lun = ql2xmaxlun; 480 host->max_lun = ql2xmaxlun;
481 host->unique_id = host->host_no; 481 host->unique_id = host->host_no;
482 host->max_id = MAX_TARGETS_2200; 482 host->max_id = ha->max_fibre_devices;
483 host->transportt = qla2xxx_transport_vport_template; 483 host->transportt = qla2xxx_transport_vport_template;
484 484
485 ql_dbg(ql_dbg_vport, vha, 0xa007, 485 ql_dbg(ql_dbg_vport, vha, 0xa007,
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 270ba3130fd..f0528539bbb 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -908,27 +908,37 @@ qla82xx_wait_rom_done(struct qla_hw_data *ha)
908 return 0; 908 return 0;
909} 909}
910 910
911int
912qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
913{
914 uint32_t off_value, rval = 0;
915
916 WRT_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase),
917 (off & 0xFFFF0000));
918
919 /* Read back value to make sure write has gone through */
920 RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
921 off_value = (off & 0x0000FFFF);
922
923 if (flag)
924 WRT_REG_DWORD((void *)
925 (off_value + CRB_INDIRECT_2M + ha->nx_pcibase),
926 data);
927 else
928 rval = RD_REG_DWORD((void *)
929 (off_value + CRB_INDIRECT_2M + ha->nx_pcibase));
930
931 return rval;
932}
933
911static int 934static int
912qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp) 935qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
913{ 936{
914 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 937 /* Dword reads to flash. */
938 qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW, (addr & 0xFFFF0000), 1);
939 *valp = qla82xx_md_rw_32(ha, MD_DIRECT_ROM_READ_BASE +
940 (addr & 0x0000FFFF), 0, 0);
915 941
916 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
917 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
918 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
919 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
920 qla82xx_wait_rom_busy(ha);
921 if (qla82xx_wait_rom_done(ha)) {
922 ql_log(ql_log_fatal, vha, 0x00ba,
923 "Error waiting for rom done.\n");
924 return -1;
925 }
926 /* Reset abyte_cnt and dummy_byte_cnt */
927 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
928 udelay(10);
929 cond_resched();
930 qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
931 *valp = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
932 return 0; 942 return 0;
933} 943}
934 944
@@ -2040,8 +2050,8 @@ qla82xx_intr_handler(int irq, void *dev_id)
2040 2050
2041 rsp = (struct rsp_que *) dev_id; 2051 rsp = (struct rsp_que *) dev_id;
2042 if (!rsp) { 2052 if (!rsp) {
2043 printk(KERN_INFO 2053 ql_log(ql_log_info, NULL, 0xb054,
2044 "%s(): NULL response queue pointer.\n", __func__); 2054 "%s: NULL response queue pointer.\n", __func__);
2045 return IRQ_NONE; 2055 return IRQ_NONE;
2046 } 2056 }
2047 ha = rsp->hw; 2057 ha = rsp->hw;
@@ -3136,12 +3146,7 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
3136 fw_minor_version = ha->fw_minor_version; 3146 fw_minor_version = ha->fw_minor_version;
3137 fw_subminor_version = ha->fw_subminor_version; 3147 fw_subminor_version = ha->fw_subminor_version;
3138 3148
3139 rval = qla2x00_get_fw_version(vha, &ha->fw_major_version, 3149 rval = qla2x00_get_fw_version(vha);
3140 &ha->fw_minor_version, &ha->fw_subminor_version,
3141 &ha->fw_attributes, &ha->fw_memory_size,
3142 ha->mpi_version, &ha->mpi_capabilities,
3143 ha->phy_version);
3144
3145 if (rval != QLA_SUCCESS) 3150 if (rval != QLA_SUCCESS)
3146 return rval; 3151 return rval;
3147 3152
@@ -3150,7 +3155,6 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
3150 if (fw_major_version != ha->fw_major_version || 3155 if (fw_major_version != ha->fw_major_version ||
3151 fw_minor_version != ha->fw_minor_version || 3156 fw_minor_version != ha->fw_minor_version ||
3152 fw_subminor_version != ha->fw_subminor_version) { 3157 fw_subminor_version != ha->fw_subminor_version) {
3153
3154 ql_log(ql_log_info, vha, 0xb02d, 3158 ql_log(ql_log_info, vha, 0xb02d,
3155 "Firmware version differs " 3159 "Firmware version differs "
3156 "Previous version: %d:%d:%d - " 3160 "Previous version: %d:%d:%d - "
@@ -3614,7 +3618,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
3614 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) { 3618 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
3615 sp = req->outstanding_cmds[cnt]; 3619 sp = req->outstanding_cmds[cnt];
3616 if (sp) { 3620 if (sp) {
3617 if (!sp->ctx || 3621 if (!sp->u.scmd.ctx ||
3618 (sp->flags & SRB_FCP_CMND_DMA_VALID)) { 3622 (sp->flags & SRB_FCP_CMND_DMA_VALID)) {
3619 spin_unlock_irqrestore( 3623 spin_unlock_irqrestore(
3620 &ha->hardware_lock, flags); 3624 &ha->hardware_lock, flags);
@@ -3645,29 +3649,6 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
3645} 3649}
3646 3650
3647/* Minidump related functions */ 3651/* Minidump related functions */
3648int
3649qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
3650{
3651 uint32_t off_value, rval = 0;
3652
3653 WRT_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase),
3654 (off & 0xFFFF0000));
3655
3656 /* Read back value to make sure write has gone through */
3657 RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
3658 off_value = (off & 0x0000FFFF);
3659
3660 if (flag)
3661 WRT_REG_DWORD((void *)
3662 (off_value + CRB_INDIRECT_2M + ha->nx_pcibase),
3663 data);
3664 else
3665 rval = RD_REG_DWORD((void *)
3666 (off_value + CRB_INDIRECT_2M + ha->nx_pcibase));
3667
3668 return rval;
3669}
3670
3671static int 3652static int
3672qla82xx_minidump_process_control(scsi_qla_host_t *vha, 3653qla82xx_minidump_process_control(scsi_qla_host_t *vha,
3673 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) 3654 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
@@ -4117,8 +4098,9 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
4117 data_ptr = (uint32_t *)ha->md_dump; 4098 data_ptr = (uint32_t *)ha->md_dump;
4118 4099
4119 if (ha->fw_dumped) { 4100 if (ha->fw_dumped) {
4120 ql_log(ql_log_info, vha, 0xb037, 4101 ql_log(ql_log_warn, vha, 0xb037,
4121 "Firmware dump available to retrive\n"); 4102 "Firmware has been previously dumped (%p) "
4103 "-- ignoring request.\n", ha->fw_dump);
4122 goto md_failed; 4104 goto md_failed;
4123 } 4105 }
4124 4106
@@ -4161,7 +4143,7 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
4161 4143
4162 total_data_size = ha->md_dump_size; 4144 total_data_size = ha->md_dump_size;
4163 4145
4164 ql_dbg(ql_log_info, vha, 0xb03d, 4146 ql_dbg(ql_dbg_p3p, vha, 0xb03d,
4165 "Total minidump data_size 0x%x to be captured\n", total_data_size); 4147 "Total minidump data_size 0x%x to be captured\n", total_data_size);
4166 4148
4167 /* Check whether template obtained is valid */ 4149 /* Check whether template obtained is valid */
@@ -4284,7 +4266,7 @@ skip_nxt_entry:
4284 } 4266 }
4285 4267
4286 if (data_collected != total_data_size) { 4268 if (data_collected != total_data_size) {
4287 ql_dbg(ql_log_warn, vha, 0xb043, 4269 ql_dbg(ql_dbg_p3p, vha, 0xb043,
4288 "MiniDump data mismatch: Data collected: [0x%x]," 4270 "MiniDump data mismatch: Data collected: [0x%x],"
4289 "total_data_size:[0x%x]\n", 4271 "total_data_size:[0x%x]\n",
4290 data_collected, total_data_size); 4272 data_collected, total_data_size);
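Note: the relocated qla82xx_md_rw_32() above encapsulates the ISP82xx windowed CRB access that the simplified qla82xx_do_rom_fast_read() now reuses: the high 16 bits of the offset select a 64K window (and the window register is read back so the posted write is known to have landed), then the low 16 bits index an indirect aperture for the actual transfer. A self-contained sketch of that two-step access follows, with plain arrays standing in for the mapped BAR regions; all names are illustrative.

/*
 * Model of windowed indirect register access, assuming a 64K window.
 */
#include <stdint.h>
#include <stdio.h>

static volatile uint32_t crb_window;	/* models CRB_WINDOW_2M */
static uint32_t aperture[0x10000 / 4];	/* models CRB_INDIRECT_2M region */

static uint32_t crb_rw_32(uint32_t off, uint32_t data, int is_write)
{
	uint32_t idx;

	crb_window = off & 0xFFFF0000;	/* select the window */
	(void)crb_window;		/* read back: flush the posted write */

	idx = (off & 0x0000FFFF) / 4;	/* offset within the window */
	if (is_write) {
		aperture[idx] = data;
		return 0;
	}
	return aperture[idx];
}

int main(void)
{
	crb_rw_32(0x12340010, 0xdeadbeef, 1);
	printf("0x%08x\n", (unsigned)crb_rw_32(0x12340010, 0, 0));
	return 0;	/* prints 0xdeadbeef */
}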
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 57a226be339..4ac50e27466 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -888,7 +888,8 @@ struct ct6_dsd {
888}; 888};
889 889
890#define MBC_TOGGLE_INTERRUPT 0x10 890#define MBC_TOGGLE_INTERRUPT 0x10
891#define MBC_SET_LED_CONFIG 0x125 891#define MBC_SET_LED_CONFIG 0x125 /* FCoE specific LED control */
892#define MBC_GET_LED_CONFIG 0x126 /* FCoE specific LED control */
892 893
893/* Flash offset */ 894/* Flash offset */
894#define FLT_REG_BOOTLOAD_82XX 0x72 895#define FLT_REG_BOOTLOAD_82XX 0x72
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 036030c9533..a2f999273a5 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -304,7 +304,6 @@ static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
304 struct req_que **, struct rsp_que **); 304 struct req_que **, struct rsp_que **);
305static void qla2x00_free_fw_dump(struct qla_hw_data *); 305static void qla2x00_free_fw_dump(struct qla_hw_data *);
306static void qla2x00_mem_free(struct qla_hw_data *); 306static void qla2x00_mem_free(struct qla_hw_data *);
307static void qla2x00_sp_free_dma(srb_t *);
308 307
309/* -------------------------------------------------------------------------- */ 308/* -------------------------------------------------------------------------- */
310static int qla2x00_alloc_queues(struct qla_hw_data *ha) 309static int qla2x00_alloc_queues(struct qla_hw_data *ha)
@@ -559,28 +558,75 @@ qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
559 return str; 558 return str;
560} 559}
561 560
562static inline srb_t * 561void
563qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport, 562qla2x00_sp_free_dma(void *vha, void *ptr)
564 struct scsi_cmnd *cmd)
565{ 563{
566 srb_t *sp; 564 srb_t *sp = (srb_t *)ptr;
567 struct qla_hw_data *ha = vha->hw; 565 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
566 struct qla_hw_data *ha = sp->fcport->vha->hw;
567 void *ctx = GET_CMD_CTX_SP(sp);
568 568
569 sp = mempool_alloc(ha->srb_mempool, GFP_ATOMIC); 569 if (sp->flags & SRB_DMA_VALID) {
570 if (!sp) { 570 scsi_dma_unmap(cmd);
571 ql_log(ql_log_warn, vha, 0x3006, 571 sp->flags &= ~SRB_DMA_VALID;
572 "Memory allocation failed for sp.\n");
573 return sp;
574 } 572 }
575 573
576 atomic_set(&sp->ref_count, 1); 574 if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
577 sp->fcport = fcport; 575 dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
578 sp->cmd = cmd; 576 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
579 sp->flags = 0; 577 sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
580 CMD_SP(cmd) = (void *)sp; 578 }
581 sp->ctx = NULL;
582 579
583 return sp; 580 if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
581 /* List assured to be having elements */
582 qla2x00_clean_dsd_pool(ha, sp);
583 sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
584 }
585
586 if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
587 dma_pool_free(ha->dl_dma_pool, ctx,
588 ((struct crc_context *)ctx)->crc_ctx_dma);
589 sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
590 }
591
592 if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
593 struct ct6_dsd *ctx1 = (struct ct6_dsd *)ctx;
594
595 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
596 ctx1->fcp_cmnd_dma);
597 list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
598 ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
599 ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
600 mempool_free(ctx1, ha->ctx_mempool);
601 ctx1 = NULL;
602 }
603
604 CMD_SP(cmd) = NULL;
605 mempool_free(sp, ha->srb_mempool);
606}
607
608static void
609qla2x00_sp_compl(void *data, void *ptr, int res)
610{
611 struct qla_hw_data *ha = (struct qla_hw_data *)data;
612 srb_t *sp = (srb_t *)ptr;
613 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
614
615 cmd->result = res;
616
617 if (atomic_read(&sp->ref_count) == 0) {
618 ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
619 "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
620 sp, GET_CMD_SP(sp));
621 if (ql2xextended_error_logging & ql_dbg_io)
622 BUG();
623 return;
624 }
625 if (!atomic_dec_and_test(&sp->ref_count))
626 return;
627
628 qla2x00_sp_free_dma(ha, sp);
629 cmd->scsi_done(cmd);
584} 630}
585 631
586static int 632static int
@@ -644,10 +690,17 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
644 goto qc24_target_busy; 690 goto qc24_target_busy;
645 } 691 }
646 692
647 sp = qla2x00_get_new_sp(base_vha, fcport, cmd); 693 sp = qla2x00_get_sp(base_vha, fcport, GFP_ATOMIC);
648 if (!sp) 694 if (!sp)
649 goto qc24_host_busy; 695 goto qc24_host_busy;
650 696
697 sp->u.scmd.cmd = cmd;
698 sp->type = SRB_SCSI_CMD;
699 atomic_set(&sp->ref_count, 1);
700 CMD_SP(cmd) = (void *)sp;
701 sp->free = qla2x00_sp_free_dma;
702 sp->done = qla2x00_sp_compl;
703
651 rval = ha->isp_ops->start_scsi(sp); 704 rval = ha->isp_ops->start_scsi(sp);
652 if (rval != QLA_SUCCESS) { 705 if (rval != QLA_SUCCESS) {
653 ql_dbg(ql_dbg_io, vha, 0x3013, 706 ql_dbg(ql_dbg_io, vha, 0x3013,
@@ -658,8 +711,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
658 return 0; 711 return 0;
659 712
660qc24_host_busy_free_sp: 713qc24_host_busy_free_sp:
661 qla2x00_sp_free_dma(sp); 714 qla2x00_sp_free_dma(ha, sp);
662 mempool_free(sp, ha->srb_mempool);
663 715
664qc24_host_busy: 716qc24_host_busy:
665 return SCSI_MLQUEUE_HOST_BUSY; 717 return SCSI_MLQUEUE_HOST_BUSY;
@@ -893,7 +945,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
893 } 945 }
894 946
895 spin_lock_irqsave(&ha->hardware_lock, flags); 947 spin_lock_irqsave(&ha->hardware_lock, flags);
896 qla2x00_sp_compl(ha, sp); 948 sp->done(ha, sp, 0);
897 spin_unlock_irqrestore(&ha->hardware_lock, flags); 949 spin_unlock_irqrestore(&ha->hardware_lock, flags);
898 950
899 /* Did the command return during mailbox execution? */ 951 /* Did the command return during mailbox execution? */
@@ -925,6 +977,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
925 struct qla_hw_data *ha = vha->hw; 977 struct qla_hw_data *ha = vha->hw;
926 struct req_que *req; 978 struct req_que *req;
927 srb_t *sp; 979 srb_t *sp;
980 struct scsi_cmnd *cmd;
928 981
929 status = QLA_SUCCESS; 982 status = QLA_SUCCESS;
930 983
@@ -935,28 +988,29 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
935 sp = req->outstanding_cmds[cnt]; 988 sp = req->outstanding_cmds[cnt];
936 if (!sp) 989 if (!sp)
937 continue; 990 continue;
938 if ((sp->ctx) && !IS_PROT_IO(sp)) 991 if (sp->type != SRB_SCSI_CMD)
939 continue; 992 continue;
940 if (vha->vp_idx != sp->fcport->vha->vp_idx) 993 if (vha->vp_idx != sp->fcport->vha->vp_idx)
941 continue; 994 continue;
942 match = 0; 995 match = 0;
996 cmd = GET_CMD_SP(sp);
943 switch (type) { 997 switch (type) {
944 case WAIT_HOST: 998 case WAIT_HOST:
945 match = 1; 999 match = 1;
946 break; 1000 break;
947 case WAIT_TARGET: 1001 case WAIT_TARGET:
948 match = sp->cmd->device->id == t; 1002 match = cmd->device->id == t;
949 break; 1003 break;
950 case WAIT_LUN: 1004 case WAIT_LUN:
951 match = (sp->cmd->device->id == t && 1005 match = (cmd->device->id == t &&
952 sp->cmd->device->lun == l); 1006 cmd->device->lun == l);
953 break; 1007 break;
954 } 1008 }
955 if (!match) 1009 if (!match)
956 continue; 1010 continue;
957 1011
958 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1012 spin_unlock_irqrestore(&ha->hardware_lock, flags);
959 status = qla2x00_eh_wait_on_command(sp->cmd); 1013 status = qla2x00_eh_wait_on_command(cmd);
960 spin_lock_irqsave(&ha->hardware_lock, flags); 1014 spin_lock_irqsave(&ha->hardware_lock, flags);
961 } 1015 }
962 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1016 spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1219,7 +1273,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
1219 } 1273 }
1220 } 1274 }
1221 1275
1222 if (ha->flags.enable_lip_full_login && !IS_QLA8XXX_TYPE(ha)) { 1276 if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
1223 ret = qla2x00_full_login_lip(vha); 1277 ret = qla2x00_full_login_lip(vha);
1224 if (ret != QLA_SUCCESS) { 1278 if (ret != QLA_SUCCESS) {
1225 ql_dbg(ql_dbg_taskm, vha, 0x802d, 1279 ql_dbg(ql_dbg_taskm, vha, 0x802d,
@@ -1249,7 +1303,6 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1249 int que, cnt; 1303 int que, cnt;
1250 unsigned long flags; 1304 unsigned long flags;
1251 srb_t *sp; 1305 srb_t *sp;
1252 struct srb_ctx *ctx;
1253 struct qla_hw_data *ha = vha->hw; 1306 struct qla_hw_data *ha = vha->hw;
1254 struct req_que *req; 1307 struct req_que *req;
1255 1308
@@ -1262,31 +1315,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1262 sp = req->outstanding_cmds[cnt]; 1315 sp = req->outstanding_cmds[cnt];
1263 if (sp) { 1316 if (sp) {
1264 req->outstanding_cmds[cnt] = NULL; 1317 req->outstanding_cmds[cnt] = NULL;
1265 if (!sp->ctx || 1318 sp->done(vha, sp, res);
1266 (sp->flags & SRB_FCP_CMND_DMA_VALID) ||
1267 IS_PROT_IO(sp)) {
1268 sp->cmd->result = res;
1269 qla2x00_sp_compl(ha, sp);
1270 } else {
1271 ctx = sp->ctx;
1272 if (ctx->type == SRB_ELS_CMD_RPT ||
1273 ctx->type == SRB_ELS_CMD_HST ||
1274 ctx->type == SRB_CT_CMD) {
1275 struct fc_bsg_job *bsg_job =
1276 ctx->u.bsg_job;
1277 if (bsg_job->request->msgcode
1278 == FC_BSG_HST_CT)
1279 kfree(sp->fcport);
1280 bsg_job->req->errors = 0;
1281 bsg_job->reply->result = res;
1282 bsg_job->job_done(bsg_job);
1283 kfree(sp->ctx);
1284 mempool_free(sp,
1285 ha->srb_mempool);
1286 } else {
1287 ctx->u.iocb_cmd->free(sp);
1288 }
1289 }
1290 } 1319 }
1291 } 1320 }
1292 } 1321 }
@@ -1488,9 +1517,6 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
1488 uint16_t msix; 1517 uint16_t msix;
1489 int cpus; 1518 int cpus;
1490 1519
1491 if (IS_QLA82XX(ha))
1492 return qla82xx_iospace_config(ha);
1493
1494 if (pci_request_selected_regions(ha->pdev, ha->bars, 1520 if (pci_request_selected_regions(ha->pdev, ha->bars,
1495 QLA2XXX_DRIVER_NAME)) { 1521 QLA2XXX_DRIVER_NAME)) {
1496 ql_log_pci(ql_log_fatal, ha->pdev, 0x0011, 1522 ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
@@ -1593,6 +1619,96 @@ iospace_error_exit:
1593} 1619}
1594 1620
1595 1621
1622static int
1623qla83xx_iospace_config(struct qla_hw_data *ha)
1624{
1625 uint16_t msix;
1626 int cpus;
1627
1628 if (pci_request_selected_regions(ha->pdev, ha->bars,
1629 QLA2XXX_DRIVER_NAME)) {
1630 ql_log_pci(ql_log_fatal, ha->pdev, 0x0117,
1631 "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
1632 pci_name(ha->pdev));
1633
1634 goto iospace_error_exit;
1635 }
1636
1637 /* Use MMIO operations for all accesses. */
1638 if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
1639 ql_log_pci(ql_log_warn, ha->pdev, 0x0118,
1640 "Invalid pci I/O region size (%s).\n",
1641 pci_name(ha->pdev));
1642 goto iospace_error_exit;
1643 }
1644 if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
1645 ql_log_pci(ql_log_warn, ha->pdev, 0x0119,
1646 "Invalid PCI mem region size (%s), aborting\n",
1647 pci_name(ha->pdev));
1648 goto iospace_error_exit;
1649 }
1650
1651 ha->iobase = ioremap(pci_resource_start(ha->pdev, 0), MIN_IOBASE_LEN);
1652 if (!ha->iobase) {
1653 ql_log_pci(ql_log_fatal, ha->pdev, 0x011a,
1654 "Cannot remap MMIO (%s), aborting.\n",
1655 pci_name(ha->pdev));
1656 goto iospace_error_exit;
1657 }
1658
 1659 	/* 64bit PCI BAR - BAR2 will correspond to region 4 */
 1660 	/* 83XX and 26XX always use MQ type access for queues:
 1661 	 * mbar 2, a.k.a. region 4 */
1662 ha->max_req_queues = ha->max_rsp_queues = 1;
1663 ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 4),
1664 pci_resource_len(ha->pdev, 4));
1665
1666 if (!ha->mqiobase) {
1667 ql_log_pci(ql_log_fatal, ha->pdev, 0x011d,
1668 "BAR2/region4 not enabled\n");
1669 goto mqiobase_exit;
1670 }
1671
1672 ha->msixbase = ioremap(pci_resource_start(ha->pdev, 2),
1673 pci_resource_len(ha->pdev, 2));
1674 if (ha->msixbase) {
1675 /* Read MSIX vector size of the board */
1676 pci_read_config_word(ha->pdev,
1677 QLA_83XX_PCI_MSIX_CONTROL, &msix);
1678 ha->msix_count = msix;
1679 /* Max queues are bounded by available msix vectors */
1680 /* queue 0 uses two msix vectors */
1681 if (ql2xmultique_tag) {
1682 cpus = num_online_cpus();
1683 ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
1684 (cpus + 1) : (ha->msix_count - 1);
1685 ha->max_req_queues = 2;
1686 } else if (ql2xmaxqueues > 1) {
1687 ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
1688 QLA_MQ_SIZE : ql2xmaxqueues;
1689 ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc00c,
1690 "QoS mode set, max no of request queues:%d.\n",
1691 ha->max_req_queues);
1692 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b,
1693 "QoS mode set, max no of request queues:%d.\n",
1694 ha->max_req_queues);
1695 }
1696 ql_log_pci(ql_log_info, ha->pdev, 0x011c,
1697 "MSI-X vector count: %d.\n", msix);
1698 } else
1699 ql_log_pci(ql_log_info, ha->pdev, 0x011e,
1700 "BAR 1 not enabled.\n");
1701
1702mqiobase_exit:
1703 ha->msix_count = ha->max_rsp_queues + 1;
1704 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
1705 "MSIX Count:%d.\n", ha->msix_count);
1706 return 0;
1707
1708iospace_error_exit:
1709 return -ENOMEM;
1710}
1711
1596static struct isp_operations qla2100_isp_ops = { 1712static struct isp_operations qla2100_isp_ops = {
1597 .pci_config = qla2100_pci_config, 1713 .pci_config = qla2100_pci_config,
1598 .reset_chip = qla2x00_reset_chip, 1714 .reset_chip = qla2x00_reset_chip,
@@ -1769,7 +1885,7 @@ static struct isp_operations qla81xx_isp_ops = {
1769 .fw_dump = qla81xx_fw_dump, 1885 .fw_dump = qla81xx_fw_dump,
1770 .beacon_on = qla24xx_beacon_on, 1886 .beacon_on = qla24xx_beacon_on,
1771 .beacon_off = qla24xx_beacon_off, 1887 .beacon_off = qla24xx_beacon_off,
1772 .beacon_blink = qla24xx_beacon_blink, 1888 .beacon_blink = qla83xx_beacon_blink,
1773 .read_optrom = qla25xx_read_optrom_data, 1889 .read_optrom = qla25xx_read_optrom_data,
1774 .write_optrom = qla24xx_write_optrom_data, 1890 .write_optrom = qla24xx_write_optrom_data,
1775 .get_flash_version = qla24xx_get_flash_version, 1891 .get_flash_version = qla24xx_get_flash_version,
@@ -1815,6 +1931,43 @@ static struct isp_operations qla82xx_isp_ops = {
1815 .iospace_config = qla82xx_iospace_config, 1931 .iospace_config = qla82xx_iospace_config,
1816}; 1932};
1817 1933
1934static struct isp_operations qla83xx_isp_ops = {
1935 .pci_config = qla25xx_pci_config,
1936 .reset_chip = qla24xx_reset_chip,
1937 .chip_diag = qla24xx_chip_diag,
1938 .config_rings = qla24xx_config_rings,
1939 .reset_adapter = qla24xx_reset_adapter,
1940 .nvram_config = qla81xx_nvram_config,
1941 .update_fw_options = qla81xx_update_fw_options,
1942 .load_risc = qla81xx_load_risc,
1943 .pci_info_str = qla24xx_pci_info_str,
1944 .fw_version_str = qla24xx_fw_version_str,
1945 .intr_handler = qla24xx_intr_handler,
1946 .enable_intrs = qla24xx_enable_intrs,
1947 .disable_intrs = qla24xx_disable_intrs,
1948 .abort_command = qla24xx_abort_command,
1949 .target_reset = qla24xx_abort_target,
1950 .lun_reset = qla24xx_lun_reset,
1951 .fabric_login = qla24xx_login_fabric,
1952 .fabric_logout = qla24xx_fabric_logout,
1953 .calc_req_entries = NULL,
1954 .build_iocbs = NULL,
1955 .prep_ms_iocb = qla24xx_prep_ms_iocb,
1956 .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
1957 .read_nvram = NULL,
1958 .write_nvram = NULL,
1959 .fw_dump = qla83xx_fw_dump,
1960 .beacon_on = qla24xx_beacon_on,
1961 .beacon_off = qla24xx_beacon_off,
1962 .beacon_blink = qla83xx_beacon_blink,
1963 .read_optrom = qla25xx_read_optrom_data,
1964 .write_optrom = qla24xx_write_optrom_data,
1965 .get_flash_version = qla24xx_get_flash_version,
1966 .start_scsi = qla24xx_dif_start_scsi,
1967 .abort_isp = qla2x00_abort_isp,
1968 .iospace_config = qla83xx_iospace_config,
1969};
1970
1818static inline void 1971static inline void
1819qla2x00_set_isp_flags(struct qla_hw_data *ha) 1972qla2x00_set_isp_flags(struct qla_hw_data *ha)
1820{ 1973{
@@ -1909,6 +2062,22 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
1909 /* Initialize 82XX ISP flags */ 2062 /* Initialize 82XX ISP flags */
1910 qla82xx_init_flags(ha); 2063 qla82xx_init_flags(ha);
1911 break; 2064 break;
2065 case PCI_DEVICE_ID_QLOGIC_ISP2031:
2066 ha->device_type |= DT_ISP2031;
2067 ha->device_type |= DT_ZIO_SUPPORTED;
2068 ha->device_type |= DT_FWI2;
2069 ha->device_type |= DT_IIDMA;
2070 ha->device_type |= DT_T10_PI;
2071 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2072 break;
2073 case PCI_DEVICE_ID_QLOGIC_ISP8031:
2074 ha->device_type |= DT_ISP8031;
2075 ha->device_type |= DT_ZIO_SUPPORTED;
2076 ha->device_type |= DT_FWI2;
2077 ha->device_type |= DT_IIDMA;
2078 ha->device_type |= DT_T10_PI;
2079 ha->fw_srisc_address = RISC_START_ADDRESS_2400;
2080 break;
1912 } 2081 }
1913 2082
1914 if (IS_QLA82XX(ha)) 2083 if (IS_QLA82XX(ha))
@@ -1966,7 +2135,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1966 char pci_info[30]; 2135 char pci_info[30];
1967 char fw_str[30]; 2136 char fw_str[30];
1968 struct scsi_host_template *sht; 2137 struct scsi_host_template *sht;
1969 int bars, max_id, mem_only = 0; 2138 int bars, mem_only = 0;
1970 uint16_t req_length = 0, rsp_length = 0; 2139 uint16_t req_length = 0, rsp_length = 0;
1971 struct req_que *req = NULL; 2140 struct req_que *req = NULL;
1972 struct rsp_que *rsp = NULL; 2141 struct rsp_que *rsp = NULL;
@@ -1980,7 +2149,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1980 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 || 2149 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP5432 ||
1981 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 || 2150 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2532 ||
1982 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 || 2151 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8001 ||
1983 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021) { 2152 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
2153 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
2154 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031) {
1984 bars = pci_select_bars(pdev, IORESOURCE_MEM); 2155 bars = pci_select_bars(pdev, IORESOURCE_MEM);
1985 mem_only = 1; 2156 mem_only = 1;
1986 ql_dbg_pci(ql_dbg_init, pdev, 0x0007, 2157 ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2020,9 +2191,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2020 qla2x00_set_isp_flags(ha); 2191 qla2x00_set_isp_flags(ha);
2021 2192
2022 /* Set EEH reset type to fundamental if required by hba */ 2193 /* Set EEH reset type to fundamental if required by hba */
2023 if ( IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) { 2194 if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha))
2024 pdev->needs_freset = 1; 2195 pdev->needs_freset = 1;
2025 }
2026 2196
2027 ha->prev_topology = 0; 2197 ha->prev_topology = 0;
2028 ha->init_cb_size = sizeof(init_cb_t); 2198 ha->init_cb_size = sizeof(init_cb_t);
@@ -2030,9 +2200,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2030 ha->optrom_size = OPTROM_SIZE_2300; 2200 ha->optrom_size = OPTROM_SIZE_2300;
2031 2201
2032 /* Assign ISP specific operations. */ 2202 /* Assign ISP specific operations. */
2033 max_id = MAX_TARGETS_2200;
2034 if (IS_QLA2100(ha)) { 2203 if (IS_QLA2100(ha)) {
2035 max_id = MAX_TARGETS_2100; 2204 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
2036 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100; 2205 ha->mbx_count = MAILBOX_REGISTER_COUNT_2100;
2037 req_length = REQUEST_ENTRY_CNT_2100; 2206 req_length = REQUEST_ENTRY_CNT_2100;
2038 rsp_length = RESPONSE_ENTRY_CNT_2100; 2207 rsp_length = RESPONSE_ENTRY_CNT_2100;
@@ -2044,6 +2213,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2044 ha->nvram_data_off = ~0; 2213 ha->nvram_data_off = ~0;
2045 ha->isp_ops = &qla2100_isp_ops; 2214 ha->isp_ops = &qla2100_isp_ops;
2046 } else if (IS_QLA2200(ha)) { 2215 } else if (IS_QLA2200(ha)) {
2216 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
2047 ha->mbx_count = MAILBOX_REGISTER_COUNT_2200; 2217 ha->mbx_count = MAILBOX_REGISTER_COUNT_2200;
2048 req_length = REQUEST_ENTRY_CNT_2200; 2218 req_length = REQUEST_ENTRY_CNT_2200;
2049 rsp_length = RESPONSE_ENTRY_CNT_2100; 2219 rsp_length = RESPONSE_ENTRY_CNT_2100;
@@ -2055,6 +2225,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2055 ha->nvram_data_off = ~0; 2225 ha->nvram_data_off = ~0;
2056 ha->isp_ops = &qla2100_isp_ops; 2226 ha->isp_ops = &qla2100_isp_ops;
2057 } else if (IS_QLA23XX(ha)) { 2227 } else if (IS_QLA23XX(ha)) {
2228 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2100;
2058 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2229 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2059 req_length = REQUEST_ENTRY_CNT_2200; 2230 req_length = REQUEST_ENTRY_CNT_2200;
2060 rsp_length = RESPONSE_ENTRY_CNT_2300; 2231 rsp_length = RESPONSE_ENTRY_CNT_2300;
@@ -2068,6 +2239,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2068 ha->nvram_data_off = ~0; 2239 ha->nvram_data_off = ~0;
2069 ha->isp_ops = &qla2300_isp_ops; 2240 ha->isp_ops = &qla2300_isp_ops;
2070 } else if (IS_QLA24XX_TYPE(ha)) { 2241 } else if (IS_QLA24XX_TYPE(ha)) {
2242 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
2071 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2243 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2072 req_length = REQUEST_ENTRY_CNT_24XX; 2244 req_length = REQUEST_ENTRY_CNT_24XX;
2073 rsp_length = RESPONSE_ENTRY_CNT_2300; 2245 rsp_length = RESPONSE_ENTRY_CNT_2300;
@@ -2082,6 +2254,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2082 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2254 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2083 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2255 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2084 } else if (IS_QLA25XX(ha)) { 2256 } else if (IS_QLA25XX(ha)) {
2257 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
2085 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2258 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2086 req_length = REQUEST_ENTRY_CNT_24XX; 2259 req_length = REQUEST_ENTRY_CNT_24XX;
2087 rsp_length = RESPONSE_ENTRY_CNT_2300; 2260 rsp_length = RESPONSE_ENTRY_CNT_2300;
@@ -2096,6 +2269,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2096 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2269 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2097 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2270 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2098 } else if (IS_QLA81XX(ha)) { 2271 } else if (IS_QLA81XX(ha)) {
2272 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
2099 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2273 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2100 req_length = REQUEST_ENTRY_CNT_24XX; 2274 req_length = REQUEST_ENTRY_CNT_24XX;
2101 rsp_length = RESPONSE_ENTRY_CNT_2300; 2275 rsp_length = RESPONSE_ENTRY_CNT_2300;
@@ -2110,6 +2284,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2110 ha->nvram_conf_off = ~0; 2284 ha->nvram_conf_off = ~0;
2111 ha->nvram_data_off = ~0; 2285 ha->nvram_data_off = ~0;
2112 } else if (IS_QLA82XX(ha)) { 2286 } else if (IS_QLA82XX(ha)) {
2287 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
2113 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2288 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2114 req_length = REQUEST_ENTRY_CNT_82XX; 2289 req_length = REQUEST_ENTRY_CNT_82XX;
2115 rsp_length = RESPONSE_ENTRY_CNT_82XX; 2290 rsp_length = RESPONSE_ENTRY_CNT_82XX;
@@ -2123,14 +2298,31 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2123 ha->flash_data_off = FARX_ACCESS_FLASH_DATA; 2298 ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
2124 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF; 2299 ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
2125 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA; 2300 ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
2301 } else if (IS_QLA83XX(ha)) {
2302 ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
2303 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2304 req_length = REQUEST_ENTRY_CNT_24XX;
2305 rsp_length = RESPONSE_ENTRY_CNT_2300;
2306 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2307 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
2308 ha->gid_list_info_size = 8;
2309 ha->optrom_size = OPTROM_SIZE_83XX;
2310 ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
2311 ha->isp_ops = &qla83xx_isp_ops;
2312 ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
2313 ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
2314 ha->nvram_conf_off = ~0;
2315 ha->nvram_data_off = ~0;
2126 } 2316 }
2317
2127 ql_dbg_pci(ql_dbg_init, pdev, 0x001e, 2318 ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
2128 "mbx_count=%d, req_length=%d, " 2319 "mbx_count=%d, req_length=%d, "
2129 "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, " 2320 "rsp_length=%d, max_loop_id=%d, init_cb_size=%d, "
2130 "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, .\n", 2321 "gid_list_info_size=%d, optrom_size=%d, nvram_npiv_size=%d, "
2322 "max_fibre_devices=%d.\n",
2131 ha->mbx_count, req_length, rsp_length, ha->max_loop_id, 2323 ha->mbx_count, req_length, rsp_length, ha->max_loop_id,
2132 ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size, 2324 ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size,
2133 ha->nvram_npiv_size); 2325 ha->nvram_npiv_size, ha->max_fibre_devices);
2134 ql_dbg_pci(ql_dbg_init, pdev, 0x001f, 2326 ql_dbg_pci(ql_dbg_init, pdev, 0x001f,
2135 "isp_ops=%p, flash_conf_off=%d, " 2327 "isp_ops=%p, flash_conf_off=%d, "
2136 "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n", 2328 "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
@@ -2204,7 +2396,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2204 "mgmt_svr_loop_id=%d, sg_tablesize=%d.\n", 2396 "mgmt_svr_loop_id=%d, sg_tablesize=%d.\n",
2205 host->can_queue, base_vha->req, 2397 host->can_queue, base_vha->req,
2206 base_vha->mgmt_svr_loop_id, host->sg_tablesize); 2398 base_vha->mgmt_svr_loop_id, host->sg_tablesize);
2207 host->max_id = max_id; 2399 host->max_id = ha->max_fibre_devices;
2208 host->this_id = 255; 2400 host->this_id = 255;
2209 host->cmd_per_lun = 3; 2401 host->cmd_per_lun = 3;
2210 host->unique_id = host->host_no; 2402 host->unique_id = host->host_no;
@@ -2251,7 +2443,7 @@ que_init:
2251 req->req_q_out = &ha->iobase->isp24.req_q_out; 2443 req->req_q_out = &ha->iobase->isp24.req_q_out;
2252 rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in; 2444 rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
2253 rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out; 2445 rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
2254 if (ha->mqenable) { 2446 if (ha->mqenable || IS_QLA83XX(ha)) {
2255 req->req_q_in = &ha->mqiobase->isp25mq.req_q_in; 2447 req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
2256 req->req_q_out = &ha->mqiobase->isp25mq.req_q_out; 2448 req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
2257 rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in; 2449 rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
@@ -2552,6 +2744,9 @@ qla2x00_remove_one(struct pci_dev *pdev)
2552 2744
2553 if (ha->mqiobase) 2745 if (ha->mqiobase)
2554 iounmap(ha->mqiobase); 2746 iounmap(ha->mqiobase);
2747
2748 if (IS_QLA83XX(ha) && ha->msixbase)
2749 iounmap(ha->msixbase);
2555 } 2750 }
2556 2751
2557 pci_release_selected_regions(ha->pdev, ha->bars); 2752 pci_release_selected_regions(ha->pdev, ha->bars);
@@ -2751,8 +2946,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2751 if (!ha->init_cb) 2946 if (!ha->init_cb)
2752 goto fail; 2947 goto fail;
2753 2948
2754 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, GID_LIST_SIZE, 2949 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
2755 &ha->gid_list_dma, GFP_KERNEL); 2950 qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
2756 if (!ha->gid_list) 2951 if (!ha->gid_list)
2757 goto fail_free_init_cb; 2952 goto fail_free_init_cb;
2758 2953
@@ -2893,7 +3088,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2893 ha->npiv_info = NULL; 3088 ha->npiv_info = NULL;
2894 3089
2895 /* Get consistent memory allocated for EX-INIT-CB. */ 3090 /* Get consistent memory allocated for EX-INIT-CB. */
2896 if (IS_QLA8XXX_TYPE(ha)) { 3091 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)) {
2897 ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 3092 ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
2898 &ha->ex_init_cb_dma); 3093 &ha->ex_init_cb_dma);
2899 if (!ha->ex_init_cb) 3094 if (!ha->ex_init_cb)
@@ -2967,7 +3162,8 @@ fail_free_srb_mempool:
2967 mempool_destroy(ha->srb_mempool); 3162 mempool_destroy(ha->srb_mempool);
2968 ha->srb_mempool = NULL; 3163 ha->srb_mempool = NULL;
2969fail_free_gid_list: 3164fail_free_gid_list:
2970 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list, 3165 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
3166 ha->gid_list,
2971 ha->gid_list_dma); 3167 ha->gid_list_dma);
2972 ha->gid_list = NULL; 3168 ha->gid_list = NULL;
2973 ha->gid_list_dma = 0; 3169 ha->gid_list_dma = 0;
@@ -3045,9 +3241,6 @@ qla2x00_mem_free(struct qla_hw_data *ha)
3045 if (ha->sfp_data) 3241 if (ha->sfp_data)
3046 dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma); 3242 dma_pool_free(ha->s_dma_pool, ha->sfp_data, ha->sfp_data_dma);
3047 3243
3048 if (ha->edc_data)
3049 dma_pool_free(ha->s_dma_pool, ha->edc_data, ha->edc_data_dma);
3050
3051 if (ha->ms_iocb) 3244 if (ha->ms_iocb)
3052 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); 3245 dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
3053 3246
@@ -3062,8 +3255,8 @@ qla2x00_mem_free(struct qla_hw_data *ha)
3062 dma_pool_destroy(ha->s_dma_pool); 3255 dma_pool_destroy(ha->s_dma_pool);
3063 3256
3064 if (ha->gid_list) 3257 if (ha->gid_list)
3065 dma_free_coherent(&ha->pdev->dev, GID_LIST_SIZE, ha->gid_list, 3258 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
3066 ha->gid_list_dma); 3259 ha->gid_list, ha->gid_list_dma);
3067 3260
3068 if (IS_QLA82XX(ha)) { 3261 if (IS_QLA82XX(ha)) {
3069 if (!list_empty(&ha->gbl_dsd_list)) { 3262 if (!list_empty(&ha->gbl_dsd_list)) {
@@ -3095,6 +3288,7 @@ qla2x00_mem_free(struct qla_hw_data *ha)
3095 vfree(ha->optrom_buffer); 3288 vfree(ha->optrom_buffer);
3096 kfree(ha->nvram); 3289 kfree(ha->nvram);
3097 kfree(ha->npiv_info); 3290 kfree(ha->npiv_info);
3291 kfree(ha->swl);
3098 3292
3099 ha->srb_mempool = NULL; 3293 ha->srb_mempool = NULL;
3100 ha->ctx_mempool = NULL; 3294 ha->ctx_mempool = NULL;
@@ -3661,75 +3855,6 @@ qla2x00_rst_aen(scsi_qla_host_t *vha)
3661 } 3855 }
3662} 3856}
3663 3857
3664static void
3665qla2x00_sp_free_dma(srb_t *sp)
3666{
3667 struct scsi_cmnd *cmd = sp->cmd;
3668 struct qla_hw_data *ha = sp->fcport->vha->hw;
3669
3670 if (sp->flags & SRB_DMA_VALID) {
3671 scsi_dma_unmap(cmd);
3672 sp->flags &= ~SRB_DMA_VALID;
3673 }
3674
3675 if (sp->flags & SRB_CRC_PROT_DMA_VALID) {
3676 dma_unmap_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
3677 scsi_prot_sg_count(cmd), cmd->sc_data_direction);
3678 sp->flags &= ~SRB_CRC_PROT_DMA_VALID;
3679 }
3680
3681 if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
3682 /* List assured to be having elements */
3683 qla2x00_clean_dsd_pool(ha, sp);
3684 sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
3685 }
3686
3687 if (sp->flags & SRB_CRC_CTX_DMA_VALID) {
3688 dma_pool_free(ha->dl_dma_pool, sp->ctx,
3689 ((struct crc_context *)sp->ctx)->crc_ctx_dma);
3690 sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
3691 }
3692
3693 if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
3694 struct ct6_dsd *ctx = sp->ctx;
3695 dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd,
3696 ctx->fcp_cmnd_dma);
3697 list_splice(&ctx->dsd_list, &ha->gbl_dsd_list);
3698 ha->gbl_dsd_inuse -= ctx->dsd_use_cnt;
3699 ha->gbl_dsd_avail += ctx->dsd_use_cnt;
3700 mempool_free(sp->ctx, ha->ctx_mempool);
3701 sp->ctx = NULL;
3702 }
3703
3704 CMD_SP(cmd) = NULL;
3705}
3706
3707static void
3708qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
3709{
3710 struct scsi_cmnd *cmd = sp->cmd;
3711
3712 qla2x00_sp_free_dma(sp);
3713 mempool_free(sp, ha->srb_mempool);
3714 cmd->scsi_done(cmd);
3715}
3716
3717void
3718qla2x00_sp_compl(struct qla_hw_data *ha, srb_t *sp)
3719{
3720 if (atomic_read(&sp->ref_count) == 0) {
3721 ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3015,
3722 "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
3723 sp, sp->cmd);
3724 if (ql2xextended_error_logging & ql_dbg_io)
3725 BUG();
3726 return;
3727 }
3728 if (!atomic_dec_and_test(&sp->ref_count))
3729 return;
3730 qla2x00_sp_final_compl(ha, sp);
3731}
3732
3733/************************************************************************** 3858/**************************************************************************
3734* qla2x00_timer 3859* qla2x00_timer
3735* 3860*
@@ -3800,7 +3925,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
3800 sp = req->outstanding_cmds[index]; 3925 sp = req->outstanding_cmds[index];
3801 if (!sp) 3926 if (!sp)
3802 continue; 3927 continue;
3803 if (sp->ctx && !IS_PROT_IO(sp)) 3928 if (sp->type != SRB_SCSI_CMD)
3804 continue; 3929 continue;
3805 sfcp = sp->fcport; 3930 sfcp = sp->fcport;
3806 if (!(sfcp->flags & FCF_FCP2_DEVICE)) 3931 if (!(sfcp->flags & FCF_FCP2_DEVICE))
@@ -3889,7 +4014,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
3889 4014
3890/* Firmware interface routines. */ 4015/* Firmware interface routines. */
3891 4016
3892#define FW_BLOBS 8 4017#define FW_BLOBS 10
3893#define FW_ISP21XX 0 4018#define FW_ISP21XX 0
3894#define FW_ISP22XX 1 4019#define FW_ISP22XX 1
3895#define FW_ISP2300 2 4020#define FW_ISP2300 2
@@ -3898,6 +4023,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
3898#define FW_ISP25XX 5 4023#define FW_ISP25XX 5
3899#define FW_ISP81XX 6 4024#define FW_ISP81XX 6
3900#define FW_ISP82XX 7 4025#define FW_ISP82XX 7
4026#define FW_ISP2031 8
4027#define FW_ISP8031 9
3901 4028
3902#define FW_FILE_ISP21XX "ql2100_fw.bin" 4029#define FW_FILE_ISP21XX "ql2100_fw.bin"
3903#define FW_FILE_ISP22XX "ql2200_fw.bin" 4030#define FW_FILE_ISP22XX "ql2200_fw.bin"
@@ -3907,6 +4034,8 @@ qla2x00_timer(scsi_qla_host_t *vha)
3907#define FW_FILE_ISP25XX "ql2500_fw.bin" 4034#define FW_FILE_ISP25XX "ql2500_fw.bin"
3908#define FW_FILE_ISP81XX "ql8100_fw.bin" 4035#define FW_FILE_ISP81XX "ql8100_fw.bin"
3909#define FW_FILE_ISP82XX "ql8200_fw.bin" 4036#define FW_FILE_ISP82XX "ql8200_fw.bin"
4037#define FW_FILE_ISP2031 "ql2600_fw.bin"
4038#define FW_FILE_ISP8031 "ql8300_fw.bin"
3910 4039
3911static DEFINE_MUTEX(qla_fw_lock); 4040static DEFINE_MUTEX(qla_fw_lock);
3912 4041
@@ -3919,6 +4048,8 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
3919 { .name = FW_FILE_ISP25XX, }, 4048 { .name = FW_FILE_ISP25XX, },
3920 { .name = FW_FILE_ISP81XX, }, 4049 { .name = FW_FILE_ISP81XX, },
3921 { .name = FW_FILE_ISP82XX, }, 4050 { .name = FW_FILE_ISP82XX, },
4051 { .name = FW_FILE_ISP2031, },
4052 { .name = FW_FILE_ISP8031, },
3922}; 4053};
3923 4054
3924struct fw_blob * 4055struct fw_blob *
@@ -3927,7 +4058,6 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
3927 struct qla_hw_data *ha = vha->hw; 4058 struct qla_hw_data *ha = vha->hw;
3928 struct fw_blob *blob; 4059 struct fw_blob *blob;
3929 4060
3930 blob = NULL;
3931 if (IS_QLA2100(ha)) { 4061 if (IS_QLA2100(ha)) {
3932 blob = &qla_fw_blobs[FW_ISP21XX]; 4062 blob = &qla_fw_blobs[FW_ISP21XX];
3933 } else if (IS_QLA2200(ha)) { 4063 } else if (IS_QLA2200(ha)) {
@@ -3944,6 +4074,12 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
3944 blob = &qla_fw_blobs[FW_ISP81XX]; 4074 blob = &qla_fw_blobs[FW_ISP81XX];
3945 } else if (IS_QLA82XX(ha)) { 4075 } else if (IS_QLA82XX(ha)) {
3946 blob = &qla_fw_blobs[FW_ISP82XX]; 4076 blob = &qla_fw_blobs[FW_ISP82XX];
4077 } else if (IS_QLA2031(ha)) {
4078 blob = &qla_fw_blobs[FW_ISP2031];
4079 } else if (IS_QLA8031(ha)) {
4080 blob = &qla_fw_blobs[FW_ISP8031];
4081 } else {
4082 return NULL;
3947 } 4083 }
3948 4084
3949 mutex_lock(&qla_fw_lock); 4085 mutex_lock(&qla_fw_lock);
@@ -4265,6 +4401,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
4265 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) }, 4401 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5422) },
4266 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) }, 4402 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP5432) },
4267 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) }, 4403 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2532) },
4404 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2031) },
4268 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) }, 4405 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8001) },
4269 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) }, 4406 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
4270 { 0 }, 4407 { 0 },
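Note: the qla_os.c changes above replace type-dependent completion logic with per-srb callbacks: qla2xxx_queuecommand() installs sp->free and sp->done when it builds the srb, so the abort paths, the timer, and qla2x00_abort_all_cmds() can finish any command with a uniform sp->done(vha, sp, res) call instead of switching on sp->ctx. A small model of that pattern follows; the names and the refcount policy are illustrative assumptions, not the driver's exact semantics.

/*
 * Sketch of callback-based command completion.
 */
#include <stdio.h>
#include <stdlib.h>

struct srb {
	int ref_count;
	int result;
	void (*free)(struct srb *sp);		/* release DMA/memory */
	void (*done)(struct srb *sp, int res);	/* complete to midlayer */
};

static void scsi_sp_free(struct srb *sp)
{
	free(sp);	/* the real driver also unmaps DMA mappings */
}

static void scsi_sp_compl(struct srb *sp, int res)
{
	sp->result = res;
	if (--sp->ref_count)	/* only the last reference completes */
		return;
	printf("command done, result=%d\n", sp->result);
	sp->free(sp);
}

int main(void)
{
	struct srb *sp = calloc(1, sizeof(*sp));

	if (!sp)
		return 1;
	sp->ref_count = 1;	/* installed at srb-build time, as in */
	sp->free = scsi_sp_free;	/* qla2xxx_queuecommand() above */
	sp->done = scsi_sp_compl;

	sp->done(sp, 0);	/* callers no longer switch on command type */
	return 0;
}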
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 16bc72844a9..3c13c0a6be6 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -568,6 +568,9 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
568 else if (IS_QLA82XX(ha)) { 568 else if (IS_QLA82XX(ha)) {
569 *start = FA_FLASH_LAYOUT_ADDR_82; 569 *start = FA_FLASH_LAYOUT_ADDR_82;
570 goto end; 570 goto end;
571 } else if (IS_QLA83XX(ha)) {
572 *start = FA_FLASH_LAYOUT_ADDR_83;
573 goto end;
571 } 574 }
572 /* Begin with first PCI expansion ROM header. */ 575 /* Begin with first PCI expansion ROM header. */
573 buf = (uint8_t *)req->ring; 576 buf = (uint8_t *)req->ring;
@@ -721,13 +724,22 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
721 le32_to_cpu(region->size)); 724 le32_to_cpu(region->size));
722 725
723 switch (le32_to_cpu(region->code) & 0xff) { 726 switch (le32_to_cpu(region->code) & 0xff) {
727 case FLT_REG_FCOE_FW:
728 if (!IS_QLA8031(ha))
729 break;
730 ha->flt_region_fw = start;
731 break;
724 case FLT_REG_FW: 732 case FLT_REG_FW:
733 if (IS_QLA8031(ha))
734 break;
725 ha->flt_region_fw = start; 735 ha->flt_region_fw = start;
726 break; 736 break;
727 case FLT_REG_BOOT_CODE: 737 case FLT_REG_BOOT_CODE:
728 ha->flt_region_boot = start; 738 ha->flt_region_boot = start;
729 break; 739 break;
730 case FLT_REG_VPD_0: 740 case FLT_REG_VPD_0:
741 if (IS_QLA8031(ha))
742 break;
731 ha->flt_region_vpd_nvram = start; 743 ha->flt_region_vpd_nvram = start;
732 if (IS_QLA82XX(ha)) 744 if (IS_QLA82XX(ha))
733 break; 745 break;
@@ -735,16 +747,20 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
735 ha->flt_region_vpd = start; 747 ha->flt_region_vpd = start;
736 break; 748 break;
737 case FLT_REG_VPD_1: 749 case FLT_REG_VPD_1:
738 if (IS_QLA82XX(ha)) 750 if (IS_QLA82XX(ha) || IS_QLA8031(ha))
739 break; 751 break;
740 if (!ha->flags.port0) 752 if (!ha->flags.port0)
741 ha->flt_region_vpd = start; 753 ha->flt_region_vpd = start;
742 break; 754 break;
743 case FLT_REG_NVRAM_0: 755 case FLT_REG_NVRAM_0:
756 if (IS_QLA8031(ha))
757 break;
744 if (ha->flags.port0) 758 if (ha->flags.port0)
745 ha->flt_region_nvram = start; 759 ha->flt_region_nvram = start;
746 break; 760 break;
747 case FLT_REG_NVRAM_1: 761 case FLT_REG_NVRAM_1:
762 if (IS_QLA8031(ha))
763 break;
748 if (!ha->flags.port0) 764 if (!ha->flags.port0)
749 ha->flt_region_nvram = start; 765 ha->flt_region_nvram = start;
750 break; 766 break;
@@ -785,6 +801,31 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
785 case FLT_REG_VPD_82XX: 801 case FLT_REG_VPD_82XX:
786 ha->flt_region_vpd = start; 802 ha->flt_region_vpd = start;
787 break; 803 break;
804 case FLT_REG_FCOE_VPD_0:
805 if (!IS_QLA8031(ha))
806 break;
807 ha->flt_region_vpd_nvram = start;
808 if (ha->flags.port0)
809 ha->flt_region_vpd = start;
810 break;
811 case FLT_REG_FCOE_VPD_1:
812 if (!IS_QLA8031(ha))
813 break;
814 if (!ha->flags.port0)
815 ha->flt_region_vpd = start;
816 break;
817 case FLT_REG_FCOE_NVRAM_0:
818 if (!IS_QLA8031(ha))
819 break;
820 if (ha->flags.port0)
821 ha->flt_region_nvram = start;
822 break;
823 case FLT_REG_FCOE_NVRAM_1:
824 if (!IS_QLA8031(ha))
825 break;
826 if (!ha->flags.port0)
827 ha->flt_region_nvram = start;
828 break;
788 } 829 }
789 } 830 }
790 goto done; 831 goto done;
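Note: the switch above gates each flash-layout region on the part: ISP8031 boards take the FCoE-specific firmware/VPD/NVRAM regions and skip the generic ones, so one walk of the FLT yields the right flt_region_* offsets for either variant. A compact sketch of that selection logic follows; the region codes and the two-entry table are made up for illustration.

/*
 * Model of device-gated flash-layout-table parsing.
 */
#include <stdint.h>
#include <stdio.h>

enum { REG_FW = 0x01, REG_FCOE_FW = 0x27 };	/* hypothetical codes */

struct flt_region {
	uint32_t code;
	uint32_t start;
};

static uint32_t pick_fw_region(const struct flt_region *tbl, int n,
    int is_8031)
{
	uint32_t fw_start = 0;
	int i;

	for (i = 0; i < n; i++) {
		switch (tbl[i].code & 0xff) {
		case REG_FCOE_FW:
			if (!is_8031)		/* FCoE region: 8031 only */
				break;
			fw_start = tbl[i].start;
			break;
		case REG_FW:
			if (is_8031)		/* generic region: the rest */
				break;
			fw_start = tbl[i].start;
			break;
		}
	}
	return fw_start;
}

int main(void)
{
	const struct flt_region tbl[] = {
		{ REG_FW, 0x20000 }, { REG_FCOE_FW, 0x40000 },
	};

	printf("fw=0x%x (8031)\n", (unsigned)pick_fw_region(tbl, 2, 1));
	printf("fw=0x%x (81xx)\n", (unsigned)pick_fw_region(tbl, 2, 0));
	return 0;	/* 0x40000, then 0x20000 */
}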
@@ -804,15 +845,12 @@ no_flash_data:
804 def_npiv_conf0[def] : def_npiv_conf1[def]; 845 def_npiv_conf0[def] : def_npiv_conf1[def];
805done: 846done:
806 ql_dbg(ql_dbg_init, vha, 0x004a, 847 ql_dbg(ql_dbg_init, vha, 0x004a,
807 "FLT[%s]: boot=0x%x fw=0x%x vpd_nvram=0x%x vpd=0x%x.\n", 848 "FLT[%s]: boot=0x%x fw=0x%x vpd_nvram=0x%x vpd=0x%x nvram=0x%x "
808 loc, ha->flt_region_boot, 849 "fdt=0x%x flt=0x%x npiv=0x%x fcp_prif_cfg=0x%x.\n",
809 ha->flt_region_fw, ha->flt_region_vpd_nvram, 850 loc, ha->flt_region_boot, ha->flt_region_fw,
810 ha->flt_region_vpd); 851 ha->flt_region_vpd_nvram, ha->flt_region_vpd, ha->flt_region_nvram,
811 ql_dbg(ql_dbg_init, vha, 0x004b, 852 ha->flt_region_fdt, ha->flt_region_flt, ha->flt_region_npiv_conf,
812 "nvram=0x%x fdt=0x%x flt=0x%x npiv=0x%x fcp_prif_cfg=0x%x.\n", 853 ha->flt_region_fcp_prio);
813 ha->flt_region_nvram,
814 ha->flt_region_fdt, ha->flt_region_flt,
815 ha->flt_region_npiv_conf, ha->flt_region_fcp_prio);
816} 854}
817 855
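As a reading aid (inferred from the switch above, not from a QLogic flash layout spec), the net effect for an ISP8031 is that the FCoE-specific regions shadow the classic FC ones:

/*
 * ISP8031 FLT region selection, per the hunks above:
 *   FLT_REG_FCOE_FW      -> flt_region_fw              (FLT_REG_FW skipped)
 *   FLT_REG_FCOE_VPD_0   -> flt_region_vpd_nvram, plus
 *                           flt_region_vpd on port 0   (FLT_REG_VPD_0 skipped)
 *   FLT_REG_FCOE_VPD_1   -> flt_region_vpd on port 1   (FLT_REG_VPD_1 skipped)
 *   FLT_REG_FCOE_NVRAM_0 -> flt_region_nvram on port 0 (FLT_REG_NVRAM_0 skipped)
 *   FLT_REG_FCOE_NVRAM_1 -> flt_region_nvram on port 1 (FLT_REG_NVRAM_1 skipped)
 */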
818static void 856static void
@@ -948,7 +986,8 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha)
948 uint32_t flt_addr; 986 uint32_t flt_addr;
949 struct qla_hw_data *ha = vha->hw; 987 struct qla_hw_data *ha = vha->hw;
950 988
951 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA8XXX_TYPE(ha)) 989 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
990 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
952 return QLA_SUCCESS; 991 return QLA_SUCCESS;
953 992
954 ret = qla2xxx_find_flt_start(vha, &flt_addr); 993 ret = qla2xxx_find_flt_start(vha, &flt_addr);
@@ -974,7 +1013,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
974 struct qla_npiv_entry *entry; 1013 struct qla_npiv_entry *entry;
975 struct qla_hw_data *ha = vha->hw; 1014 struct qla_hw_data *ha = vha->hw;
976 1015
977 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_QLA8XXX_TYPE(ha)) 1016 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
1017 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
978 return; 1018 return;
979 1019
980 ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr, 1020 ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
@@ -1144,8 +1184,8 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
1144 struct qla_hw_data *ha = vha->hw; 1184 struct qla_hw_data *ha = vha->hw;
1145 1185
1146 /* Prepare burst-capable write on supported ISPs. */ 1186 /* Prepare burst-capable write on supported ISPs. */
1147 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && !(faddr & 0xfff) && 1187 if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha)) &&
1148 dwords > OPTROM_BURST_DWORDS) { 1188 !(faddr & 0xfff) && dwords > OPTROM_BURST_DWORDS) {
1149 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, 1189 optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
1150 &optrom_dma, GFP_KERNEL); 1190 &optrom_dma, GFP_KERNEL);
1151 if (!optrom) { 1191 if (!optrom) {
@@ -1619,6 +1659,71 @@ qla24xx_beacon_blink(struct scsi_qla_host *vha)
1619 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1659 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1620} 1660}
1621 1661
1662void
1663qla83xx_beacon_blink(struct scsi_qla_host *vha)
1664{
1665 uint32_t led_select_value;
1666 struct qla_hw_data *ha = vha->hw;
1667 uint16_t led_cfg[6];
1668 uint16_t orig_led_cfg[6];
1669
1670 if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha))
1671 return;
1672
1673 if (IS_QLA2031(ha) && ha->beacon_blink_led) {
1674 if (ha->flags.port0)
1675 led_select_value = 0x00201320;
1676 else
1677 led_select_value = 0x00201328;
1678
1679 qla83xx_write_remote_reg(vha, led_select_value, 0x40002000);
1680 qla83xx_write_remote_reg(vha, led_select_value + 4, 0x40002000);
1681 msleep(1000);
1682 qla83xx_write_remote_reg(vha, led_select_value, 0x40004000);
1683 qla83xx_write_remote_reg(vha, led_select_value + 4, 0x40004000);
1684 } else if ((IS_QLA8031(ha) || IS_QLA81XX(ha)) && ha->beacon_blink_led) {
1685 int rval;
1686
1687 /* Save Current */
1688 rval = qla81xx_get_led_config(vha, orig_led_cfg);
1689 /* Do the blink */
1690 if (rval == QLA_SUCCESS) {
1691 if (IS_QLA81XX(ha)) {
1692 led_cfg[0] = 0x4000;
1693 led_cfg[1] = 0x2000;
1694 led_cfg[2] = 0;
1695 led_cfg[3] = 0;
1696 led_cfg[4] = 0;
1697 led_cfg[5] = 0;
1698 } else {
1699 led_cfg[0] = 0x4000;
1700 led_cfg[1] = 0x4000;
1701 led_cfg[2] = 0x4000;
1702 led_cfg[3] = 0x2000;
1703 led_cfg[4] = 0;
1704 led_cfg[5] = 0x2000;
1705 }
1706 rval = qla81xx_set_led_config(vha, led_cfg);
1707 msleep(1000);
1708 if (IS_QLA81XX(ha)) {
1709 led_cfg[0] = 0x4000;
1710 led_cfg[1] = 0x2000;
1711 led_cfg[2] = 0;
1712 } else {
1713 led_cfg[0] = 0x4000;
1714 led_cfg[1] = 0x2000;
1715 led_cfg[2] = 0x4000;
1716 led_cfg[3] = 0x4000;
1717 led_cfg[4] = 0;
1718 led_cfg[5] = 0x2000;
1719 }
1720 rval = qla81xx_set_led_config(vha, led_cfg);
1721 }
1722 /* On exit, restore original (presumes no status change) */
1723 qla81xx_set_led_config(vha, orig_led_cfg);
1724 }
1725}
1726
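The 8031/81xx branch above reduces to a save/modify/restore sequence around the LED-config mailbox helpers; a minimal sketch of that shape, using the 81xx blink values (qla81xx_get_led_config()/qla81xx_set_led_config() are the real driver routines, the caller framing is illustrative):

	uint16_t saved[6];
	uint16_t blink[6] = { 0x4000, 0x2000, 0, 0, 0, 0 };

	if (qla81xx_get_led_config(vha, saved) == QLA_SUCCESS) {
		qla81xx_set_led_config(vha, blink);	/* force blink pattern */
		msleep(1000);
		qla81xx_set_led_config(vha, saved);	/* restore original */
	}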
1622int 1727int
1623qla24xx_beacon_on(struct scsi_qla_host *vha) 1728qla24xx_beacon_on(struct scsi_qla_host *vha)
1624{ 1729{
@@ -1630,6 +1735,9 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
1630 if (IS_QLA82XX(ha)) 1735 if (IS_QLA82XX(ha))
1631 return QLA_SUCCESS; 1736 return QLA_SUCCESS;
1632 1737
1738 if (IS_QLA8031(ha) || IS_QLA81XX(ha))
1739 goto skip_gpio; /* let blink handle it */
1740
1633 if (ha->beacon_blink_led == 0) { 1741 if (ha->beacon_blink_led == 0) {
1634 /* Enable firmware for update */ 1742 /* Enable firmware for update */
1635 ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL; 1743 ha->fw_options[1] |= ADD_FO1_DISABLE_GPIO_LED_CTRL;
@@ -1644,6 +1752,9 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
1644 return QLA_FUNCTION_FAILED; 1752 return QLA_FUNCTION_FAILED;
1645 } 1753 }
1646 1754
1755 if (IS_QLA2031(ha))
1756 goto skip_gpio;
1757
1647 spin_lock_irqsave(&ha->hardware_lock, flags); 1758 spin_lock_irqsave(&ha->hardware_lock, flags);
1648 gpio_data = RD_REG_DWORD(&reg->gpiod); 1759 gpio_data = RD_REG_DWORD(&reg->gpiod);
1649 1760
@@ -1658,6 +1769,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
1658 /* So all colors blink together. */ 1769 /* So all colors blink together. */
1659 ha->beacon_color_state = 0; 1770 ha->beacon_color_state = 0;
1660 1771
1772skip_gpio:
1661 /* Let the per HBA timer kick off the blinking process. */ 1773 /* Let the per HBA timer kick off the blinking process. */
1662 ha->beacon_blink_led = 1; 1774 ha->beacon_blink_led = 1;
1663 1775
@@ -1676,6 +1788,13 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
1676 return QLA_SUCCESS; 1788 return QLA_SUCCESS;
1677 1789
1678 ha->beacon_blink_led = 0; 1790 ha->beacon_blink_led = 0;
1791
1792 if (IS_QLA2031(ha))
1793 goto set_fw_options;
1794
1795 if (IS_QLA8031(ha) || IS_QLA81XX(ha))
1796 return QLA_SUCCESS;
1797
1679 ha->beacon_color_state = QLA_LED_ALL_ON; 1798 ha->beacon_color_state = QLA_LED_ALL_ON;
1680 1799
1681 ha->isp_ops->beacon_blink(vha); /* Will flip to all off. */ 1800 ha->isp_ops->beacon_blink(vha); /* Will flip to all off. */
@@ -1690,6 +1809,7 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
1690 RD_REG_DWORD(&reg->gpiod); 1809 RD_REG_DWORD(&reg->gpiod);
1691 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1810 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1692 1811
1812set_fw_options:
1693 ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL; 1813 ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL;
1694 1814
1695 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) { 1815 if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index bfe68545203..7f2492e88be 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -150,8 +150,6 @@
150#define QL4_SESS_RECOVERY_TMO 120 /* iSCSI session */ 150#define QL4_SESS_RECOVERY_TMO 120 /* iSCSI session */
151 /* recovery timeout */ 151 /* recovery timeout */
152 152
153#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
154#define LSW(x) ((uint16_t)(x))
155#define LSDW(x) ((u32)((u64)(x))) 153#define LSDW(x) ((u32)((u64)(x)))
156#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16)) 154#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
157 155
@@ -223,6 +221,15 @@ struct srb {
223 uint16_t reserved2; 221 uint16_t reserved2;
224}; 222};
225 223
224/* Mailbox request block structure */
225struct mrb {
226 struct scsi_qla_host *ha;
227 struct mbox_cmd_iocb *mbox;
228 uint32_t mbox_cmd;
229 uint16_t iocb_cnt; /* Number of used iocbs */
230 uint32_t pid;
231};
232
226/* 233/*
227 * Asynchronous Event Queue structure 234 * Asynchronous Event Queue structure
228 */ 235 */
@@ -265,7 +272,7 @@ struct ddb_entry {
265 * retried */ 272 * retried */
266 uint32_t default_time2wait; /* Default Min time between 273 uint32_t default_time2wait; /* Default Min time between
267 * relogins (+aens) */ 274 * relogins (+aens) */
268 275 uint16_t chap_tbl_idx;
269}; 276};
270 277
271struct qla_ddb_index { 278struct qla_ddb_index {
@@ -284,6 +291,7 @@ struct ql4_tuple_ddb {
284 uint16_t options; 291 uint16_t options;
285#define DDB_OPT_IPV6 0x0e0e 292#define DDB_OPT_IPV6 0x0e0e
286#define DDB_OPT_IPV4 0x0f0f 293#define DDB_OPT_IPV4 0x0f0f
294 uint8_t isid[6];
287}; 295};
288 296
289/* 297/*
@@ -303,7 +311,28 @@ struct ql4_tuple_ddb {
303#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */ 311#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */
304#define DF_FO_MASKED 3 312#define DF_FO_MASKED 3
305 313
314enum qla4_work_type {
315 QLA4_EVENT_AEN,
316 QLA4_EVENT_PING_STATUS,
317};
306 318
319struct qla4_work_evt {
320 struct list_head list;
321 enum qla4_work_type type;
322 union {
323 struct {
324 enum iscsi_host_event_code code;
325 uint32_t data_size;
326 uint8_t data[0];
327 } aen;
328 struct {
329 uint32_t status;
330 uint32_t pid;
331 uint32_t data_size;
332 uint8_t data[0];
333 } ping;
334 } u;
335};
307 336
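The data[0] members make each event one variable-length allocation; the qla4xxx_alloc_work() hunk later in this patch sizes it as sizeof(struct qla4_work_evt) + data_size. A minimal sketch of how a payload lands in the flexible tail (mbox_sts stands in for the real AEN data):

	struct qla4_work_evt *e;
	uint32_t data_size = 8 * sizeof(uint32_t);	/* e.g. eight mailbox registers */

	/* One allocation covers the fixed header and the trailing payload. */
	e = kzalloc(sizeof(*e) + data_size, GFP_ATOMIC);
	if (e) {
		e->type = QLA4_EVENT_AEN;
		e->u.aen.data_size = data_size;
		memcpy(e->u.aen.data, mbox_sts, data_size);	/* fills the data[0] tail */
	}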
308struct ql82xx_hw_data { 337struct ql82xx_hw_data {
309 /* Offsets for flash/nvram access (set to ~0 if not used). */ 338 /* Offsets for flash/nvram access (set to ~0 if not used). */
@@ -657,6 +686,7 @@ struct scsi_qla_host {
657 struct dma_pool *chap_dma_pool; 686 struct dma_pool *chap_dma_pool;
658 uint8_t *chap_list; /* CHAP table cache */ 687 uint8_t *chap_list; /* CHAP table cache */
659 struct mutex chap_sem; 688 struct mutex chap_sem;
689
660#define CHAP_DMA_BLOCK_SIZE 512 690#define CHAP_DMA_BLOCK_SIZE 512
661 struct workqueue_struct *task_wq; 691 struct workqueue_struct *task_wq;
662 unsigned long ddb_idx_map[MAX_DDB_ENTRIES / BITS_PER_LONG]; 692 unsigned long ddb_idx_map[MAX_DDB_ENTRIES / BITS_PER_LONG];
@@ -674,6 +704,15 @@ struct scsi_qla_host {
674 uint16_t sec_ddb_idx; 704 uint16_t sec_ddb_idx;
675 int is_reset; 705 int is_reset;
676 uint16_t temperature; 706 uint16_t temperature;
707
708 /* event work list */
709 struct list_head work_list;
710 spinlock_t work_lock;
711
712 /* mbox iocb */
713#define MAX_MRB 128
714 struct mrb *active_mrb_array[MAX_MRB];
715 uint32_t mrb_index;
677}; 716};
678 717
679struct ql4_task_data { 718struct ql4_task_data {
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 7825c141bc1..210cd1d6447 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -331,6 +331,10 @@ struct qla_flt_region {
331/* Mailbox command definitions */ 331/* Mailbox command definitions */
332#define MBOX_CMD_ABOUT_FW 0x0009 332#define MBOX_CMD_ABOUT_FW 0x0009
333#define MBOX_CMD_PING 0x000B 333#define MBOX_CMD_PING 0x000B
334#define PING_IPV6_PROTOCOL_ENABLE 0x1
335#define PING_IPV6_LINKLOCAL_ADDR 0x4
336#define PING_IPV6_ADDR0 0x8
337#define PING_IPV6_ADDR1 0xC
334#define MBOX_CMD_ENABLE_INTRS 0x0010 338#define MBOX_CMD_ENABLE_INTRS 0x0010
335#define INTR_DISABLE 0 339#define INTR_DISABLE 0
336#define INTR_ENABLE 1 340#define INTR_ENABLE 1
@@ -396,6 +400,10 @@ struct qla_flt_region {
396#define FW_ADDSTATE_DHCPv4_LEASE_EXPIRED 0x0008 400#define FW_ADDSTATE_DHCPv4_LEASE_EXPIRED 0x0008
397#define FW_ADDSTATE_LINK_UP 0x0010 401#define FW_ADDSTATE_LINK_UP 0x0010
398#define FW_ADDSTATE_ISNS_SVC_ENABLED 0x0020 402#define FW_ADDSTATE_ISNS_SVC_ENABLED 0x0020
403#define FW_ADDSTATE_LINK_SPEED_10MBPS 0x0100
404#define FW_ADDSTATE_LINK_SPEED_100MBPS 0x0200
405#define FW_ADDSTATE_LINK_SPEED_1GBPS 0x0400
406#define FW_ADDSTATE_LINK_SPEED_10GBPS 0x0800
399 407
400#define MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS 0x006B 408#define MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS 0x006B
401#define IPV6_DEFAULT_DDB_ENTRY 0x0001 409#define IPV6_DEFAULT_DDB_ENTRY 0x0001
@@ -918,6 +926,8 @@ struct qla4_header {
918#define ET_CMND_T3 0x19 926#define ET_CMND_T3 0x19
919#define ET_PASSTHRU0 0x3A 927#define ET_PASSTHRU0 0x3A
920#define ET_PASSTHRU_STATUS 0x3C 928#define ET_PASSTHRU_STATUS 0x3C
929#define ET_MBOX_CMD 0x38
930#define ET_MBOX_STATUS 0x39
921 931
922 uint8_t entryStatus; 932 uint8_t entryStatus;
923 uint8_t systemDefined; 933 uint8_t systemDefined;
@@ -1118,6 +1128,20 @@ struct passthru_status {
1118 uint8_t res4[16]; /* 30-3F */ 1128 uint8_t res4[16]; /* 30-3F */
1119}; 1129};
1120 1130
1131struct mbox_cmd_iocb {
1132 struct qla4_header hdr; /* 00-03 */
1133 uint32_t handle; /* 04-07 */
 1134	uint32_t in_mbox[8];	/* 08-27 */
 1135	uint32_t res1[6];	/* 28-3F */
1136};
1137
1138struct mbox_status_iocb {
1139 struct qla4_header hdr; /* 00-03 */
1140 uint32_t handle; /* 04-07 */
 1141	uint32_t out_mbox[8];	/* 08-27 */
 1142	uint32_t res1[6];	/* 28-3F */
1143};
1144
1121/* 1145/*
1122 * ISP queue - response queue entry definition. 1146 * ISP queue - response queue entry definition.
1123 */ 1147 */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index d0dd4b33020..910536667cf 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -81,6 +81,8 @@ int qla4xxx_set_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr,
81 uint32_t offset, uint32_t length, uint32_t options); 81 uint32_t offset, uint32_t length, uint32_t options);
82int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount, 82int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
83 uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts); 83 uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts);
84int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
85 char *password, int bidi, uint16_t *chap_index);
84 86
85void qla4xxx_queue_iocb(struct scsi_qla_host *ha); 87void qla4xxx_queue_iocb(struct scsi_qla_host *ha);
86void qla4xxx_complete_iocb(struct scsi_qla_host *ha); 88void qla4xxx_complete_iocb(struct scsi_qla_host *ha);
@@ -181,6 +183,13 @@ int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
181int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index, 183int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
182 struct ddb_entry *ddb_entry, uint32_t state); 184 struct ddb_entry *ddb_entry, uint32_t state);
183void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset); 185void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset);
186int qla4xxx_post_aen_work(struct scsi_qla_host *ha, uint32_t aen_code,
187 uint32_t data_size, uint8_t *data);
188int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
189 uint32_t payload_size, uint32_t pid, uint8_t *ipaddr);
190int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
191 uint32_t status, uint32_t pid,
192 uint32_t data_size, uint8_t *data);
184 193
185/* BSG Functions */ 194/* BSG Functions */
186int qla4xxx_bsg_request(struct bsg_job *bsg_job); 195int qla4xxx_bsg_request(struct bsg_job *bsg_job);
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 90614f38b55..90ee5d8fa73 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -86,6 +86,7 @@ static void qla4xxx_init_response_q_entries(struct scsi_qla_host *ha)
86int qla4xxx_init_rings(struct scsi_qla_host *ha) 86int qla4xxx_init_rings(struct scsi_qla_host *ha)
87{ 87{
88 unsigned long flags = 0; 88 unsigned long flags = 0;
89 int i;
89 90
90 /* Initialize request queue. */ 91 /* Initialize request queue. */
91 spin_lock_irqsave(&ha->hardware_lock, flags); 92 spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -125,6 +126,10 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha)
125 126
126 qla4xxx_init_response_q_entries(ha); 127 qla4xxx_init_response_q_entries(ha);
127 128
 129	/* Initialize mailbox active array */
130 for (i = 0; i < MAX_MRB; i++)
131 ha->active_mrb_array[i] = NULL;
132
128 spin_unlock_irqrestore(&ha->hardware_lock, flags); 133 spin_unlock_irqrestore(&ha->hardware_lock, flags);
129 134
130 return QLA_SUCCESS; 135 return QLA_SUCCESS;
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index 41066935190..2a2022a6bb9 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -445,3 +445,95 @@ queuing_error:
445 spin_unlock_irqrestore(&ha->hardware_lock, flags); 445 spin_unlock_irqrestore(&ha->hardware_lock, flags);
446 return ret; 446 return ret;
447} 447}
448
449static struct mrb *qla4xxx_get_new_mrb(struct scsi_qla_host *ha)
450{
451 struct mrb *mrb;
452
453 mrb = kzalloc(sizeof(*mrb), GFP_KERNEL);
454 if (!mrb)
455 return mrb;
456
457 mrb->ha = ha;
458 return mrb;
459}
460
461static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb,
462 uint32_t *in_mbox)
463{
464 int rval = QLA_SUCCESS;
465 uint32_t i;
466 unsigned long flags;
467 uint32_t index = 0;
468
469 /* Acquire hardware specific lock */
470 spin_lock_irqsave(&ha->hardware_lock, flags);
471
472 /* Get pointer to the queue entry for the marker */
473 rval = qla4xxx_get_req_pkt(ha, (struct queue_entry **) &(mrb->mbox));
474 if (rval != QLA_SUCCESS)
475 goto exit_mbox_iocb;
476
477 index = ha->mrb_index;
 478	/* get valid mrb index */
479 for (i = 0; i < MAX_MRB; i++) {
480 index++;
481 if (index == MAX_MRB)
482 index = 1;
483 if (ha->active_mrb_array[index] == NULL) {
484 ha->mrb_index = index;
485 break;
486 }
487 }
488
489 mrb->iocb_cnt = 1;
490 ha->active_mrb_array[index] = mrb;
491 mrb->mbox->handle = index;
492 mrb->mbox->hdr.entryType = ET_MBOX_CMD;
493 mrb->mbox->hdr.entryCount = mrb->iocb_cnt;
494 memcpy(mrb->mbox->in_mbox, in_mbox, 32);
495 mrb->mbox_cmd = in_mbox[0];
496 wmb();
497
498 ha->isp_ops->queue_iocb(ha);
499exit_mbox_iocb:
500 spin_unlock_irqrestore(&ha->hardware_lock, flags);
501 return rval;
502}
503
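The slot search above starts one past ha->mrb_index and wraps from MAX_MRB back to 1, so slot 0 is never handed out (presumably keeping a zero handle unambiguous). The same scan as a standalone sketch that, unlike the loop above, reports exhaustion explicitly:

static int find_free_mrb_slot(struct mrb **array, uint32_t last_index)
{
	uint32_t i, index = last_index;

	for (i = 0; i < MAX_MRB; i++) {
		index++;
		if (index == MAX_MRB)
			index = 1;	/* wrap around, skipping slot 0 */
		if (array[index] == NULL)
			return index;	/* free slot */
	}
	return -1;			/* every slot is busy */
}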
504int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
505 uint32_t payload_size, uint32_t pid, uint8_t *ipaddr)
506{
507 uint32_t in_mbox[8];
508 struct mrb *mrb = NULL;
509 int rval = QLA_SUCCESS;
510
511 memset(in_mbox, 0, sizeof(in_mbox));
512
513 mrb = qla4xxx_get_new_mrb(ha);
514 if (!mrb) {
 515		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: failed to get new mrb\n",
516 __func__));
517 rval = QLA_ERROR;
518 goto exit_ping;
519 }
520
521 in_mbox[0] = MBOX_CMD_PING;
522 in_mbox[1] = options;
523 memcpy(&in_mbox[2], &ipaddr[0], 4);
524 memcpy(&in_mbox[3], &ipaddr[4], 4);
525 memcpy(&in_mbox[4], &ipaddr[8], 4);
526 memcpy(&in_mbox[5], &ipaddr[12], 4);
527 in_mbox[6] = payload_size;
528
529 mrb->pid = pid;
530 rval = qla4xxx_send_mbox_iocb(ha, mrb, in_mbox);
531
532 if (rval != QLA_SUCCESS)
533 goto exit_ping;
534
535 return rval;
536exit_ping:
537 kfree(mrb);
538 return rval;
539}
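For reference, the PING request built above packs the mailbox words as follows (a summary inferred from the code, not from a firmware spec):

/*
 * MBOX_CMD_PING request layout, per qla4xxx_ping_iocb() above:
 *   in_mbox[0]   = MBOX_CMD_PING
 *   in_mbox[1]   = options (PING_IPV6_* flags for IPv6; 0 for IPv4)
 *   in_mbox[2-5] = destination IP, 16 bytes (IPv4 uses only in_mbox[2])
 *   in_mbox[6]   = payload size
 *   in_mbox[7]   = unused, zeroed by the memset
 */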
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 95828862eea..7c9f28b7da7 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -385,6 +385,71 @@ static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha,
385 queue_work(ha->task_wq, &task_data->task_work); 385 queue_work(ha->task_wq, &task_data->task_work);
386} 386}
387 387
388static struct mrb *qla4xxx_del_mrb_from_active_array(struct scsi_qla_host *ha,
389 uint32_t index)
390{
391 struct mrb *mrb = NULL;
392
393 /* validate handle and remove from active array */
394 if (index >= MAX_MRB)
395 return mrb;
396
397 mrb = ha->active_mrb_array[index];
398 ha->active_mrb_array[index] = NULL;
399 if (!mrb)
400 return mrb;
401
402 /* update counters */
403 ha->req_q_count += mrb->iocb_cnt;
404 ha->iocb_cnt -= mrb->iocb_cnt;
405
406 return mrb;
407}
408
409static void qla4xxx_mbox_status_entry(struct scsi_qla_host *ha,
410 struct mbox_status_iocb *mbox_sts_entry)
411{
412 struct mrb *mrb;
413 uint32_t status;
414 uint32_t data_size;
415
416 mrb = qla4xxx_del_mrb_from_active_array(ha,
417 le32_to_cpu(mbox_sts_entry->handle));
418
419 if (mrb == NULL) {
420 ql4_printk(KERN_WARNING, ha, "%s: mrb[%d] is null\n", __func__,
421 mbox_sts_entry->handle);
422 return;
423 }
424
425 switch (mrb->mbox_cmd) {
426 case MBOX_CMD_PING:
427 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: mbox_cmd = 0x%x, "
428 "mbox_sts[0] = 0x%x, mbox_sts[6] = 0x%x\n",
429 __func__, mrb->mbox_cmd,
430 mbox_sts_entry->out_mbox[0],
431 mbox_sts_entry->out_mbox[6]));
432
433 if (mbox_sts_entry->out_mbox[0] == MBOX_STS_COMMAND_COMPLETE)
434 status = QLA_SUCCESS;
435 else
436 status = QLA_ERROR;
437
438 data_size = sizeof(mbox_sts_entry->out_mbox);
439
440 qla4xxx_post_ping_evt_work(ha, status, mrb->pid, data_size,
441 (uint8_t *) mbox_sts_entry->out_mbox);
442 break;
443
444 default:
445 DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: invalid mbox_cmd = "
446 "0x%x\n", __func__, mrb->mbox_cmd));
447 }
448
449 kfree(mrb);
450 return;
451}
452
388/** 453/**
389 * qla4xxx_process_response_queue - process response queue completions 454 * qla4xxx_process_response_queue - process response queue completions
390 * @ha: Pointer to host adapter structure. 455 * @ha: Pointer to host adapter structure.
@@ -461,6 +526,13 @@ void qla4xxx_process_response_queue(struct scsi_qla_host *ha)
461 "ignoring\n", ha->host_no, __func__)); 526 "ignoring\n", ha->host_no, __func__));
462 break; 527 break;
463 528
529 case ET_MBOX_STATUS:
530 DEBUG2(ql4_printk(KERN_INFO, ha,
531 "%s: mbox status IOCB\n", __func__));
532 qla4xxx_mbox_status_entry(ha,
533 (struct mbox_status_iocb *)sts_entry);
534 break;
535
464 default: 536 default:
465 /* 537 /*
466 * Invalid entry in response queue, reset RISC 538 * Invalid entry in response queue, reset RISC
@@ -576,6 +648,9 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
576 set_bit(DPC_LINK_CHANGED, &ha->dpc_flags); 648 set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
577 649
578 ql4_printk(KERN_INFO, ha, "%s: LINK UP\n", __func__); 650 ql4_printk(KERN_INFO, ha, "%s: LINK UP\n", __func__);
651 qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKUP,
652 sizeof(mbox_sts),
653 (uint8_t *) mbox_sts);
579 break; 654 break;
580 655
581 case MBOX_ASTS_LINK_DOWN: 656 case MBOX_ASTS_LINK_DOWN:
@@ -584,6 +659,9 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
584 set_bit(DPC_LINK_CHANGED, &ha->dpc_flags); 659 set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
585 660
586 ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__); 661 ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__);
662 qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKDOWN,
663 sizeof(mbox_sts),
664 (uint8_t *) mbox_sts);
587 break; 665 break;
588 666
589 case MBOX_ASTS_HEARTBEAT: 667 case MBOX_ASTS_HEARTBEAT:
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index e1e66a45e4d..7ac21dabbf2 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -622,7 +622,7 @@ int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
622 return QLA_ERROR; 622 return QLA_ERROR;
623 } 623 }
624 624
625 ql4_printk(KERN_INFO, ha, "%ld firmare IOCBs available (%d).\n", 625 ql4_printk(KERN_INFO, ha, "%ld firmware IOCBs available (%d).\n",
626 ha->host_no, mbox_sts[2]); 626 ha->host_no, mbox_sts[2]);
627 627
628 return QLA_SUCCESS; 628 return QLA_SUCCESS;
@@ -661,6 +661,8 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
661 } 661 }
662 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 662 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
663 memset(&mbox_sts, 0, sizeof(mbox_sts)); 663 memset(&mbox_sts, 0, sizeof(mbox_sts));
664 if (fw_ddb_entry)
665 memset(fw_ddb_entry, 0, sizeof(struct dev_db_entry));
664 666
665 mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY; 667 mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY;
666 mbox_cmd[1] = (uint32_t) fw_ddb_index; 668 mbox_cmd[1] = (uint32_t) fw_ddb_index;
@@ -1424,8 +1426,8 @@ exit_set_chap:
1424 * match is found. If a match is not found then add the entry in FLASH and 1426 * match is found. If a match is not found then add the entry in FLASH and
1425 * return the index at which entry is written in the FLASH. 1427 * return the index at which entry is written in the FLASH.
1426 **/ 1428 **/
1427static int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username, 1429int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
1428 char *password, int bidi, uint16_t *chap_index) 1430 char *password, int bidi, uint16_t *chap_index)
1429{ 1431{
1430 int i, rval; 1432 int i, rval;
1431 int free_index = -1; 1433 int free_index = -1;
@@ -1444,6 +1446,11 @@ static int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
1444 return QLA_ERROR; 1446 return QLA_ERROR;
1445 } 1447 }
1446 1448
1449 if (!username || !password) {
 1450		ql4_printk(KERN_ERR, ha, "Do not have username and password\n");
1451 return QLA_ERROR;
1452 }
1453
1447 mutex_lock(&ha->chap_sem); 1454 mutex_lock(&ha->chap_sem);
1448 for (i = 0; i < max_chap_entries; i++) { 1455 for (i = 0; i < max_chap_entries; i++) {
1449 chap_table = (struct ql4_chap_table *)ha->chap_list + i; 1456 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
@@ -1600,7 +1607,7 @@ int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
1600 char *ip; 1607 char *ip;
1601 uint16_t iscsi_opts = 0; 1608 uint16_t iscsi_opts = 0;
1602 uint32_t options = 0; 1609 uint32_t options = 0;
1603 uint16_t idx; 1610 uint16_t idx, *ptid;
1604 1611
1605 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 1612 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1606 &fw_ddb_entry_dma, GFP_KERNEL); 1613 &fw_ddb_entry_dma, GFP_KERNEL);
@@ -1626,6 +1633,14 @@ int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
1626 goto exit_set_param; 1633 goto exit_set_param;
1627 } 1634 }
1628 1635
1636 ptid = (uint16_t *)&fw_ddb_entry->isid[1];
1637 *ptid = cpu_to_le16((uint16_t)ddb_entry->sess->target_id);
1638
1639 DEBUG2(ql4_printk(KERN_INFO, ha, "ISID [%02x%02x%02x%02x%02x%02x]\n",
1640 fw_ddb_entry->isid[5], fw_ddb_entry->isid[4],
1641 fw_ddb_entry->isid[3], fw_ddb_entry->isid[2],
1642 fw_ddb_entry->isid[1], fw_ddb_entry->isid[0]));
1643
1629 iscsi_opts = le16_to_cpu(fw_ddb_entry->iscsi_options); 1644 iscsi_opts = le16_to_cpu(fw_ddb_entry->iscsi_options);
1630 memset(fw_ddb_entry->iscsi_alias, 0, sizeof(fw_ddb_entry->iscsi_alias)); 1645 memset(fw_ddb_entry->iscsi_alias, 0, sizeof(fw_ddb_entry->iscsi_alias));
1631 1646
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 65253dfbe96..e1e46b6dac7 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -841,11 +841,8 @@ qla4_8xxx_rom_lock(struct scsi_qla_host *ha)
841 done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK)); 841 done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
842 if (done == 1) 842 if (done == 1)
843 break; 843 break;
844 if (timeout >= qla4_8xxx_rom_lock_timeout) { 844 if (timeout >= qla4_8xxx_rom_lock_timeout)
845 ql4_printk(KERN_WARNING, ha,
846 "%s: Failed to acquire rom lock", __func__);
847 return -1; 845 return -1;
848 }
849 846
850 timeout++; 847 timeout++;
851 848
@@ -996,18 +993,6 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
996 else 993 else
997 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff); 994 qla4_8xxx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
998 995
999 /* reset ms */
1000 val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
1001 val |= (1 << 1);
1002 qla4_8xxx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
1003
1004 msleep(20);
1005 /* unreset ms */
1006 val = qla4_8xxx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
1007 val &= ~(1 << 1);
1008 qla4_8xxx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
1009 msleep(20);
1010
1011 qla4_8xxx_rom_unlock(ha); 996 qla4_8xxx_rom_unlock(ha);
1012 997
1013 /* Read the signature value from the flash. 998 /* Read the signature value from the flash.
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index dc45ac92369..dc7500e47b8 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -623,6 +623,7 @@ struct crb_addr_pair {
623 623
624#define ADDR_ERROR ((unsigned long) 0xffffffff) 624#define ADDR_ERROR ((unsigned long) 0xffffffff)
625#define MAX_CTL_CHECK 1000 625#define MAX_CTL_CHECK 1000
626#define QLA82XX_FWERROR_CODE(code) ((code >> 8) & 0x1fffff)
626 627
627/*************************************************************************** 628/***************************************************************************
628 * PCI related defines. 629 * PCI related defines.
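The macro isolates bits 8-28 of the peg halt status. A worked example, tying it to the watchdog hunk below (this replaces the old LSW(MSB(halt_status)) idiom):

/*
 * halt_status = 0x00006700 (firmware abort):
 *   (0x00006700 >> 8) & 0x1fffff == 0x67
 * so QLA82XX_FWERROR_CODE(halt_status) == 0x67 matches the
 * "error code 0x00006700" check in qla4_8xxx_watchdog().
 */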
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index edf503437e9..3d9419460e0 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -32,14 +32,14 @@ static struct kmem_cache *srb_cachep;
32/* 32/*
33 * Module parameter information and variables 33 * Module parameter information and variables
34 */ 34 */
35int ql4xdisablesysfsboot = 1; 35static int ql4xdisablesysfsboot = 1;
36module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR); 36module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
37MODULE_PARM_DESC(ql4xdisablesysfsboot, 37MODULE_PARM_DESC(ql4xdisablesysfsboot,
38 " Set to disable exporting boot targets to sysfs.\n" 38 " Set to disable exporting boot targets to sysfs.\n"
39 "\t\t 0 - Export boot targets\n" 39 "\t\t 0 - Export boot targets\n"
40 "\t\t 1 - Do not export boot targets (Default)"); 40 "\t\t 1 - Do not export boot targets (Default)");
41 41
42int ql4xdontresethba = 0; 42int ql4xdontresethba;
43module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR); 43module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
44MODULE_PARM_DESC(ql4xdontresethba, 44MODULE_PARM_DESC(ql4xdontresethba,
45 " Don't reset the HBA for driver recovery.\n" 45 " Don't reset the HBA for driver recovery.\n"
@@ -71,7 +71,7 @@ MODULE_PARM_DESC(ql4xmaxqdepth,
71static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO; 71static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
72module_param(ql4xsess_recovery_tmo, int, S_IRUGO); 72module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
73MODULE_PARM_DESC(ql4xsess_recovery_tmo, 73MODULE_PARM_DESC(ql4xsess_recovery_tmo,
74 "Target Session Recovery Timeout.\n" 74 " Target Session Recovery Timeout.\n"
75 "\t\t Default: 120 sec."); 75 "\t\t Default: 120 sec.");
76 76
77static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha); 77static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
@@ -83,6 +83,8 @@ static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
83/* 83/*
84 * iSCSI template entry points 84 * iSCSI template entry points
85 */ 85 */
86static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
87 enum iscsi_param param, char *buf);
86static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn, 88static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
87 enum iscsi_param param, char *buf); 89 enum iscsi_param param, char *buf);
88static int qla4xxx_host_get_param(struct Scsi_Host *shost, 90static int qla4xxx_host_get_param(struct Scsi_Host *shost,
@@ -118,6 +120,13 @@ static void qla4xxx_task_cleanup(struct iscsi_task *);
118static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session); 120static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
119static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn, 121static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
120 struct iscsi_stats *stats); 122 struct iscsi_stats *stats);
123static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
124 uint32_t iface_type, uint32_t payload_size,
125 uint32_t pid, struct sockaddr *dst_addr);
126static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
127 uint32_t *num_entries, char *buf);
128static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx);
129
121/* 130/*
122 * SCSI host template entry points 131 * SCSI host template entry points
123 */ 132 */
@@ -179,7 +188,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
179 .destroy_conn = qla4xxx_conn_destroy, 188 .destroy_conn = qla4xxx_conn_destroy,
180 .set_param = iscsi_set_param, 189 .set_param = iscsi_set_param,
181 .get_conn_param = qla4xxx_conn_get_param, 190 .get_conn_param = qla4xxx_conn_get_param,
182 .get_session_param = iscsi_session_get_param, 191 .get_session_param = qla4xxx_session_get_param,
183 .get_ep_param = qla4xxx_get_ep_param, 192 .get_ep_param = qla4xxx_get_ep_param,
184 .ep_connect = qla4xxx_ep_connect, 193 .ep_connect = qla4xxx_ep_connect,
185 .ep_poll = qla4xxx_ep_poll, 194 .ep_poll = qla4xxx_ep_poll,
@@ -194,10 +203,93 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
194 .set_iface_param = qla4xxx_iface_set_param, 203 .set_iface_param = qla4xxx_iface_set_param,
195 .get_iface_param = qla4xxx_get_iface_param, 204 .get_iface_param = qla4xxx_get_iface_param,
196 .bsg_request = qla4xxx_bsg_request, 205 .bsg_request = qla4xxx_bsg_request,
206 .send_ping = qla4xxx_send_ping,
207 .get_chap = qla4xxx_get_chap_list,
208 .delete_chap = qla4xxx_delete_chap,
197}; 209};
198 210
199static struct scsi_transport_template *qla4xxx_scsi_transport; 211static struct scsi_transport_template *qla4xxx_scsi_transport;
200 212
213static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
214 uint32_t iface_type, uint32_t payload_size,
215 uint32_t pid, struct sockaddr *dst_addr)
216{
217 struct scsi_qla_host *ha = to_qla_host(shost);
218 struct sockaddr_in *addr;
219 struct sockaddr_in6 *addr6;
220 uint32_t options = 0;
221 uint8_t ipaddr[IPv6_ADDR_LEN];
222 int rval;
223
224 memset(ipaddr, 0, IPv6_ADDR_LEN);
225 /* IPv4 to IPv4 */
226 if ((iface_type == ISCSI_IFACE_TYPE_IPV4) &&
227 (dst_addr->sa_family == AF_INET)) {
228 addr = (struct sockaddr_in *)dst_addr;
229 memcpy(ipaddr, &addr->sin_addr.s_addr, IP_ADDR_LEN);
230 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv4 Ping src: %pI4 "
231 "dest: %pI4\n", __func__,
232 &ha->ip_config.ip_address, ipaddr));
233 rval = qla4xxx_ping_iocb(ha, options, payload_size, pid,
234 ipaddr);
235 if (rval)
236 rval = -EINVAL;
237 } else if ((iface_type == ISCSI_IFACE_TYPE_IPV6) &&
238 (dst_addr->sa_family == AF_INET6)) {
239 /* IPv6 to IPv6 */
240 addr6 = (struct sockaddr_in6 *)dst_addr;
241 memcpy(ipaddr, &addr6->sin6_addr.in6_u.u6_addr8, IPv6_ADDR_LEN);
242
243 options |= PING_IPV6_PROTOCOL_ENABLE;
244
245 /* Ping using LinkLocal address */
246 if ((iface_num == 0) || (iface_num == 1)) {
247 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: LinkLocal Ping "
248 "src: %pI6 dest: %pI6\n", __func__,
249 &ha->ip_config.ipv6_link_local_addr,
250 ipaddr));
251 options |= PING_IPV6_LINKLOCAL_ADDR;
252 rval = qla4xxx_ping_iocb(ha, options, payload_size,
253 pid, ipaddr);
254 } else {
255 ql4_printk(KERN_WARNING, ha, "%s: iface num = %d "
256 "not supported\n", __func__, iface_num);
257 rval = -ENOSYS;
258 goto exit_send_ping;
259 }
260
261 /*
262 * If ping using LinkLocal address fails, try ping using
263 * IPv6 address
264 */
265 if (rval != QLA_SUCCESS) {
266 options &= ~PING_IPV6_LINKLOCAL_ADDR;
267 if (iface_num == 0) {
268 options |= PING_IPV6_ADDR0;
269 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
270 "Ping src: %pI6 "
271 "dest: %pI6\n", __func__,
272 &ha->ip_config.ipv6_addr0,
273 ipaddr));
274 } else if (iface_num == 1) {
275 options |= PING_IPV6_ADDR1;
276 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IPv6 "
277 "Ping src: %pI6 "
278 "dest: %pI6\n", __func__,
279 &ha->ip_config.ipv6_addr1,
280 ipaddr));
281 }
282 rval = qla4xxx_ping_iocb(ha, options, payload_size,
283 pid, ipaddr);
284 if (rval)
285 rval = -EINVAL;
286 }
287 } else
288 rval = -ENOSYS;
289exit_send_ping:
290 return rval;
291}
292
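A hypothetical caller-side sketch for the IPv4 path (userspace actually reaches this through the transport's .send_ping hook; shost and the address/pid values are placeholders):

	struct sockaddr_in dst = {
		.sin_family      = AF_INET,
		.sin_addr.s_addr = htonl(0xc0a80101),	/* 192.168.1.1, placeholder */
	};

	/* iface 0, IPv4, 56-byte payload, caller-chosen ping id 1 */
	qla4xxx_send_ping(shost, 0, ISCSI_IFACE_TYPE_IPV4, 56, 1,
			  (struct sockaddr *)&dst);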
201static umode_t ql4_attr_is_visible(int param_type, int param) 293static umode_t ql4_attr_is_visible(int param_type, int param)
202{ 294{
203 switch (param_type) { 295 switch (param_type) {
@@ -206,6 +298,8 @@ static umode_t ql4_attr_is_visible(int param_type, int param)
206 case ISCSI_HOST_PARAM_HWADDRESS: 298 case ISCSI_HOST_PARAM_HWADDRESS:
207 case ISCSI_HOST_PARAM_IPADDRESS: 299 case ISCSI_HOST_PARAM_IPADDRESS:
208 case ISCSI_HOST_PARAM_INITIATOR_NAME: 300 case ISCSI_HOST_PARAM_INITIATOR_NAME:
301 case ISCSI_HOST_PARAM_PORT_STATE:
302 case ISCSI_HOST_PARAM_PORT_SPEED:
209 return S_IRUGO; 303 return S_IRUGO;
210 default: 304 default:
211 return 0; 305 return 0;
@@ -225,6 +319,12 @@ static umode_t ql4_attr_is_visible(int param_type, int param)
225 case ISCSI_PARAM_MAX_RECV_DLENGTH: 319 case ISCSI_PARAM_MAX_RECV_DLENGTH:
226 case ISCSI_PARAM_MAX_XMIT_DLENGTH: 320 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
227 case ISCSI_PARAM_IFACE_NAME: 321 case ISCSI_PARAM_IFACE_NAME:
322 case ISCSI_PARAM_CHAP_OUT_IDX:
323 case ISCSI_PARAM_CHAP_IN_IDX:
324 case ISCSI_PARAM_USERNAME:
325 case ISCSI_PARAM_PASSWORD:
326 case ISCSI_PARAM_USERNAME_IN:
327 case ISCSI_PARAM_PASSWORD_IN:
228 return S_IRUGO; 328 return S_IRUGO;
229 default: 329 default:
230 return 0; 330 return 0;
@@ -255,6 +355,189 @@ static umode_t ql4_attr_is_visible(int param_type, int param)
255 return 0; 355 return 0;
256} 356}
257 357
358static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
359 uint32_t *num_entries, char *buf)
360{
361 struct scsi_qla_host *ha = to_qla_host(shost);
362 struct ql4_chap_table *chap_table;
363 struct iscsi_chap_rec *chap_rec;
364 int max_chap_entries = 0;
365 int valid_chap_entries = 0;
366 int ret = 0, i;
367
368 if (is_qla8022(ha))
369 max_chap_entries = (ha->hw.flt_chap_size / 2) /
370 sizeof(struct ql4_chap_table);
371 else
372 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
373
374 ql4_printk(KERN_INFO, ha, "%s: num_entries = %d, CHAP idx = %d\n",
375 __func__, *num_entries, chap_tbl_idx);
376
377 if (!buf) {
378 ret = -ENOMEM;
379 goto exit_get_chap_list;
380 }
381
382 chap_rec = (struct iscsi_chap_rec *) buf;
383 mutex_lock(&ha->chap_sem);
384 for (i = chap_tbl_idx; i < max_chap_entries; i++) {
385 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
386 if (chap_table->cookie !=
387 __constant_cpu_to_le16(CHAP_VALID_COOKIE))
388 continue;
389
390 chap_rec->chap_tbl_idx = i;
391 strncpy(chap_rec->username, chap_table->name,
392 ISCSI_CHAP_AUTH_NAME_MAX_LEN);
393 strncpy(chap_rec->password, chap_table->secret,
394 QL4_CHAP_MAX_SECRET_LEN);
395 chap_rec->password_length = chap_table->secret_len;
396
397 if (chap_table->flags & BIT_7) /* local */
398 chap_rec->chap_type = CHAP_TYPE_OUT;
399
400 if (chap_table->flags & BIT_6) /* peer */
401 chap_rec->chap_type = CHAP_TYPE_IN;
402
403 chap_rec++;
404
405 valid_chap_entries++;
406 if (valid_chap_entries == *num_entries)
407 break;
408 else
409 continue;
410 }
411 mutex_unlock(&ha->chap_sem);
412
413exit_get_chap_list:
414 ql4_printk(KERN_INFO, ha, "%s: Valid CHAP Entries = %d\n",
415 __func__, valid_chap_entries);
416 *num_entries = valid_chap_entries;
417 return ret;
418}
419
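A hypothetical consumer of the filled buffer (userspace reaches this through the transport's .get_chap hook; shost and buf are placeholders, and num_entries is in/out as shown above):

	uint32_t n = 16;	/* in: buffer holds 16 records; out: valid count */
	struct iscsi_chap_rec *rec = (struct iscsi_chap_rec *)buf;
	uint32_t i;

	if (qla4xxx_get_chap_list(shost, 0, &n, buf) == 0)
		for (i = 0; i < n; i++)
			printk(KERN_INFO "CHAP[%d] user %s (%s)\n",
			       rec[i].chap_tbl_idx, rec[i].username,
			       rec[i].chap_type == CHAP_TYPE_OUT ? "OUT" : "IN");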
420static int __qla4xxx_is_chap_active(struct device *dev, void *data)
421{
422 int ret = 0;
423 uint16_t *chap_tbl_idx = (uint16_t *) data;
424 struct iscsi_cls_session *cls_session;
425 struct iscsi_session *sess;
426 struct ddb_entry *ddb_entry;
427
428 if (!iscsi_is_session_dev(dev))
429 goto exit_is_chap_active;
430
431 cls_session = iscsi_dev_to_session(dev);
432 sess = cls_session->dd_data;
433 ddb_entry = sess->dd_data;
434
435 if (iscsi_session_chkready(cls_session))
436 goto exit_is_chap_active;
437
438 if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)
439 ret = 1;
440
441exit_is_chap_active:
442 return ret;
443}
444
445static int qla4xxx_is_chap_active(struct Scsi_Host *shost,
446 uint16_t chap_tbl_idx)
447{
448 int ret = 0;
449
450 ret = device_for_each_child(&shost->shost_gendev, &chap_tbl_idx,
451 __qla4xxx_is_chap_active);
452
453 return ret;
454}
455
456static int qla4xxx_delete_chap(struct Scsi_Host *shost, uint16_t chap_tbl_idx)
457{
458 struct scsi_qla_host *ha = to_qla_host(shost);
459 struct ql4_chap_table *chap_table;
460 dma_addr_t chap_dma;
461 int max_chap_entries = 0;
462 uint32_t offset = 0;
463 uint32_t chap_size;
464 int ret = 0;
465
466 chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
467 if (chap_table == NULL)
468 return -ENOMEM;
469
470 memset(chap_table, 0, sizeof(struct ql4_chap_table));
471
472 if (is_qla8022(ha))
473 max_chap_entries = (ha->hw.flt_chap_size / 2) /
474 sizeof(struct ql4_chap_table);
475 else
476 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
477
478 if (chap_tbl_idx > max_chap_entries) {
479 ret = -EINVAL;
480 goto exit_delete_chap;
481 }
482
 483	/* Check if chap index is in use.
 484	 * If chap is in use, don't delete the chap entry */
485 ret = qla4xxx_is_chap_active(shost, chap_tbl_idx);
486 if (ret) {
487 ql4_printk(KERN_INFO, ha, "CHAP entry %d is in use, cannot "
488 "delete from flash\n", chap_tbl_idx);
489 ret = -EBUSY;
490 goto exit_delete_chap;
491 }
492
493 chap_size = sizeof(struct ql4_chap_table);
494 if (is_qla40XX(ha))
495 offset = FLASH_CHAP_OFFSET | (chap_tbl_idx * chap_size);
496 else {
497 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
498 /* flt_chap_size is CHAP table size for both ports
499 * so divide it by 2 to calculate the offset for second port
500 */
501 if (ha->port_num == 1)
502 offset += (ha->hw.flt_chap_size / 2);
503 offset += (chap_tbl_idx * chap_size);
504 }
505
506 ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
507 if (ret != QLA_SUCCESS) {
508 ret = -EINVAL;
509 goto exit_delete_chap;
510 }
511
 512	DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: 0x%x\n",
513 __le16_to_cpu(chap_table->cookie)));
514
515 if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
516 ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
517 goto exit_delete_chap;
518 }
519
520 chap_table->cookie = __constant_cpu_to_le16(0xFFFF);
521
522 offset = FLASH_CHAP_OFFSET |
523 (chap_tbl_idx * sizeof(struct ql4_chap_table));
524 ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size,
525 FLASH_OPT_RMW_COMMIT);
526 if (ret == QLA_SUCCESS && ha->chap_list) {
527 mutex_lock(&ha->chap_sem);
528 /* Update ha chap_list cache */
529 memcpy((struct ql4_chap_table *)ha->chap_list + chap_tbl_idx,
530 chap_table, sizeof(struct ql4_chap_table));
531 mutex_unlock(&ha->chap_sem);
532 }
533 if (ret != QLA_SUCCESS)
534 ret = -EINVAL;
535
536exit_delete_chap:
537 dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
538 return ret;
539}
540
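A worked example of the 8022 flash-offset math above, with hypothetical values flt_region_chap = 0x1000 (a word address, hence the << 2), flt_chap_size = 0x2000, and chap_tbl_idx = 3 on port 1:

	uint32_t chap_size = sizeof(struct ql4_chap_table);
	uint32_t offset;

	offset  = FLASH_RAW_ACCESS_ADDR + (0x1000 << 2);	/* region base in bytes */
	offset += 0x2000 / 2;		/* port 1 owns the second half of the table */
	offset += 3 * chap_size;	/* entry 3 within that half */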
258static int qla4xxx_get_iface_param(struct iscsi_iface *iface, 541static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
259 enum iscsi_param_type param_type, 542 enum iscsi_param_type param_type,
260 int param, char *buf) 543 int param, char *buf)
@@ -548,6 +831,43 @@ static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
548 return ret; 831 return ret;
549} 832}
550 833
834static void qla4xxx_set_port_speed(struct Scsi_Host *shost)
835{
836 struct scsi_qla_host *ha = to_qla_host(shost);
837 struct iscsi_cls_host *ihost = shost_priv(shost);
838 uint32_t speed = ISCSI_PORT_SPEED_UNKNOWN;
839
840 qla4xxx_get_firmware_state(ha);
841
842 switch (ha->addl_fw_state & 0x0F00) {
843 case FW_ADDSTATE_LINK_SPEED_10MBPS:
844 speed = ISCSI_PORT_SPEED_10MBPS;
845 break;
846 case FW_ADDSTATE_LINK_SPEED_100MBPS:
847 speed = ISCSI_PORT_SPEED_100MBPS;
848 break;
849 case FW_ADDSTATE_LINK_SPEED_1GBPS:
850 speed = ISCSI_PORT_SPEED_1GBPS;
851 break;
852 case FW_ADDSTATE_LINK_SPEED_10GBPS:
853 speed = ISCSI_PORT_SPEED_10GBPS;
854 break;
855 }
856 ihost->port_speed = speed;
857}
858
859static void qla4xxx_set_port_state(struct Scsi_Host *shost)
860{
861 struct scsi_qla_host *ha = to_qla_host(shost);
862 struct iscsi_cls_host *ihost = shost_priv(shost);
863 uint32_t state = ISCSI_PORT_STATE_DOWN;
864
865 if (test_bit(AF_LINK_UP, &ha->flags))
866 state = ISCSI_PORT_STATE_UP;
867
868 ihost->port_state = state;
869}
870
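The link speed is a one-hot field in bits 8-11 of addl_fw_state, so the mask-and-switch above decodes it directly; a worked example:

/*
 * addl_fw_state = 0x0410: FW_ADDSTATE_LINK_UP (0x0010) plus
 * FW_ADDSTATE_LINK_SPEED_1GBPS (0x0400).  (0x0410 & 0x0F00) == 0x0400,
 * so qla4xxx_set_port_speed() reports ISCSI_PORT_SPEED_1GBPS.
 */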
551static int qla4xxx_host_get_param(struct Scsi_Host *shost, 871static int qla4xxx_host_get_param(struct Scsi_Host *shost,
552 enum iscsi_host_param param, char *buf) 872 enum iscsi_host_param param, char *buf)
553{ 873{
@@ -564,6 +884,14 @@ static int qla4xxx_host_get_param(struct Scsi_Host *shost,
564 case ISCSI_HOST_PARAM_INITIATOR_NAME: 884 case ISCSI_HOST_PARAM_INITIATOR_NAME:
565 len = sprintf(buf, "%s\n", ha->name_string); 885 len = sprintf(buf, "%s\n", ha->name_string);
566 break; 886 break;
887 case ISCSI_HOST_PARAM_PORT_STATE:
888 qla4xxx_set_port_state(shost);
889 len = sprintf(buf, "%s\n", iscsi_get_port_state_name(shost));
890 break;
891 case ISCSI_HOST_PARAM_PORT_SPEED:
892 qla4xxx_set_port_speed(shost);
893 len = sprintf(buf, "%s\n", iscsi_get_port_speed_name(shost));
894 break;
567 default: 895 default:
568 return -ENOSYS; 896 return -ENOSYS;
569 } 897 }
@@ -968,6 +1296,41 @@ exit_init_fw_cb:
968 return rval; 1296 return rval;
969} 1297}
970 1298
1299static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
1300 enum iscsi_param param, char *buf)
1301{
1302 struct iscsi_session *sess = cls_sess->dd_data;
1303 struct ddb_entry *ddb_entry = sess->dd_data;
1304 struct scsi_qla_host *ha = ddb_entry->ha;
1305 int rval, len;
1306 uint16_t idx;
1307
1308 switch (param) {
1309 case ISCSI_PARAM_CHAP_IN_IDX:
1310 rval = qla4xxx_get_chap_index(ha, sess->username_in,
1311 sess->password_in, BIDI_CHAP,
1312 &idx);
1313 if (rval)
1314 return -EINVAL;
1315
1316 len = sprintf(buf, "%hu\n", idx);
1317 break;
1318 case ISCSI_PARAM_CHAP_OUT_IDX:
1319 rval = qla4xxx_get_chap_index(ha, sess->username,
1320 sess->password, LOCAL_CHAP,
1321 &idx);
1322 if (rval)
1323 return -EINVAL;
1324
1325 len = sprintf(buf, "%hu\n", idx);
1326 break;
1327 default:
1328 return iscsi_session_get_param(cls_sess, param, buf);
1329 }
1330
1331 return len;
1332}
1333
971static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn, 1334static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
972 enum iscsi_param param, char *buf) 1335 enum iscsi_param param, char *buf)
973{ 1336{
@@ -1506,13 +1869,17 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
1506{ 1869{
1507 int buflen = 0; 1870 int buflen = 0;
1508 struct iscsi_session *sess; 1871 struct iscsi_session *sess;
1872 struct ddb_entry *ddb_entry;
1509 struct iscsi_conn *conn; 1873 struct iscsi_conn *conn;
1510 char ip_addr[DDB_IPADDR_LEN]; 1874 char ip_addr[DDB_IPADDR_LEN];
1511 uint16_t options = 0; 1875 uint16_t options = 0;
1512 1876
1513 sess = cls_sess->dd_data; 1877 sess = cls_sess->dd_data;
1878 ddb_entry = sess->dd_data;
1514 conn = cls_conn->dd_data; 1879 conn = cls_conn->dd_data;
1515 1880
1881 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
1882
1516 conn->max_recv_dlength = BYTE_UNITS * 1883 conn->max_recv_dlength = BYTE_UNITS *
1517 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); 1884 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
1518 1885
@@ -1552,6 +1919,8 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
1552 (char *)ha->name_string, buflen); 1919 (char *)ha->name_string, buflen);
1553 iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS, 1920 iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
1554 (char *)ip_addr, buflen); 1921 (char *)ip_addr, buflen);
1922 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
1923 (char *)fw_ddb_entry->iscsi_alias, buflen);
1555} 1924}
1556 1925
1557void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha, 1926void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
@@ -1638,6 +2007,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
1638 le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); 2007 le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
1639 2008
1640 /* Update params */ 2009 /* Update params */
2010 ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
1641 conn->max_recv_dlength = BYTE_UNITS * 2011 conn->max_recv_dlength = BYTE_UNITS *
1642 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); 2012 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
1643 2013
@@ -1666,6 +2036,9 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
1666 memcpy(sess->initiatorname, ha->name_string, 2036 memcpy(sess->initiatorname, ha->name_string,
1667 min(sizeof(ha->name_string), sizeof(sess->initiatorname))); 2037 min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
1668 2038
2039 iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
2040 (char *)fw_ddb_entry->iscsi_alias, 0);
2041
1669exit_session_conn_param: 2042exit_session_conn_param:
1670 if (fw_ddb_entry) 2043 if (fw_ddb_entry)
1671 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 2044 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
@@ -2113,7 +2486,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
2113 halt_status = qla4_8xxx_rd_32(ha, 2486 halt_status = qla4_8xxx_rd_32(ha,
2114 QLA82XX_PEG_HALT_STATUS1); 2487 QLA82XX_PEG_HALT_STATUS1);
2115 2488
2116 if (LSW(MSB(halt_status)) == 0x67) 2489 if (QLA82XX_FWERROR_CODE(halt_status) == 0x67)
2117 ql4_printk(KERN_ERR, ha, "%s:" 2490 ql4_printk(KERN_ERR, ha, "%s:"
2118 " Firmware aborted with" 2491 " Firmware aborted with"
2119 " error code 0x00006700." 2492 " error code 0x00006700."
@@ -2230,6 +2603,10 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
2230 } 2603 }
2231 } 2604 }
2232 2605
2606 /* Process any deferred work. */
2607 if (!list_empty(&ha->work_list))
2608 start_dpc++;
2609
2233 /* Wakeup the dpc routine for this adapter, if needed. */ 2610 /* Wakeup the dpc routine for this adapter, if needed. */
2234 if (start_dpc || 2611 if (start_dpc ||
2235 test_bit(DPC_RESET_HA, &ha->dpc_flags) || 2612 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
@@ -2795,6 +3172,109 @@ void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
2795 queue_work(ha->dpc_thread, &ha->dpc_work); 3172 queue_work(ha->dpc_thread, &ha->dpc_work);
2796} 3173}
2797 3174
3175static struct qla4_work_evt *
3176qla4xxx_alloc_work(struct scsi_qla_host *ha, uint32_t data_size,
3177 enum qla4_work_type type)
3178{
3179 struct qla4_work_evt *e;
3180 uint32_t size = sizeof(struct qla4_work_evt) + data_size;
3181
3182 e = kzalloc(size, GFP_ATOMIC);
3183 if (!e)
3184 return NULL;
3185
3186 INIT_LIST_HEAD(&e->list);
3187 e->type = type;
3188 return e;
3189}
3190
3191static void qla4xxx_post_work(struct scsi_qla_host *ha,
3192 struct qla4_work_evt *e)
3193{
3194 unsigned long flags;
3195
3196 spin_lock_irqsave(&ha->work_lock, flags);
3197 list_add_tail(&e->list, &ha->work_list);
3198 spin_unlock_irqrestore(&ha->work_lock, flags);
3199 qla4xxx_wake_dpc(ha);
3200}
3201
3202int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
3203 enum iscsi_host_event_code aen_code,
3204 uint32_t data_size, uint8_t *data)
3205{
3206 struct qla4_work_evt *e;
3207
3208 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_AEN);
3209 if (!e)
3210 return QLA_ERROR;
3211
3212 e->u.aen.code = aen_code;
3213 e->u.aen.data_size = data_size;
3214 memcpy(e->u.aen.data, data, data_size);
3215
3216 qla4xxx_post_work(ha, e);
3217
3218 return QLA_SUCCESS;
3219}
3220
3221int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha,
3222 uint32_t status, uint32_t pid,
3223 uint32_t data_size, uint8_t *data)
3224{
3225 struct qla4_work_evt *e;
3226
3227 e = qla4xxx_alloc_work(ha, data_size, QLA4_EVENT_PING_STATUS);
3228 if (!e)
3229 return QLA_ERROR;
3230
3231 e->u.ping.status = status;
3232 e->u.ping.pid = pid;
3233 e->u.ping.data_size = data_size;
3234 memcpy(e->u.ping.data, data, data_size);
3235
3236 qla4xxx_post_work(ha, e);
3237
3238 return QLA_SUCCESS;
3239}
3240
3241static void qla4xxx_do_work(struct scsi_qla_host *ha)
3242{
3243 struct qla4_work_evt *e, *tmp;
3244 unsigned long flags;
3245 LIST_HEAD(work);
3246
3247 spin_lock_irqsave(&ha->work_lock, flags);
3248 list_splice_init(&ha->work_list, &work);
3249 spin_unlock_irqrestore(&ha->work_lock, flags);
3250
3251 list_for_each_entry_safe(e, tmp, &work, list) {
3252 list_del_init(&e->list);
3253
3254 switch (e->type) {
3255 case QLA4_EVENT_AEN:
3256 iscsi_post_host_event(ha->host_no,
3257 &qla4xxx_iscsi_transport,
3258 e->u.aen.code,
3259 e->u.aen.data_size,
3260 e->u.aen.data);
3261 break;
3262 case QLA4_EVENT_PING_STATUS:
3263 iscsi_ping_comp_event(ha->host_no,
3264 &qla4xxx_iscsi_transport,
3265 e->u.ping.status,
3266 e->u.ping.pid,
3267 e->u.ping.data_size,
3268 e->u.ping.data);
3269 break;
3270 default:
3271 ql4_printk(KERN_WARNING, ha, "event type: 0x%x not "
3272 "supported", e->type);
3273 }
3274 kfree(e);
3275 }
3276}
3277
2798/** 3278/**
2799 * qla4xxx_do_dpc - dpc routine 3279 * qla4xxx_do_dpc - dpc routine
2800 * @data: in our case pointer to adapter structure 3280 * @data: in our case pointer to adapter structure
@@ -2826,6 +3306,9 @@ static void qla4xxx_do_dpc(struct work_struct *work)
2826 return; 3306 return;
2827 } 3307 }
2828 3308
3309 /* post events to application */
3310 qla4xxx_do_work(ha);
3311
2829 if (is_qla8022(ha)) { 3312 if (is_qla8022(ha)) {
2830 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) { 3313 if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
2831 qla4_8xxx_idc_lock(ha); 3314 qla4_8xxx_idc_lock(ha);
@@ -3341,9 +3824,8 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
3341 /* Check Boot Mode */ 3824 /* Check Boot Mode */
3342 val = rd_nvram_byte(ha, addr); 3825 val = rd_nvram_byte(ha, addr);
3343 if (!(val & 0x07)) { 3826 if (!(val & 0x07)) {
3344 DEBUG2(ql4_printk(KERN_ERR, ha, 3827 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Adapter boot "
3345 "%s: Failed Boot options : 0x%x\n", 3828 "options : 0x%x\n", __func__, val));
3346 __func__, val));
3347 ret = QLA_ERROR; 3829 ret = QLA_ERROR;
3348 goto exit_boot_info; 3830 goto exit_boot_info;
3349 } 3831 }
@@ -3388,9 +3870,8 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
3388 } 3870 }
3389 /* Check Boot Mode */ 3871 /* Check Boot Mode */
3390 if (!(buf[1] & 0x07)) { 3872 if (!(buf[1] & 0x07)) {
3391 DEBUG2(ql4_printk(KERN_INFO, ha, 3873 DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware boot options"
3392 "Failed: Boot options : 0x%x\n", 3874 " : 0x%x\n", buf[1]));
3393 buf[1]));
3394 ret = QLA_ERROR; 3875 ret = QLA_ERROR;
3395 goto exit_boot_info_free; 3876 goto exit_boot_info_free;
3396 } 3877 }
@@ -3411,12 +3892,11 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
3411 " target ID %d\n", __func__, ddb_index[0], 3892 " target ID %d\n", __func__, ddb_index[0],
3412 ddb_index[1])); 3893 ddb_index[1]));
3413 3894
3414 ha->pri_ddb_idx = ddb_index[0];
3415 ha->sec_ddb_idx = ddb_index[1];
3416
3417exit_boot_info_free: 3895exit_boot_info_free:
3418 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma); 3896 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
3419exit_boot_info: 3897exit_boot_info:
3898 ha->pri_ddb_idx = ddb_index[0];
3899 ha->sec_ddb_idx = ddb_index[1];
3420 return ret; 3900 return ret;
3421} 3901}
3422 3902
@@ -3497,8 +3977,8 @@ static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
3497 3977
3498 if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry, 3978 if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
3499 fw_ddb_entry_dma, ddb_index)) { 3979 fw_ddb_entry_dma, ddb_index)) {
3500 DEBUG2(ql4_printk(KERN_ERR, ha, 3980 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: No Flash DDB found at "
3501 "%s: Flash DDB read Failed\n", __func__)); 3981 "index [%d]\n", __func__, ddb_index));
3502 ret = QLA_ERROR; 3982 ret = QLA_ERROR;
3503 goto exit_boot_target; 3983 goto exit_boot_target;
3504 } 3984 }
@@ -3576,8 +4056,8 @@ static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
3576 ddb_index[1] = 0xffff; 4056 ddb_index[1] = 0xffff;
3577 ret = get_fw_boot_info(ha, ddb_index); 4057 ret = get_fw_boot_info(ha, ddb_index);
3578 if (ret != QLA_SUCCESS) { 4058 if (ret != QLA_SUCCESS) {
3579 DEBUG2(ql4_printk(KERN_ERR, ha, 4059 DEBUG2(ql4_printk(KERN_INFO, ha,
3580 "%s: Failed to set boot info.\n", __func__)); 4060 "%s: No boot target configured.\n", __func__));
3581 return ret; 4061 return ret;
3582 } 4062 }
3583 4063
@@ -3590,8 +4070,8 @@ static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
3590 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess), 4070 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
3591 ddb_index[0]); 4071 ddb_index[0]);
3592 if (rval != QLA_SUCCESS) { 4072 if (rval != QLA_SUCCESS) {
3593 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get " 4073 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary boot target not "
3594 "primary target\n", __func__)); 4074 "configured\n", __func__));
3595 } else 4075 } else
3596 ret = QLA_SUCCESS; 4076 ret = QLA_SUCCESS;
3597 4077
@@ -3602,8 +4082,8 @@ sec_target:
 	rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
 				       ddb_index[1]);
 	if (rval != QLA_SUCCESS) {
-		DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get "
-				  "secondary target\n", __func__));
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Secondary boot target not"
+				  " configured\n", __func__));
 	} else
 		ret = QLA_SUCCESS;
 
@@ -3772,11 +4252,13 @@ static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
 	sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
 
 	tddb->port = le16_to_cpu(fw_ddb_entry->port);
+	memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0], sizeof(tddb->isid));
 }
 
 static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
 				     struct ql4_tuple_ddb *old_tddb,
-				     struct ql4_tuple_ddb *new_tddb)
+				     struct ql4_tuple_ddb *new_tddb,
+				     uint8_t is_isid_compare)
 {
 	if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
 		return QLA_ERROR;
@@ -3787,6 +4269,26 @@ static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
 	if (old_tddb->port != new_tddb->port)
 		return QLA_ERROR;
 
+	/* For multi sessions, driver generates the ISID, so do not compare
+	 * ISID in reset path since it would be a comparison between the
+	 * driver generated ISID and firmware generated ISID. This could
+	 * lead to adding duplicated DDBs in the list as driver generated
+	 * ISID would not match firmware generated ISID.
+	 */
+	if (is_isid_compare) {
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: old ISID [%02x%02x%02x"
+			"%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n",
+			__func__, old_tddb->isid[5], old_tddb->isid[4],
+			old_tddb->isid[3], old_tddb->isid[2], old_tddb->isid[1],
+			old_tddb->isid[0], new_tddb->isid[5], new_tddb->isid[4],
+			new_tddb->isid[3], new_tddb->isid[2], new_tddb->isid[1],
+			new_tddb->isid[0]));
+
+		if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
+			   sizeof(old_tddb->isid)))
+			return QLA_ERROR;
+	}
+
 	DEBUG2(ql4_printk(KERN_INFO, ha,
 			  "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]",
 			  old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
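The comment in this hunk carries the key reasoning: for multi-session setups the driver generates the ISID, so comparing a driver-generated ISID against a firmware-generated one on the reset path could never match and would duplicate DDBs. A standalone sketch of the resulting tuple comparison, using hypothetical stand-in types rather than the driver's ql4_tuple_ddb:

/*
 * Sketch only: simplified stand-ins for the driver's tuple structure.
 * is_isid_compare mirrors the hunk above: the ISID is only trusted
 * when both tuples came from the same generator (the flash DDB path).
 */
#include <stdint.h>
#include <string.h>

struct tuple_ddb {
	char     iscsi_name[224];
	char     ip_addr[64];
	uint16_t port;
	uint8_t  isid[6];
};

static int tuples_match(const struct tuple_ddb *a, const struct tuple_ddb *b,
			int is_isid_compare)
{
	if (strcmp(a->iscsi_name, b->iscsi_name))
		return 0;
	if (strcmp(a->ip_addr, b->ip_addr))
		return 0;
	if (a->port != b->port)
		return 0;
	/* Skip the ISID on the reset path; it cannot match there. */
	if (is_isid_compare && memcmp(a->isid, b->isid, sizeof(a->isid)))
		return 0;
	return 1;
}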
@@ -3829,7 +4331,7 @@ static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
 			continue;
 
 		qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
-		if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+		if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, false)) {
 			ret = QLA_SUCCESS; /* found */
 			goto exit_check;
 		}
@@ -3872,7 +4374,7 @@ static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
 
 	list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
 		qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb);
-		if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+		if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true)) {
 			ret = QLA_SUCCESS; /* found */
 			goto exit_check;
 		}
@@ -4038,6 +4540,10 @@ static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
 		if (ret == QLA_ERROR)
 			break;
 
+		/* Ignore DDB if invalid state (unassigned) */
+		if (state == DDB_DS_UNASSIGNED)
+			goto continue_next_st;
+
 		/* Check if ST, add to the list_st */
 		if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
 			goto continue_next_st;
@@ -4397,6 +4903,9 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
 
 	spin_lock_init(&ha->hardware_lock);
 
+	/* Initialize work list */
+	INIT_LIST_HEAD(&ha->work_list);
+
 	/* Allocate dma buffers */
 	if (qla4xxx_mem_alloc(ha)) {
 		ql4_printk(KERN_WARNING, ha,
@@ -4524,8 +5033,8 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
 		    ha->patch_number, ha->build_number);
 
 	if (qla4xxx_setup_boot_info(ha))
-		ql4_printk(KERN_ERR, ha, "%s:ISCSI boot info setup failed\n",
-			   __func__);
+		ql4_printk(KERN_ERR, ha,
+			   "%s: No iSCSI boot target configured\n", __func__);
 
 	/* Perform the build ddb list and login to each */
 	qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 133989b3a9f..ede9af94414 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION	"5.02.00-k12"
+#define QLA4XXX_DRIVER_VERSION	"5.02.00-k15"
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 2aeb2e9c4d3..07322ecff90 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -782,12 +782,6 @@ static void scsi_done(struct scsi_cmnd *cmd)
 	blk_complete_request(cmd->request);
 }
 
-/* Move this to a header if it becomes more generally useful */
-static struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
-{
-	return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
-}
-
 /**
  * scsi_finish_command - cleanup and pass command back to upper layer
  * @cmd: the command
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 68da6c092f6..591856131c4 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -126,6 +126,7 @@ static const char * scsi_debug_version_date = "20100324";
 #define SCSI_DEBUG_OPT_TRANSPORT_ERR	16
 #define SCSI_DEBUG_OPT_DIF_ERR		32
 #define SCSI_DEBUG_OPT_DIX_ERR		64
+#define SCSI_DEBUG_OPT_MAC_TIMEOUT	128
 /* When "every_nth" > 0 then modulo "every_nth" commands:
  * - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
  * - a RECOVERED_ERROR is simulated on successful read and write
@@ -2220,7 +2221,7 @@ static int resp_get_lba_status(struct scsi_cmnd * scmd,
 	mapped = map_state(lba, &num);
 
 	memset(arr, 0, SDEBUG_GET_LBA_STATUS_LEN);
-	put_unaligned_be32(16, &arr[0]);	/* Parameter Data Length */
+	put_unaligned_be32(20, &arr[0]);	/* Parameter Data Length */
 	put_unaligned_be64(lba, &arr[8]);	/* LBA */
 	put_unaligned_be32(num, &arr[16]);	/* Number of blocks */
 	arr[20] = !mapped;		/* mapped = 0, unmapped = 1 */
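The one-byte fix above follows from the GET LBA STATUS parameter data layout in SBC: the Parameter Data Length field counts every byte that follows the field itself, so the 4 remaining header bytes plus one 16-byte descriptor give 20, not 16. A compilable sketch of the same arithmetic, with hypothetical big-endian helpers standing in for put_unaligned_be32/64:

#include <stdint.h>
#include <string.h>

static void put_be32(uint32_t v, uint8_t *p)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

static void put_be64(uint64_t v, uint8_t *p)
{
	put_be32(v >> 32, p);
	put_be32(v & 0xffffffff, p + 4);
}

/* Build a 24-byte GET LBA STATUS response with a single descriptor. */
static void build_lba_status(uint8_t arr[24], uint64_t lba, uint32_t num,
			     int mapped)
{
	memset(arr, 0, 24);
	put_be32(20, &arr[0]);	/* bytes following the length field */
	put_be64(lba, &arr[8]);	/* first LBA of the descriptor */
	put_be32(num, &arr[16]);	/* number of logical blocks */
	arr[20] = !mapped;	/* provisioning status: 0 = mapped */
}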
@@ -3615,6 +3616,9 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
 			scsi_debug_every_nth = -1;
 		if (SCSI_DEBUG_OPT_TIMEOUT & scsi_debug_opts)
 			return 0; /* ignore command causing timeout */
+		else if (SCSI_DEBUG_OPT_MAC_TIMEOUT & scsi_debug_opts &&
+			 scsi_medium_access_command(SCpnt))
+			return 0; /* time out reads and writes */
 		else if (SCSI_DEBUG_OPT_RECOVERED_ERR & scsi_debug_opts)
 			inj_recovered = 1; /* to reads and writes below */
 		else if (SCSI_DEBUG_OPT_TRANSPORT_ERR & scsi_debug_opts)
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 5f84a148eb1..2cfcbffa41f 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -30,6 +30,7 @@
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_dbg.h>
 #include <scsi/scsi_device.h>
+#include <scsi/scsi_driver.h>
 #include <scsi/scsi_eh.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_host.h>
@@ -141,11 +142,11 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
 	else if (host->hostt->eh_timed_out)
 		rtn = host->hostt->eh_timed_out(scmd);
 
+	scmd->result |= DID_TIME_OUT << 16;
+
 	if (unlikely(rtn == BLK_EH_NOT_HANDLED &&
-		     !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
-		scmd->result |= DID_TIME_OUT << 16;
+		     !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)))
 		rtn = BLK_EH_HANDLED;
-	}
 
 	return rtn;
 }
@@ -366,6 +367,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
 		return TARGET_ERROR;
 
 	case ILLEGAL_REQUEST:
+		if (sshdr.asc == 0x20 || /* Invalid command operation code */
+		    sshdr.asc == 0x21 || /* Logical block address out of range */
+		    sshdr.asc == 0x24 || /* Invalid field in cdb */
+		    sshdr.asc == 0x26) { /* Parameter value invalid */
+			return TARGET_ERROR;
+		}
+		return SUCCESS;
+
 	default:
 		return SUCCESS;
 	}
@@ -770,6 +779,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
 			     int cmnd_size, int timeout, unsigned sense_bytes)
 {
 	struct scsi_device *sdev = scmd->device;
+	struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
 	struct Scsi_Host *shost = sdev->host;
 	DECLARE_COMPLETION_ONSTACK(done);
 	unsigned long timeleft;
@@ -824,6 +834,10 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
 	}
 
 	scsi_eh_restore_cmnd(scmd, &ses);
+
+	if (sdrv->eh_action)
+		rtn = sdrv->eh_action(scmd, cmnd, cmnd_size, rtn);
+
 	return rtn;
 }
 
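Together with the scsi_times_out() hunk earlier in this file, this gives the owning upper-level driver a chance to inspect or override the error handler's verdict once the probe command finishes; sd's sd_eh_action() further down is the first user. The dispatch shape, reduced to a compilable sketch with hypothetical types (the real hook lives in struct scsi_driver):

struct eh_cmd;	/* opaque stand-in for struct scsi_cmnd */

struct ul_driver {
	/* returns the (possibly adjusted) disposition */
	int (*eh_action)(struct eh_cmd *cmd, unsigned char *eh_cmnd,
			 int eh_cmnd_len, int disposition);
};

static int finish_eh_cmnd(struct ul_driver *drv, struct eh_cmd *cmd,
			  unsigned char *eh_cmnd, int eh_cmnd_len, int rtn)
{
	if (drv->eh_action)	/* the hook is optional */
		rtn = drv->eh_action(cmd, eh_cmnd, eh_cmnd_len, rtn);
	return rtn;
}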
@@ -1540,7 +1554,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
 			 * Need to modify host byte to signal a
 			 * permanent target failure
 			 */
-			scmd->result |= (DID_TARGET_FAILURE << 16);
+			set_host_byte(scmd, DID_TARGET_FAILURE);
 			rtn = SUCCESS;
 		}
 		/* if rtn == FAILED, we have no sense information;
@@ -1560,7 +1574,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
 	case RESERVATION_CONFLICT:
 		sdev_printk(KERN_INFO, scmd->device,
 			    "reservation conflict\n");
-		scmd->result |= (DID_NEXUS_FAILURE << 16);
+		set_host_byte(scmd, DID_NEXUS_FAILURE);
 		return SUCCESS; /* causes immediate i/o error */
 	default:
 		return FAILED;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index a33b2b66da6..ead6405f3e5 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -682,11 +682,11 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
682 error = -ENOLINK; 682 error = -ENOLINK;
683 break; 683 break;
684 case DID_TARGET_FAILURE: 684 case DID_TARGET_FAILURE:
685 cmd->result |= (DID_OK << 16); 685 set_host_byte(cmd, DID_OK);
686 error = -EREMOTEIO; 686 error = -EREMOTEIO;
687 break; 687 break;
688 case DID_NEXUS_FAILURE: 688 case DID_NEXUS_FAILURE:
689 cmd->result |= (DID_OK << 16); 689 set_host_byte(cmd, DID_OK);
690 error = -EBADE; 690 error = -EBADE;
691 break; 691 break;
692 default: 692 default:
@@ -880,6 +880,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
 			     cmd->cmnd[0] == WRITE_SAME)) {
 			description = "Discard failure";
 			action = ACTION_FAIL;
+			error = -EREMOTEIO;
 		} else
 			action = ACTION_FAIL;
 		break;
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index f59d4a05ecd..80fbe2ac0b4 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -313,7 +313,7 @@ static void fc_scsi_scan_rport(struct work_struct *work);
 #define FC_STARGET_NUM_ATTRS	3
 #define FC_RPORT_NUM_ATTRS	10
 #define FC_VPORT_NUM_ATTRS	9
-#define FC_HOST_NUM_ATTRS	22
+#define FC_HOST_NUM_ATTRS	29
 
 struct fc_internal {
 	struct scsi_transport_template t;
@@ -399,6 +399,20 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
 	fc_host->max_npiv_vports = 0;
 	memset(fc_host->serial_number, 0,
 		sizeof(fc_host->serial_number));
+	memset(fc_host->manufacturer, 0,
+		sizeof(fc_host->manufacturer));
+	memset(fc_host->model, 0,
+		sizeof(fc_host->model));
+	memset(fc_host->model_description, 0,
+		sizeof(fc_host->model_description));
+	memset(fc_host->hardware_version, 0,
+		sizeof(fc_host->hardware_version));
+	memset(fc_host->driver_version, 0,
+		sizeof(fc_host->driver_version));
+	memset(fc_host->firmware_version, 0,
+		sizeof(fc_host->firmware_version));
+	memset(fc_host->optionrom_version, 0,
+		sizeof(fc_host->optionrom_version));
 
 	fc_host->port_id = -1;
 	fc_host->port_type = FC_PORTTYPE_UNKNOWN;
404 fc_host->port_type = FC_PORTTYPE_UNKNOWN; 418 fc_host->port_type = FC_PORTTYPE_UNKNOWN;
@@ -1513,6 +1527,13 @@ fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20,
 fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20);
 fc_private_host_rd_attr(max_npiv_vports, "%u\n", 20);
 fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1));
+fc_private_host_rd_attr(manufacturer, "%s\n", FC_SERIAL_NUMBER_SIZE + 1);
+fc_private_host_rd_attr(model, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
+fc_private_host_rd_attr(model_description, "%s\n", FC_SYMBOLIC_NAME_SIZE + 1);
+fc_private_host_rd_attr(hardware_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
+fc_private_host_rd_attr(driver_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
+fc_private_host_rd_attr(firmware_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
+fc_private_host_rd_attr(optionrom_version, "%s\n", FC_VERSION_STRING_SIZE + 1);
 
 
 /* Dynamic Host Attributes */
@@ -2208,6 +2229,13 @@ fc_attach_transport(struct fc_function_template *ft)
 		SETUP_HOST_ATTRIBUTE_RD_NS(npiv_vports_inuse);
 	}
 	SETUP_HOST_ATTRIBUTE_RD(serial_number);
+	SETUP_HOST_ATTRIBUTE_RD(manufacturer);
+	SETUP_HOST_ATTRIBUTE_RD(model);
+	SETUP_HOST_ATTRIBUTE_RD(model_description);
+	SETUP_HOST_ATTRIBUTE_RD(hardware_version);
+	SETUP_HOST_ATTRIBUTE_RD(driver_version);
+	SETUP_HOST_ATTRIBUTE_RD(firmware_version);
+	SETUP_HOST_ATTRIBUTE_RD(optionrom_version);
 
 	SETUP_HOST_ATTRIBUTE_RD(port_id);
 	SETUP_HOST_ATTRIBUTE_RD(port_type);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index cfd49143723..fac31730add 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -727,10 +727,11 @@ static void iscsi_session_release(struct device *dev)
727 kfree(session); 727 kfree(session);
728} 728}
729 729
730static int iscsi_is_session_dev(const struct device *dev) 730int iscsi_is_session_dev(const struct device *dev)
731{ 731{
732 return dev->release == iscsi_session_release; 732 return dev->release == iscsi_session_release;
733} 733}
734EXPORT_SYMBOL_GPL(iscsi_is_session_dev);
734 735
735static int iscsi_iter_session_fn(struct device *dev, void *data) 736static int iscsi_iter_session_fn(struct device *dev, void *data)
736{ 737{
@@ -1476,6 +1477,66 @@ void iscsi_conn_login_event(struct iscsi_cls_conn *conn,
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_login_event);
 
+void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport,
+			   enum iscsi_host_event_code code, uint32_t data_size,
+			   uint8_t *data)
+{
+	struct nlmsghdr *nlh;
+	struct sk_buff *skb;
+	struct iscsi_uevent *ev;
+	int len = NLMSG_SPACE(sizeof(*ev) + data_size);
+
+	skb = alloc_skb(len, GFP_KERNEL);
+	if (!skb) {
+		printk(KERN_ERR "gracefully ignored host event (%d):%d OOM\n",
+		       host_no, code);
+		return;
+	}
+
+	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
+	ev = NLMSG_DATA(nlh);
+	ev->transport_handle = iscsi_handle(transport);
+	ev->type = ISCSI_KEVENT_HOST_EVENT;
+	ev->r.host_event.host_no = host_no;
+	ev->r.host_event.code = code;
+	ev->r.host_event.data_size = data_size;
+
+	if (data_size)
+		memcpy((char *)ev + sizeof(*ev), data, data_size);
+
+	iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(iscsi_post_host_event);
+
+void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
+			   uint32_t status, uint32_t pid, uint32_t data_size,
+			   uint8_t *data)
+{
+	struct nlmsghdr *nlh;
+	struct sk_buff *skb;
+	struct iscsi_uevent *ev;
+	int len = NLMSG_SPACE(sizeof(*ev) + data_size);
+
+	skb = alloc_skb(len, GFP_KERNEL);
+	if (!skb) {
+		printk(KERN_ERR "gracefully ignored ping comp: OOM\n");
+		return;
+	}
+
+	nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
+	ev = NLMSG_DATA(nlh);
+	ev->transport_handle = iscsi_handle(transport);
+	ev->type = ISCSI_KEVENT_PING_COMP;
+	ev->r.ping_comp.host_no = host_no;
+	ev->r.ping_comp.status = status;
+	ev->r.ping_comp.pid = pid;
+	ev->r.ping_comp.data_size = data_size;
+	memcpy((char *)ev + sizeof(*ev), data, data_size);
+
+	iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(iscsi_ping_comp_event);
+
 static int
 iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
 		    void *payload, int size)
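Both helpers follow the same netlink broadcast recipe: size the message with NLMSG_SPACE(), allocate an skb, put a header, fill an iscsi_uevent, append the raw payload, and multicast to ISCSI_NL_GRP_ISCSID. A user-space flavoured sketch of just the sizing arithmetic (demo_event is a hypothetical stand-in for iscsi_uevent):

#include <stdint.h>
#include <stdio.h>

#define NLMSG_ALIGNTO	4u
#define NLMSG_ALIGN(len) (((len) + NLMSG_ALIGNTO - 1) & ~(NLMSG_ALIGNTO - 1))
#define NLMSG_HDRLEN	NLMSG_ALIGN(16)	/* sizeof(struct nlmsghdr) */
#define NLMSG_SPACE(len) NLMSG_ALIGN(NLMSG_HDRLEN + (len))

struct demo_event {		/* stand-in for struct iscsi_uevent */
	uint32_t type;
	uint32_t host_no;
	uint32_t data_size;
};

int main(void)
{
	uint32_t data_size = 13;	/* arbitrary trailing payload */

	/* header + event struct + payload, each 4-byte aligned */
	printf("skb len = %u\n",
	       (unsigned)NLMSG_SPACE(sizeof(struct demo_event) + data_size));
	return 0;
}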
@@ -1915,6 +1976,123 @@ iscsi_set_iface_params(struct iscsi_transport *transport,
 }
 
 static int
+iscsi_send_ping(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+{
+	struct Scsi_Host *shost;
+	struct sockaddr *dst_addr;
+	int err;
+
+	if (!transport->send_ping)
+		return -ENOSYS;
+
+	shost = scsi_host_lookup(ev->u.iscsi_ping.host_no);
+	if (!shost) {
+		printk(KERN_ERR "iscsi_ping could not find host no %u\n",
+		       ev->u.iscsi_ping.host_no);
+		return -ENODEV;
+	}
+
+	dst_addr = (struct sockaddr *)((char *)ev + sizeof(*ev));
+	err = transport->send_ping(shost, ev->u.iscsi_ping.iface_num,
+				   ev->u.iscsi_ping.iface_type,
+				   ev->u.iscsi_ping.payload_size,
+				   ev->u.iscsi_ping.pid,
+				   dst_addr);
+	scsi_host_put(shost);
+	return err;
+}
+
+static int
+iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh)
+{
+	struct iscsi_uevent *ev = NLMSG_DATA(nlh);
+	struct Scsi_Host *shost = NULL;
+	struct iscsi_chap_rec *chap_rec;
+	struct iscsi_internal *priv;
+	struct sk_buff *skbchap;
+	struct nlmsghdr *nlhchap;
+	struct iscsi_uevent *evchap;
+	uint32_t chap_buf_size;
+	int len, err = 0;
+	char *buf;
+
+	if (!transport->get_chap)
+		return -EINVAL;
+
+	priv = iscsi_if_transport_lookup(transport);
+	if (!priv)
+		return -EINVAL;
+
+	chap_buf_size = (ev->u.get_chap.num_entries * sizeof(*chap_rec));
+	len = NLMSG_SPACE(sizeof(*ev) + chap_buf_size);
+
+	shost = scsi_host_lookup(ev->u.get_chap.host_no);
+	if (!shost) {
+		printk(KERN_ERR "%s: failed. Could not find host no %u\n",
+		       __func__, ev->u.get_chap.host_no);
+		return -ENODEV;
+	}
+
+	do {
+		int actual_size;
+
+		skbchap = alloc_skb(len, GFP_KERNEL);
+		if (!skbchap) {
+			printk(KERN_ERR "can not deliver chap: OOM\n");
+			err = -ENOMEM;
+			goto exit_get_chap;
+		}
+
+		nlhchap = __nlmsg_put(skbchap, 0, 0, 0,
+				      (len - sizeof(*nlhchap)), 0);
+		evchap = NLMSG_DATA(nlhchap);
+		memset(evchap, 0, sizeof(*evchap));
+		evchap->transport_handle = iscsi_handle(transport);
+		evchap->type = nlh->nlmsg_type;
+		evchap->u.get_chap.host_no = ev->u.get_chap.host_no;
+		evchap->u.get_chap.chap_tbl_idx = ev->u.get_chap.chap_tbl_idx;
+		evchap->u.get_chap.num_entries = ev->u.get_chap.num_entries;
+		buf = (char *) ((char *)evchap + sizeof(*evchap));
+		memset(buf, 0, chap_buf_size);
+
+		err = transport->get_chap(shost, ev->u.get_chap.chap_tbl_idx,
+				    &evchap->u.get_chap.num_entries, buf);
+
+		actual_size = NLMSG_SPACE(sizeof(*ev) + chap_buf_size);
+		skb_trim(skbchap, NLMSG_ALIGN(actual_size));
+		nlhchap->nlmsg_len = actual_size;
+
+		err = iscsi_multicast_skb(skbchap, ISCSI_NL_GRP_ISCSID,
+					  GFP_KERNEL);
+	} while (err < 0 && err != -ECONNREFUSED);
+
+exit_get_chap:
+	scsi_host_put(shost);
+	return err;
+}
+
+static int iscsi_delete_chap(struct iscsi_transport *transport,
+			     struct iscsi_uevent *ev)
+{
+	struct Scsi_Host *shost;
+	int err = 0;
+
+	if (!transport->delete_chap)
+		return -ENOSYS;
+
+	shost = scsi_host_lookup(ev->u.delete_chap.host_no);
+	if (!shost) {
+		printk(KERN_ERR "%s could not find host no %u\n",
+		       __func__, ev->u.delete_chap.host_no);
+		return -ENODEV;
+	}
+
+	err = transport->delete_chap(shost, ev->u.delete_chap.chap_tbl_idx);
+	scsi_host_put(shost);
+	return err;
+}
+
+static int
 iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
 {
 	int err = 0;
@@ -1941,7 +2119,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
 	switch (nlh->nlmsg_type) {
 	case ISCSI_UEVENT_CREATE_SESSION:
 		err = iscsi_if_create_session(priv, ep, ev,
-					      NETLINK_CREDS(skb)->pid,
+					      NETLINK_CB(skb).pid,
 					      ev->u.c_session.initial_cmdsn,
 					      ev->u.c_session.cmds_max,
 					      ev->u.c_session.queue_depth);
@@ -1954,7 +2132,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
 		}
 
 		err = iscsi_if_create_session(priv, ep, ev,
-					NETLINK_CREDS(skb)->pid,
+					NETLINK_CB(skb).pid,
 					ev->u.c_bound_session.initial_cmdsn,
 					ev->u.c_bound_session.cmds_max,
 					ev->u.c_bound_session.queue_depth);
@@ -2059,6 +2237,15 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
 		err = iscsi_set_iface_params(transport, ev,
 					     nlmsg_attrlen(nlh, sizeof(*ev)));
 		break;
+	case ISCSI_UEVENT_PING:
+		err = iscsi_send_ping(transport, ev);
+		break;
+	case ISCSI_UEVENT_GET_CHAP:
+		err = iscsi_get_chap(transport, nlh);
+		break;
+	case ISCSI_UEVENT_DELETE_CHAP:
+		err = iscsi_delete_chap(transport, ev);
+		break;
 	default:
 		err = -ENOSYS;
 		break;
@@ -2108,9 +2295,11 @@ iscsi_if_rx(struct sk_buff *skb)
 		 */
 		if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
 			break;
+		if (ev->type == ISCSI_UEVENT_GET_CHAP && !err)
+			break;
 		err = iscsi_if_send_reply(group, nlh->nlmsg_seq,
 			nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
-		} while (err < 0 && err != -ECONNREFUSED);
+		} while (err < 0 && err != -ECONNREFUSED && err != -ESRCH);
 		skb_pull(skb, rlen);
 	}
 	mutex_unlock(&rx_queue_mutex);
@@ -2286,6 +2475,8 @@ iscsi_session_attr(username, ISCSI_PARAM_USERNAME, 1);
 iscsi_session_attr(username_in, ISCSI_PARAM_USERNAME_IN, 1);
 iscsi_session_attr(password, ISCSI_PARAM_PASSWORD, 1);
 iscsi_session_attr(password_in, ISCSI_PARAM_PASSWORD_IN, 1);
+iscsi_session_attr(chap_out_idx, ISCSI_PARAM_CHAP_OUT_IDX, 1);
+iscsi_session_attr(chap_in_idx, ISCSI_PARAM_CHAP_IN_IDX, 1);
 iscsi_session_attr(fast_abort, ISCSI_PARAM_FAST_ABORT, 0);
 iscsi_session_attr(abort_tmo, ISCSI_PARAM_ABORT_TMO, 0);
 iscsi_session_attr(lu_reset_tmo, ISCSI_PARAM_LU_RESET_TMO, 0);
@@ -2382,6 +2573,8 @@ static struct attribute *iscsi_session_attrs[] = {
 	&dev_attr_priv_sess_recovery_tmo.attr,
 	&dev_attr_priv_sess_state.attr,
 	&dev_attr_priv_sess_creator.attr,
+	&dev_attr_sess_chap_out_idx.attr,
+	&dev_attr_sess_chap_in_idx.attr,
 	NULL,
 };
 
@@ -2413,6 +2606,10 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
 		param = ISCSI_PARAM_TARGET_NAME;
 	else if (attr == &dev_attr_sess_tpgt.attr)
 		param = ISCSI_PARAM_TPGT;
+	else if (attr == &dev_attr_sess_chap_in_idx.attr)
+		param = ISCSI_PARAM_CHAP_IN_IDX;
+	else if (attr == &dev_attr_sess_chap_out_idx.attr)
+		param = ISCSI_PARAM_CHAP_OUT_IDX;
 	else if (attr == &dev_attr_sess_password.attr)
 		param = ISCSI_PARAM_USERNAME;
 	else if (attr == &dev_attr_sess_password_in.attr)
@@ -2476,12 +2673,16 @@ iscsi_host_attr(netdev, ISCSI_HOST_PARAM_NETDEV_NAME);
 iscsi_host_attr(hwaddress, ISCSI_HOST_PARAM_HWADDRESS);
 iscsi_host_attr(ipaddress, ISCSI_HOST_PARAM_IPADDRESS);
 iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME);
+iscsi_host_attr(port_state, ISCSI_HOST_PARAM_PORT_STATE);
+iscsi_host_attr(port_speed, ISCSI_HOST_PARAM_PORT_SPEED);
 
 static struct attribute *iscsi_host_attrs[] = {
 	&dev_attr_host_netdev.attr,
 	&dev_attr_host_hwaddress.attr,
 	&dev_attr_host_ipaddress.attr,
 	&dev_attr_host_initiatorname.attr,
+	&dev_attr_host_port_state.attr,
+	&dev_attr_host_port_speed.attr,
 	NULL,
 };
 
@@ -2501,6 +2702,10 @@ static umode_t iscsi_host_attr_is_visible(struct kobject *kobj,
 		param = ISCSI_HOST_PARAM_IPADDRESS;
 	else if (attr == &dev_attr_host_initiatorname.attr)
 		param = ISCSI_HOST_PARAM_INITIATOR_NAME;
+	else if (attr == &dev_attr_host_port_state.attr)
+		param = ISCSI_HOST_PARAM_PORT_STATE;
+	else if (attr == &dev_attr_host_port_speed.attr)
+		param = ISCSI_HOST_PARAM_PORT_SPEED;
 	else {
 		WARN_ONCE(1, "Invalid host attr");
 		return 0;
@@ -2514,6 +2719,61 @@ static struct attribute_group iscsi_host_group = {
 	.is_visible = iscsi_host_attr_is_visible,
 };
 
+/* convert iscsi_port_speed values to ascii string name */
+static const struct {
+	enum iscsi_port_speed	value;
+	char			*name;
+} iscsi_port_speed_names[] = {
+	{ISCSI_PORT_SPEED_UNKNOWN,	"Unknown" },
+	{ISCSI_PORT_SPEED_10MBPS,	"10 Mbps" },
+	{ISCSI_PORT_SPEED_100MBPS,	"100 Mbps" },
+	{ISCSI_PORT_SPEED_1GBPS,	"1 Gbps" },
+	{ISCSI_PORT_SPEED_10GBPS,	"10 Gbps" },
+};
+
+char *iscsi_get_port_speed_name(struct Scsi_Host *shost)
+{
+	int i;
+	char *speed = "Unknown!";
+	struct iscsi_cls_host *ihost = shost->shost_data;
+	uint32_t port_speed = ihost->port_speed;
+
+	for (i = 0; i < ARRAY_SIZE(iscsi_port_speed_names); i++) {
+		if (iscsi_port_speed_names[i].value & port_speed) {
+			speed = iscsi_port_speed_names[i].name;
+			break;
+		}
+	}
+	return speed;
+}
+EXPORT_SYMBOL_GPL(iscsi_get_port_speed_name);
+
+/* convert iscsi_port_state values to ascii string name */
+static const struct {
+	enum iscsi_port_state	value;
+	char			*name;
+} iscsi_port_state_names[] = {
+	{ISCSI_PORT_STATE_DOWN,		"LINK DOWN" },
+	{ISCSI_PORT_STATE_UP,		"LINK UP" },
+};
+
+char *iscsi_get_port_state_name(struct Scsi_Host *shost)
+{
+	int i;
+	char *state = "Unknown!";
+	struct iscsi_cls_host *ihost = shost->shost_data;
+	uint32_t port_state = ihost->port_state;
+
+	for (i = 0; i < ARRAY_SIZE(iscsi_port_state_names); i++) {
+		if (iscsi_port_state_names[i].value & port_state) {
+			state = iscsi_port_state_names[i].name;
+			break;
+		}
+	}
+	return state;
+}
+EXPORT_SYMBOL_GPL(iscsi_get_port_state_name);
+
 static int iscsi_session_match(struct attribute_container *cont,
 			       struct device *dev)
 {
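Both exported helpers share a single value-to-name lookup pattern, and they bit-test the stored flag word rather than compare for equality. The same idea in isolation, with illustrative enum values that are not the kernel's:

#include <stddef.h>

enum port_speed {
	SPEED_UNKNOWN	= 0x1,
	SPEED_10MBPS	= 0x2,
	SPEED_100MBPS	= 0x4,
	SPEED_1GBPS	= 0x8,
	SPEED_10GBPS	= 0x10,
};

static const struct {
	enum port_speed value;
	const char *name;
} speed_names[] = {
	{ SPEED_UNKNOWN,	"Unknown"  },
	{ SPEED_10MBPS,		"10 Mbps"  },
	{ SPEED_100MBPS,	"100 Mbps" },
	{ SPEED_1GBPS,		"1 Gbps"   },
	{ SPEED_10GBPS,		"10 Gbps"  },
};

static const char *speed_name(unsigned int speed)
{
	size_t i;

	/* Bit-test, not equality: callers hand in a flag word. */
	for (i = 0; i < sizeof(speed_names) / sizeof(speed_names[0]); i++)
		if (speed_names[i].value & speed)
			return speed_names[i].name;
	return "Unknown!";
}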
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 9d9330ae421..f7565fc4f0e 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -615,6 +615,7 @@ do_sas_phy_reset(struct device *dev, size_t count, int hard_reset)
 	error = i->f->phy_reset(phy, hard_reset);
 	if (error)
 		return error;
+	phy->enabled = 1;
 	return count;
 };
 
@@ -652,9 +653,21 @@ sas_phy_linkerror_attr(running_disparity_error_count);
 sas_phy_linkerror_attr(loss_of_dword_sync_count);
 sas_phy_linkerror_attr(phy_reset_problem_count);
 
+static int sas_phy_setup(struct transport_container *tc, struct device *dev,
+			 struct device *cdev)
+{
+	struct sas_phy *phy = dev_to_phy(dev);
+	struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+	struct sas_internal *i = to_sas_internal(shost->transportt);
+
+	if (i->f->phy_setup)
+		i->f->phy_setup(phy);
+
+	return 0;
+}
 
 static DECLARE_TRANSPORT_CLASS(sas_phy_class,
-		"sas_phy", NULL, NULL, NULL);
+		"sas_phy", sas_phy_setup, NULL, NULL);
 
 static int sas_phy_match(struct attribute_container *cont, struct device *dev)
 {
@@ -678,7 +691,11 @@ static int sas_phy_match(struct attribute_container *cont, struct device *dev)
 static void sas_phy_release(struct device *dev)
 {
 	struct sas_phy *phy = dev_to_phy(dev);
+	struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+	struct sas_internal *i = to_sas_internal(shost->transportt);
 
+	if (i->f->phy_release)
+		i->f->phy_release(phy);
 	put_device(dev->parent);
 	kfree(phy);
 }
@@ -1044,6 +1061,29 @@ int scsi_is_sas_port(const struct device *dev)
 EXPORT_SYMBOL(scsi_is_sas_port);
 
 /**
+ * sas_port_get_phy - try to take a reference on a port member
+ * @port: port to check
+ */
+struct sas_phy *sas_port_get_phy(struct sas_port *port)
+{
+	struct sas_phy *phy;
+
+	mutex_lock(&port->phy_list_mutex);
+	if (list_empty(&port->phy_list))
+		phy = NULL;
+	else {
+		struct list_head *ent = port->phy_list.next;
+
+		phy = list_entry(ent, typeof(*phy), port_siblings);
+		get_device(&phy->dev);
+	}
+	mutex_unlock(&port->phy_list_mutex);
+
+	return phy;
+}
+EXPORT_SYMBOL(sas_port_get_phy);
+
+/**
  * sas_port_add_phy - add another phy to a port to form a wide port
  * @port: port to add the phy to
  * @phy: phy to add
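sas_port_get_phy() is a textbook take-a-reference-under-the-lock idiom: the first list member is pinned with get_device() before phy_list_mutex is dropped, so it cannot vanish while the caller uses it. The same shape with pthreads and a hypothetical refcounted node (a sketch, not the kernel API):

#include <pthread.h>
#include <stddef.h>

struct node {
	struct node *next;
	int refcount;		/* assume adequate for a single-lock sketch */
};

struct reflist {
	pthread_mutex_t lock;
	struct node *head;
};

static struct node *list_get_first(struct reflist *l)
{
	struct node *n;

	pthread_mutex_lock(&l->lock);
	n = l->head;
	if (n)
		n->refcount++;	/* pinned while the lock is still held */
	pthread_mutex_unlock(&l->lock);

	return n;		/* caller must drop the reference */
}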
@@ -1603,6 +1643,20 @@ sas_rphy_delete(struct sas_rphy *rphy)
 EXPORT_SYMBOL(sas_rphy_delete);
 
 /**
+ * sas_rphy_unlink - unlink SAS remote PHY
+ * @rphy: SAS remote phy to unlink from its parent port
+ *
+ * Removes port reference to an rphy
+ */
+void sas_rphy_unlink(struct sas_rphy *rphy)
+{
+	struct sas_port *parent = dev_to_sas_port(rphy->dev.parent);
+
+	parent->rphy = NULL;
+}
+EXPORT_SYMBOL(sas_rphy_unlink);
+
+/**
  * sas_rphy_remove - remove SAS remote PHY
  * @rphy: SAS remote phy to remove
  *
@@ -1612,7 +1666,6 @@ void
 sas_rphy_remove(struct sas_rphy *rphy)
 {
 	struct device *dev = &rphy->dev;
-	struct sas_port *parent = dev_to_sas_port(dev->parent);
 
 	switch (rphy->identify.device_type) {
 	case SAS_END_DEVICE:
@@ -1626,10 +1679,9 @@ sas_rphy_remove(struct sas_rphy *rphy)
 		break;
 	}
 
+	sas_rphy_unlink(rphy);
 	transport_remove_device(dev);
 	device_del(dev);
-
-	parent->rphy = NULL;
 }
 EXPORT_SYMBOL(sas_rphy_remove);
 
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index d173b90b25e..09e3df42a40 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -107,6 +107,7 @@ static int sd_suspend(struct device *, pm_message_t state);
 static int  sd_resume(struct device *);
 static void sd_rescan(struct device *);
 static int sd_done(struct scsi_cmnd *);
+static int sd_eh_action(struct scsi_cmnd *, unsigned char *, int, int);
 static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
 static void scsi_disk_release(struct device *cdev);
 static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
@@ -346,6 +347,31 @@ sd_store_provisioning_mode(struct device *dev, struct device_attribute *attr,
 	return count;
 }
 
+static ssize_t
+sd_show_max_medium_access_timeouts(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+	return snprintf(buf, 20, "%u\n", sdkp->max_medium_access_timeouts);
+}
+
+static ssize_t
+sd_store_max_medium_access_timeouts(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t count)
+{
+	struct scsi_disk *sdkp = to_scsi_disk(dev);
+	int err;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts);
+
+	return err ? err : count;
+}
+
 static struct device_attribute sd_disk_attrs[] = {
 	__ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
 	       sd_store_cache_type),
@@ -360,6 +386,9 @@ static struct device_attribute sd_disk_attrs[] = {
 	__ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
 	__ATTR(provisioning_mode, S_IRUGO|S_IWUSR, sd_show_provisioning_mode,
 	       sd_store_provisioning_mode),
+	__ATTR(max_medium_access_timeouts, S_IRUGO|S_IWUSR,
+	       sd_show_max_medium_access_timeouts,
+	       sd_store_max_medium_access_timeouts),
 	__ATTR_NULL,
 };
 
@@ -382,6 +411,7 @@ static struct scsi_driver sd_template = {
 	},
 	.rescan			= sd_rescan,
 	.done			= sd_done,
+	.eh_action		= sd_eh_action,
 };
 
 /*
@@ -497,6 +527,8 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
 		max(sdkp->physical_block_size,
 		    sdkp->unmap_granularity * logical_block_size);
 
+	sdkp->provisioning_mode = mode;
+
 	switch (mode) {
 
 	case SD_LBP_DISABLE:
@@ -524,8 +556,6 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
 
 	q->limits.max_discard_sectors = max_blocks * (logical_block_size >> 9);
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
-
-	sdkp->provisioning_mode = mode;
 }
 
 /**
@@ -1313,6 +1343,55 @@ static const struct block_device_operations sd_fops = {
 	.unlock_native_capacity	= sd_unlock_native_capacity,
 };
 
+/**
+ *	sd_eh_action - error handling callback
+ *	@scmd:		sd-issued command that has failed
+ *	@eh_cmnd:	The command that was sent during error handling
+ *	@eh_cmnd_len:	Length of eh_cmnd in bytes
+ *	@eh_disp:	The recovery disposition suggested by the midlayer
+ *
+ *	This function is called by the SCSI midlayer upon completion of
+ *	an error handling command (TEST UNIT READY, START STOP UNIT,
+ *	etc.) The command sent to the device by the error handler is
+ *	stored in eh_cmnd. The result of sending the eh command is
+ *	passed in eh_disp.
+ **/
+static int sd_eh_action(struct scsi_cmnd *scmd, unsigned char *eh_cmnd,
+			int eh_cmnd_len, int eh_disp)
+{
+	struct scsi_disk *sdkp = scsi_disk(scmd->request->rq_disk);
+
+	if (!scsi_device_online(scmd->device) ||
+	    !scsi_medium_access_command(scmd))
+		return eh_disp;
+
+	/*
+	 * The device has timed out executing a medium access command.
+	 * However, the TEST UNIT READY command sent during error
+	 * handling completed successfully. Either the device is in the
+	 * process of recovering or it has suffered an internal failure
+	 * that prevents access to the storage medium.
+	 */
+	if (host_byte(scmd->result) == DID_TIME_OUT && eh_disp == SUCCESS &&
+	    eh_cmnd_len && eh_cmnd[0] == TEST_UNIT_READY)
+		sdkp->medium_access_timed_out++;
+
+	/*
+	 * If the device keeps failing read/write commands but TEST UNIT
+	 * READY always completes successfully we assume that medium
+	 * access is no longer possible and take the device offline.
+	 */
+	if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) {
+		scmd_printk(KERN_ERR, scmd,
+			    "Medium access timeout failure. Offlining disk!\n");
+		scsi_device_set_state(scmd->device, SDEV_OFFLINE);
+
+		return FAILED;
+	}
+
+	return eh_disp;
+}
+
 static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
 {
 	u64 start_lba = blk_rq_pos(scmd->request);
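Reduced to its counting logic, sd_eh_action() amounts to the sketch below (hypothetical struct; the threshold mirrors SD_MAX_MEDIUM_TIMEOUTS). Note that sd_done(), in the next hunk, zeroes the counter on any successful completion, so only consecutive medium-access timeouts take the disk offline, and the limit is writable through the new max_medium_access_timeouts attribute.

enum { EH_SUCCESS, EH_FAILED };

struct disk_state {
	unsigned int timed_out;		/* consecutive medium-access timeouts */
	unsigned int max_timeouts;	/* e.g. 2, the shipped default */
};

/*
 * Called when a medium-access command timed out but the error
 * handler's TEST UNIT READY succeeded (sketch of the sd logic).
 */
static int medium_timeout_disposition(struct disk_state *d, int eh_disp)
{
	d->timed_out++;
	if (d->timed_out >= d->max_timeouts)
		return EH_FAILED;	/* give up and offline the disk */
	return eh_disp;			/* otherwise keep the midlayer's call */
}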
@@ -1402,6 +1481,8 @@ static int sd_done(struct scsi_cmnd *SCpnt)
 	    (!sense_valid || sense_deferred))
 		goto out;
 
+	sdkp->medium_access_timed_out = 0;
+
 	switch (sshdr.sense_key) {
 	case HARDWARE_ERROR:
 	case MEDIUM_ERROR:
@@ -2523,6 +2604,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
 	sdkp->RCD = 0;
 	sdkp->ATO = 0;
 	sdkp->first_scan = 1;
+	sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;
 
 	sd_revalidate_disk(gd);
 
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 4163f2910e3..f703f4827b6 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -20,6 +20,7 @@
  */
 #define SD_MAX_RETRIES		5
 #define SD_PASSTHROUGH_RETRIES	1
+#define SD_MAX_MEDIUM_TIMEOUTS	2
 
 /*
  * Size of the initial data buffer for mode and read capacity data
@@ -59,6 +60,8 @@ struct scsi_disk {
 	u32		unmap_alignment;
 	u32		index;
 	unsigned int	physical_block_size;
+	unsigned int	max_medium_access_timeouts;
+	unsigned int	medium_access_timed_out;
 	u8		media_present;
 	u8		write_prot;
 	u8		protection_type;/* Data Integrity Field */
@@ -88,6 +91,38 @@ static inline struct scsi_disk *scsi_disk(struct gendisk *disk)
 			      (sdsk)->disk->disk_name, ##a) :	\
 	sdev_printk(prefix, (sdsk)->device, fmt, ##a)
 
+static inline int scsi_medium_access_command(struct scsi_cmnd *scmd)
+{
+	switch (scmd->cmnd[0]) {
+	case READ_6:
+	case READ_10:
+	case READ_12:
+	case READ_16:
+	case SYNCHRONIZE_CACHE:
+	case VERIFY:
+	case VERIFY_12:
+	case VERIFY_16:
+	case WRITE_6:
+	case WRITE_10:
+	case WRITE_12:
+	case WRITE_16:
+	case WRITE_SAME:
+	case WRITE_SAME_16:
+	case UNMAP:
+		return 1;
+	case VARIABLE_LENGTH_CMD:
+		switch (scmd->cmnd[9]) {
+		case READ_32:
+		case VERIFY_32:
+		case WRITE_32:
+		case WRITE_SAME_32:
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
 /*
  * A DIF-capable target device can be formatted with different
  * protection schemes. Currently 0 through 3 are defined:
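A quick self-contained check of the classification idea (opcode values from SPC/SBC, covering only a subset of the full list above; VARIABLE LENGTH commands carry their service action in CDB byte 9, hence the inner switch):

#include <assert.h>

static int is_medium_access(const unsigned char *cdb)
{
	switch (cdb[0]) {
	case 0x08: case 0x28: case 0xa8: case 0x88:	/* READ 6/10/12/16 */
	case 0x0a: case 0x2a: case 0xaa: case 0x8a:	/* WRITE 6/10/12/16 */
	case 0x35:					/* SYNCHRONIZE CACHE */
	case 0x42:					/* UNMAP */
		return 1;
	case 0x7f:					/* VARIABLE LENGTH */
		return cdb[9] == 0x09 || cdb[9] == 0x0b; /* READ(32)/WRITE(32) */
	}
	return 0;
}

int main(void)
{
	unsigned char read10[10] = { 0x28 };
	unsigned char inquiry[6] = { 0x12 };

	assert(is_medium_access(read10));
	assert(!is_medium_access(inquiry));
	return 0;
}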
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 9b28f39bac2..9262cdfa4b2 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -1177,6 +1177,7 @@ static int check_tape(struct scsi_tape *STp, struct file *filp)
 static int st_open(struct inode *inode, struct file *filp)
 {
 	int i, retval = (-EIO);
+	int resumed = 0;
 	struct scsi_tape *STp;
 	struct st_partstat *STps;
 	int dev = TAPE_NR(inode);
@@ -1211,6 +1212,11 @@ static int st_open(struct inode *inode, struct file *filp)
 	write_unlock(&st_dev_arr_lock);
 	STp->rew_at_close = STp->autorew_dev = (iminor(inode) & 0x80) == 0;
 
+	if (scsi_autopm_get_device(STp->device) < 0) {
+		retval = -EIO;
+		goto err_out;
+	}
+	resumed = 1;
 	if (!scsi_block_when_processing_errors(STp->device)) {
 		retval = (-ENXIO);
 		goto err_out;
@@ -1258,6 +1264,8 @@ static int st_open(struct inode *inode, struct file *filp)
 		normalize_buffer(STp->buffer);
 	STp->in_use = 0;
 	scsi_tape_put(STp);
+	if (resumed)
+		scsi_autopm_put_device(STp->device);
 	mutex_unlock(&st_mutex);
 	return retval;
 
@@ -1391,6 +1399,7 @@ static int st_release(struct inode *inode, struct file *filp)
 	write_lock(&st_dev_arr_lock);
 	STp->in_use = 0;
 	write_unlock(&st_dev_arr_lock);
+	scsi_autopm_put_device(STp->device);
 	scsi_tape_put(STp);
 
 	return result;
@@ -4154,6 +4163,7 @@ static int st_probe(struct device *dev)
 		if (error)
 			goto out_free_tape;
 	}
+	scsi_autopm_put_device(SDp);
 
 	sdev_printk(KERN_NOTICE, SDp,
 		    "Attached scsi tape %s\n", tape_name(tpnt));
@@ -4201,6 +4211,7 @@ static int st_remove(struct device *dev)
 	struct scsi_tape *tpnt;
 	int i, j, mode;
 
+	scsi_autopm_get_device(SDp);
 	write_lock(&st_dev_arr_lock);
 	for (i = 0; i < st_dev_max; i++) {
 		tpnt = scsi_tapes[i];
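The st.c hunks bracket tape use with scsi_autopm_get_device()/scsi_autopm_put_device() so the drive is held awake only while open, with st_probe()/st_remove() balancing the initial reference. The pattern in miniature, with a plain counter standing in for the runtime-PM core:

static int pm_usage;	/* stand-in usage counter */

static int pm_get(void)  { pm_usage++; return 0; }	/* resume device */
static void pm_put(void) { pm_usage--; }		/* allow suspend */

static int tape_open(void)
{
	int resumed = 0, err;

	if (pm_get() < 0)
		return -1;
	resumed = 1;		/* remember to undo on failure */

	err = 0;		/* ... the real open work happens here ... */
	if (err && resumed)
		pm_put();	/* error path balances the get */
	return err;
}

static void tape_release(void)
{
	pm_put();		/* balances the get in tape_open() */
}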
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
new file mode 100644
index 00000000000..efccd72c4a3
--- /dev/null
+++ b/drivers/scsi/virtio_scsi.c
@@ -0,0 +1,594 @@
1/*
2 * Virtio SCSI HBA driver
3 *
4 * Copyright IBM Corp. 2010
5 * Copyright Red Hat, Inc. 2011
6 *
7 * Authors:
8 * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
9 * Paolo Bonzini <pbonzini@redhat.com>
10 *
11 * This work is licensed under the terms of the GNU GPL, version 2 or later.
12 * See the COPYING file in the top-level directory.
13 *
14 */
15
16#include <linux/module.h>
17#include <linux/slab.h>
18#include <linux/mempool.h>
19#include <linux/virtio.h>
20#include <linux/virtio_ids.h>
21#include <linux/virtio_config.h>
22#include <linux/virtio_scsi.h>
23#include <scsi/scsi_host.h>
24#include <scsi/scsi_device.h>
25#include <scsi/scsi_cmnd.h>
26
27#define VIRTIO_SCSI_MEMPOOL_SZ 64
28
29/* Command queue element */
30struct virtio_scsi_cmd {
31 struct scsi_cmnd *sc;
32 struct completion *comp;
33 union {
34 struct virtio_scsi_cmd_req cmd;
35 struct virtio_scsi_ctrl_tmf_req tmf;
36 struct virtio_scsi_ctrl_an_req an;
37 } req;
38 union {
39 struct virtio_scsi_cmd_resp cmd;
40 struct virtio_scsi_ctrl_tmf_resp tmf;
41 struct virtio_scsi_ctrl_an_resp an;
42 struct virtio_scsi_event evt;
43 } resp;
44} ____cacheline_aligned_in_smp;
45
46/* Driver instance state */
47struct virtio_scsi {
48 /* Protects ctrl_vq, req_vq and sg[] */
49 spinlock_t vq_lock;
50
51 struct virtio_device *vdev;
52 struct virtqueue *ctrl_vq;
53 struct virtqueue *event_vq;
54 struct virtqueue *req_vq;
55
56 /* For sglist construction when adding commands to the virtqueue. */
57 struct scatterlist sg[];
58};
59
60static struct kmem_cache *virtscsi_cmd_cache;
61static mempool_t *virtscsi_cmd_pool;
62
63static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
64{
65 return vdev->priv;
66}
67
68static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
69{
70 if (!resid)
71 return;
72
73 if (!scsi_bidi_cmnd(sc)) {
74 scsi_set_resid(sc, resid);
75 return;
76 }
77
78 scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
79 scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
80}
81
82/**
83 * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
84 *
85 * Called with vq_lock held.
86 */
87static void virtscsi_complete_cmd(void *buf)
88{
89 struct virtio_scsi_cmd *cmd = buf;
90 struct scsi_cmnd *sc = cmd->sc;
91 struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
92
93 dev_dbg(&sc->device->sdev_gendev,
94 "cmd %p response %u status %#02x sense_len %u\n",
95 sc, resp->response, resp->status, resp->sense_len);
96
97 sc->result = resp->status;
98 virtscsi_compute_resid(sc, resp->resid);
99 switch (resp->response) {
100 case VIRTIO_SCSI_S_OK:
101 set_host_byte(sc, DID_OK);
102 break;
103 case VIRTIO_SCSI_S_OVERRUN:
104 set_host_byte(sc, DID_ERROR);
105 break;
106 case VIRTIO_SCSI_S_ABORTED:
107 set_host_byte(sc, DID_ABORT);
108 break;
109 case VIRTIO_SCSI_S_BAD_TARGET:
110 set_host_byte(sc, DID_BAD_TARGET);
111 break;
112 case VIRTIO_SCSI_S_RESET:
113 set_host_byte(sc, DID_RESET);
114 break;
115 case VIRTIO_SCSI_S_BUSY:
116 set_host_byte(sc, DID_BUS_BUSY);
117 break;
118 case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
119 set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
120 break;
121 case VIRTIO_SCSI_S_TARGET_FAILURE:
122 set_host_byte(sc, DID_TARGET_FAILURE);
123 break;
124 case VIRTIO_SCSI_S_NEXUS_FAILURE:
125 set_host_byte(sc, DID_NEXUS_FAILURE);
126 break;
127 default:
128 scmd_printk(KERN_WARNING, sc, "Unknown response %d",
129 resp->response);
130 /* fall through */
131 case VIRTIO_SCSI_S_FAILURE:
132 set_host_byte(sc, DID_ERROR);
133 break;
134 }
135
136 WARN_ON(resp->sense_len > VIRTIO_SCSI_SENSE_SIZE);
137 if (sc->sense_buffer) {
138 memcpy(sc->sense_buffer, resp->sense,
139 min_t(u32, resp->sense_len, VIRTIO_SCSI_SENSE_SIZE));
140 if (resp->sense_len)
141 set_driver_byte(sc, DRIVER_SENSE);
142 }
143
144 mempool_free(cmd, virtscsi_cmd_pool);
145 sc->scsi_done(sc);
146}
147
148static void virtscsi_vq_done(struct virtqueue *vq, void (*fn)(void *buf))
149{
150 struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
151 struct virtio_scsi *vscsi = shost_priv(sh);
152 void *buf;
153 unsigned long flags;
154 unsigned int len;
155
156 spin_lock_irqsave(&vscsi->vq_lock, flags);
157
158 do {
159 virtqueue_disable_cb(vq);
160 while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
161 fn(buf);
162 } while (!virtqueue_enable_cb(vq));
163
164 spin_unlock_irqrestore(&vscsi->vq_lock, flags);
165}
166
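virtscsi_vq_done() above is the canonical virtio completion loop: disable further callbacks, drain every finished buffer, re-enable, and loop again if a completion raced in before callbacks were back on. The control flow in miniature, against a toy queue rather than the virtio API:

#include <stddef.h>

struct toy_queue {
	void **items;
	size_t head, tail;
	int irq_enabled;
	int raced;	/* a real queue would set this on late arrivals */
};

static void q_disable_irq(struct toy_queue *q) { q->irq_enabled = 0; }

static int q_enable_irq(struct toy_queue *q)
{
	q->irq_enabled = 1;
	return !q->raced;	/* false means: new work slipped in, re-drain */
}

static void *q_pop(struct toy_queue *q)
{
	return (q->head == q->tail) ? NULL : q->items[q->head++];
}

static void drain(struct toy_queue *q, void (*fn)(void *item))
{
	void *item;

	do {
		q_disable_irq(q);
		while ((item = q_pop(q)) != NULL)
			fn(item);
	} while (!q_enable_irq(q));
}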
167static void virtscsi_req_done(struct virtqueue *vq)
168{
169 virtscsi_vq_done(vq, virtscsi_complete_cmd);
170};
171
172static void virtscsi_complete_free(void *buf)
173{
174 struct virtio_scsi_cmd *cmd = buf;
175
176 if (cmd->comp)
177 complete_all(cmd->comp);
178 mempool_free(cmd, virtscsi_cmd_pool);
179}
180
181static void virtscsi_ctrl_done(struct virtqueue *vq)
182{
183 virtscsi_vq_done(vq, virtscsi_complete_free);
184};
185
186static void virtscsi_event_done(struct virtqueue *vq)
187{
188 virtscsi_vq_done(vq, virtscsi_complete_free);
189};
190
191static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx,
192 struct scsi_data_buffer *sdb)
193{
194 struct sg_table *table = &sdb->table;
195 struct scatterlist *sg_elem;
196 unsigned int idx = *p_idx;
197 int i;
198
199 for_each_sg(table->sgl, sg_elem, table->nents, i)
200 sg_set_buf(&sg[idx++], sg_virt(sg_elem), sg_elem->length);
201
202 *p_idx = idx;
203}
204
205/**
206 * virtscsi_map_cmd - map a scsi_cmd to a virtqueue scatterlist
207 * @vscsi : virtio_scsi state
208 * @cmd : command structure
209 * @out_num : number of read-only elements
210 * @in_num : number of write-only elements
211 * @req_size : size of the request buffer
212 * @resp_size : size of the response buffer
213 *
214 * Called with vq_lock held.
215 */
216static void virtscsi_map_cmd(struct virtio_scsi *vscsi,
217 struct virtio_scsi_cmd *cmd,
218 unsigned *out_num, unsigned *in_num,
219 size_t req_size, size_t resp_size)
220{
221 struct scsi_cmnd *sc = cmd->sc;
222 struct scatterlist *sg = vscsi->sg;
223 unsigned int idx = 0;
224
225 if (sc) {
226 struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
227 BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
228
229 /* TODO: check feature bit and fail if unsupported? */
230 BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);
231 }
232
233 /* Request header. */
234 sg_set_buf(&sg[idx++], &cmd->req, req_size);
235
236 /* Data-out buffer. */
237 if (sc && sc->sc_data_direction != DMA_FROM_DEVICE)
238 virtscsi_map_sgl(sg, &idx, scsi_out(sc));
239
240 *out_num = idx;
241
242 /* Response header. */
243 sg_set_buf(&sg[idx++], &cmd->resp, resp_size);
244
245 /* Data-in buffer */
246 if (sc && sc->sc_data_direction != DMA_TO_DEVICE)
247 virtscsi_map_sgl(sg, &idx, scsi_in(sc));
248
249 *in_num = idx - *out_num;
250}
251
252static int virtscsi_kick_cmd(struct virtio_scsi *vscsi, struct virtqueue *vq,
253 struct virtio_scsi_cmd *cmd,
254 size_t req_size, size_t resp_size, gfp_t gfp)
255{
256 unsigned int out_num, in_num;
257 unsigned long flags;
258 int ret;
259
260 spin_lock_irqsave(&vscsi->vq_lock, flags);
261
262 virtscsi_map_cmd(vscsi, cmd, &out_num, &in_num, req_size, resp_size);
263
264 ret = virtqueue_add_buf(vq, vscsi->sg, out_num, in_num, cmd, gfp);
265 if (ret >= 0)
266 virtqueue_kick(vq);
267
268 spin_unlock_irqrestore(&vscsi->vq_lock, flags);
269 return ret;
270}
271
272static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
273{
274 struct virtio_scsi *vscsi = shost_priv(sh);
275 struct virtio_scsi_cmd *cmd;
276 int ret;
277
278 dev_dbg(&sc->device->sdev_gendev,
279 "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);
280
281 ret = SCSI_MLQUEUE_HOST_BUSY;
282 cmd = mempool_alloc(virtscsi_cmd_pool, GFP_ATOMIC);
283 if (!cmd)
284 goto out;
285
286 memset(cmd, 0, sizeof(*cmd));
287 cmd->sc = sc;
288 cmd->req.cmd = (struct virtio_scsi_cmd_req){
289 .lun[0] = 1,
290 .lun[1] = sc->device->id,
291 .lun[2] = (sc->device->lun >> 8) | 0x40,
292 .lun[3] = sc->device->lun & 0xff,
293 .tag = (unsigned long)sc,
294 .task_attr = VIRTIO_SCSI_S_SIMPLE,
295 .prio = 0,
296 .crn = 0,
297 };
298
299 BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
300 memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
301
302 if (virtscsi_kick_cmd(vscsi, vscsi->req_vq, cmd,
303 sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
304 GFP_ATOMIC) >= 0)
305 ret = 0;
306
307out:
308 return ret;
309}
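/* The .lun[] initialization above (repeated in the reset and abort paths
 * below) encodes the virtio-scsi address: byte 0 is fixed at 1, byte 1 is
 * the target id, and bytes 2-3 carry the LUN in SAM-2 flat addressing
 * format. A hypothetical helper, a sketch only and not part of the driver,
 * would factor it out:
 */
static inline void virtscsi_encode_lun(u8 lun[8], unsigned int target,
				       unsigned int lun_id)
{
	memset(lun, 0, 8);
	lun[0] = 1;			/* fixed by the virtio-scsi transport */
	lun[1] = target;		/* target id */
	lun[2] = (lun_id >> 8) | 0x40;	/* flat-space LUN, high byte */
	lun[3] = lun_id & 0xff;		/* low byte */
}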
310
311static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
312{
313 DECLARE_COMPLETION_ONSTACK(comp);
314 int ret;
315
316 cmd->comp = &comp;
317 ret = virtscsi_kick_cmd(vscsi, vscsi->ctrl_vq, cmd,
318 sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
319 GFP_NOIO);
320 if (ret < 0)
321 return FAILED;
322
323 wait_for_completion(&comp);
324 if (cmd->resp.tmf.response != VIRTIO_SCSI_S_OK &&
325 cmd->resp.tmf.response != VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
326 return FAILED;
327
328 return SUCCESS;
329}
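/* Task management is synchronous here: the TMF is queued on the control
 * virtqueue and the caller sleeps on the on-stack completion, which
 * virtscsi_complete_free() signals from the control-queue callback once
 * the device posts the response. */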
330
331static int virtscsi_device_reset(struct scsi_cmnd *sc)
332{
333 struct virtio_scsi *vscsi = shost_priv(sc->device->host);
334 struct virtio_scsi_cmd *cmd;
335
336 sdev_printk(KERN_INFO, sc->device, "device reset\n");
337 cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
338 if (!cmd)
339 return FAILED;
340
341 memset(cmd, 0, sizeof(*cmd));
342 cmd->sc = sc;
343 cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
344 .type = VIRTIO_SCSI_T_TMF,
345 .subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET,
346 .lun[0] = 1,
347 .lun[1] = sc->device->id,
348 .lun[2] = (sc->device->lun >> 8) | 0x40,
349 .lun[3] = sc->device->lun & 0xff,
350 };
351 return virtscsi_tmf(vscsi, cmd);
352}
353
354static int virtscsi_abort(struct scsi_cmnd *sc)
355{
356 struct virtio_scsi *vscsi = shost_priv(sc->device->host);
357 struct virtio_scsi_cmd *cmd;
358
359 scmd_printk(KERN_INFO, sc, "abort\n");
360 cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
361 if (!cmd)
362 return FAILED;
363
364 memset(cmd, 0, sizeof(*cmd));
365 cmd->sc = sc;
366 cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
367 .type = VIRTIO_SCSI_T_TMF,
368 .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
369 .lun[0] = 1,
370 .lun[1] = sc->device->id,
371 .lun[2] = (sc->device->lun >> 8) | 0x40,
372 .lun[3] = sc->device->lun & 0xff,
373 .tag = (unsigned long)sc,
374 };
375 return virtscsi_tmf(vscsi, cmd);
376}
377
378static struct scsi_host_template virtscsi_host_template = {
379 .module = THIS_MODULE,
380 .name = "Virtio SCSI HBA",
381 .proc_name = "virtio_scsi",
382 .queuecommand = virtscsi_queuecommand,
383 .this_id = -1,
384 .eh_abort_handler = virtscsi_abort,
385 .eh_device_reset_handler = virtscsi_device_reset,
386
387 .can_queue = 1024,
388 .dma_boundary = UINT_MAX,
389 .use_clustering = ENABLE_CLUSTERING,
390};
391
392#define virtscsi_config_get(vdev, fld) \
393 ({ \
394 typeof(((struct virtio_scsi_config *)0)->fld) __val; \
395 vdev->config->get(vdev, \
396 offsetof(struct virtio_scsi_config, fld), \
397 &__val, sizeof(__val)); \
398 __val; \
399 })
400
401#define virtscsi_config_set(vdev, fld, val) \
402 (void)({ \
403 typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
404 vdev->config->set(vdev, \
405 offsetof(struct virtio_scsi_config, fld), \
406 &__val, sizeof(__val)); \
407 })
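/* A minimal usage sketch for the accessors above; because the field name
 * is spliced into offsetof()/typeof() against struct virtio_scsi_config,
 * a mistyped field fails at compile time rather than at run time:
 *
 *	u32 seg_max = virtscsi_config_get(vdev, seg_max);
 *	virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
 */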
408
409static int virtscsi_init(struct virtio_device *vdev,
410 struct virtio_scsi *vscsi)
411{
412 int err;
413 struct virtqueue *vqs[3];
414 vq_callback_t *callbacks[] = {
415 virtscsi_ctrl_done,
416 virtscsi_event_done,
417 virtscsi_req_done
418 };
419 const char *names[] = {
420 "control",
421 "event",
422 "request"
423 };
424
425 /* Discover virtqueues and write information to configuration. */
426 err = vdev->config->find_vqs(vdev, 3, vqs, callbacks, names);
427 if (err)
428 return err;
429
430 vscsi->ctrl_vq = vqs[0];
431 vscsi->event_vq = vqs[1];
432 vscsi->req_vq = vqs[2];
433
434 virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
435 virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
436 return 0;
437}
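/* The virtqueue order requested above (control, event, request) follows
 * the virtio-scsi device specification; find_vqs() fills vqs[] in the same
 * order as the callbacks[] and names[] arrays. */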
438
439static int __devinit virtscsi_probe(struct virtio_device *vdev)
440{
441 struct Scsi_Host *shost;
442 struct virtio_scsi *vscsi;
443 int err;
444 u32 sg_elems;
445 u32 cmd_per_lun;
446
 447 /* We need to know how many segments there are before we allocate;
 448 * one extra sg element is needed at the head and one at the tail.
449 */
450 sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
451
452 /* Allocate memory and link the structs together. */
453 shost = scsi_host_alloc(&virtscsi_host_template,
454 sizeof(*vscsi) + sizeof(vscsi->sg[0]) * (sg_elems + 2));
455
456 if (!shost)
457 return -ENOMEM;
458
459 shost->sg_tablesize = sg_elems;
460 vscsi = shost_priv(shost);
461 vscsi->vdev = vdev;
462 vdev->priv = shost;
463
 464 /* Initialize the virtqueue lock and the scatterlist table. */
465 spin_lock_init(&vscsi->vq_lock);
466 sg_init_table(vscsi->sg, sg_elems + 2);
467
468 err = virtscsi_init(vdev, vscsi);
469 if (err)
470 goto virtscsi_init_failed;
471
472 cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
473 shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
474 shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
475 shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1;
476 shost->max_id = virtscsi_config_get(vdev, max_target) + 1;
477 shost->max_channel = 0;
478 shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
479 err = scsi_add_host(shost, &vdev->dev);
480 if (err)
481 goto scsi_add_host_failed;
482
483 scsi_scan_host(shost);
484
485 return 0;
486
487scsi_add_host_failed:
488 vdev->config->del_vqs(vdev);
489virtscsi_init_failed:
490 scsi_host_put(shost);
491 return err;
492}
493
494static void virtscsi_remove_vqs(struct virtio_device *vdev)
495{
496 /* Stop all the virtqueues. */
497 vdev->config->reset(vdev);
498
499 vdev->config->del_vqs(vdev);
500}
501
502static void __devexit virtscsi_remove(struct virtio_device *vdev)
503{
504 struct Scsi_Host *shost = virtio_scsi_host(vdev);
505
506 scsi_remove_host(shost);
507
508 virtscsi_remove_vqs(vdev);
509 scsi_host_put(shost);
510}
511
512#ifdef CONFIG_PM
513static int virtscsi_freeze(struct virtio_device *vdev)
514{
515 virtscsi_remove_vqs(vdev);
516 return 0;
517}
518
519static int virtscsi_restore(struct virtio_device *vdev)
520{
521 struct Scsi_Host *sh = virtio_scsi_host(vdev);
522 struct virtio_scsi *vscsi = shost_priv(sh);
523
524 return virtscsi_init(vdev, vscsi);
525}
526#endif
527
528static struct virtio_device_id id_table[] = {
529 { VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
530 { 0 },
531};
532
533static struct virtio_driver virtio_scsi_driver = {
534 .driver.name = KBUILD_MODNAME,
535 .driver.owner = THIS_MODULE,
536 .id_table = id_table,
537 .probe = virtscsi_probe,
538#ifdef CONFIG_PM
539 .freeze = virtscsi_freeze,
540 .restore = virtscsi_restore,
541#endif
542 .remove = __devexit_p(virtscsi_remove),
543};
544
545static int __init init(void)
546{
547 int ret = -ENOMEM;
548
549 virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
550 if (!virtscsi_cmd_cache) {
551 printk(KERN_ERR "kmem_cache_create() for "
552 "virtscsi_cmd_cache failed\n");
553 goto error;
554 }
555
556
557 virtscsi_cmd_pool =
558 mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
559 virtscsi_cmd_cache);
560 if (!virtscsi_cmd_pool) {
 561 printk(KERN_ERR "mempool_create() for "
562 "virtscsi_cmd_pool failed\n");
563 goto error;
564 }
565 ret = register_virtio_driver(&virtio_scsi_driver);
566 if (ret < 0)
567 goto error;
568
569 return 0;
570
571error:
572 if (virtscsi_cmd_pool) {
573 mempool_destroy(virtscsi_cmd_pool);
574 virtscsi_cmd_pool = NULL;
575 }
576 if (virtscsi_cmd_cache) {
577 kmem_cache_destroy(virtscsi_cmd_cache);
578 virtscsi_cmd_cache = NULL;
579 }
580 return ret;
581}
582
583static void __exit fini(void)
584{
585 unregister_virtio_driver(&virtio_scsi_driver);
586 mempool_destroy(virtscsi_cmd_pool);
587 kmem_cache_destroy(virtscsi_cmd_cache);
588}
589module_init(init);
590module_exit(fini);
591
592MODULE_DEVICE_TABLE(virtio, id_table);
593MODULE_DESCRIPTION("Virtio SCSI HBA driver");
594MODULE_LICENSE("GPL");
diff --git a/include/linux/libata.h b/include/linux/libata.h
index cafc09a64fe..42378d637ff 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -996,6 +996,7 @@ extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev,
996extern void ata_sas_port_destroy(struct ata_port *); 996extern void ata_sas_port_destroy(struct ata_port *);
997extern struct ata_port *ata_sas_port_alloc(struct ata_host *, 997extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
998 struct ata_port_info *, struct Scsi_Host *); 998 struct ata_port_info *, struct Scsi_Host *);
999extern int ata_sas_async_port_init(struct ata_port *);
999extern int ata_sas_port_init(struct ata_port *); 1000extern int ata_sas_port_init(struct ata_port *);
1000extern int ata_sas_port_start(struct ata_port *ap); 1001extern int ata_sas_port_start(struct ata_port *ap);
1001extern void ata_sas_port_stop(struct ata_port *ap); 1002extern void ata_sas_port_stop(struct ata_port *ap);
@@ -1147,6 +1148,7 @@ static inline int ata_acpi_cbl_80wire(struct ata_port *ap,
1147 * EH - drivers/ata/libata-eh.c 1148 * EH - drivers/ata/libata-eh.c
1148 */ 1149 */
1149extern void ata_port_schedule_eh(struct ata_port *ap); 1150extern void ata_port_schedule_eh(struct ata_port *ap);
1151extern void ata_port_wait_eh(struct ata_port *ap);
1150extern int ata_link_abort(struct ata_link *link); 1152extern int ata_link_abort(struct ata_link *link);
1151extern int ata_port_abort(struct ata_port *ap); 1153extern int ata_port_abort(struct ata_port *ap);
1152extern int ata_port_freeze(struct ata_port *ap); 1154extern int ata_port_freeze(struct ata_port *ap);
diff --git a/include/linux/virtio_ids.h b/include/linux/virtio_ids.h
index 85bb0bb66ff..c5d8455c68c 100644
--- a/include/linux/virtio_ids.h
+++ b/include/linux/virtio_ids.h
@@ -34,6 +34,7 @@
34#define VIRTIO_ID_CONSOLE 3 /* virtio console */ 34#define VIRTIO_ID_CONSOLE 3 /* virtio console */
35#define VIRTIO_ID_RNG 4 /* virtio rng */ 35#define VIRTIO_ID_RNG 4 /* virtio rng */
36#define VIRTIO_ID_BALLOON 5 /* virtio balloon */ 36#define VIRTIO_ID_BALLOON 5 /* virtio balloon */
37#define VIRTIO_ID_SCSI 8 /* virtio scsi */
37#define VIRTIO_ID_9P 9 /* 9p virtio console */ 38#define VIRTIO_ID_9P 9 /* 9p virtio console */
38 39
39#endif /* _LINUX_VIRTIO_IDS_H */ 40#endif /* _LINUX_VIRTIO_IDS_H */
diff --git a/include/linux/virtio_scsi.h b/include/linux/virtio_scsi.h
new file mode 100644
index 00000000000..8ddeafdc054
--- /dev/null
+++ b/include/linux/virtio_scsi.h
@@ -0,0 +1,114 @@
1#ifndef _LINUX_VIRTIO_SCSI_H
2#define _LINUX_VIRTIO_SCSI_H
3/* This header is BSD licensed so anyone can use the definitions to implement
4 * compatible drivers/servers. */
5
6#define VIRTIO_SCSI_CDB_SIZE 32
7#define VIRTIO_SCSI_SENSE_SIZE 96
8
9/* SCSI command request, followed by data-out */
10struct virtio_scsi_cmd_req {
11 u8 lun[8]; /* Logical Unit Number */
12 u64 tag; /* Command identifier */
13 u8 task_attr; /* Task attribute */
14 u8 prio;
15 u8 crn;
16 u8 cdb[VIRTIO_SCSI_CDB_SIZE];
17} __packed;
18
19/* Response, followed by sense data and data-in */
20struct virtio_scsi_cmd_resp {
21 u32 sense_len; /* Sense data length */
22 u32 resid; /* Residual bytes in data buffer */
23 u16 status_qualifier; /* Status qualifier */
24 u8 status; /* Command completion status */
25 u8 response; /* Response values */
26 u8 sense[VIRTIO_SCSI_SENSE_SIZE];
27} __packed;
28
29/* Task Management Request */
30struct virtio_scsi_ctrl_tmf_req {
31 u32 type;
32 u32 subtype;
33 u8 lun[8];
34 u64 tag;
35} __packed;
36
37struct virtio_scsi_ctrl_tmf_resp {
38 u8 response;
39} __packed;
40
41/* Asynchronous notification query/subscription */
42struct virtio_scsi_ctrl_an_req {
43 u32 type;
44 u8 lun[8];
45 u32 event_requested;
46} __packed;
47
48struct virtio_scsi_ctrl_an_resp {
49 u32 event_actual;
50 u8 response;
51} __packed;
52
53struct virtio_scsi_event {
54 u32 event;
55 u8 lun[8];
56 u32 reason;
57} __packed;
58
59struct virtio_scsi_config {
60 u32 num_queues;
61 u32 seg_max;
62 u32 max_sectors;
63 u32 cmd_per_lun;
64 u32 event_info_size;
65 u32 sense_size;
66 u32 cdb_size;
67 u16 max_channel;
68 u16 max_target;
69 u32 max_lun;
70} __packed;
71
72/* Response codes */
73#define VIRTIO_SCSI_S_OK 0
74#define VIRTIO_SCSI_S_OVERRUN 1
75#define VIRTIO_SCSI_S_ABORTED 2
76#define VIRTIO_SCSI_S_BAD_TARGET 3
77#define VIRTIO_SCSI_S_RESET 4
78#define VIRTIO_SCSI_S_BUSY 5
79#define VIRTIO_SCSI_S_TRANSPORT_FAILURE 6
80#define VIRTIO_SCSI_S_TARGET_FAILURE 7
81#define VIRTIO_SCSI_S_NEXUS_FAILURE 8
82#define VIRTIO_SCSI_S_FAILURE 9
83#define VIRTIO_SCSI_S_FUNCTION_SUCCEEDED 10
84#define VIRTIO_SCSI_S_FUNCTION_REJECTED 11
85#define VIRTIO_SCSI_S_INCORRECT_LUN 12
86
87/* Controlq type codes. */
88#define VIRTIO_SCSI_T_TMF 0
89#define VIRTIO_SCSI_T_AN_QUERY 1
90#define VIRTIO_SCSI_T_AN_SUBSCRIBE 2
91
92/* Valid TMF subtypes. */
93#define VIRTIO_SCSI_T_TMF_ABORT_TASK 0
94#define VIRTIO_SCSI_T_TMF_ABORT_TASK_SET 1
95#define VIRTIO_SCSI_T_TMF_CLEAR_ACA 2
96#define VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET 3
97#define VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET 4
98#define VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET 5
99#define VIRTIO_SCSI_T_TMF_QUERY_TASK 6
100#define VIRTIO_SCSI_T_TMF_QUERY_TASK_SET 7
101
102/* Events. */
103#define VIRTIO_SCSI_T_EVENTS_MISSED 0x80000000
104#define VIRTIO_SCSI_T_NO_EVENT 0
105#define VIRTIO_SCSI_T_TRANSPORT_RESET 1
106#define VIRTIO_SCSI_T_ASYNC_NOTIFY 2
107
108#define VIRTIO_SCSI_S_SIMPLE 0
109#define VIRTIO_SCSI_S_ORDERED 1
110#define VIRTIO_SCSI_S_HEAD 2
111#define VIRTIO_SCSI_S_ACA 3
112
113
114#endif /* _LINUX_VIRTIO_SCSI_H */
diff --git a/include/scsi/fc/fc_ms.h b/include/scsi/fc/fc_ms.h
new file mode 100644
index 00000000000..f52b921b5c7
--- /dev/null
+++ b/include/scsi/fc/fc_ms.h
@@ -0,0 +1,213 @@
1/* Copyright(c) 2011 Intel Corporation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify it
4 * under the terms and conditions of the GNU General Public License,
5 * version 2, as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope it will be useful, but WITHOUT
8 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
9 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
10 * more details.
11 *
12 * You should have received a copy of the GNU General Public License along with
13 * this program; if not, write to the Free Software Foundation, Inc.,
14 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
15 *
16 * Maintained at www.Open-FCoE.org
17 */
18
19#ifndef _FC_MS_H_
20#define _FC_MS_H_
21
22#include <linux/types.h>
23
24/*
25 * Fibre Channel Services - Management Service (MS)
26 * From T11.org FC-GS-4 Rev 7.91 February 4, 2004
27 */
28
29/*
30 * Fabric Device Management Interface
31 */
32
33/*
34 * Common-transport sub-type for FDMI
35 */
36#define FC_FDMI_SUBTYPE 0x10 /* fs_ct_hdr.ct_fs_subtype */
37
38/*
39 * Management server FDMI Requests.
40 */
41enum fc_fdmi_req {
42 FC_FDMI_GRHL = 0x0100, /* Get Registered HBA List */
43 FC_FDMI_GHAT = 0x0101, /* Get HBA Attributes */
44 FC_FDMI_GRPL = 0x0102, /* Get Registered Port List */
45 FC_FDMI_GPAT = 0x0110, /* Get Port Attributes */
46 FC_FDMI_RHBA = 0x0200, /* Register HBA */
47 FC_FDMI_RHAT = 0x0201, /* Register HBA Attributes */
48 FC_FDMI_RPRT = 0x0210, /* Register Port */
49 FC_FDMI_RPA = 0x0211, /* Register Port Attributes */
50 FC_FDMI_DHBA = 0x0300, /* Deregister HBA */
51 FC_FDMI_DHAT = 0x0301, /* Deregister HBA Attributes */
52 FC_FDMI_DPRT = 0x0310, /* Deregister Port */
53 FC_FDMI_DPA = 0x0311, /* Deregister Port Attributes */
54};
55
56/*
57 * HBA Attribute Entry Type
58 */
59enum fc_fdmi_hba_attr_type {
60 FC_FDMI_HBA_ATTR_NODENAME = 0x0001,
61 FC_FDMI_HBA_ATTR_MANUFACTURER = 0x0002,
62 FC_FDMI_HBA_ATTR_SERIALNUMBER = 0x0003,
63 FC_FDMI_HBA_ATTR_MODEL = 0x0004,
64 FC_FDMI_HBA_ATTR_MODELDESCRIPTION = 0x0005,
65 FC_FDMI_HBA_ATTR_HARDWAREVERSION = 0x0006,
66 FC_FDMI_HBA_ATTR_DRIVERVERSION = 0x0007,
67 FC_FDMI_HBA_ATTR_OPTIONROMVERSION = 0x0008,
68 FC_FDMI_HBA_ATTR_FIRMWAREVERSION = 0x0009,
69 FC_FDMI_HBA_ATTR_OSNAMEVERSION = 0x000A,
70 FC_FDMI_HBA_ATTR_MAXCTPAYLOAD = 0x000B,
71};
72
73/*
74 * HBA Attribute Length
75 */
76#define FC_FDMI_HBA_ATTR_NODENAME_LEN 8
77#define FC_FDMI_HBA_ATTR_MANUFACTURER_LEN 64
78#define FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN 64
79#define FC_FDMI_HBA_ATTR_MODEL_LEN 256
80#define FC_FDMI_HBA_ATTR_MODELDESCR_LEN 256
81#define FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN 256
82#define FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN 256
83#define FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN 256
84#define FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN 256
85#define FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN 256
86#define FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN 4
87
88/*
89 * Port Attribute Type
90 */
91enum fc_fdmi_port_attr_type {
92 FC_FDMI_PORT_ATTR_FC4TYPES = 0x0001,
93 FC_FDMI_PORT_ATTR_SUPPORTEDSPEED = 0x0002,
94 FC_FDMI_PORT_ATTR_CURRENTPORTSPEED = 0x0003,
95 FC_FDMI_PORT_ATTR_MAXFRAMESIZE = 0x0004,
96 FC_FDMI_PORT_ATTR_OSDEVICENAME = 0x0005,
97 FC_FDMI_PORT_ATTR_HOSTNAME = 0x0006,
98};
99
100/*
101 * Port Attribute Length
102 */
103#define FC_FDMI_PORT_ATTR_FC4TYPES_LEN 32
104#define FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN 4
105#define FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN 4
106#define FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN 4
107#define FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN 256
108#define FC_FDMI_PORT_ATTR_HOSTNAME_LEN 256
109
110/*
111 * HBA Attribute ID
112 */
113struct fc_fdmi_hba_identifier {
114 __be64 id;
115};
116
117/*
118 * Port Name
119 */
120struct fc_fdmi_port_name {
121 __be64 portname;
122};
123
124/*
125 * Attribute Entry Block for HBA/Port Attributes
126 */
127#define FC_FDMI_ATTR_ENTRY_HEADER_LEN 4
128struct fc_fdmi_attr_entry {
129 __be16 type;
130 __be16 len;
131 __u8 value[1];
132} __attribute__((__packed__));
133
134/*
135 * Common for HBA/Port Attributes
136 */
137struct fs_fdmi_attrs {
138 __be32 numattrs;
139 struct fc_fdmi_attr_entry attr[1];
140} __attribute__((__packed__));
141
142/*
143 * Registered Port List
144 */
145struct fc_fdmi_rpl {
146 __be32 numport;
147 struct fc_fdmi_port_name port[1];
148} __attribute__((__packed__));
149
150/*
151 * Register HBA (RHBA)
152 */
153struct fc_fdmi_rhba {
154 struct fc_fdmi_hba_identifier hbaid;
155 struct fc_fdmi_rpl port;
156 struct fs_fdmi_attrs hba_attrs;
157} __attribute__((__packed__));
158
159/*
160 * Register HBA Attributes (RHAT)
161 */
162struct fc_fdmi_rhat {
163 struct fc_fdmi_hba_identifier hbaid;
164 struct fs_fdmi_attrs hba_attrs;
165} __attribute__((__packed__));
166
167/*
168 * Register Port (RPRT)
169 */
170struct fc_fdmi_rprt {
171 struct fc_fdmi_hba_identifier hbaid;
172 struct fc_fdmi_port_name port;
173 struct fs_fdmi_attrs hba_attrs;
174} __attribute__((__packed__));
175
176/*
177 * Register Port Attributes (RPA)
178 */
179struct fc_fdmi_rpa {
180 struct fc_fdmi_port_name port;
181 struct fs_fdmi_attrs hba_attrs;
182} __attribute__((__packed__));
183
184/*
185 * Deregister Port (DPRT)
186 */
187struct fc_fdmi_dprt {
188 struct fc_fdmi_port_name port;
189} __attribute__((__packed__));
190
191/*
192 * Deregister Port Attributes (DPA)
193 */
194struct fc_fdmi_dpa {
195 struct fc_fdmi_port_name port;
196 struct fs_fdmi_attrs hba_attrs;
197} __attribute__((__packed__));
198
199/*
200 * Deregister HBA Attributes (DHAT)
201 */
202struct fc_fdmi_dhat {
203 struct fc_fdmi_hba_identifier hbaid;
204} __attribute__((__packed__));
205
206/*
207 * Deregister HBA (DHBA)
208 */
209struct fc_fdmi_dhba {
210 struct fc_fdmi_hba_identifier hbaid;
211} __attribute__((__packed__));
212
213#endif /* _FC_MS_H_ */
diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h
index be418d8448a..35fd4744f3e 100644
--- a/include/scsi/fc_encode.h
+++ b/include/scsi/fc_encode.h
@@ -20,6 +20,7 @@
20#ifndef _FC_ENCODE_H_ 20#ifndef _FC_ENCODE_H_
21#define _FC_ENCODE_H_ 21#define _FC_ENCODE_H_
22#include <asm/unaligned.h> 22#include <asm/unaligned.h>
23#include <linux/utsname.h>
23 24
24/* 25/*
25 * F_CTL values for simple requests and responses. 26 * F_CTL values for simple requests and responses.
@@ -43,6 +44,10 @@ struct fc_ct_req {
43 struct fc_ns_fid fid; 44 struct fc_ns_fid fid;
44 struct fc_ns_rsnn snn; 45 struct fc_ns_rsnn snn;
45 struct fc_ns_rspn spn; 46 struct fc_ns_rspn spn;
47 struct fc_fdmi_rhba rhba;
48 struct fc_fdmi_rpa rpa;
49 struct fc_fdmi_dprt dprt;
50 struct fc_fdmi_dhba dhba;
46 } payload; 51 } payload;
47}; 52};
48 53
@@ -97,7 +102,9 @@ static inline void fc_adisc_fill(struct fc_lport *lport, struct fc_frame *fp)
97 * returns pointer to ct request. 102 * returns pointer to ct request.
98 */ 103 */
99static inline struct fc_ct_req *fc_ct_hdr_fill(const struct fc_frame *fp, 104static inline struct fc_ct_req *fc_ct_hdr_fill(const struct fc_frame *fp,
100 unsigned int op, size_t req_size) 105 unsigned int op, size_t req_size,
106 enum fc_ct_fs_type fs_type,
107 u8 subtype)
101{ 108{
102 struct fc_ct_req *ct; 109 struct fc_ct_req *ct;
103 size_t ct_plen; 110 size_t ct_plen;
@@ -106,14 +113,14 @@ static inline struct fc_ct_req *fc_ct_hdr_fill(const struct fc_frame *fp,
106 ct = fc_frame_payload_get(fp, ct_plen); 113 ct = fc_frame_payload_get(fp, ct_plen);
107 memset(ct, 0, ct_plen); 114 memset(ct, 0, ct_plen);
108 ct->hdr.ct_rev = FC_CT_REV; 115 ct->hdr.ct_rev = FC_CT_REV;
109 ct->hdr.ct_fs_type = FC_FST_DIR; 116 ct->hdr.ct_fs_type = fs_type;
110 ct->hdr.ct_fs_subtype = FC_NS_SUBTYPE; 117 ct->hdr.ct_fs_subtype = subtype;
111 ct->hdr.ct_cmd = htons((u16) op); 118 ct->hdr.ct_cmd = htons((u16) op);
112 return ct; 119 return ct;
113} 120}
114 121
115/** 122/**
116 * fc_ct_fill() - Fill in a name service request frame 123 * fc_ct_ns_fill() - Fill in a name service request frame
117 * @lport: local port. 124 * @lport: local port.
118 * @fc_id: FC_ID of non-destination rport for GPN_ID and similar inquiries. 125 * @fc_id: FC_ID of non-destination rport for GPN_ID and similar inquiries.
119 * @fp: frame to contain payload. 126 * @fp: frame to contain payload.
@@ -121,7 +128,7 @@ static inline struct fc_ct_req *fc_ct_hdr_fill(const struct fc_frame *fp,
121 * @r_ctl: pointer to FC header R_CTL. 128 * @r_ctl: pointer to FC header R_CTL.
122 * @fh_type: pointer to FC-4 type. 129 * @fh_type: pointer to FC-4 type.
123 */ 130 */
124static inline int fc_ct_fill(struct fc_lport *lport, 131static inline int fc_ct_ns_fill(struct fc_lport *lport,
125 u32 fc_id, struct fc_frame *fp, 132 u32 fc_id, struct fc_frame *fp,
126 unsigned int op, enum fc_rctl *r_ctl, 133 unsigned int op, enum fc_rctl *r_ctl,
127 enum fc_fh_type *fh_type) 134 enum fc_fh_type *fh_type)
@@ -131,23 +138,28 @@ static inline int fc_ct_fill(struct fc_lport *lport,
131 138
132 switch (op) { 139 switch (op) {
133 case FC_NS_GPN_FT: 140 case FC_NS_GPN_FT:
134 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_gid_ft)); 141 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_gid_ft),
142 FC_FST_DIR, FC_NS_SUBTYPE);
135 ct->payload.gid.fn_fc4_type = FC_TYPE_FCP; 143 ct->payload.gid.fn_fc4_type = FC_TYPE_FCP;
136 break; 144 break;
137 145
138 case FC_NS_GPN_ID: 146 case FC_NS_GPN_ID:
139 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_fid)); 147 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_fid),
148 FC_FST_DIR, FC_NS_SUBTYPE);
149 ct->payload.gid.fn_fc4_type = FC_TYPE_FCP;
140 hton24(ct->payload.fid.fp_fid, fc_id); 150 hton24(ct->payload.fid.fp_fid, fc_id);
141 break; 151 break;
142 152
143 case FC_NS_RFT_ID: 153 case FC_NS_RFT_ID:
144 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rft)); 154 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rft),
155 FC_FST_DIR, FC_NS_SUBTYPE);
145 hton24(ct->payload.rft.fid.fp_fid, lport->port_id); 156 hton24(ct->payload.rft.fid.fp_fid, lport->port_id);
146 ct->payload.rft.fts = lport->fcts; 157 ct->payload.rft.fts = lport->fcts;
147 break; 158 break;
148 159
149 case FC_NS_RFF_ID: 160 case FC_NS_RFF_ID:
150 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rff_id)); 161 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rff_id),
162 FC_FST_DIR, FC_NS_SUBTYPE);
151 hton24(ct->payload.rff.fr_fid.fp_fid, lport->port_id); 163 hton24(ct->payload.rff.fr_fid.fp_fid, lport->port_id);
152 ct->payload.rff.fr_type = FC_TYPE_FCP; 164 ct->payload.rff.fr_type = FC_TYPE_FCP;
153 if (lport->service_params & FCP_SPPF_INIT_FCN) 165 if (lport->service_params & FCP_SPPF_INIT_FCN)
@@ -157,14 +169,16 @@ static inline int fc_ct_fill(struct fc_lport *lport,
157 break; 169 break;
158 170
159 case FC_NS_RNN_ID: 171 case FC_NS_RNN_ID:
160 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rn_id)); 172 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rn_id),
173 FC_FST_DIR, FC_NS_SUBTYPE);
161 hton24(ct->payload.rn.fr_fid.fp_fid, lport->port_id); 174 hton24(ct->payload.rn.fr_fid.fp_fid, lport->port_id);
162 put_unaligned_be64(lport->wwnn, &ct->payload.rn.fr_wwn); 175 put_unaligned_be64(lport->wwnn, &ct->payload.rn.fr_wwn);
163 break; 176 break;
164 177
165 case FC_NS_RSPN_ID: 178 case FC_NS_RSPN_ID:
166 len = strnlen(fc_host_symbolic_name(lport->host), 255); 179 len = strnlen(fc_host_symbolic_name(lport->host), 255);
167 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rspn) + len); 180 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rspn) + len,
181 FC_FST_DIR, FC_NS_SUBTYPE);
168 hton24(ct->payload.spn.fr_fid.fp_fid, lport->port_id); 182 hton24(ct->payload.spn.fr_fid.fp_fid, lport->port_id);
169 strncpy(ct->payload.spn.fr_name, 183 strncpy(ct->payload.spn.fr_name,
170 fc_host_symbolic_name(lport->host), len); 184 fc_host_symbolic_name(lport->host), len);
@@ -173,7 +187,8 @@ static inline int fc_ct_fill(struct fc_lport *lport,
173 187
174 case FC_NS_RSNN_NN: 188 case FC_NS_RSNN_NN:
175 len = strnlen(fc_host_symbolic_name(lport->host), 255); 189 len = strnlen(fc_host_symbolic_name(lport->host), 255);
176 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rsnn) + len); 190 ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rsnn) + len,
191 FC_FST_DIR, FC_NS_SUBTYPE);
177 put_unaligned_be64(lport->wwnn, &ct->payload.snn.fr_wwn); 192 put_unaligned_be64(lport->wwnn, &ct->payload.snn.fr_wwn);
178 strncpy(ct->payload.snn.fr_name, 193 strncpy(ct->payload.snn.fr_name,
179 fc_host_symbolic_name(lport->host), len); 194 fc_host_symbolic_name(lport->host), len);
@@ -189,6 +204,330 @@ static inline int fc_ct_fill(struct fc_lport *lport,
189} 204}
190 205
191/** 206/**
207 * fc_ct_ms_fill() - Fill in a mgmt service request frame
208 * @lport: local port.
 209 * @fc_id: FC_ID of the management server; not used when building FDMI requests.
210 * @fp: frame to contain payload.
211 * @op: CT opcode.
212 * @r_ctl: pointer to FC header R_CTL.
213 * @fh_type: pointer to FC-4 type.
214 */
215static inline int fc_ct_ms_fill(struct fc_lport *lport,
216 u32 fc_id, struct fc_frame *fp,
217 unsigned int op, enum fc_rctl *r_ctl,
218 enum fc_fh_type *fh_type)
219{
220 struct fc_ct_req *ct;
221 size_t len;
222 struct fc_fdmi_attr_entry *entry;
223 struct fs_fdmi_attrs *hba_attrs;
224 int numattrs = 0;
225
226 switch (op) {
227 case FC_FDMI_RHBA:
228 numattrs = 10;
229 len = sizeof(struct fc_fdmi_rhba);
230 len -= sizeof(struct fc_fdmi_attr_entry);
231 len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
232 len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
233 len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
234 len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
235 len += FC_FDMI_HBA_ATTR_MODEL_LEN;
236 len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN;
237 len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN;
238 len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN;
239 len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
240 len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
241 len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
242 ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
243 FC_FDMI_SUBTYPE);
244
245 /* HBA Identifier */
246 put_unaligned_be64(lport->wwpn, &ct->payload.rhba.hbaid.id);
247 /* Number of Ports - always 1 */
248 put_unaligned_be32(1, &ct->payload.rhba.port.numport);
249 /* Port Name */
250 put_unaligned_be64(lport->wwpn,
251 &ct->payload.rhba.port.port[0].portname);
252
253 /* HBA Attributes */
254 put_unaligned_be32(numattrs,
255 &ct->payload.rhba.hba_attrs.numattrs);
256 hba_attrs = &ct->payload.rhba.hba_attrs;
257 entry = (struct fc_fdmi_attr_entry *)hba_attrs->attr;
258 /* NodeName*/
259 len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
260 len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
261 put_unaligned_be16(FC_FDMI_HBA_ATTR_NODENAME,
262 &entry->type);
263 put_unaligned_be16(len, &entry->len);
264 put_unaligned_be64(lport->wwnn,
265 (__be64 *)&entry->value[0]);
266
267 /* Manufacturer */
268 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
269 FC_FDMI_HBA_ATTR_NODENAME_LEN);
270 len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
271 len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
272 put_unaligned_be16(FC_FDMI_HBA_ATTR_MANUFACTURER,
273 &entry->type);
274 put_unaligned_be16(len, &entry->len);
275 strncpy((char *)&entry->value,
276 fc_host_manufacturer(lport->host),
277 FC_FDMI_HBA_ATTR_MANUFACTURER_LEN);
278
279 /* SerialNumber */
280 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
281 FC_FDMI_HBA_ATTR_MANUFACTURER_LEN);
282 len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
283 len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
284 put_unaligned_be16(FC_FDMI_HBA_ATTR_SERIALNUMBER,
285 &entry->type);
286 put_unaligned_be16(len, &entry->len);
287 strncpy((char *)&entry->value,
288 fc_host_serial_number(lport->host),
289 FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN);
290
291 /* Model */
292 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
293 FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN);
294 len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
295 len += FC_FDMI_HBA_ATTR_MODEL_LEN;
296 put_unaligned_be16(FC_FDMI_HBA_ATTR_MODEL,
297 &entry->type);
298 put_unaligned_be16(len, &entry->len);
299 strncpy((char *)&entry->value,
300 fc_host_model(lport->host),
301 FC_FDMI_HBA_ATTR_MODEL_LEN);
302
303 /* Model Description */
304 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
305 FC_FDMI_HBA_ATTR_MODEL_LEN);
306 len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
307 len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN;
308 put_unaligned_be16(FC_FDMI_HBA_ATTR_MODELDESCRIPTION,
309 &entry->type);
310 put_unaligned_be16(len, &entry->len);
311 strncpy((char *)&entry->value,
312 fc_host_model_description(lport->host),
313 FC_FDMI_HBA_ATTR_MODELDESCR_LEN);
314
315 /* Hardware Version */
316 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
317 FC_FDMI_HBA_ATTR_MODELDESCR_LEN);
318 len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
319 len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN;
320 put_unaligned_be16(FC_FDMI_HBA_ATTR_HARDWAREVERSION,
321 &entry->type);
322 put_unaligned_be16(len, &entry->len);
323 strncpy((char *)&entry->value,
324 fc_host_hardware_version(lport->host),
325 FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN);
326
327 /* Driver Version */
328 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
329 FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN);
330 len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
331 len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN;
332 put_unaligned_be16(FC_FDMI_HBA_ATTR_DRIVERVERSION,
333 &entry->type);
334 put_unaligned_be16(len, &entry->len);
335 strncpy((char *)&entry->value,
336 fc_host_driver_version(lport->host),
337 FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN);
338
339 /* OptionROM Version */
340 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
341 FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN);
342 len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
343 len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
344 put_unaligned_be16(FC_FDMI_HBA_ATTR_OPTIONROMVERSION,
345 &entry->type);
346 put_unaligned_be16(len, &entry->len);
347 strncpy((char *)&entry->value,
348 fc_host_optionrom_version(lport->host),
349 FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN);
350
351 /* Firmware Version */
352 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
353 FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN);
354 len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
355 len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
356 put_unaligned_be16(FC_FDMI_HBA_ATTR_FIRMWAREVERSION,
357 &entry->type);
358 put_unaligned_be16(len, &entry->len);
359 strncpy((char *)&entry->value,
360 fc_host_firmware_version(lport->host),
361 FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN);
362
363 /* OS Name and Version */
364 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
365 FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN);
366 len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
367 len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
368 put_unaligned_be16(FC_FDMI_HBA_ATTR_OSNAMEVERSION,
369 &entry->type);
370 put_unaligned_be16(len, &entry->len);
371 snprintf((char *)&entry->value,
372 FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN,
373 "%s v%s",
374 init_utsname()->sysname,
375 init_utsname()->release);
376 break;
377 case FC_FDMI_RPA:
378 numattrs = 6;
379 len = sizeof(struct fc_fdmi_rpa);
380 len -= sizeof(struct fc_fdmi_attr_entry);
381 len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
382 len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
383 len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
384 len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
385 len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN;
386 len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
387 len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
388 ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
389 FC_FDMI_SUBTYPE);
390
391 /* Port Name */
392 put_unaligned_be64(lport->wwpn,
393 &ct->payload.rpa.port.portname);
394
395 /* Port Attributes */
396 put_unaligned_be32(numattrs,
397 &ct->payload.rpa.hba_attrs.numattrs);
398
399 hba_attrs = &ct->payload.rpa.hba_attrs;
400 entry = (struct fc_fdmi_attr_entry *)hba_attrs->attr;
401
402 /* FC4 types */
403 len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
404 len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
405 put_unaligned_be16(FC_FDMI_PORT_ATTR_FC4TYPES,
406 &entry->type);
407 put_unaligned_be16(len, &entry->len);
408 memcpy(&entry->value, fc_host_supported_fc4s(lport->host),
409 FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
410
411 /* Supported Speed */
412 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
413 FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
414 len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
415 len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
416 put_unaligned_be16(FC_FDMI_PORT_ATTR_SUPPORTEDSPEED,
417 &entry->type);
418 put_unaligned_be16(len, &entry->len);
419
420 put_unaligned_be32(fc_host_supported_speeds(lport->host),
421 &entry->value);
422
423 /* Current Port Speed */
424 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
425 FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN);
426 len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
427 len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
428 put_unaligned_be16(FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
429 &entry->type);
430 put_unaligned_be16(len, &entry->len);
431 put_unaligned_be32(lport->link_speed,
432 &entry->value);
433
434 /* Max Frame Size */
435 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
436 FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
437 len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
438 len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN;
439 put_unaligned_be16(FC_FDMI_PORT_ATTR_MAXFRAMESIZE,
440 &entry->type);
441 put_unaligned_be16(len, &entry->len);
442 put_unaligned_be32(fc_host_maxframe_size(lport->host),
443 &entry->value);
444
445 /* OS Device Name */
446 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
447 FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN);
448 len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
449 len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
450 put_unaligned_be16(FC_FDMI_PORT_ATTR_OSDEVICENAME,
451 &entry->type);
452 put_unaligned_be16(len, &entry->len);
453 /* Use the sysfs device name */
454 strncpy((char *)&entry->value,
455 dev_name(&lport->host->shost_gendev),
456 strnlen(dev_name(&lport->host->shost_gendev),
 457 FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN));
458
459 /* Host Name */
460 entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
461 FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN);
462 len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
463 len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
464 put_unaligned_be16(FC_FDMI_PORT_ATTR_HOSTNAME,
465 &entry->type);
466 put_unaligned_be16(len, &entry->len);
467 if (strlen(fc_host_system_hostname(lport->host)))
468 strncpy((char *)&entry->value,
469 fc_host_system_hostname(lport->host),
470 strnlen(fc_host_system_hostname(lport->host),
471 FC_FDMI_PORT_ATTR_HOSTNAME_LEN));
472 else
473 strncpy((char *)&entry->value,
474 init_utsname()->nodename,
475 FC_FDMI_PORT_ATTR_HOSTNAME_LEN);
476 break;
477 case FC_FDMI_DPRT:
478 len = sizeof(struct fc_fdmi_dprt);
479 ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
480 FC_FDMI_SUBTYPE);
481 /* Port Name */
482 put_unaligned_be64(lport->wwpn,
483 &ct->payload.dprt.port.portname);
484 break;
485 case FC_FDMI_DHBA:
486 len = sizeof(struct fc_fdmi_dhba);
487 ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
488 FC_FDMI_SUBTYPE);
489 /* HBA Identifier */
490 put_unaligned_be64(lport->wwpn, &ct->payload.dhba.hbaid.id);
491 break;
492 default:
493 return -EINVAL;
494 }
495 *r_ctl = FC_RCTL_DD_UNSOL_CTL;
496 *fh_type = FC_TYPE_CT;
497 return 0;
498}
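/* Every attribute built above is a 4-byte (type, len) header followed by a
 * fixed-size value, and the next entry begins immediately after the value;
 * that is what the repeated pointer arithmetic walks. A hypothetical
 * helper, shown only as a sketch, would make the stepping explicit:
 */
static inline struct fc_fdmi_attr_entry *
fc_fdmi_next_attr(struct fc_fdmi_attr_entry *entry, size_t value_len)
{
	return (struct fc_fdmi_attr_entry *)((char *)entry->value + value_len);
}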
499
500/**
501 * fc_ct_fill() - Fill in a common transport service request frame
502 * @lport: local port.
503 * @fc_id: FC_ID of non-destination rport for GPN_ID and similar inquiries.
504 * @fp: frame to contain payload.
505 * @op: CT opcode.
506 * @r_ctl: pointer to FC header R_CTL.
507 * @fh_type: pointer to FC-4 type.
508 */
509static inline int fc_ct_fill(struct fc_lport *lport,
510 u32 fc_id, struct fc_frame *fp,
511 unsigned int op, enum fc_rctl *r_ctl,
512 enum fc_fh_type *fh_type, u32 *did)
513{
514 int rc = -EINVAL;
515
516 switch (fc_id) {
517 case FC_FID_MGMT_SERV:
518 rc = fc_ct_ms_fill(lport, fc_id, fp, op, r_ctl, fh_type);
519 *did = FC_FID_MGMT_SERV;
520 break;
521 case FC_FID_DIR_SERV:
522 default:
523 rc = fc_ct_ns_fill(lport, fc_id, fp, op, r_ctl, fh_type);
524 *did = FC_FID_DIR_SERV;
525 break;
526 }
527
528 return rc;
529}
530/**
192 * fc_plogi_fill - Fill in plogi request frame 531 * fc_plogi_fill - Fill in plogi request frame
193 */ 532 */
194static inline void fc_plogi_fill(struct fc_lport *lport, struct fc_frame *fp, 533static inline void fc_plogi_fill(struct fc_lport *lport, struct fc_frame *fp,
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index 2703e3bedbf..9c23ee8fd2d 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -60,6 +60,9 @@ enum iscsi_uevent_e {
60 60
61 ISCSI_UEVENT_PATH_UPDATE = UEVENT_BASE + 20, 61 ISCSI_UEVENT_PATH_UPDATE = UEVENT_BASE + 20,
62 ISCSI_UEVENT_SET_IFACE_PARAMS = UEVENT_BASE + 21, 62 ISCSI_UEVENT_SET_IFACE_PARAMS = UEVENT_BASE + 21,
63 ISCSI_UEVENT_PING = UEVENT_BASE + 22,
64 ISCSI_UEVENT_GET_CHAP = UEVENT_BASE + 23,
65 ISCSI_UEVENT_DELETE_CHAP = UEVENT_BASE + 24,
63 66
64 /* up events */ 67 /* up events */
65 ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1, 68 ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1,
@@ -72,6 +75,8 @@ enum iscsi_uevent_e {
72 ISCSI_KEVENT_PATH_REQ = KEVENT_BASE + 7, 75 ISCSI_KEVENT_PATH_REQ = KEVENT_BASE + 7,
73 ISCSI_KEVENT_IF_DOWN = KEVENT_BASE + 8, 76 ISCSI_KEVENT_IF_DOWN = KEVENT_BASE + 8,
74 ISCSI_KEVENT_CONN_LOGIN_STATE = KEVENT_BASE + 9, 77 ISCSI_KEVENT_CONN_LOGIN_STATE = KEVENT_BASE + 9,
78 ISCSI_KEVENT_HOST_EVENT = KEVENT_BASE + 10,
79 ISCSI_KEVENT_PING_COMP = KEVENT_BASE + 11,
75}; 80};
76 81
77enum iscsi_tgt_dscvr { 82enum iscsi_tgt_dscvr {
@@ -80,6 +85,13 @@ enum iscsi_tgt_dscvr {
80 ISCSI_TGT_DSCVR_SLP = 3, 85 ISCSI_TGT_DSCVR_SLP = 3,
81}; 86};
82 87
88enum iscsi_host_event_code {
89 ISCSI_EVENT_LINKUP = 1,
90 ISCSI_EVENT_LINKDOWN,
91 /* must always be last */
92 ISCSI_EVENT_MAX,
93};
94
83struct iscsi_uevent { 95struct iscsi_uevent {
84 uint32_t type; /* k/u events type */ 96 uint32_t type; /* k/u events type */
85 uint32_t iferror; /* carries interface or resource errors */ 97 uint32_t iferror; /* carries interface or resource errors */
@@ -178,6 +190,26 @@ struct iscsi_uevent {
178 uint32_t host_no; 190 uint32_t host_no;
179 uint32_t count; 191 uint32_t count;
180 } set_iface_params; 192 } set_iface_params;
193 struct msg_iscsi_ping {
194 uint32_t host_no;
195 uint32_t iface_num;
196 uint32_t iface_type;
197 uint32_t payload_size;
198 uint32_t pid; /* unique ping id associated
199 with each ping request */
200 } iscsi_ping;
201 struct msg_get_chap {
202 uint32_t host_no;
203 uint32_t num_entries; /* number of CHAP entries
204 * on request, number of
205 * valid CHAP entries on
206 * response */
207 uint16_t chap_tbl_idx;
208 } get_chap;
209 struct msg_delete_chap {
210 uint32_t host_no;
211 uint16_t chap_tbl_idx;
212 } delete_chap;
181 } u; 213 } u;
182 union { 214 union {
183 /* messages k -> u */ 215 /* messages k -> u */
@@ -222,6 +254,18 @@ struct iscsi_uevent {
222 struct msg_notify_if_down { 254 struct msg_notify_if_down {
223 uint32_t host_no; 255 uint32_t host_no;
224 } notify_if_down; 256 } notify_if_down;
257 struct msg_host_event {
258 uint32_t host_no;
259 uint32_t data_size;
260 enum iscsi_host_event_code code;
261 } host_event;
262 struct msg_ping_comp {
263 uint32_t host_no;
264 uint32_t status;
265 uint32_t pid; /* unique ping id associated
266 with each ping request */
267 uint32_t data_size;
268 } ping_comp;
225 } r; 269 } r;
226} __attribute__ ((aligned (sizeof(uint64_t)))); 270} __attribute__ ((aligned (sizeof(uint64_t))));
227 271
@@ -406,6 +450,9 @@ enum iscsi_param {
406 450
407 ISCSI_PARAM_TGT_RESET_TMO, 451 ISCSI_PARAM_TGT_RESET_TMO,
408 ISCSI_PARAM_TARGET_ALIAS, 452 ISCSI_PARAM_TARGET_ALIAS,
453
454 ISCSI_PARAM_CHAP_IN_IDX,
455 ISCSI_PARAM_CHAP_OUT_IDX,
409 /* must always be last */ 456 /* must always be last */
410 ISCSI_PARAM_MAX, 457 ISCSI_PARAM_MAX,
411}; 458};
@@ -416,9 +463,26 @@ enum iscsi_host_param {
416 ISCSI_HOST_PARAM_INITIATOR_NAME, 463 ISCSI_HOST_PARAM_INITIATOR_NAME,
417 ISCSI_HOST_PARAM_NETDEV_NAME, 464 ISCSI_HOST_PARAM_NETDEV_NAME,
418 ISCSI_HOST_PARAM_IPADDRESS, 465 ISCSI_HOST_PARAM_IPADDRESS,
466 ISCSI_HOST_PARAM_PORT_STATE,
467 ISCSI_HOST_PARAM_PORT_SPEED,
419 ISCSI_HOST_PARAM_MAX, 468 ISCSI_HOST_PARAM_MAX,
420}; 469};
421 470
471/* iSCSI port Speed */
472enum iscsi_port_speed {
473 ISCSI_PORT_SPEED_UNKNOWN = 0x1,
474 ISCSI_PORT_SPEED_10MBPS = 0x2,
475 ISCSI_PORT_SPEED_100MBPS = 0x4,
476 ISCSI_PORT_SPEED_1GBPS = 0x8,
477 ISCSI_PORT_SPEED_10GBPS = 0x10,
478};
479
480/* iSCSI port state */
481enum iscsi_port_state {
482 ISCSI_PORT_STATE_DOWN = 0x1,
483 ISCSI_PORT_STATE_UP = 0x2,
484};
485
422#define iscsi_ptr(_handle) ((void*)(unsigned long)_handle) 486#define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
423#define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr) 487#define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)
424 488
@@ -501,4 +565,19 @@ struct iscsi_stats {
501 __attribute__ ((aligned (sizeof(uint64_t)))); 565 __attribute__ ((aligned (sizeof(uint64_t))));
502}; 566};
503 567
568enum chap_type_e {
569 CHAP_TYPE_OUT,
570 CHAP_TYPE_IN,
571};
572
573#define ISCSI_CHAP_AUTH_NAME_MAX_LEN 256
574#define ISCSI_CHAP_AUTH_SECRET_MAX_LEN 256
575struct iscsi_chap_rec {
576 uint16_t chap_tbl_idx;
577 enum chap_type_e chap_type;
578 char username[ISCSI_CHAP_AUTH_NAME_MAX_LEN];
579 uint8_t password[ISCSI_CHAP_AUTH_SECRET_MAX_LEN];
580 uint8_t password_length;
581} __packed;
582
504#endif 583#endif
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 6a3922fe0be..8f9dfba3fcf 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -30,6 +30,7 @@
30 30
31#include <scsi/fc/fc_fcp.h> 31#include <scsi/fc/fc_fcp.h>
32#include <scsi/fc/fc_ns.h> 32#include <scsi/fc/fc_ns.h>
33#include <scsi/fc/fc_ms.h>
33#include <scsi/fc/fc_els.h> 34#include <scsi/fc/fc_els.h>
34#include <scsi/fc/fc_gs.h> 35#include <scsi/fc/fc_gs.h>
35 36
@@ -52,6 +53,8 @@
52 * @LPORT_ST_RPN_ID: Register port name by ID (RPN_ID) sent 53 * @LPORT_ST_RPN_ID: Register port name by ID (RPN_ID) sent
53 * @LPORT_ST_RFT_ID: Register Fibre Channel types by ID (RFT_ID) sent 54 * @LPORT_ST_RFT_ID: Register Fibre Channel types by ID (RFT_ID) sent
54 * @LPORT_ST_RFF_ID: Register FC-4 Features by ID (RFF_ID) sent 55 * @LPORT_ST_RFF_ID: Register FC-4 Features by ID (RFF_ID) sent
56 * @LPORT_ST_FDMI: Waiting for mgmt server rport to become ready
 57 * @LPORT_ST_RHBA: Register HBA (RHBA) sent
55 * @LPORT_ST_SCR: State Change Register (SCR) sent 58 * @LPORT_ST_SCR: State Change Register (SCR) sent
56 * @LPORT_ST_READY: Ready for use 59 * @LPORT_ST_READY: Ready for use
57 * @LPORT_ST_LOGO: Local port logout (LOGO) sent 60 * @LPORT_ST_LOGO: Local port logout (LOGO) sent
@@ -66,6 +69,11 @@ enum fc_lport_state {
66 LPORT_ST_RSPN_ID, 69 LPORT_ST_RSPN_ID,
67 LPORT_ST_RFT_ID, 70 LPORT_ST_RFT_ID,
68 LPORT_ST_RFF_ID, 71 LPORT_ST_RFF_ID,
72 LPORT_ST_FDMI,
73 LPORT_ST_RHBA,
74 LPORT_ST_RPA,
75 LPORT_ST_DHBA,
76 LPORT_ST_DPRT,
69 LPORT_ST_SCR, 77 LPORT_ST_SCR,
70 LPORT_ST_READY, 78 LPORT_ST_READY,
71 LPORT_ST_LOGO, 79 LPORT_ST_LOGO,
@@ -797,6 +805,7 @@ enum fc_lport_event {
797 * @host: The SCSI host associated with a local port 805 * @host: The SCSI host associated with a local port
798 * @ema_list: Exchange manager anchor list 806 * @ema_list: Exchange manager anchor list
799 * @dns_rdata: The directory server remote port 807 * @dns_rdata: The directory server remote port
808 * @ms_rdata: The management server remote port
800 * @ptp_rdata: Point to point remote port 809 * @ptp_rdata: Point to point remote port
801 * @scsi_priv: FCP layer internal data 810 * @scsi_priv: FCP layer internal data
802 * @disc: Discovery context 811 * @disc: Discovery context
@@ -842,6 +851,7 @@ struct fc_lport {
842 struct Scsi_Host *host; 851 struct Scsi_Host *host;
843 struct list_head ema_list; 852 struct list_head ema_list;
844 struct fc_rport_priv *dns_rdata; 853 struct fc_rport_priv *dns_rdata;
854 struct fc_rport_priv *ms_rdata;
845 struct fc_rport_priv *ptp_rdata; 855 struct fc_rport_priv *ptp_rdata;
846 void *scsi_priv; 856 void *scsi_priv;
847 struct fc_disc disc; 857 struct fc_disc disc;
@@ -877,6 +887,7 @@ struct fc_lport {
877 u32 does_npiv:1; 887 u32 does_npiv:1;
878 u32 npiv_enabled:1; 888 u32 npiv_enabled:1;
879 u32 point_to_multipoint:1; 889 u32 point_to_multipoint:1;
890 u32 fdmi_enabled:1;
880 u32 mfs; 891 u32 mfs;
881 u8 max_retry_count; 892 u8 max_retry_count;
882 u8 max_rport_retry_count; 893 u8 max_rport_retry_count;
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index cedcff371c8..6e33386a389 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -268,7 +268,7 @@ struct iscsi_session {
268 int lu_reset_timeout; 268 int lu_reset_timeout;
269 int tgt_reset_timeout; 269 int tgt_reset_timeout;
270 int initial_r2t_en; 270 int initial_r2t_en;
271 unsigned max_r2t; 271 unsigned short max_r2t;
272 int imm_data_en; 272 int imm_data_en;
273 unsigned first_burst; 273 unsigned first_burst;
274 unsigned max_burst; 274 unsigned max_burst;
@@ -284,6 +284,7 @@ struct iscsi_session {
284 char *password; 284 char *password;
285 char *password_in; 285 char *password_in;
286 char *targetname; 286 char *targetname;
287 char *targetalias;
287 char *ifacename; 288 char *ifacename;
288 char *initiatorname; 289 char *initiatorname;
289 /* control data */ 290 /* control data */
diff --git a/include/scsi/libiscsi_tcp.h b/include/scsi/libiscsi_tcp.h
index ac0cc1d925e..215469a9b80 100644
--- a/include/scsi/libiscsi_tcp.h
+++ b/include/scsi/libiscsi_tcp.h
@@ -128,7 +128,7 @@ extern void iscsi_tcp_conn_teardown(struct iscsi_cls_conn *cls_conn);
128/* misc helpers */ 128/* misc helpers */
129extern int iscsi_tcp_r2tpool_alloc(struct iscsi_session *session); 129extern int iscsi_tcp_r2tpool_alloc(struct iscsi_session *session);
130extern void iscsi_tcp_r2tpool_free(struct iscsi_session *session); 130extern void iscsi_tcp_r2tpool_free(struct iscsi_session *session);
131 131extern int iscsi_tcp_set_max_r2t(struct iscsi_conn *conn, char *buf);
132extern void iscsi_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn, 132extern void iscsi_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
133 struct iscsi_stats *stats); 133 struct iscsi_stats *stats);
134#endif /* LIBISCSI_TCP_H */ 134#endif /* LIBISCSI_TCP_H */
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 6a308d42d98..5f5ed1b8b41 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -86,7 +86,9 @@ enum discover_event {
86 DISCE_DISCOVER_DOMAIN = 0U, 86 DISCE_DISCOVER_DOMAIN = 0U,
87 DISCE_REVALIDATE_DOMAIN = 1, 87 DISCE_REVALIDATE_DOMAIN = 1,
88 DISCE_PORT_GONE = 2, 88 DISCE_PORT_GONE = 2,
89 DISC_NUM_EVENTS = 3, 89 DISCE_PROBE = 3,
90 DISCE_DESTRUCT = 4,
91 DISC_NUM_EVENTS = 5,
90}; 92};
91 93
92/* ---------- Expander Devices ---------- */ 94/* ---------- Expander Devices ---------- */
@@ -151,6 +153,8 @@ struct expander_device {
151 153
152 struct ex_phy *ex_phy; 154 struct ex_phy *ex_phy;
153 struct sas_port *parent_port; 155 struct sas_port *parent_port;
156
157 struct mutex cmd_mutex;
154}; 158};
155 159
156/* ---------- SATA device ---------- */ 160/* ---------- SATA device ---------- */
@@ -162,22 +166,21 @@ enum ata_command_set {
162struct sata_device { 166struct sata_device {
163 enum ata_command_set command_set; 167 enum ata_command_set command_set;
164 struct smp_resp rps_resp; /* report_phy_sata_resp */ 168 struct smp_resp rps_resp; /* report_phy_sata_resp */
165 __le16 *identify_device;
166 __le16 *identify_packet_device;
167
168 u8 port_no; /* port number, if this is a PM (Port) */ 169 u8 port_no; /* port number, if this is a PM (Port) */
169 struct list_head children; /* PM Ports if this is a PM */ 170 struct list_head children; /* PM Ports if this is a PM */
170 171
171 struct ata_port *ap; 172 struct ata_port *ap;
172 struct ata_host ata_host; 173 struct ata_host ata_host;
173 struct ata_taskfile tf; 174 struct ata_taskfile tf;
174 u32 sstatus;
175 u32 serror;
176 u32 scontrol;
177}; 175};
178 176
179/* ---------- Domain device ---------- */ 177enum {
178 SAS_DEV_GONE,
179 SAS_DEV_DESTROY,
180};
181
180struct domain_device { 182struct domain_device {
183 spinlock_t done_lock;
181 enum sas_dev_type dev_type; 184 enum sas_dev_type dev_type;
182 185
183 enum sas_linkrate linkrate; 186 enum sas_linkrate linkrate;
@@ -189,8 +192,10 @@ struct domain_device {
189 struct domain_device *parent; 192 struct domain_device *parent;
190 struct list_head siblings; /* devices on the same level */ 193 struct list_head siblings; /* devices on the same level */
191 struct asd_sas_port *port; /* shortcut to root of the tree */ 194 struct asd_sas_port *port; /* shortcut to root of the tree */
195 struct sas_phy *phy;
192 196
193 struct list_head dev_list_node; 197 struct list_head dev_list_node;
198 struct list_head disco_list_node; /* awaiting probe or destruct */
194 199
195 enum sas_protocol iproto; 200 enum sas_protocol iproto;
196 enum sas_protocol tproto; 201 enum sas_protocol tproto;
@@ -208,7 +213,8 @@ struct domain_device {
208 }; 213 };
209 214
210 void *lldd_dev; 215 void *lldd_dev;
211 int gone; 216 unsigned long state;
217 struct kref kref;
212}; 218};
213 219
214struct sas_discovery_event { 220struct sas_discovery_event {
@@ -217,7 +223,6 @@ struct sas_discovery_event {
217}; 223};
218 224
219struct sas_discovery { 225struct sas_discovery {
220 spinlock_t disc_event_lock;
221 struct sas_discovery_event disc_work[DISC_NUM_EVENTS]; 226 struct sas_discovery_event disc_work[DISC_NUM_EVENTS];
222 unsigned long pending; 227 unsigned long pending;
223 u8 fanout_sas_addr[8]; 228 u8 fanout_sas_addr[8];
@@ -226,7 +231,6 @@ struct sas_discovery {
226 int max_level; 231 int max_level;
227}; 232};
228 233
229
230/* The port struct is Class:RW, driver:RO */ 234/* The port struct is Class:RW, driver:RO */
231struct asd_sas_port { 235struct asd_sas_port {
232/* private: */ 236/* private: */
@@ -236,9 +240,10 @@ struct asd_sas_port {
236 struct domain_device *port_dev; 240 struct domain_device *port_dev;
237 spinlock_t dev_list_lock; 241 spinlock_t dev_list_lock;
238 struct list_head dev_list; 242 struct list_head dev_list;
243 struct list_head disco_list;
244 struct list_head destroy_list;
239 enum sas_linkrate linkrate; 245 enum sas_linkrate linkrate;
240 246
241 struct sas_phy *phy;
242 struct work_struct work; 247 struct work_struct work;
243 248
244/* public: */ 249/* public: */
@@ -274,7 +279,6 @@ struct asd_sas_event {
274 */ 279 */
275struct asd_sas_phy { 280struct asd_sas_phy {
276/* private: */ 281/* private: */
277 /* protected by ha->event_lock */
278 struct asd_sas_event port_events[PORT_NUM_EVENTS]; 282 struct asd_sas_event port_events[PORT_NUM_EVENTS];
279 struct asd_sas_event phy_events[PHY_NUM_EVENTS]; 283 struct asd_sas_event phy_events[PHY_NUM_EVENTS];
280 284
@@ -320,6 +324,7 @@ struct asd_sas_phy {
320struct scsi_core { 324struct scsi_core {
321 struct Scsi_Host *shost; 325 struct Scsi_Host *shost;
322 326
327 struct mutex task_queue_flush;
323 spinlock_t task_queue_lock; 328 spinlock_t task_queue_lock;
324 struct list_head task_queue; 329 struct list_head task_queue;
325 int task_queue_size; 330 int task_queue_size;
@@ -334,18 +339,23 @@ struct sas_ha_event {
334 339
335enum sas_ha_state { 340enum sas_ha_state {
336 SAS_HA_REGISTERED, 341 SAS_HA_REGISTERED,
337 SAS_HA_UNREGISTERED 342 SAS_HA_DRAINING,
343 SAS_HA_ATA_EH_ACTIVE,
344 SAS_HA_FROZEN,
338}; 345};
339 346
340struct sas_ha_struct { 347struct sas_ha_struct {
341/* private: */ 348/* private: */
342 spinlock_t event_lock;
343 struct sas_ha_event ha_events[HA_NUM_EVENTS]; 349 struct sas_ha_event ha_events[HA_NUM_EVENTS];
344 unsigned long pending; 350 unsigned long pending;
345 351
346 enum sas_ha_state state; 352 struct list_head defer_q; /* work queued while draining */
353 struct mutex drain_mutex;
354 unsigned long state;
347 spinlock_t state_lock; 355 spinlock_t state_lock;
348 356
357 struct mutex disco_mutex;
358
349 struct scsi_core core; 359 struct scsi_core core;
350 360
351/* public: */ 361/* public: */
@@ -374,7 +384,8 @@ struct sas_ha_struct {
374 384
375 void *lldd_ha; /* not touched by sas class code */ 385 void *lldd_ha; /* not touched by sas class code */
376 386
377 struct list_head eh_done_q; 387 struct list_head eh_done_q; /* complete via scsi_eh_flush_done_q */
388 struct list_head eh_ata_q; /* scmds to promote from sas to ata eh */
378}; 389};
379 390
380#define SHOST_TO_SAS_HA(_shost) (*(struct sas_ha_struct **)(_shost)->hostdata) 391#define SHOST_TO_SAS_HA(_shost) (*(struct sas_ha_struct **)(_shost)->hostdata)
@@ -418,6 +429,11 @@ static inline unsigned int to_sas_gpio_od(int device, int bit)
418 return 3 * device + bit; 429 return 3 * device + bit;
419} 430}
420 431
432static inline void sas_put_local_phy(struct sas_phy *phy)
433{
434 put_device(&phy->dev);
435}
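/* Intended pairing, assuming sas_get_local_phy() below (the renamed
 * sas_find_local_phy()) returns the phy with a device reference held:
 *
 *	struct sas_phy *phy = sas_get_local_phy(dev);
 *	... issue reset or control through phy ...
 *	sas_put_local_phy(phy);
 */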
436
421#ifdef CONFIG_SCSI_SAS_HOST_SMP 437#ifdef CONFIG_SCSI_SAS_HOST_SMP
422int try_test_sas_gpio_gp_bit(unsigned int od, u8 *data, u8 index, u8 count); 438int try_test_sas_gpio_gp_bit(unsigned int od, u8 *data, u8 index, u8 count);
423#else 439#else
@@ -447,7 +463,10 @@ enum service_response {
447}; 463};
448 464
449enum exec_status { 465enum exec_status {
450 /* The SAM_STAT_.. codes fit in the lower 6 bits */ 466 /* The SAM_STAT_.. codes fit in the lower 6 bits, alias some of
467 * them here to silence 'case value not in enumerated type' warnings
468 */
469 __SAM_STAT_CHECK_CONDITION = SAM_STAT_CHECK_CONDITION,
451 470
452 SAS_DEV_NO_RESPONSE = 0x80, 471 SAS_DEV_NO_RESPONSE = 0x80,
453 SAS_DATA_UNDERRUN, 472 SAS_DATA_UNDERRUN,
@@ -487,10 +506,6 @@ enum exec_status {
487struct ata_task_resp { 506struct ata_task_resp {
488 u16 frame_len; 507 u16 frame_len;
489 u8 ending_fis[24]; /* dev to host or data-in */ 508 u8 ending_fis[24]; /* dev to host or data-in */
490 u32 sstatus;
491 u32 serror;
492 u32 scontrol;
493 u32 sactive;
494}; 509};
495 510
496#define SAS_STATUS_BUF_SIZE 96 511#define SAS_STATUS_BUF_SIZE 96
@@ -604,7 +619,8 @@ struct sas_domain_function_template {
 	int (*lldd_clear_aca)(struct domain_device *, u8 *lun);
 	int (*lldd_clear_task_set)(struct domain_device *, u8 *lun);
 	int (*lldd_I_T_nexus_reset)(struct domain_device *);
-	int (*lldd_ata_soft_reset)(struct domain_device *);
+	int (*lldd_ata_check_ready)(struct domain_device *);
+	void (*lldd_ata_set_dmamode)(struct domain_device *);
 	int (*lldd_lu_reset)(struct domain_device *, u8 *lun);
 	int (*lldd_query_task)(struct sas_task *);

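Replacing lldd_ata_soft_reset with lldd_ata_check_ready changes the LLDD's role from performing the reset itself to reporting link readiness so the core can poll it. A hedged sketch of what an implementation might look like (the return convention and the hardware helper are assumptions, not defined by this header):

/* Hypothetical LLDD hook: report whether the ATA link behind 'dev' has
 * come back after a reset. Convention assumed here: 1 = ready,
 * 0 = not yet ready, negative errno = give up.
 */
static int example_lldd_ata_check_ready(struct domain_device *dev)
{
	return example_hw_link_is_up(dev);	/* invented helper */
}
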
@@ -625,14 +641,11 @@ extern int sas_unregister_ha(struct sas_ha_struct *);

 int sas_set_phy_speed(struct sas_phy *phy,
 		      struct sas_phy_linkrates *rates);
-int sas_phy_enable(struct sas_phy *phy, int enabled);
 int sas_phy_reset(struct sas_phy *phy, int hard_reset);
 int sas_queue_up(struct sas_task *task);
 extern int sas_queuecommand(struct Scsi_Host * ,struct scsi_cmnd *);
 extern int sas_target_alloc(struct scsi_target *);
-extern int sas_slave_alloc(struct scsi_device *);
 extern int sas_slave_configure(struct scsi_device *);
-extern void sas_slave_destroy(struct scsi_device *);
 extern int sas_change_queue_depth(struct scsi_device *, int new_depth,
 				  int reason);
 extern int sas_change_queue_type(struct scsi_device *, int qt);
@@ -649,7 +662,7 @@ void sas_init_ex_attr(void);

 int sas_ex_revalidate_domain(struct domain_device *);

-void sas_unregister_domain_devices(struct asd_sas_port *port);
+void sas_unregister_domain_devices(struct asd_sas_port *port, int gone);
 void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *);
 int sas_discover_event(struct asd_sas_port *, enum discover_event ev);

@@ -661,20 +674,20 @@ void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *);
 void sas_init_dev(struct domain_device *);

 void sas_task_abort(struct sas_task *);
-int __sas_task_abort(struct sas_task *);
 int sas_eh_device_reset_handler(struct scsi_cmnd *cmd);
 int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd);

 extern void sas_target_destroy(struct scsi_target *);
 extern int sas_slave_alloc(struct scsi_device *);
 extern int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg);
+extern int sas_drain_work(struct sas_ha_struct *ha);

 extern int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 			   struct request *req);

 extern void sas_ssp_task_response(struct device *dev, struct sas_task *task,
 				  struct ssp_response_iu *iu);
-struct sas_phy *sas_find_local_phy(struct domain_device *dev);
+struct sas_phy *sas_get_local_phy(struct domain_device *dev);

 int sas_request_addr(struct Scsi_Host *shost, u8 *addr);

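The rename from sas_find_local_phy() to sas_get_local_phy(), together with the new sas_put_local_phy() inline earlier in this header, signals that the returned phy now carries a device reference the caller must drop. A short sketch of the expected bracketing:

#include <scsi/libsas.h>

/* Sketch: take a referenced local phy, use it, then release it with
 * the new put helper.
 */
static int example_hard_reset(struct domain_device *dev)
{
	struct sas_phy *phy = sas_get_local_phy(dev);
	int rc;

	rc = sas_phy_reset(phy, 1);	/* 1 = hard reset */
	sas_put_local_phy(phy);
	return rc;
}
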
diff --git a/include/scsi/sas.h b/include/scsi/sas.h
index 3673d685e6a..a577a833603 100644
--- a/include/scsi/sas.h
+++ b/include/scsi/sas.h
@@ -89,8 +89,7 @@ enum sas_oob_mode {
 	SAS_OOB_MODE
 };

-/* See sas_discover.c if you plan on changing these.
- */
+/* See sas_discover.c if you plan on changing these */
 enum sas_dev_type {
 	NO_DEVICE   = 0,	  /* protocol */
 	SAS_END_DEV = 1,	  /* protocol */
@@ -100,6 +99,7 @@ enum sas_dev_type {
 	SATA_DEV = 5,
 	SATA_PM = 7,
 	SATA_PM_PORT= 8,
+	SATA_PENDING  = 9,
 };

 enum sas_protocol {
diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h
index 9c159f74c6d..cdccd2eb7b6 100644
--- a/include/scsi/sas_ata.h
+++ b/include/scsi/sas_ata.h
@@ -32,19 +32,19 @@

 static inline int dev_is_sata(struct domain_device *dev)
 {
-	return (dev->rphy->identify.target_port_protocols & SAS_PROTOCOL_SATA);
+	return dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM ||
+	       dev->dev_type == SATA_PM_PORT || dev->dev_type == SATA_PENDING;
 }

-int sas_ata_init_host_and_port(struct domain_device *found_dev,
-			       struct scsi_target *starget);
-
+int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy);
+int sas_ata_init_host_and_port(struct domain_device *found_dev);
 void sas_ata_task_abort(struct sas_task *task);
 void sas_ata_strategy_handler(struct Scsi_Host *shost);
-int sas_ata_timed_out(struct scsi_cmnd *cmd, struct sas_task *task,
-		      enum blk_eh_timer_return *rtn);
-int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
-	       struct list_head *done_q);
-
+void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
+		struct list_head *done_q);
+void sas_ata_schedule_reset(struct domain_device *dev);
+void sas_ata_wait_eh(struct domain_device *dev);
+void sas_probe_sata(struct asd_sas_port *port);
 #else


@@ -52,8 +52,7 @@ static inline int dev_is_sata(struct domain_device *dev)
 {
 	return 0;
 }
-static inline int sas_ata_init_host_and_port(struct domain_device *found_dev,
-					     struct scsi_target *starget)
+static inline int sas_ata_init_host_and_port(struct domain_device *found_dev)
 {
 	return 0;
 }
@@ -65,18 +64,27 @@ static inline void sas_ata_strategy_handler(struct Scsi_Host *shost)
 {
 }

-static inline int sas_ata_timed_out(struct scsi_cmnd *cmd,
-				    struct sas_task *task,
-				    enum blk_eh_timer_return *rtn)
+static inline void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
+			      struct list_head *done_q)
 {
-	return 0;
 }
-static inline int sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
-			     struct list_head *done_q)
+
+static inline void sas_ata_schedule_reset(struct domain_device *dev)
+{
+}
+
+static inline void sas_ata_wait_eh(struct domain_device *dev)
+{
+}
+
+static inline void sas_probe_sata(struct asd_sas_port *port)
 {
-	return 0;
 }

+static inline int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
+{
+	return 0;
+}
 #endif

 #endif /* _SAS_ATA_H_ */
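dev_is_sata() now keys off dev_type instead of the rphy protocol bits, and it deliberately includes the new SATA_PENDING type, which appears to mark devices seen during discovery whose ATA identification has not completed yet. The reading below is an interpretation of the header, not stated in it:

/* Interpretation: a device counts as SATA for routing purposes even
 * before identification finishes.
 */
static void example_route(struct domain_device *dev)
{
	if (dev_is_sata(dev)) {
		/* true for SATA_DEV, SATA_PM, SATA_PM_PORT and also the
		 * not-yet-identified SATA_PENDING state
		 */
		sas_ata_schedule_reset(dev);	/* example action only */
	}
}
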
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index a5e885a111d..377df4a2851 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -10,6 +10,7 @@

 struct Scsi_Host;
 struct scsi_device;
+struct scsi_driver;

 /*
  * MAX_COMMAND_SIZE is:
@@ -131,6 +132,11 @@ struct scsi_cmnd {
 	unsigned char tag;	/* SCSI-II queued command tag */
 };

+static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
+{
+	return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
+}
+
 extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
 extern struct scsi_cmnd *__scsi_get_command(struct Scsi_Host *, gfp_t);
 extern void scsi_put_command(struct scsi_cmnd *);
@@ -289,17 +295,17 @@ static inline struct scsi_data_buffer *scsi_prot(struct scsi_cmnd *cmd)

 static inline void set_msg_byte(struct scsi_cmnd *cmd, char status)
 {
-	cmd->result |= status << 8;
+	cmd->result = (cmd->result & 0xffff00ff) | (status << 8);
 }

 static inline void set_host_byte(struct scsi_cmnd *cmd, char status)
 {
-	cmd->result |= status << 16;
+	cmd->result = (cmd->result & 0xff00ffff) | (status << 16);
 }

 static inline void set_driver_byte(struct scsi_cmnd *cmd, char status)
 {
-	cmd->result |= status << 24;
+	cmd->result = (cmd->result & 0x00ffffff) | (status << 24);
 }

 #endif /* _SCSI_SCSI_CMND_H */
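The mask-and-assign form fixes a real limitation: with the old '|=' versions a byte could only accumulate bits, so a later call could never overwrite an earlier nonzero value (setting DID_OK, which is 0, was a no-op). A small illustration:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

static void example_result_overwrite(struct scsi_cmnd *cmd)
{
	set_host_byte(cmd, DID_TRANSPORT_DISRUPTED);
	/* Old '|=': host byte stays DID_TRANSPORT_DISRUPTED (OR with 0).
	 * New masked form: host byte is cleanly replaced with DID_OK.
	 */
	set_host_byte(cmd, DID_OK);
}
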
diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h
index 9fd6702f02e..d443aa06a72 100644
--- a/include/scsi/scsi_driver.h
+++ b/include/scsi/scsi_driver.h
@@ -16,6 +16,7 @@ struct scsi_driver {

 	void (*rescan)(struct device *);
 	int (*done)(struct scsi_cmnd *);
+	int (*eh_action)(struct scsi_cmnd *, unsigned char *, int, int);
 };
#define to_scsi_driver(drv) \
 	container_of((drv), struct scsi_driver, gendrv)
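The new eh_action hook pairs with the scsi_cmd_to_driver() helper added in scsi_cmnd.h: error handling can route a decision to the upper-level driver that owns the command. Only the prototype is defined here; the argument meanings assumed below (an EH command buffer, its length, and the current disposition) are an inference, so treat this dispatch sketch as an assumption:

/* Sketch: let the owning ULD veto or adjust a proposed error-handling
 * disposition; fall through unchanged when no hook is registered.
 */
static int example_eh_dispatch(struct scsi_cmnd *scmd, unsigned char *eh_cmnd,
			       int eh_cmnd_len, int disposition)
{
	struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);

	if (sdrv->eh_action)
		disposition = sdrv->eh_action(scmd, eh_cmnd, eh_cmnd_len,
					      disposition);
	return disposition;
}
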
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 2a65167a8f1..719faf1863a 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -126,8 +126,8 @@ enum fc_vport_state {
 						incapable of reporting */
 #define FC_PORTSPEED_1GBIT		1
 #define FC_PORTSPEED_2GBIT		2
-#define FC_PORTSPEED_4GBIT		4
-#define FC_PORTSPEED_10GBIT		8
+#define FC_PORTSPEED_10GBIT		4
+#define FC_PORTSPEED_4GBIT		8
 #define FC_PORTSPEED_8GBIT		0x10
 #define FC_PORTSPEED_16GBIT		0x20
 #define FC_PORTSPEED_NOT_NEGOTIATED	(1 << 15) /* Speed not established */
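Note that this hunk is a swap, not an addition: FC_PORTSPEED_4GBIT and FC_PORTSPEED_10GBIT trade numeric values, so anything comparing raw masks across kernel versions sees a different encoding (presumably the reorder aligns the bits with an external FC speed-bit definition, though the header does not say). For instance:

#include <scsi/scsi_transport_fc.h>

/* Illustrative: the same advertised capability yields a different raw
 * mask after this patch (old: 0x04 | 0x10 = 0x14, new: 0x08 | 0x10 = 0x18).
 */
static void example_advertise_speeds(struct Scsi_Host *shost)
{
	fc_host_supported_speeds(shost) =
		FC_PORTSPEED_4GBIT | FC_PORTSPEED_8GBIT;
}
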
@@ -486,6 +486,13 @@ struct fc_host_attrs {
 	u32 maxframe_size;
 	u16 max_npiv_vports;
 	char serial_number[FC_SERIAL_NUMBER_SIZE];
+	char manufacturer[FC_SERIAL_NUMBER_SIZE];
+	char model[FC_SYMBOLIC_NAME_SIZE];
+	char model_description[FC_SYMBOLIC_NAME_SIZE];
+	char hardware_version[FC_VERSION_STRING_SIZE];
+	char driver_version[FC_VERSION_STRING_SIZE];
+	char firmware_version[FC_VERSION_STRING_SIZE];
+	char optionrom_version[FC_VERSION_STRING_SIZE];

 	/* Dynamic Attributes */
 	u32 port_id;
@@ -541,6 +548,20 @@ struct fc_host_attrs {
 	(((struct fc_host_attrs *)(x)->shost_data)->max_npiv_vports)
 #define fc_host_serial_number(x) \
 	(((struct fc_host_attrs *)(x)->shost_data)->serial_number)
+#define fc_host_manufacturer(x) \
+	(((struct fc_host_attrs *)(x)->shost_data)->manufacturer)
+#define fc_host_model(x) \
+	(((struct fc_host_attrs *)(x)->shost_data)->model)
+#define fc_host_model_description(x) \
+	(((struct fc_host_attrs *)(x)->shost_data)->model_description)
+#define fc_host_hardware_version(x) \
+	(((struct fc_host_attrs *)(x)->shost_data)->hardware_version)
+#define fc_host_driver_version(x) \
+	(((struct fc_host_attrs *)(x)->shost_data)->driver_version)
+#define fc_host_firmware_version(x) \
+	(((struct fc_host_attrs *)(x)->shost_data)->firmware_version)
+#define fc_host_optionrom_version(x) \
+	(((struct fc_host_attrs *)(x)->shost_data)->optionrom_version)
 #define fc_host_port_id(x) \
 	(((struct fc_host_attrs *)(x)->shost_data)->port_id)
 #define fc_host_port_type(x) \
@@ -700,6 +721,13 @@ struct fc_function_template {
 	unsigned long	show_host_supported_speeds:1;
 	unsigned long	show_host_maxframe_size:1;
 	unsigned long	show_host_serial_number:1;
+	unsigned long	show_host_manufacturer:1;
+	unsigned long	show_host_model:1;
+	unsigned long	show_host_model_description:1;
+	unsigned long	show_host_hardware_version:1;
+	unsigned long	show_host_driver_version:1;
+	unsigned long	show_host_firmware_version:1;
+	unsigned long	show_host_optionrom_version:1;
 	/* host dynamic attributes */
 	unsigned long	show_host_port_id:1;
 	unsigned long	show_host_port_type:1;
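The new static host strings look like the inventory data FDMI-style fabric management reports (manufacturer, model, firmware, option ROM); an LLDD opts in through the matching show_host_* template bits and fills the buffers at host setup. A hedged sketch with placeholder values:

#include <scsi/scsi_transport_fc.h>

static struct fc_function_template example_fc_template = {
	.show_host_manufacturer		= 1,
	.show_host_model		= 1,
	.show_host_firmware_version	= 1,
	/* ... remaining template fields elided ... */
};

static void example_fill_host_strings(struct Scsi_Host *shost)
{
	snprintf(fc_host_manufacturer(shost), FC_SERIAL_NUMBER_SIZE, "%s",
		 "Example Corp");
	snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "%s", "EX-1000");
	snprintf(fc_host_firmware_version(shost), FC_VERSION_STRING_SIZE, "%s",
		 "1.0.0");
}
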
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 2c3a46d102f..53f0b361d66 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -144,6 +144,12 @@ struct iscsi_transport {
 			 int param, char *buf);
 	umode_t (*attr_is_visible)(int param_type, int param);
 	int (*bsg_request)(struct bsg_job *job);
+	int (*send_ping) (struct Scsi_Host *shost, uint32_t iface_num,
+			  uint32_t iface_type, uint32_t payload_size,
+			  uint32_t pid, struct sockaddr *dst_addr);
+	int (*get_chap) (struct Scsi_Host *shost, uint16_t chap_tbl_idx,
+			 uint32_t *num_entries, char *buf);
+	int (*delete_chap) (struct Scsi_Host *shost, uint16_t chap_tbl_idx);
 };

 /*
@@ -166,6 +172,17 @@ extern int iscsi_offload_mesg(struct Scsi_Host *shost,
 			      struct iscsi_transport *transport, uint32_t type,
 			      char *data, uint16_t data_size);

+extern void iscsi_post_host_event(uint32_t host_no,
+				  struct iscsi_transport *transport,
+				  enum iscsi_host_event_code code,
+				  uint32_t data_size,
+				  uint8_t *data);
+
+extern void iscsi_ping_comp_event(uint32_t host_no,
+				  struct iscsi_transport *transport,
+				  uint32_t status, uint32_t pid,
+				  uint32_t data_size, uint8_t *data);
+
 struct iscsi_cls_conn {
 	struct list_head conn_list;	/* item in connlist */
 	void *dd_data;			/* LLD private data */
@@ -238,6 +255,8 @@ struct iscsi_cls_host {
 	atomic_t nr_scans;
 	struct mutex mutex;
 	struct request_queue *bsg_q;
+	uint32_t port_speed;
+	uint32_t port_state;
 };

 #define iscsi_job_to_shost(_job) \
@@ -307,5 +326,8 @@ extern struct iscsi_iface *iscsi_create_iface(struct Scsi_Host *shost,
 						  uint32_t iface_num, int dd_size);
 extern void iscsi_destroy_iface(struct iscsi_iface *iface);
 extern struct iscsi_iface *iscsi_lookup_iface(int handle);
+extern char *iscsi_get_port_speed_name(struct Scsi_Host *shost);
+extern char *iscsi_get_port_state_name(struct Scsi_Host *shost);
+extern int iscsi_is_session_dev(const struct device *dev);

 #endif
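The ping support is asynchronous by design: a management request enters through the new send_ping hook, and the driver later reports the outcome with iscsi_ping_comp_event(), correlating the two via pid. A hedged sketch of the completion side (the transport object and status value are placeholders):

#include <scsi/scsi_transport_iscsi.h>

extern struct iscsi_transport example_iscsi_transport;	/* placeholder */

/* Sketch: called from the driver's completion path once the ping reply
 * (or timeout) is known; pid matches the one passed to send_ping.
 */
static void example_ping_done(struct Scsi_Host *shost, uint32_t pid,
			      uint32_t status, uint8_t *payload, uint32_t len)
{
	iscsi_ping_comp_event(shost->host_no, &example_iscsi_transport,
			      status, pid, len, payload);
}
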
diff --git a/include/scsi/scsi_transport_sas.h b/include/scsi/scsi_transport_sas.h
index ffeebc34a4f..98b3a20a010 100644
--- a/include/scsi/scsi_transport_sas.h
+++ b/include/scsi/scsi_transport_sas.h
@@ -75,7 +75,8 @@ struct sas_phy {
 	/* for the list of phys belonging to a port */
 	struct list_head port_siblings;

-	struct work_struct reset_work;
+	/* available to the lldd */
+	void *hostdata;
 };

 #define dev_to_phy(d) \
@@ -169,6 +170,8 @@ struct sas_function_template {
 	int (*get_bay_identifier)(struct sas_rphy *);
 	int (*phy_reset)(struct sas_phy *, int);
 	int (*phy_enable)(struct sas_phy *, int);
+	int (*phy_setup)(struct sas_phy *);
+	void (*phy_release)(struct sas_phy *);
 	int (*set_phy_speed)(struct sas_phy *, struct sas_phy_linkrates *);
 	int (*smp_handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
 };
@@ -194,6 +197,7 @@ void sas_rphy_free(struct sas_rphy *);
 extern int sas_rphy_add(struct sas_rphy *);
 extern void sas_rphy_remove(struct sas_rphy *);
 extern void sas_rphy_delete(struct sas_rphy *);
+extern void sas_rphy_unlink(struct sas_rphy *);
 extern int scsi_is_sas_rphy(const struct device *);

 struct sas_port *sas_port_alloc(struct device *, int);
@@ -205,6 +209,12 @@ void sas_port_add_phy(struct sas_port *, struct sas_phy *);
 void sas_port_delete_phy(struct sas_port *, struct sas_phy *);
 void sas_port_mark_backlink(struct sas_port *);
 int scsi_is_sas_port(const struct device *);
+struct sas_phy *sas_port_get_phy(struct sas_port *port);
+static inline void sas_port_put_phy(struct sas_phy *phy)
+{
+	if (phy)
+		put_device(&phy->dev);
+}

 extern struct scsi_transport_template *
 sas_attach_transport(struct sas_function_template *);
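
sas_port_get_phy() hands back a referenced phy, and the NULL-safe sas_port_put_phy() inline above is its release half; typical callers bracket the access:

#include <scsi/scsi_transport_sas.h>

/* Sketch: borrow a referenced phy from the port, read what is needed,
 * then drop the reference (the put helper tolerates NULL).
 */
static int example_port_phy_id(struct sas_port *port)
{
	struct sas_phy *phy = sas_port_get_phy(port);
	int id = phy ? phy->identify.phy_identifier : -1;

	sas_port_put_phy(phy);
	return id;
}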