author		Linus Torvalds <torvalds@linux-foundation.org>	2011-10-28 19:44:18 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-10-28 19:44:18 -0400
commit		ec7ae517537ae5c7b0b2cd7f562dfa3e7a05b954 (patch)
tree		e6b0c64a51a7c0aa0efd09d4f7a80872e3b1657a
parent		97d2eb13a019ec09cc1a7ea2d3705c0b117b3c0d (diff)
parent		590134fa78fbdbe5fea78c7ae0b2c3364bc9572f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (204 commits)
  [SCSI] qla4xxx: export address/port of connection (fix udev disk names)
  [SCSI] ipr: Fix BUG on adapter dump timeout
  [SCSI] megaraid_sas: Fix instance access in megasas_reset_timer
  [SCSI] hpsa: change confusing message to be more clear
  [SCSI] iscsi class: fix vlan configuration
  [SCSI] qla4xxx: fix data alignment and use nl helpers
  [SCSI] iscsi class: fix link local mispelling
  [SCSI] iscsi class: Replace iscsi_get_next_target_id with IDA
  [SCSI] aacraid: use lower snprintf() limit
  [SCSI] lpfc 8.3.27: Change driver version to 8.3.27
  [SCSI] lpfc 8.3.27: T10 additions for SLI4
  [SCSI] lpfc 8.3.27: Fix queue allocation failure recovery
  [SCSI] lpfc 8.3.27: Change algorithm for getting physical port name
  [SCSI] lpfc 8.3.27: Changed worst case mailbox timeout
  [SCSI] lpfc 8.3.27: Miscellanous logic and interface fixes
  [SCSI] megaraid_sas: Changelog and version update
  [SCSI] megaraid_sas: Add driver workaround for PERC5/1068 kdump kernel panic
  [SCSI] megaraid_sas: Add multiple MSI-X vector/multiple reply queue support
  [SCSI] megaraid_sas: Add support for MegaRAID 9360/9380 12GB/s controllers
  [SCSI] megaraid_sas: Clear FUSION_IN_RESET before enabling interrupts
  ...
-rw-r--r--  Documentation/ABI/testing/sysfs-block | 13
-rw-r--r--  Documentation/scsi/00-INDEX | 2
-rw-r--r--  Documentation/scsi/ChangeLog.megaraid_sas | 15
-rw-r--r--  Documentation/scsi/LICENSE.qla4xxx | 310
-rw-r--r--  Documentation/scsi/bnx2fc.txt | 75
-rw-r--r--  arch/s390/include/asm/qdio.h | 5
-rw-r--r--  block/genhd.c | 71
-rw-r--r--  drivers/ata/libata-core.c | 1
-rw-r--r--  drivers/ata/libata-scsi.c | 44
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c | 80
-rw-r--r--  drivers/message/fusion/mptbase.c | 92
-rw-r--r--  drivers/message/fusion/mptbase.h | 72
-rw-r--r--  drivers/message/fusion/mptsas.c | 47
-rw-r--r--  drivers/message/fusion/mptscsih.c | 18
-rw-r--r--  drivers/message/fusion/mptscsih.h | 1
-rw-r--r--  drivers/s390/cio/qdio_main.c | 11
-rw-r--r--  drivers/s390/cio/qdio_setup.c | 1
-rw-r--r--  drivers/s390/scsi/zfcp_dbf.c | 36
-rw-r--r--  drivers/s390/scsi/zfcp_def.h | 7
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h | 1
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 80
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c | 58
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.h | 66
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c | 15
-rw-r--r--  drivers/scsi/Kconfig | 14
-rw-r--r--  drivers/scsi/Makefile | 1
-rw-r--r--  drivers/scsi/aacraid/linit.c | 7
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_scb.c | 1
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.c | 20
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.h | 2
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.c | 60
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.h | 2
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 112
-rw-r--r--  drivers/scsi/be2iscsi/be_main.h | 5
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc.h | 3
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_els.c | 26
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 226
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_hwi.c | 16
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c | 32
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_tgt.c | 23
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_iscsi.c | 80
-rw-r--r--  drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | 20
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 20
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 56
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.h | 1
-rw-r--r--  drivers/scsi/device_handler/scsi_dh.c | 61
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_alua.c | 107
-rw-r--r--  drivers/scsi/device_handler/scsi_dh_rdac.c | 6
-rw-r--r--  drivers/scsi/fcoe/fcoe.c | 103
-rw-r--r--  drivers/scsi/fcoe/fcoe.h | 11
-rw-r--r--  drivers/scsi/fcoe/fcoe_transport.c | 101
-rw-r--r--  drivers/scsi/hpsa.c | 6
-rw-r--r--  drivers/scsi/ipr.c | 31
-rw-r--r--  drivers/scsi/ipr.h | 4
-rw-r--r--  drivers/scsi/isci/host.c | 69
-rw-r--r--  drivers/scsi/isci/host.h | 15
-rw-r--r--  drivers/scsi/isci/init.c | 3
-rw-r--r--  drivers/scsi/isci/isci.h | 2
-rw-r--r--  drivers/scsi/isci/phy.c | 11
-rw-r--r--  drivers/scsi/isci/port.c | 2
-rw-r--r--  drivers/scsi/isci/port_config.c | 2
-rw-r--r--  drivers/scsi/isci/registers.h | 122
-rw-r--r--  drivers/scsi/isci/remote_device.c | 24
-rw-r--r--  drivers/scsi/isci/remote_device.h | 9
-rw-r--r--  drivers/scsi/isci/request.c | 380
-rw-r--r--  drivers/scsi/isci/request.h | 31
-rw-r--r--  drivers/scsi/isci/sas.h | 2
-rw-r--r--  drivers/scsi/isci/task.c | 29
-rw-r--r--  drivers/scsi/isci/task.h | 19
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 83
-rw-r--r--  drivers/scsi/libfc/fc_exch.c | 11
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c | 2
-rw-r--r--  drivers/scsi/libiscsi.c | 9
-rw-r--r--  drivers/scsi/libsas/sas_discover.c | 13
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 37
-rw-r--r--  drivers/scsi/libsas/sas_host_smp.c | 103
-rw-r--r--  drivers/scsi/libsas/sas_init.c | 43
-rw-r--r--  drivers/scsi/libsas/sas_scsi_host.c | 145
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 20
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 79
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 181
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 33
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 7
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 300
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 287
-rw-r--r--  drivers/scsi/lpfc/lpfc_logmsg.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 80
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 244
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 460
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 18
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 23
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 16
-rw-r--r--  drivers/scsi/mac_esp.c | 9
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 22
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 239
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fp.c | 26
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 193
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.h | 19
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.c | 447
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.h | 46
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_ctl.c | 28
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c | 2
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_transport.c | 2
-rw-r--r--  drivers/scsi/mvsas/mv_94xx.c | 35
-rw-r--r--  drivers/scsi/mvsas/mv_defs.h | 2
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 13
-rw-r--r--  drivers/scsi/mvsas/mv_sas.c | 79
-rw-r--r--  drivers/scsi/mvsas/mv_sas.h | 4
-rw-r--r--  drivers/scsi/mvumi.c | 2018
-rw-r--r--  drivers/scsi/mvumi.h | 505
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.c | 73
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c | 4
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.c | 93
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 111
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 152
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.h | 42
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 6
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 9
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 18
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 36
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 127
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 970
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.h | 255
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 46
-rw-r--r--  drivers/scsi/qla4xxx/Kconfig | 1
-rw-r--r--  drivers/scsi/qla4xxx/Makefile | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_attr.c | 76
-rw-r--r--  drivers/scsi/qla4xxx/ql4_bsg.c | 513
-rw-r--r--  drivers/scsi/qla4xxx/ql4_bsg.h | 19
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h | 214
-rw-r--r--  drivers/scsi/qla4xxx/ql4_fw.h | 198
-rw-r--r--  drivers/scsi/qla4xxx/ql4_glbl.h | 71
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 852
-rw-r--r--  drivers/scsi/qla4xxx/ql4_iocb.c | 68
-rw-r--r--  drivers/scsi/qla4xxx/ql4_isr.c | 83
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c | 824
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nvram.c | 21
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c | 17
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 2321
-rw-r--r--  drivers/scsi/qla4xxx/ql4_version.h | 2
-rw-r--r--  drivers/scsi/qlogicpti.c | 2
-rw-r--r--  drivers/scsi/scsi_error.c | 12
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 38
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 742
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 10
-rw-r--r--  drivers/scsi/sd.c | 7
-rw-r--r--  include/linux/genhd.h | 4
-rw-r--r--  include/linux/libata.h | 2
-rw-r--r--  include/scsi/iscsi_if.h | 135
-rw-r--r--  include/scsi/libfc.h | 76
-rw-r--r--  include/scsi/libfcoe.h | 5
-rw-r--r--  include/scsi/libsas.h | 54
-rw-r--r--  include/scsi/sas.h | 24
-rw-r--r--  include/scsi/scsi_bsg_iscsi.h | 110
-rw-r--r--  include/scsi/scsi_device.h | 6
-rw-r--r--  include/scsi/scsi_host.h | 16
-rw-r--r--  include/scsi/scsi_transport_iscsi.h | 47
162 files changed, 14101 insertions(+), 3311 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block
index c1eb41cb987..2b5d56127fc 100644
--- a/Documentation/ABI/testing/sysfs-block
+++ b/Documentation/ABI/testing/sysfs-block
@@ -206,3 +206,16 @@ Description:
 		when a discarded area is read the discard_zeroes_data
 		parameter will be set to one. Otherwise it will be 0 and
 		the result of reading a discarded area is undefined.
+What:		/sys/block/<disk>/alias
+Date:		Aug 2011
+Contact:	Nao Nishijima <nao.nishijima.xt@hitachi.com>
+Description:
+		A raw device name of a disk does not always point to the same
+		disk at each boot-up time. Therefore, users have to use
+		persistent device names, which udev creates when the kernel
+		finds a disk, instead of raw device names. However, the
+		kernel does not show those persistent names in its messages
+		(e.g. dmesg).
+		This file can store an alias of the disk, and the alias
+		appears in kernel messages if it is set. A disk can have an
+		alias of up to 255 bytes; alphanumeric characters, "-" and
+		"_" are allowed in an alias. This file is write-once.
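A minimal user-space sketch of exercising the new attribute. Only the /sys/block/<disk>/alias path and its validation rules come from the patch; the disk name "sda" and the alias "boot_disk" are illustrative:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* "sda" and "boot_disk" are illustrative, not from the patch. */
		int fd = open("/sys/block/sda/alias", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* The attribute accepts up to 255 bytes of [A-Za-z0-9_-] and is
		 * write-once; a second write fails with EINVAL. */
		if (write(fd, "boot_disk", strlen("boot_disk")) < 0)
			perror("write");
		close(fd);
		return 0;
	}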
diff --git a/Documentation/scsi/00-INDEX b/Documentation/scsi/00-INDEX
index c2e18e10985..b48ded55b55 100644
--- a/Documentation/scsi/00-INDEX
+++ b/Documentation/scsi/00-INDEX
@@ -28,6 +28,8 @@ LICENSE.FlashPoint
 	- Licence of the Flashpoint driver
 LICENSE.qla2xxx
 	- License for QLogic Linux Fibre Channel HBA Driver firmware.
+LICENSE.qla4xxx
+	- License for QLogic Linux iSCSI HBA Driver.
 Mylex.txt
 	- info on driver for Mylex adapters
 NinjaSCSI.txt
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 1b6e27ddb7f..64adb98b181 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,18 @@
+Release Date    : Wed. Oct 5, 2011 17:00:00 PST 2010 -
+			(emaild-id:megaraidlinux@lsi.com)
+			Adam Radford
+Current Version : 00.00.06.12-rc1
+Old Version     : 00.00.05.40-rc1
+    1. Continue booting immediately if FW in FAULT at driver load time.
+    2. Increase default cmds per lun to 256.
+    3. Fix mismatch in megasas_reset_fusion() mutex lock-unlock.
+    4. Remove some un-necessary code.
+    5. Clear state change interrupts for Fusion/Invader.
+    6. Clear FUSION_IN_RESET before enabling interrupts.
+    7. Add support for MegaRAID 9360/9380 12GB/s controllers.
+    8. Add multiple MSI-X vector/multiple reply queue support.
+    9. Add driver workaround for PERC5/1068 kdump kernel panic.
+-------------------------------------------------------------------------------
 Release Date    : Tue. Jul 26, 2011 17:00:00 PST 2010 -
 			(emaild-id:megaraidlinux@lsi.com)
 			Adam Radford
diff --git a/Documentation/scsi/LICENSE.qla4xxx b/Documentation/scsi/LICENSE.qla4xxx
new file mode 100644
index 00000000000..494980e4049
--- /dev/null
+++ b/Documentation/scsi/LICENSE.qla4xxx
@@ -0,0 +1,310 @@
+Copyright (c) 2003-2011 QLogic Corporation
+QLogic Linux iSCSI HBA Driver
+
+This program includes a device driver for Linux 3.x.
+You may modify and redistribute the device driver code under the
+GNU General Public License (a copy of which is attached hereto as
+Exhibit A) published by the Free Software Foundation (version 2).
+
+REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE,
+THIS PROGRAM IS PROVIDED BY QLOGIC CORPORATION "AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT
+CREATE OR GIVE GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR
+OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT,
+TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN
+ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN
+COMBINATION WITH THIS PROGRAM.
+
+
+EXHIBIT A
+
+		    GNU GENERAL PUBLIC LICENSE
+		       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+		    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+			    NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
diff --git a/Documentation/scsi/bnx2fc.txt b/Documentation/scsi/bnx2fc.txt
new file mode 100644
index 00000000000..80823556d62
--- /dev/null
+++ b/Documentation/scsi/bnx2fc.txt
@@ -0,0 +1,75 @@
+Operating FCoE using bnx2fc
+===========================
+Broadcom FCoE offload through bnx2fc is a full stateful hardware offload that
+cooperates with all interfaces provided by the Linux ecosystem for FC/FCoE and
+SCSI controllers. As such, FCoE functionality, once enabled, is largely
+transparent. Devices discovered on the SAN will be registered and unregistered
+automatically with the upper storage layers.
+
+Despite the fact that Broadcom's FCoE solution is fully offloaded, it does
+depend on the state of the network interfaces to operate. As such, the network
+interface (e.g. eth0) associated with the FCoE offload initiator must be 'up'.
+It is recommended that the network interfaces be configured to be brought up
+automatically at boot time.
+
+Furthermore, the Broadcom FCoE offload solution creates VLAN interfaces to
+support the VLANs that have been discovered for FCoE operation (e.g.
+eth0.1001-fcoe). Do not delete or disable these interfaces or FCoE operation
+will be disrupted.
+
+Driver Usage Model:
+===================
+
+1. Ensure that the fcoe-utils package is installed.
+
+2. Configure the interfaces on which the bnx2fc driver has to operate.
+Here are the steps to configure:
+	a. cd /etc/fcoe
+	b. Copy cfg-ethx to cfg-eth5 if FCoE has to be enabled on eth5.
+	c. Repeat this for all the interfaces where FCoE has to be enabled.
+	d. Edit all the cfg-eth files to set "no" for the DCB_REQUIRED** field
+	   and "yes" for AUTO_VLAN.
+	e. Other configuration parameters should be left at their defaults.
+
+3. Ensure that "bnx2fc" is in the SUPPORTED_DRIVERS list in /etc/fcoe/config.
+
+4. Start the fcoe service (service fcoe start). If Broadcom devices are
+present in the system, the bnx2fc driver automatically claims the interfaces,
+starts VLAN discovery and logs into the targets.
+
+5. The "Symbolic Name" in the 'fcoeadm -i' output shows whether bnx2fc has
+claimed the interface.
+Eg:
+[root@bh2 ~]# fcoeadm -i
+	Description:      NetXtreme II BCM57712 10 Gigabit Ethernet
+	Revision:         01
+	Manufacturer:     Broadcom Corporation
+	Serial Number:    0010186FD558
+	Driver:           bnx2x 1.70.00-0
+	Number of Ports:  2
+
+	    Symbolic Name:     bnx2fc v1.0.5 over eth5.4
+	    OS Device Name:    host11
+	    Node Name:         0x10000010186FD559
+	    Port Name:         0x20000010186FD559
+	    FabricName:        0x2001000DECB3B681
+	    Speed:             10 Gbit
+	    Supported Speed:   10 Gbit
+	    MaxFrameSize:      2048
+	    FC-ID (Port ID):   0x0F0377
+	    State:             Online
+
+6. Verify that VLAN discovery was performed by running ifconfig and noticing
+that <INTERFACE>.<VLAN>-fcoe interfaces were automatically created.
+
+Refer to the fcoeadm manpage for more information on fcoeadm operations to
+create/destroy interfaces or to display lun/target information.
+
+NOTE:
+====
+** Broadcom FCoE-capable devices implement a DCBX/LLDP client on-chip. Only one
+LLDP client is allowed per interface. For proper operation all host software
+based DCBX/LLDP clients (e.g. lldpad) must be disabled. To disable lldpad on a
+given interface, run the following command:
+
+lldptool set-lldp -i <interface_name> adminStatus=disabled
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 21993623da9..e63d13dd3bf 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -46,6 +46,8 @@ struct qdesfmt0 {
 	u32	 : 16;
 } __attribute__ ((packed));
 
+#define QDR_AC_MULTI_BUFFER_ENABLE 0x01
+
 /**
  * struct qdr - queue description record (QDR)
  * @qfmt: queue format
@@ -256,6 +258,8 @@ struct slsb {
 	u8 val[QDIO_MAX_BUFFERS_PER_Q];
 } __attribute__ ((packed, aligned(256)));
 
+#define CHSC_AC2_MULTI_BUFFER_AVAILABLE	0x0080
+#define CHSC_AC2_MULTI_BUFFER_ENABLED	0x0040
 #define CHSC_AC2_DATA_DIV_AVAILABLE	0x0010
 #define CHSC_AC2_DATA_DIV_ENABLED	0x0002
 
@@ -357,6 +361,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
 struct qdio_initialize {
 	struct ccw_device *cdev;
 	unsigned char q_format;
+	unsigned char qdr_ac;
 	unsigned char adapter_name[8];
 	unsigned int qib_param_field_format;
 	unsigned char *qib_param_field;
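A hedged sketch of how a driver might opt into the new multi-buffer mode when filling in qdio_initialize, assuming it has already read the adapter's CHSC AC2 facility bits; the field and flag names come from this patch, the surrounding function is illustrative:

	/* Illustrative fragment: request QDIO multi-buffer mode only when the
	 * adapter reported it as available (names from this patch). */
	static void example_setup_qdio(struct qdio_initialize *init_data,
				       u32 chsc_ac2_flags)
	{
		if (chsc_ac2_flags & CHSC_AC2_MULTI_BUFFER_AVAILABLE)
			init_data->qdr_ac = QDR_AC_MULTI_BUFFER_ENABLE;
	}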
diff --git a/block/genhd.c b/block/genhd.c
index e2f67902dd0..94855a9717d 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -19,6 +19,7 @@
 #include <linux/mutex.h>
 #include <linux/idr.h>
 #include <linux/log2.h>
+#include <linux/ctype.h>
 
 #include "blk.h"
 
@@ -909,6 +910,74 @@ static int __init genhd_device_init(void)
 
 subsys_initcall(genhd_device_init);
 
+static ssize_t alias_show(struct device *dev,
+			  struct device_attribute *attr, char *buf)
+{
+	struct gendisk *disk = dev_to_disk(dev);
+	ssize_t ret = 0;
+
+	if (disk->alias)
+		ret = snprintf(buf, ALIAS_LEN, "%s\n", disk->alias);
+	return ret;
+}
+
+static ssize_t alias_store(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+	struct gendisk *disk = dev_to_disk(dev);
+	char *alias;
+	char *envp[] = { NULL, NULL };
+	unsigned char c;
+	int i;
+	ssize_t ret = count;
+
+	if (!count)
+		return -EINVAL;
+
+	if (count >= ALIAS_LEN) {
+		printk(KERN_ERR "alias: alias is too long\n");
+		return -EINVAL;
+	}
+
+	/* Validation check */
+	for (i = 0; i < count; i++) {
+		c = buf[i];
+		if (i == count - 1 && c == '\n')
+			break;
+		if (!isalnum(c) && c != '_' && c != '-') {
+			printk(KERN_ERR "alias: invalid alias\n");
+			return -EINVAL;
+		}
+	}
+
+	if (disk->alias) {
+		printk(KERN_INFO "alias: %s is already assigned (%s)\n",
+		       disk->disk_name, disk->alias);
+		return -EINVAL;
+	}
+
+	alias = kasprintf(GFP_KERNEL, "%s", buf);
+	if (!alias)
+		return -ENOMEM;
+
+	if (alias[count - 1] == '\n')
+		alias[count - 1] = '\0';
+
+	envp[0] = kasprintf(GFP_KERNEL, "ALIAS=%s", alias);
+	if (!envp[0]) {
+		kfree(alias);
+		return -ENOMEM;
+	}
+
+	disk->alias = alias;
+	printk(KERN_INFO "alias: assigned %s to %s\n", alias, disk->disk_name);
+
+	kobject_uevent_env(&dev->kobj, KOBJ_ADD, envp);
+
+	kfree(envp[0]);
+	return ret;
+}
+
 static ssize_t disk_range_show(struct device *dev,
 			       struct device_attribute *attr, char *buf)
 {
@@ -968,6 +1037,7 @@ static ssize_t disk_discard_alignment_show(struct device *dev,
 	return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
 }
 
+static DEVICE_ATTR(alias, S_IRUGO|S_IWUSR, alias_show, alias_store);
 static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
 static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL);
 static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL);
@@ -990,6 +1060,7 @@ static struct device_attribute dev_attr_fail_timeout =
 #endif
 
 static struct attribute *disk_attrs[] = {
+	&dev_attr_alias.attr,
 	&dev_attr_range.attr,
 	&dev_attr_ext_range.attr,
 	&dev_attr_removable.attr,
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 32fc41c1da3..c04ad68cb60 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -6713,6 +6713,7 @@ EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
+EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
 EXPORT_SYMBOL_GPL(sata_scr_valid);
 EXPORT_SYMBOL_GPL(sata_scr_read);
 EXPORT_SYMBOL_GPL(sata_scr_write);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 46d087f0860..19ba77032ac 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1215,25 +1215,15 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
 }
 
 /**
- *	ata_scsi_change_queue_depth - SCSI callback for queue depth config
- *	@sdev: SCSI device to configure queue depth for
- *	@queue_depth: new queue depth
- *	@reason: calling context
- *
- *	This is libata standard hostt->change_queue_depth callback.
- *	SCSI will call into this callback when user tries to set queue
- *	depth via sysfs.
+ *	__ata_change_queue_depth - helper for ata_scsi_change_queue_depth
  *
- *	LOCKING:
- *	SCSI layer (we don't care)
+ *	libsas and libata have different approaches for associating a sdev to
+ *	its ata_port.
  *
- *	RETURNS:
- *	Newly configured queue depth.
  */
-int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth,
-				int reason)
+int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
+			     int queue_depth, int reason)
 {
-	struct ata_port *ap = ata_shost_to_port(sdev->host);
 	struct ata_device *dev;
 	unsigned long flags;
 
@@ -1269,6 +1259,30 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth,
 }
 
 /**
+ *	ata_scsi_change_queue_depth - SCSI callback for queue depth config
+ *	@sdev: SCSI device to configure queue depth for
+ *	@queue_depth: new queue depth
+ *	@reason: calling context
+ *
+ *	This is libata standard hostt->change_queue_depth callback.
+ *	SCSI will call into this callback when user tries to set queue
+ *	depth via sysfs.
+ *
+ *	LOCKING:
+ *	SCSI layer (we don't care)
+ *
+ *	RETURNS:
+ *	Newly configured queue depth.
+ */
+int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth,
+				int reason)
+{
+	struct ata_port *ap = ata_shost_to_port(sdev->host);
+
+	return __ata_change_queue_depth(ap, sdev, queue_depth, reason);
+}
+
+/**
 *	ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
 *	@qc: Storage for translated ATA taskfile
 *
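The point of the split is that a libsas host cannot use ata_shost_to_port(); it resolves the ata_port from its own domain device and can now call the exported helper directly. A hedged sketch of such a caller (sas_to_ata_port() is a hypothetical lookup, not part of this patch):

	/* Hypothetical libsas-side callback built on the new helper. */
	static int example_sas_change_queue_depth(struct scsi_device *sdev,
						  int queue_depth, int reason)
	{
		struct ata_port *ap = sas_to_ata_port(sdev); /* hypothetical */

		return __ata_change_queue_depth(ap, sdev, queue_depth, reason);
	}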
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 9c61b9c2c59..84e8c293a71 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -632,6 +632,59 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
 	iser_conn_terminate(ib_conn);
 }
 
+static mode_t iser_attr_is_visible(int param_type, int param)
+{
+	switch (param_type) {
+	case ISCSI_HOST_PARAM:
+		switch (param) {
+		case ISCSI_HOST_PARAM_NETDEV_NAME:
+		case ISCSI_HOST_PARAM_HWADDRESS:
+		case ISCSI_HOST_PARAM_INITIATOR_NAME:
+			return S_IRUGO;
+		default:
+			return 0;
+		}
+	case ISCSI_PARAM:
+		switch (param) {
+		case ISCSI_PARAM_MAX_RECV_DLENGTH:
+		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+		case ISCSI_PARAM_HDRDGST_EN:
+		case ISCSI_PARAM_DATADGST_EN:
+		case ISCSI_PARAM_CONN_ADDRESS:
+		case ISCSI_PARAM_CONN_PORT:
+		case ISCSI_PARAM_EXP_STATSN:
+		case ISCSI_PARAM_PERSISTENT_ADDRESS:
+		case ISCSI_PARAM_PERSISTENT_PORT:
+		case ISCSI_PARAM_PING_TMO:
+		case ISCSI_PARAM_RECV_TMO:
+		case ISCSI_PARAM_INITIAL_R2T_EN:
+		case ISCSI_PARAM_MAX_R2T:
+		case ISCSI_PARAM_IMM_DATA_EN:
+		case ISCSI_PARAM_FIRST_BURST:
+		case ISCSI_PARAM_MAX_BURST:
+		case ISCSI_PARAM_PDU_INORDER_EN:
+		case ISCSI_PARAM_DATASEQ_INORDER_EN:
+		case ISCSI_PARAM_TARGET_NAME:
+		case ISCSI_PARAM_TPGT:
+		case ISCSI_PARAM_USERNAME:
+		case ISCSI_PARAM_PASSWORD:
+		case ISCSI_PARAM_USERNAME_IN:
+		case ISCSI_PARAM_PASSWORD_IN:
+		case ISCSI_PARAM_FAST_ABORT:
+		case ISCSI_PARAM_ABORT_TMO:
+		case ISCSI_PARAM_LU_RESET_TMO:
+		case ISCSI_PARAM_TGT_RESET_TMO:
+		case ISCSI_PARAM_IFACE_NAME:
+		case ISCSI_PARAM_INITIATOR_NAME:
+			return S_IRUGO;
+		default:
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
 static struct scsi_host_template iscsi_iser_sht = {
 	.module                 = THIS_MODULE,
 	.name                   = "iSCSI Initiator over iSER, v." DRV_VER,
@@ -653,32 +706,6 @@ static struct iscsi_transport iscsi_iser_transport = {
 	.owner                  = THIS_MODULE,
 	.name                   = "iser",
 	.caps                   = CAP_RECOVERY_L0 | CAP_MULTI_R2T,
-	.param_mask		= ISCSI_MAX_RECV_DLENGTH |
-				  ISCSI_MAX_XMIT_DLENGTH |
-				  ISCSI_HDRDGST_EN |
-				  ISCSI_DATADGST_EN |
-				  ISCSI_INITIAL_R2T_EN |
-				  ISCSI_MAX_R2T |
-				  ISCSI_IMM_DATA_EN |
-				  ISCSI_FIRST_BURST |
-				  ISCSI_MAX_BURST |
-				  ISCSI_PDU_INORDER_EN |
-				  ISCSI_DATASEQ_INORDER_EN |
-				  ISCSI_CONN_PORT |
-				  ISCSI_CONN_ADDRESS |
-				  ISCSI_EXP_STATSN |
-				  ISCSI_PERSISTENT_PORT |
-				  ISCSI_PERSISTENT_ADDRESS |
-				  ISCSI_TARGET_NAME | ISCSI_TPGT |
-				  ISCSI_USERNAME | ISCSI_PASSWORD |
-				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
-				  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
-				  ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
-				  ISCSI_PING_TMO | ISCSI_RECV_TMO |
-				  ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
-	.host_param_mask	= ISCSI_HOST_HWADDRESS |
-				  ISCSI_HOST_NETDEV_NAME |
-				  ISCSI_HOST_INITIATOR_NAME,
 	/* session management */
 	.create_session         = iscsi_iser_session_create,
 	.destroy_session        = iscsi_iser_session_destroy,
@@ -686,6 +713,7 @@ static struct iscsi_transport iscsi_iser_transport = {
 	.create_conn            = iscsi_iser_conn_create,
 	.bind_conn              = iscsi_iser_conn_bind,
 	.destroy_conn           = iscsi_iser_conn_destroy,
+	.attr_is_visible	= iser_attr_is_visible,
 	.set_param              = iscsi_iser_set_param,
 	.get_conn_param		= iscsi_conn_get_param,
 	.get_ep_param		= iscsi_iser_get_ep_param,
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 7956a10f948..e9c6a6047a0 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -63,6 +63,8 @@
 #ifdef CONFIG_MTRR
 #include <asm/mtrr.h>
 #endif
+#include <linux/kthread.h>
+#include <scsi/scsi_host.h>
 
 #include "mptbase.h"
 #include "lsi/mpi_log_fc.h"
@@ -323,6 +325,32 @@ mpt_is_discovery_complete(MPT_ADAPTER *ioc)
 	return rc;
 }
 
+
+/**
+ *	mpt_remove_dead_ioc_func - kthread context to remove dead ioc
+ *	@arg: input argument, used to derive ioc
+ *
+ *	Return 0 if controller is removed from pci subsystem.
+ *	Return -1 for other case.
+ */
+static int mpt_remove_dead_ioc_func(void *arg)
+{
+	MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
+	struct pci_dev *pdev;
+
+	if (ioc == NULL)
+		return -1;
+
+	pdev = ioc->pcidev;
+	if (pdev == NULL)
+		return -1;
+
+	pci_remove_bus_device(pdev);
+	return 0;
+}
+
+
+
 /**
  *	mpt_fault_reset_work - work performed on workq after ioc fault
  *	@work: input argument, used to derive ioc
@@ -336,12 +364,45 @@ mpt_fault_reset_work(struct work_struct *work)
 	u32		 ioc_raw_state;
 	int		 rc;
 	unsigned long	 flags;
+	MPT_SCSI_HOST	*hd;
+	struct task_struct *p;
 
 	if (ioc->ioc_reset_in_progress || !ioc->active)
 		goto out;
 
+
 	ioc_raw_state = mpt_GetIocState(ioc, 0);
-	if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
+	if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_MASK) {
+		printk(MYIOC_s_INFO_FMT "%s: IOC is non-operational !!!!\n",
+		    ioc->name, __func__);
+
+		/*
+		 * Call the mptscsih_flush_pending_cmds callback so that we
+		 * flush all pending commands back to the OS. This call is
+		 * required to avoid a deadlock at the block layer, and is
+		 * safe since a dead IOC will fail the diag reset and will
+		 * never return any command back from the HW.
+		 */
+		hd = shost_priv(ioc->sh);
+		ioc->schedule_dead_ioc_flush_running_cmds(hd);
+
+		/* Remove the dead host */
+		p = kthread_run(mpt_remove_dead_ioc_func, ioc,
+		    "mpt_dead_ioc_%d", ioc->id);
+		if (IS_ERR(p)) {
+			printk(MYIOC_s_ERR_FMT
+			    "%s: Running mpt_dead_ioc thread failed !\n",
+			    ioc->name, __func__);
+		} else {
+			printk(MYIOC_s_WARN_FMT
+			    "%s: Running mpt_dead_ioc thread success !\n",
+			    ioc->name, __func__);
+		}
+		return; /* don't rearm timer */
+	}
+
+	if ((ioc_raw_state & MPI_IOC_STATE_MASK)
+	    == MPI_IOC_STATE_FAULT) {
 		printk(MYIOC_s_WARN_FMT "IOC is in FAULT state (%04xh)!!!\n",
 		       ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
 		printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
@@ -6413,8 +6474,19 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
 		pReq->Action, ioc->mptbase_cmds.status, timeleft));
 	if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
 		goto out;
-	if (!timeleft)
+	if (!timeleft) {
+		spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+		if (ioc->ioc_reset_in_progress) {
+			spin_unlock_irqrestore(&ioc->taskmgmt_lock,
+			    flags);
+			printk(MYIOC_s_INFO_FMT "%s: host reset in"
+			    " progress, mpt_config timed out!\n",
+			    __func__, ioc->name);
+			return -EFAULT;
+		}
+		spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
 		issue_hard_reset = 1;
+	}
 	goto out;
 }
 
@@ -7128,7 +7200,18 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
 	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
 	if (ioc->ioc_reset_in_progress) {
 		spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
-		return 0;
+		ioc->wait_on_reset_completion = 1;
+		do {
+			ssleep(1);
+		} while (ioc->ioc_reset_in_progress == 1);
+		ioc->wait_on_reset_completion = 0;
+		return ioc->reset_status;
+	}
+	if (ioc->wait_on_reset_completion) {
+		spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+		rc = 0;
+		time_count = jiffies;
+		goto exit;
 	}
 	ioc->ioc_reset_in_progress = 1;
 	if (ioc->alt_ioc)
@@ -7165,6 +7248,7 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
 	ioc->ioc_reset_in_progress = 0;
 	ioc->taskmgmt_quiesce_io = 0;
 	ioc->taskmgmt_in_progress = 0;
+	ioc->reset_status = rc;
 	if (ioc->alt_ioc) {
 		ioc->alt_ioc->ioc_reset_in_progress = 0;
 		ioc->alt_ioc->taskmgmt_quiesce_io = 0;
@@ -7180,7 +7264,7 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
 				ioc->alt_ioc, MPT_IOC_POST_RESET);
 		}
 	}
-
+ exit:
 	dtmprintk(ioc,
 	    printk(MYIOC_s_DEBUG_FMT
 		"HardResetHandler: completed (%d seconds): %s\n", ioc->name,
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index fe902338539..b4d24dc081a 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
 #define COPYRIGHT	"Copyright (c) 1999-2008 " MODULEAUTHOR
 #endif
 
-#define MPT_LINUX_VERSION_COMMON	"3.04.19"
-#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.19"
+#define MPT_LINUX_VERSION_COMMON	"3.04.20"
+#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.20"
 #define WHAT_MAGIC_STRING	"@" "(" "#" ")"
 
 #define show_mptmod_ver(s,ver)  \
@@ -554,10 +554,47 @@ struct mptfc_rport_info
 	u8		flags;
 };
 
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+/*
+ *	MPT_SCSI_HOST defines - Used by the IOCTL and the SCSI drivers
+ *	Private to the driver.
+ */
+
+#define MPT_HOST_BUS_UNKNOWN		(0xFF)
+#define MPT_HOST_TOO_MANY_TM		(0x05)
+#define MPT_HOST_NVRAM_INVALID		(0xFFFFFFFF)
+#define MPT_HOST_NO_CHAIN		(0xFFFFFFFF)
+#define MPT_NVRAM_MASK_TIMEOUT		(0x000000FF)
+#define MPT_NVRAM_SYNC_MASK		(0x0000FF00)
+#define MPT_NVRAM_SYNC_SHIFT		(8)
+#define MPT_NVRAM_DISCONNECT_ENABLE	(0x00010000)
+#define MPT_NVRAM_ID_SCAN_ENABLE	(0x00020000)
+#define MPT_NVRAM_LUN_SCAN_ENABLE	(0x00040000)
+#define MPT_NVRAM_TAG_QUEUE_ENABLE	(0x00080000)
+#define MPT_NVRAM_WIDE_DISABLE		(0x00100000)
+#define MPT_NVRAM_BOOT_CHOICE		(0x00200000)
+
+typedef enum {
+	FC,
+	SPI,
+	SAS
+} BUS_TYPE;
+
+typedef struct _MPT_SCSI_HOST {
+	struct _MPT_ADAPTER	 *ioc;
+	ushort			  sel_timeout[MPT_MAX_FC_DEVICES];
+	char			  *info_kbuf;
+	long			  last_queue_full;
+	u16			  spi_pending;
+	struct list_head	  target_reset_list;
+} MPT_SCSI_HOST;
+
 typedef void (*MPT_ADD_SGE)(void *pAddr, u32 flagslength, dma_addr_t dma_addr);
 typedef void (*MPT_ADD_CHAIN)(void *pAddr, u8 next, u16 length,
 		dma_addr_t dma_addr);
 typedef void (*MPT_SCHEDULE_TARGET_RESET)(void *ioc);
+typedef void (*MPT_FLUSH_RUNNING_CMDS)(MPT_SCSI_HOST *hd);
 
 /*
  *  Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS
@@ -716,7 +753,10 @@ typedef struct _MPT_ADAPTER
 	int			 taskmgmt_in_progress;
 	u8			 taskmgmt_quiesce_io;
 	u8			 ioc_reset_in_progress;
+	u8			 reset_status;
+	u8			 wait_on_reset_completion;
 	MPT_SCHEDULE_TARGET_RESET schedule_target_reset;
+	MPT_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
 	struct work_struct	 sas_persist_task;
 
 	struct work_struct	 fc_setup_reset_work;
@@ -830,19 +870,6 @@ typedef struct _MPT_LOCAL_REPLY {
 	u32	pad;
 } MPT_LOCAL_REPLY;
 
-#define MPT_HOST_BUS_UNKNOWN		(0xFF)
-#define MPT_HOST_TOO_MANY_TM		(0x05)
-#define MPT_HOST_NVRAM_INVALID		(0xFFFFFFFF)
-#define MPT_HOST_NO_CHAIN		(0xFFFFFFFF)
-#define MPT_NVRAM_MASK_TIMEOUT		(0x000000FF)
-#define MPT_NVRAM_SYNC_MASK		(0x0000FF00)
-#define MPT_NVRAM_SYNC_SHIFT		(8)
-#define MPT_NVRAM_DISCONNECT_ENABLE	(0x00010000)
-#define MPT_NVRAM_ID_SCAN_ENABLE	(0x00020000)
-#define MPT_NVRAM_LUN_SCAN_ENABLE	(0x00040000)
-#define MPT_NVRAM_TAG_QUEUE_ENABLE	(0x00080000)
-#define MPT_NVRAM_WIDE_DISABLE		(0x00100000)
-#define MPT_NVRAM_BOOT_CHOICE		(0x00200000)
 
 /* The TM_STATE variable is used to provide strict single threading of TM
  * requests as well as communicate TM error conditions.
@@ -851,21 +878,6 @@ typedef struct _MPT_LOCAL_REPLY {
 #define	TM_STATE_IN_PROGRESS	(1)
 #define	TM_STATE_ERROR	(2)
 
-typedef enum {
-	FC,
-	SPI,
-	SAS
-} BUS_TYPE;
-
-typedef struct _MPT_SCSI_HOST {
-	MPT_ADAPTER		 *ioc;
-	ushort			  sel_timeout[MPT_MAX_FC_DEVICES];
-	char			  *info_kbuf;
-	long			  last_queue_full;
-	u16			  spi_pending;
-	struct list_head	  target_reset_list;
-} MPT_SCSI_HOST;
-
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /*
  *	More Dynamic Multi-Pathing stuff...
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 7596aecd507..9d950429854 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -92,6 +92,11 @@ static int max_lun = MPTSAS_MAX_LUN;
92module_param(max_lun, int, 0); 92module_param(max_lun, int, 0);
93MODULE_PARM_DESC(max_lun, " max lun, default=16895 "); 93MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
94 94
95static int mpt_loadtime_max_sectors = 8192;
96module_param(mpt_loadtime_max_sectors, int, 0);
97MODULE_PARM_DESC(mpt_loadtime_max_sectors,
98 " Maximum sector define for Host Bus Adaptor.Range 64 to 8192 default=8192");
99
95static u8 mptsasDoneCtx = MPT_MAX_PROTOCOL_DRIVERS; 100static u8 mptsasDoneCtx = MPT_MAX_PROTOCOL_DRIVERS;
96static u8 mptsasTaskCtx = MPT_MAX_PROTOCOL_DRIVERS; 101static u8 mptsasTaskCtx = MPT_MAX_PROTOCOL_DRIVERS;
97static u8 mptsasInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for internal commands */ 102static u8 mptsasInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for internal commands */
@@ -285,10 +290,11 @@ mptsas_add_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
285 spin_lock_irqsave(&ioc->fw_event_lock, flags); 290 spin_lock_irqsave(&ioc->fw_event_lock, flags);
286 list_add_tail(&fw_event->list, &ioc->fw_event_list); 291 list_add_tail(&fw_event->list, &ioc->fw_event_list);
287 INIT_DELAYED_WORK(&fw_event->work, mptsas_firmware_event_work); 292 INIT_DELAYED_WORK(&fw_event->work, mptsas_firmware_event_work);
288 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: add (fw_event=0x%p)\n", 293 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: add (fw_event=0x%p)"
289	 ioc->name, __func__, fw_event)); 294	 " on cpuid %d\n", ioc->name, __func__,
290 queue_delayed_work(ioc->fw_event_q, &fw_event->work, 295 fw_event, smp_processor_id()));
291 delay); 296 queue_delayed_work_on(smp_processor_id(), ioc->fw_event_q,
297 &fw_event->work, delay);
292 spin_unlock_irqrestore(&ioc->fw_event_lock, flags); 298 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
293} 299}
294 300
@@ -300,10 +306,11 @@ mptsas_requeue_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
300 unsigned long flags; 306 unsigned long flags;
301 spin_lock_irqsave(&ioc->fw_event_lock, flags); 307 spin_lock_irqsave(&ioc->fw_event_lock, flags);
302 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: reschedule task " 308 devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: reschedule task "
303	 "(fw_event=0x%p)\n", ioc->name, __func__, fw_event)); 309	 "(fw_event=0x%p) on cpuid %d\n", ioc->name, __func__,
310 fw_event, smp_processor_id()));
304 fw_event->retries++; 311 fw_event->retries++;
305 queue_delayed_work(ioc->fw_event_q, &fw_event->work, 312 queue_delayed_work_on(smp_processor_id(), ioc->fw_event_q,
306 msecs_to_jiffies(delay)); 313 &fw_event->work, msecs_to_jiffies(delay));
307 spin_unlock_irqrestore(&ioc->fw_event_lock, flags); 314 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
308} 315}
309 316
@@ -1943,6 +1950,15 @@ static enum blk_eh_timer_return mptsas_eh_timed_out(struct scsi_cmnd *sc)
1943 goto done; 1950 goto done;
1944 } 1951 }
1945 1952
 1953	/* If the IOC is in reset from an internal context, do not
 1954	 * run error handling for this IOC; the SML should reset the timer.
 1955	 */
1956 if (ioc->ioc_reset_in_progress) {
 1957	dtmprintk(ioc, printk(MYIOC_s_WARN_FMT ": %s: ioc is in reset, "
 1958	 "SML needs to reset the timer (sc=%p)\n",
1959 ioc->name, __func__, sc));
1960 rc = BLK_EH_RESET_TIMER;
1961 }
1946 vdevice = sc->device->hostdata; 1962 vdevice = sc->device->hostdata;
1947 if (vdevice && vdevice->vtarget && (vdevice->vtarget->inDMD 1963 if (vdevice && vdevice->vtarget && (vdevice->vtarget->inDMD
1948 || vdevice->vtarget->deleted)) { 1964 || vdevice->vtarget->deleted)) {
@@ -5142,6 +5158,8 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5142 ioc->TaskCtx = mptsasTaskCtx; 5158 ioc->TaskCtx = mptsasTaskCtx;
5143 ioc->InternalCtx = mptsasInternalCtx; 5159 ioc->InternalCtx = mptsasInternalCtx;
5144 ioc->schedule_target_reset = &mptsas_schedule_target_reset; 5160 ioc->schedule_target_reset = &mptsas_schedule_target_reset;
5161 ioc->schedule_dead_ioc_flush_running_cmds =
5162 &mptscsih_flush_running_cmds;
5145 /* Added sanity check on readiness of the MPT adapter. 5163 /* Added sanity check on readiness of the MPT adapter.
5146 */ 5164 */
5147 if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) { 5165 if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) {
@@ -5239,6 +5257,21 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5239 sh->sg_tablesize = numSGE; 5257 sh->sg_tablesize = numSGE;
5240 } 5258 }
5241 5259
5260 if (mpt_loadtime_max_sectors) {
5261 if (mpt_loadtime_max_sectors < 64 ||
5262 mpt_loadtime_max_sectors > 8192) {
 5263	printk(MYIOC_s_INFO_FMT "Invalid value passed for "
 5264	 "mpt_loadtime_max_sectors %d. "
 5265	 "Range is 64 to 8192\n", ioc->name,
5266 mpt_loadtime_max_sectors);
5267 }
5268 mpt_loadtime_max_sectors &= 0xFFFFFFFE;
5269 dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 5270	 "Resetting max_sectors to %d from %d\n",
5271 ioc->name, mpt_loadtime_max_sectors, sh->max_sectors));
5272 sh->max_sectors = mpt_loadtime_max_sectors;
5273 }
5274
5242 hd = shost_priv(sh); 5275 hd = shost_priv(sh);
5243 hd->ioc = ioc; 5276 hd->ioc = ioc;
5244 5277
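
The mpt_loadtime_max_sectors plumbing above reduces to range-checking a load-time integer and forcing it even before it overrides the host's max_sectors. A sketch of that validate-and-adjust step in plain C; note that falling back to the current setting on out-of-range input is an assumption made here for illustration, since the patch itself only warns and then masks the value even:

    #include <stdio.h>

    #define SECTORS_MIN 64
    #define SECTORS_MAX 8192

    static int adjust_max_sectors(int requested, int current_max)
    {
        if (requested < SECTORS_MIN || requested > SECTORS_MAX) {
            fprintf(stderr, "invalid max_sectors %d, range is %d to %d\n",
                    requested, SECTORS_MIN, SECTORS_MAX);
            return current_max;    /* keep the existing setting */
        }
        requested &= ~1;           /* force an even sector count */
        printf("resetting max_sectors to %d from %d\n",
               requested, current_max);
        return requested;
    }

    int main(void)
    {
        printf("-> %d\n", adjust_max_sectors(8001, 8192));  /* 8000 */
        printf("-> %d\n", adjust_max_sectors(33, 8192));    /* rejected */
        return 0;
    }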
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index ce61a576976..0c3ced70707 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -830,7 +830,8 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
830 if ((pScsiReq->CDB[0] == READ_6 && ((pScsiReq->CDB[1] & 0x02) == 0)) || 830 if ((pScsiReq->CDB[0] == READ_6 && ((pScsiReq->CDB[1] & 0x02) == 0)) ||
831 pScsiReq->CDB[0] == READ_10 || 831 pScsiReq->CDB[0] == READ_10 ||
832 pScsiReq->CDB[0] == READ_12 || 832 pScsiReq->CDB[0] == READ_12 ||
833 pScsiReq->CDB[0] == READ_16 || 833 (pScsiReq->CDB[0] == READ_16 &&
834 ((pScsiReq->CDB[1] & 0x02) == 0)) ||
834 pScsiReq->CDB[0] == VERIFY || 835 pScsiReq->CDB[0] == VERIFY ||
835 pScsiReq->CDB[0] == VERIFY_16) { 836 pScsiReq->CDB[0] == VERIFY_16) {
836 if (scsi_bufflen(sc) != 837 if (scsi_bufflen(sc) !=
@@ -1024,7 +1025,7 @@ out:
1024 * 1025 *
1025 * Must be called while new I/Os are being queued. 1026 * Must be called while new I/Os are being queued.
1026 */ 1027 */
1027static void 1028void
1028mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd) 1029mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
1029{ 1030{
1030 MPT_ADAPTER *ioc = hd->ioc; 1031 MPT_ADAPTER *ioc = hd->ioc;
@@ -1055,6 +1056,7 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
1055 sc->scsi_done(sc); 1056 sc->scsi_done(sc);
1056 } 1057 }
1057} 1058}
1059EXPORT_SYMBOL(mptscsih_flush_running_cmds);
1058 1060
1059/* 1061/*
1060 * mptscsih_search_running_cmds - Delete any commands associated 1062 * mptscsih_search_running_cmds - Delete any commands associated
@@ -1629,7 +1631,13 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun,
1629 return 0; 1631 return 0;
1630 } 1632 }
1631 1633
1632 if (ioc_raw_state & MPI_DOORBELL_ACTIVE) { 1634 /* DOORBELL ACTIVE check is not required if
1635 * MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q is supported.
1636 */
1637
1638 if (!((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q)
1639 && (ioc->facts.MsgVersion >= MPI_VERSION_01_05)) &&
1640 (ioc_raw_state & MPI_DOORBELL_ACTIVE)) {
1633 printk(MYIOC_s_WARN_FMT 1641 printk(MYIOC_s_WARN_FMT
1634 "TaskMgmt type=%x: ioc_state: " 1642 "TaskMgmt type=%x: ioc_state: "
1635 "DOORBELL_ACTIVE (0x%x)!\n", 1643 "DOORBELL_ACTIVE (0x%x)!\n",
@@ -1728,7 +1736,9 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun,
1728 printk(MYIOC_s_WARN_FMT 1736 printk(MYIOC_s_WARN_FMT
1729 "Issuing Reset from %s!! doorbell=0x%08x\n", 1737 "Issuing Reset from %s!! doorbell=0x%08x\n",
1730 ioc->name, __func__, mpt_GetIocState(ioc, 0)); 1738 ioc->name, __func__, mpt_GetIocState(ioc, 0));
1731 retval = mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP); 1739 retval = (ioc->bus_type == SAS) ?
1740 mpt_HardResetHandler(ioc, CAN_SLEEP) :
1741 mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP);
1732 mpt_free_msg_frame(ioc, mf); 1742 mpt_free_msg_frame(ioc, mf);
1733 } 1743 }
1734 1744
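
The reworked doorbell test in mptscsih_IssueTaskMgmt only skips the MPI_DOORBELL_ACTIVE bail-out when the IOC both advertises the high-priority queue capability and speaks MPI 1.5 or later; the compound predicate is easy to misread. The same logic as a small standalone check, with illustrative stand-in values for the MPI constants:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* illustrative stand-ins for the MPI definitions */
    #define CAP_HIGH_PRI_Q  0x00010000u
    #define MPI_VER_01_05   0x0105u
    #define DOORBELL_ACTIVE 0x08000000u

    static bool must_defer_tm(uint32_t caps, uint16_t msg_ver, uint32_t raw_state)
    {
        bool high_pri_ok = (caps & CAP_HIGH_PRI_Q) && msg_ver >= MPI_VER_01_05;

        /* bail out only when the high-priority queue is unusable
         * and the doorbell is busy */
        return !high_pri_ok && (raw_state & DOORBELL_ACTIVE);
    }

    int main(void)
    {
        printf("%d\n", must_defer_tm(0, 0x0104, DOORBELL_ACTIVE));              /* 1 */
        printf("%d\n", must_defer_tm(CAP_HIGH_PRI_Q, 0x0105, DOORBELL_ACTIVE)); /* 0 */
        return 0;
    }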
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index 45a5ff3eff6..43e75ff3992 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -135,3 +135,4 @@ extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id);
135extern struct device_attribute *mptscsih_host_attrs[]; 135extern struct device_attribute *mptscsih_host_attrs[];
136extern struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i); 136extern struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
137extern void mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code); 137extern void mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code);
138extern void mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd);
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 9a122280246..6547ff46941 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -160,7 +160,8 @@ again:
160 DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); 160 DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
161 q->handler(q->irq_ptr->cdev, 161 q->handler(q->irq_ptr->cdev,
162 QDIO_ERROR_ACTIVATE_CHECK_CONDITION, 162 QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
163 0, -1, -1, q->irq_ptr->int_parm); 163 q->nr, q->first_to_kick, count,
164 q->irq_ptr->int_parm);
164 return 0; 165 return 0;
165 } 166 }
166 return count - tmp_count; 167 return count - tmp_count;
@@ -206,7 +207,8 @@ again:
206 DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); 207 DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
207 q->handler(q->irq_ptr->cdev, 208 q->handler(q->irq_ptr->cdev,
208 QDIO_ERROR_ACTIVATE_CHECK_CONDITION, 209 QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
209 0, -1, -1, q->irq_ptr->int_parm); 210 q->nr, q->first_to_kick, count,
211 q->irq_ptr->int_parm);
210 return 0; 212 return 0;
211 } 213 }
212 WARN_ON(tmp_count); 214 WARN_ON(tmp_count);
@@ -1070,6 +1072,7 @@ static void qdio_handle_activate_check(struct ccw_device *cdev,
1070{ 1072{
1071 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1073 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1072 struct qdio_q *q; 1074 struct qdio_q *q;
1075 int count;
1073 1076
1074 DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no); 1077 DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
1075 DBF_ERROR("intp :%lx", intparm); 1078 DBF_ERROR("intp :%lx", intparm);
@@ -1083,8 +1086,10 @@ static void qdio_handle_activate_check(struct ccw_device *cdev,
1083 dump_stack(); 1086 dump_stack();
1084 goto no_handler; 1087 goto no_handler;
1085 } 1088 }
1089
1090 count = sub_buf(q->first_to_check, q->first_to_kick);
1086 q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION, 1091 q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
1087 0, -1, -1, irq_ptr->int_parm); 1092 q->nr, q->first_to_kick, count, irq_ptr->int_parm);
1088no_handler: 1093no_handler:
1089 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); 1094 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
1090} 1095}
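
qdio_handle_activate_check now hands the handler the queue number, the first buffer to kick, and a real count instead of the former 0/-1/-1 placeholders; the count is the forward distance between two indices on the circular buffer ring. A sketch of that modular distance, assuming the ring size is a power of two as with qdio's 128-buffer queues:

    #include <stdio.h>

    #define QDIO_MAX_BUFFERS_PER_Q 128   /* ring size, a power of two */

    /* buffers from 'start' up to (but excluding) 'end', walking forward */
    static int sub_buf(int end, int start)
    {
        return (end - start) & (QDIO_MAX_BUFFERS_PER_Q - 1);
    }

    int main(void)
    {
        printf("%d\n", sub_buf(10, 5));   /* 5 */
        printf("%d\n", sub_buf(3, 120));  /* 11, wraps around the ring */
        return 0;
    }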
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index dd8bd670a6b..d9a46a429bc 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -381,6 +381,7 @@ static void setup_qdr(struct qdio_irq *irq_ptr,
381 int i; 381 int i;
382 382
383 irq_ptr->qdr->qfmt = qdio_init->q_format; 383 irq_ptr->qdr->qfmt = qdio_init->q_format;
384 irq_ptr->qdr->ac = qdio_init->qdr_ac;
384 irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs; 385 irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
385 irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs; 386 irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
386 irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */ 387 irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 96d1462e0bf..967e7b70e97 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -163,6 +163,42 @@ void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
163 spin_unlock_irqrestore(&dbf->hba_lock, flags); 163 spin_unlock_irqrestore(&dbf->hba_lock, flags);
164} 164}
165 165
166/**
167 * zfcp_dbf_hba_def_err - trace event for deferred error messages
168 * @adapter: pointer to struct zfcp_adapter
169 * @req_id: request id which caused the deferred error message
170 * @scount: number of sbals incl. the signaling sbal
171 * @pl: array of all involved sbals
172 */
173void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
174 void **pl)
175{
176 struct zfcp_dbf *dbf = adapter->dbf;
177 struct zfcp_dbf_pay *payload = &dbf->pay_buf;
178 unsigned long flags;
179 u16 length;
180
181 if (!pl)
182 return;
183
184 spin_lock_irqsave(&dbf->pay_lock, flags);
185 memset(payload, 0, sizeof(*payload));
186
187 memcpy(payload->area, "def_err", 7);
188 payload->fsf_req_id = req_id;
189 payload->counter = 0;
190 length = min((u16)sizeof(struct qdio_buffer),
191 (u16)ZFCP_DBF_PAY_MAX_REC);
192
193 while ((char *)pl[payload->counter] && payload->counter < scount) {
194 memcpy(payload->data, (char *)pl[payload->counter], length);
195 debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length));
196 payload->counter++;
197 }
198
199 spin_unlock_irqrestore(&dbf->pay_lock, flags);
200}
201
166static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec, 202static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
167 struct zfcp_adapter *adapter, 203 struct zfcp_adapter *adapter,
168 struct zfcp_port *port, 204 struct zfcp_port *port,
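
zfcp_dbf_hba_def_err walks an array of SBAL pointers that is both NULL-terminated and bounded by scount, copying a fixed-length record from each entry into the trace. A standalone sketch of that loop shape, with the record length shortened for illustration; as in the handler, the caller guarantees a terminating NULL entry:

    #include <stdio.h>
    #include <string.h>

    #define REC_LEN 8   /* bytes traced per SBAL, shortened for the demo */

    static void trace_deferred_error(void **pl, unsigned int scount)
    {
        char rec[REC_LEN];
        unsigned int i = 0;

        if (!pl)
            return;

        /* stop at the first NULL entry or after scount entries */
        while (pl[i] && i < scount) {
            memcpy(rec, pl[i], sizeof(rec));
            printf("trace record %u: %.8s\n", i, rec);
            i++;
        }
    }

    int main(void)
    {
        char a[REC_LEN] = "sbal-00", b[REC_LEN] = "sbal-01";
        void *pl[] = { a, b, NULL };   /* NULL-terminated, like the handler's */

        trace_deferred_error(pl, 2);
        return 0;
    }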
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 527ba48eea5..ed5d921e82c 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -72,6 +72,7 @@ struct zfcp_reqlist;
72#define ZFCP_STATUS_COMMON_NOESC 0x00200000 72#define ZFCP_STATUS_COMMON_NOESC 0x00200000
73 73
74/* adapter status */ 74/* adapter status */
75#define ZFCP_STATUS_ADAPTER_MB_ACT 0x00000001
75#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002 76#define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002
76#define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED 0x00000004 77#define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED 0x00000004
77#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008 78#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
@@ -314,4 +315,10 @@ struct zfcp_fsf_req {
314 void (*handler)(struct zfcp_fsf_req *); 315 void (*handler)(struct zfcp_fsf_req *);
315}; 316};
316 317
318static inline
319int zfcp_adapter_multi_buffer_active(struct zfcp_adapter *adapter)
320{
321 return atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_MB_ACT;
322}
323
317#endif /* ZFCP_DEF_H */ 324#endif /* ZFCP_DEF_H */
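
The new ZFCP_STATUS_ADAPTER_MB_ACT bit and its zfcp_adapter_multi_buffer_active() accessor follow the usual atomic status-word idiom: the feature state is latched once at queue setup and then read lock-free on the I/O path. A plain C11 sketch of the idiom; only the flag value mirrors the definition above, the rest is illustrative:

    #include <stdio.h>
    #include <stdatomic.h>

    #define STATUS_MB_ACT 0x00000001u   /* mirrors ZFCP_STATUS_ADAPTER_MB_ACT */

    struct adapter { atomic_uint status; };

    static inline int multi_buffer_active(struct adapter *a)
    {
        /* lock-free read on the I/O hot path */
        return atomic_load(&a->status) & STATUS_MB_ACT;
    }

    int main(void)
    {
        struct adapter a;

        atomic_init(&a.status, 0);
        printf("%d\n", multi_buffer_active(&a));   /* 0 */

        /* latched once during queue setup */
        atomic_fetch_or(&a.status, STATUS_MB_ACT);
        printf("%d\n", multi_buffer_active(&a));   /* 1 */
        return 0;
    }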
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 03627cfd81c..2302e1cfb76 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -53,6 +53,7 @@ extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
53extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *); 53extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
54extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *); 54extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
55extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *); 55extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
56extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
56extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32); 57extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
57extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *); 58extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
58extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *); 59extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 022fb6a8cb8..e9a787e2e6a 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -936,39 +936,47 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
936 struct scatterlist *sg_resp) 936 struct scatterlist *sg_resp)
937{ 937{
938 struct zfcp_adapter *adapter = req->adapter; 938 struct zfcp_adapter *adapter = req->adapter;
939 struct zfcp_qdio *qdio = adapter->qdio;
940 struct fsf_qtcb *qtcb = req->qtcb;
939 u32 feat = adapter->adapter_features; 941 u32 feat = adapter->adapter_features;
940 int bytes;
941 942
942 if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) { 943 if (zfcp_adapter_multi_buffer_active(adapter)) {
943 if (!zfcp_qdio_sg_one_sbale(sg_req) || 944 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
944 !zfcp_qdio_sg_one_sbale(sg_resp)) 945 return -EIO;
945 return -EOPNOTSUPP; 946 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
947 return -EIO;
946 948
947 zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req, 949 zfcp_qdio_set_data_div(qdio, &req->qdio_req,
948 sg_req, sg_resp); 950 zfcp_qdio_sbale_count(sg_req));
951 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
952 zfcp_qdio_set_scount(qdio, &req->qdio_req);
949 return 0; 953 return 0;
950 } 954 }
951 955
952 /* use single, unchained SBAL if it can hold the request */ 956 /* use single, unchained SBAL if it can hold the request */
953 if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) { 957 if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
954 zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req, 958 zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req,
955 sg_req, sg_resp); 959 sg_req, sg_resp);
956 return 0; 960 return 0;
957 } 961 }
958 962
959 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req, sg_req); 963 if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS))
960 if (bytes <= 0) 964 return -EOPNOTSUPP;
965
966 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
961 return -EIO; 967 return -EIO;
962 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
963 req->qtcb->bottom.support.req_buf_length = bytes;
964 zfcp_qdio_skip_to_last_sbale(&req->qdio_req);
965 968
966 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req, 969 qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req);
967 sg_resp); 970
968 req->qtcb->bottom.support.resp_buf_length = bytes; 971 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
969 if (bytes <= 0) 972 zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req);
973
974 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
970 return -EIO; 975 return -EIO;
971 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req); 976
977 qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp);
978
979 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
972 980
973 return 0; 981 return 0;
974} 982}
@@ -1119,7 +1127,8 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1119 1127
1120 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; 1128 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1121 1129
1122 zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2); 1130 if (!zfcp_adapter_multi_buffer_active(adapter))
1131 zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
1123 1132
1124 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout); 1133 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);
1125 1134
@@ -2162,7 +2171,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
2162 struct zfcp_fsf_req *req; 2171 struct zfcp_fsf_req *req;
2163 struct fcp_cmnd *fcp_cmnd; 2172 struct fcp_cmnd *fcp_cmnd;
2164 u8 sbtype = SBAL_SFLAGS0_TYPE_READ; 2173 u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
2165 int real_bytes, retval = -EIO, dix_bytes = 0; 2174 int retval = -EIO;
2166 struct scsi_device *sdev = scsi_cmnd->device; 2175 struct scsi_device *sdev = scsi_cmnd->device;
2167 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 2176 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2168 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; 2177 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
@@ -2207,7 +2216,8 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
2207 io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF; 2216 io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
2208 } 2217 }
2209 2218
2210 zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction); 2219 if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
2220 goto failed_scsi_cmnd;
2211 2221
2212 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; 2222 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2213 zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0); 2223 zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
@@ -2215,18 +2225,22 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
2215 if (scsi_prot_sg_count(scsi_cmnd)) { 2225 if (scsi_prot_sg_count(scsi_cmnd)) {
2216 zfcp_qdio_set_data_div(qdio, &req->qdio_req, 2226 zfcp_qdio_set_data_div(qdio, &req->qdio_req,
2217 scsi_prot_sg_count(scsi_cmnd)); 2227 scsi_prot_sg_count(scsi_cmnd));
2218 dix_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, 2228 retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2229 scsi_prot_sglist(scsi_cmnd));
2230 if (retval)
2231 goto failed_scsi_cmnd;
2232 io->prot_data_length = zfcp_qdio_real_bytes(
2219 scsi_prot_sglist(scsi_cmnd)); 2233 scsi_prot_sglist(scsi_cmnd));
2220 io->prot_data_length = dix_bytes;
2221 } 2234 }
2222 2235
2223 real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, 2236 retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2224 scsi_sglist(scsi_cmnd)); 2237 scsi_sglist(scsi_cmnd));
2225 2238 if (unlikely(retval))
2226 if (unlikely(real_bytes < 0) || unlikely(dix_bytes < 0))
2227 goto failed_scsi_cmnd; 2239 goto failed_scsi_cmnd;
2228 2240
2229 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req); 2241 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
2242 if (zfcp_adapter_multi_buffer_active(adapter))
2243 zfcp_qdio_set_scount(qdio, &req->qdio_req);
2230 2244
2231 retval = zfcp_fsf_req_send(req); 2245 retval = zfcp_fsf_req_send(req);
2232 if (unlikely(retval)) 2246 if (unlikely(retval))
@@ -2328,7 +2342,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2328 struct zfcp_qdio *qdio = adapter->qdio; 2342 struct zfcp_qdio *qdio = adapter->qdio;
2329 struct zfcp_fsf_req *req = NULL; 2343 struct zfcp_fsf_req *req = NULL;
2330 struct fsf_qtcb_bottom_support *bottom; 2344 struct fsf_qtcb_bottom_support *bottom;
2331 int retval = -EIO, bytes; 2345 int retval = -EIO;
2332 u8 direction; 2346 u8 direction;
2333 2347
2334 if (!(adapter->adapter_features & FSF_FEATURE_CFDC)) 2348 if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
@@ -2361,13 +2375,17 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2361 bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE; 2375 bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
2362 bottom->option = fsf_cfdc->option; 2376 bottom->option = fsf_cfdc->option;
2363 2377
2364 bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, fsf_cfdc->sg); 2378 retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, fsf_cfdc->sg);
2365 2379
2366 if (bytes != ZFCP_CFDC_MAX_SIZE) { 2380 if (retval ||
2381 (zfcp_qdio_real_bytes(fsf_cfdc->sg) != ZFCP_CFDC_MAX_SIZE)) {
2367 zfcp_fsf_req_free(req); 2382 zfcp_fsf_req_free(req);
2383 retval = -EIO;
2368 goto out; 2384 goto out;
2369 } 2385 }
2370 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req); 2386 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2387 if (zfcp_adapter_multi_buffer_active(adapter))
2388 zfcp_qdio_set_scount(qdio, &req->qdio_req);
2371 2389
2372 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); 2390 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2373 retval = zfcp_fsf_req_send(req); 2391 retval = zfcp_fsf_req_send(req);
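
The common thread in the zfcp_fsf.c hunks is a return-convention change: zfcp_qdio_sbals_from_sg() used to return a byte count or a negative error, and its callers now expect a plain status, recomputing the byte count from the scatterlist via zfcp_qdio_real_bytes(). A sketch of the resulting caller shape, with a simplified list type standing in for struct scatterlist:

    #include <stdio.h>
    #include <errno.h>

    struct sg { unsigned int length; struct sg *next; };

    /* mapping step reports status only; bytes are no longer its business */
    static int map_sg(struct sg *sg)
    {
        for (; sg; sg = sg->next)
            if (!sg->length)
                return -EINVAL;   /* mapping failure */
        return 0;
    }

    /* byte count recomputed separately, as zfcp_qdio_real_bytes() now does */
    static unsigned int real_bytes(struct sg *sg)
    {
        unsigned int bytes = 0;

        for (; sg; sg = sg->next)
            bytes += sg->length;
        return bytes;
    }

    int main(void)
    {
        struct sg b = { 512, NULL }, a = { 4096, &b };

        if (map_sg(&a))
            return 1;
        printf("req_buf_length = %u\n", real_bytes(&a));   /* 4608 */
        return 0;
    }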
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index d9c40ea73ee..df9e69f5474 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -15,6 +15,10 @@
15 15
16#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer)) 16#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
17 17
18static bool enable_multibuffer;
19module_param_named(datarouter, enable_multibuffer, bool, 0400);
20MODULE_PARM_DESC(datarouter, "Enable hardware data router support");
21
18static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal) 22static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
19{ 23{
20 int pos; 24 int pos;
@@ -37,8 +41,11 @@ static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
37 41
38 dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n"); 42 dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
39 43
40 if (qdio_err & QDIO_ERROR_SLSB_STATE) 44 if (qdio_err & QDIO_ERROR_SLSB_STATE) {
41 zfcp_qdio_siosl(adapter); 45 zfcp_qdio_siosl(adapter);
46 zfcp_erp_adapter_shutdown(adapter, 0, id);
47 return;
48 }
42 zfcp_erp_adapter_reopen(adapter, 49 zfcp_erp_adapter_reopen(adapter,
43 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | 50 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
44 ZFCP_STATUS_COMMON_ERP_FAILED, id); 51 ZFCP_STATUS_COMMON_ERP_FAILED, id);
@@ -93,9 +100,27 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
93 unsigned long parm) 100 unsigned long parm)
94{ 101{
95 struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; 102 struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
96 int sbal_idx, sbal_no; 103 struct zfcp_adapter *adapter = qdio->adapter;
104 struct qdio_buffer_element *sbale;
105 int sbal_no, sbal_idx;
106 void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
107 u64 req_id;
108 u8 scount;
97 109
98 if (unlikely(qdio_err)) { 110 if (unlikely(qdio_err)) {
111 memset(pl, 0, ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
112 if (zfcp_adapter_multi_buffer_active(adapter)) {
113 sbale = qdio->res_q[idx]->element;
114 req_id = (u64) sbale->addr;
115 scount = sbale->scount + 1; /* incl. signaling SBAL */
116
117 for (sbal_no = 0; sbal_no < scount; sbal_no++) {
118 sbal_idx = (idx + sbal_no) %
119 QDIO_MAX_BUFFERS_PER_Q;
120 pl[sbal_no] = qdio->res_q[sbal_idx];
121 }
122 zfcp_dbf_hba_def_err(adapter, req_id, scount, pl);
123 }
99 zfcp_qdio_handler_error(qdio, "qdires1", qdio_err); 124 zfcp_qdio_handler_error(qdio, "qdires1", qdio_err);
100 return; 125 return;
101 } 126 }
@@ -155,7 +180,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
155static struct qdio_buffer_element * 180static struct qdio_buffer_element *
156zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) 181zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
157{ 182{
158 if (q_req->sbale_curr == ZFCP_QDIO_LAST_SBALE_PER_SBAL) 183 if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1)
159 return zfcp_qdio_sbal_chain(qdio, q_req); 184 return zfcp_qdio_sbal_chain(qdio, q_req);
160 q_req->sbale_curr++; 185 q_req->sbale_curr++;
161 return zfcp_qdio_sbale_curr(qdio, q_req); 186 return zfcp_qdio_sbale_curr(qdio, q_req);
@@ -167,13 +192,12 @@ zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
167 * @q_req: pointer to struct zfcp_qdio_req 192 * @q_req: pointer to struct zfcp_qdio_req
168 * @sg: scatter-gather list 193 * @sg: scatter-gather list
169 * @max_sbals: upper bound for number of SBALs to be used 194 * @max_sbals: upper bound for number of SBALs to be used
170 * Returns: number of bytes, or error (negativ) 195 * Returns: zero or -EINVAL on error
171 */ 196 */
172int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, 197int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
173 struct scatterlist *sg) 198 struct scatterlist *sg)
174{ 199{
175 struct qdio_buffer_element *sbale; 200 struct qdio_buffer_element *sbale;
176 int bytes = 0;
177 201
178 /* set storage-block type for this request */ 202 /* set storage-block type for this request */
179 sbale = zfcp_qdio_sbale_req(qdio, q_req); 203 sbale = zfcp_qdio_sbale_req(qdio, q_req);
@@ -187,14 +211,10 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
187 q_req->sbal_number); 211 q_req->sbal_number);
188 return -EINVAL; 212 return -EINVAL;
189 } 213 }
190
191 sbale->addr = sg_virt(sg); 214 sbale->addr = sg_virt(sg);
192 sbale->length = sg->length; 215 sbale->length = sg->length;
193
194 bytes += sg->length;
195 } 216 }
196 217 return 0;
197 return bytes;
198} 218}
199 219
200static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) 220static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
@@ -283,6 +303,8 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
283 memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8); 303 memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
284 ASCEBC(id->adapter_name, 8); 304 ASCEBC(id->adapter_name, 8);
285 id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV; 305 id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
306 if (enable_multibuffer)
307 id->qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
286 id->no_input_qs = 1; 308 id->no_input_qs = 1;
287 id->no_output_qs = 1; 309 id->no_output_qs = 1;
288 id->input_handler = zfcp_qdio_int_resp; 310 id->input_handler = zfcp_qdio_int_resp;
@@ -378,6 +400,17 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
378 atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED, 400 atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED,
379 &qdio->adapter->status); 401 &qdio->adapter->status);
380 402
403 if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) {
404 atomic_set_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
405 qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER;
406 } else {
407 atomic_clear_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status);
408 qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
409 }
410
411 qdio->max_sbale_per_req =
412 ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal
413 - 2;
381 if (qdio_activate(cdev)) 414 if (qdio_activate(cdev))
382 goto failed_qdio; 415 goto failed_qdio;
383 416
@@ -397,6 +430,11 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
397 atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q); 430 atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q);
398 atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status); 431 atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
399 432
433 if (adapter->scsi_host) {
434 adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req;
435 adapter->scsi_host->max_sectors = qdio->max_sbale_per_req * 8;
436 }
437
400 return 0; 438 return 0;
401 439
402failed_qdio: 440failed_qdio:
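
The zfcp_qdio_open() additions size the SCSI host from the negotiated queue geometry: with the hardware data router active every SBALE of an SBAL is usable, otherwise one stays reserved, and two SBALEs of each chain always carry the request ID and QTCB. Since one SBALE covers a 4 KiB page, i.e. eight 512-byte sectors, max_sectors falls out arithmetically. A sketch of the computation, assuming qdio's 16 elements per buffer and the 36-SBAL chain limit from zfcp_qdio.h:

    #include <stdio.h>
    #include <stdbool.h>

    #define QDIO_MAX_ELEMENTS_PER_BUFFER 16
    #define ZFCP_QDIO_MAX_SBALS_PER_REQ  36

    static void size_host(bool multibuffer)
    {
        /* without multibuffer support the last SBALE stays reserved */
        int per_sbal = multibuffer ? QDIO_MAX_ELEMENTS_PER_BUFFER
                                   : QDIO_MAX_ELEMENTS_PER_BUFFER - 1;
        /* request ID + QTCB occupy two SBALEs of every chain */
        int per_req = ZFCP_QDIO_MAX_SBALS_PER_REQ * per_sbal - 2;

        /* one SBALE = one 4 KiB page = eight 512-byte sectors */
        printf("sg_tablesize=%d max_sectors=%d\n", per_req, per_req * 8);
    }

    int main(void)
    {
        size_host(false);   /* sg_tablesize=538 max_sectors=4304 */
        size_host(true);    /* sg_tablesize=574 max_sectors=4592 */
        return 0;
    }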
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 54e22ace012..8ac7f5342d2 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -13,20 +13,9 @@
13 13
14#define ZFCP_QDIO_SBALE_LEN PAGE_SIZE 14#define ZFCP_QDIO_SBALE_LEN PAGE_SIZE
15 15
16/* DMQ bug workaround: don't use last SBALE */
17#define ZFCP_QDIO_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
18
19/* index of last SBALE (with respect to DMQ bug workaround) */
20#define ZFCP_QDIO_LAST_SBALE_PER_SBAL (ZFCP_QDIO_MAX_SBALES_PER_SBAL - 1)
21
22/* Max SBALS for chaining */ 16/* Max SBALS for chaining */
23#define ZFCP_QDIO_MAX_SBALS_PER_REQ 36 17#define ZFCP_QDIO_MAX_SBALS_PER_REQ 36
24 18
25/* max. number of (data buffer) SBALEs in largest SBAL chain
26 * request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */
27#define ZFCP_QDIO_MAX_SBALES_PER_REQ \
28 (ZFCP_QDIO_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2)
29
30/** 19/**
31 * struct zfcp_qdio - basic qdio data structure 20 * struct zfcp_qdio - basic qdio data structure
32 * @res_q: response queue 21 * @res_q: response queue
@@ -53,6 +42,8 @@ struct zfcp_qdio {
53 atomic_t req_q_full; 42 atomic_t req_q_full;
54 wait_queue_head_t req_q_wq; 43 wait_queue_head_t req_q_wq;
55 struct zfcp_adapter *adapter; 44 struct zfcp_adapter *adapter;
45 u16 max_sbale_per_sbal;
46 u16 max_sbale_per_req;
56}; 47};
57 48
58/** 49/**
@@ -155,7 +146,7 @@ void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
155{ 146{
156 struct qdio_buffer_element *sbale; 147 struct qdio_buffer_element *sbale;
157 148
158 BUG_ON(q_req->sbale_curr == ZFCP_QDIO_LAST_SBALE_PER_SBAL); 149 BUG_ON(q_req->sbale_curr == qdio->max_sbale_per_sbal - 1);
159 q_req->sbale_curr++; 150 q_req->sbale_curr++;
160 sbale = zfcp_qdio_sbale_curr(qdio, q_req); 151 sbale = zfcp_qdio_sbale_curr(qdio, q_req);
161 sbale->addr = data; 152 sbale->addr = data;
@@ -195,9 +186,10 @@ int zfcp_qdio_sg_one_sbale(struct scatterlist *sg)
195 * @q_req: The current zfcp_qdio_req 186 * @q_req: The current zfcp_qdio_req
196 */ 187 */
197static inline 188static inline
198void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio_req *q_req) 189void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio *qdio,
190 struct zfcp_qdio_req *q_req)
199{ 191{
200 q_req->sbale_curr = ZFCP_QDIO_LAST_SBALE_PER_SBAL; 192 q_req->sbale_curr = qdio->max_sbale_per_sbal - 1;
201} 193}
202 194
203/** 195/**
@@ -228,8 +220,52 @@ void zfcp_qdio_set_data_div(struct zfcp_qdio *qdio,
228{ 220{
229 struct qdio_buffer_element *sbale; 221 struct qdio_buffer_element *sbale;
230 222
231 sbale = &qdio->req_q[q_req->sbal_first]->element[0]; 223 sbale = qdio->req_q[q_req->sbal_first]->element;
232 sbale->length = count; 224 sbale->length = count;
233} 225}
234 226
227/**
228 * zfcp_qdio_sbale_count - count sbale used
229 * @sg: pointer to struct scatterlist
230 */
231static inline
232unsigned int zfcp_qdio_sbale_count(struct scatterlist *sg)
233{
234 unsigned int count = 0;
235
236 for (; sg; sg = sg_next(sg))
237 count++;
238
239 return count;
240}
241
242/**
243 * zfcp_qdio_real_bytes - count bytes used
244 * @sg: pointer to struct scatterlist
245 */
246static inline
247unsigned int zfcp_qdio_real_bytes(struct scatterlist *sg)
248{
249 unsigned int real_bytes = 0;
250
251 for (; sg; sg = sg_next(sg))
252 real_bytes += sg->length;
253
254 return real_bytes;
255}
256
257/**
258 * zfcp_qdio_set_scount - set SBAL count value
259 * @qdio: pointer to struct zfcp_qdio
260 * @q_req: The current zfcp_qdio_req
261 */
262static inline
263void zfcp_qdio_set_scount(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
264{
265 struct qdio_buffer_element *sbale;
266
267 sbale = qdio->req_q[q_req->sbal_first]->element;
268 sbale->scount = q_req->sbal_number - 1;
269}
270
235#endif /* ZFCP_QDIO_H */ 271#endif /* ZFCP_QDIO_H */
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 7cac873c738..09126a9d62f 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -24,11 +24,8 @@ module_param_named(queue_depth, default_depth, uint, 0600);
24MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices"); 24MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
25 25
26static bool enable_dif; 26static bool enable_dif;
27 27module_param_named(dif, enable_dif, bool, 0400);
28#ifdef CONFIG_ZFCP_DIF
29module_param_named(dif, enable_dif, bool, 0600);
30MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support"); 28MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support");
31#endif
32 29
33static bool allow_lun_scan = 1; 30static bool allow_lun_scan = 1;
34module_param(allow_lun_scan, bool, 0600); 31module_param(allow_lun_scan, bool, 0600);
@@ -309,8 +306,8 @@ static struct scsi_host_template zfcp_scsi_host_template = {
309 .proc_name = "zfcp", 306 .proc_name = "zfcp",
310 .can_queue = 4096, 307 .can_queue = 4096,
311 .this_id = -1, 308 .this_id = -1,
312 .sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ, 309 .sg_tablesize = 1, /* adjusted later */
313 .max_sectors = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8), 310 .max_sectors = 8, /* adjusted later */
314 .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1, 311 .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1,
315 .cmd_per_lun = 1, 312 .cmd_per_lun = 1,
316 .use_clustering = 1, 313 .use_clustering = 1,
@@ -668,9 +665,9 @@ void zfcp_scsi_set_prot(struct zfcp_adapter *adapter)
668 adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) { 665 adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) {
669 mask |= SHOST_DIX_TYPE1_PROTECTION; 666 mask |= SHOST_DIX_TYPE1_PROTECTION;
670 scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP); 667 scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP);
671 shost->sg_prot_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2; 668 shost->sg_prot_tablesize = adapter->qdio->max_sbale_per_req / 2;
672 shost->sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2; 669 shost->sg_tablesize = adapter->qdio->max_sbale_per_req / 2;
673 shost->max_sectors = ZFCP_QDIO_MAX_SBALES_PER_REQ * 8 / 2; 670 shost->max_sectors = shost->sg_tablesize * 8;
674 } 671 }
675 672
676 scsi_host_set_prot(shost, mask); 673 scsi_host_set_prot(shost, mask);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 3878b739508..aa573c39f59 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -309,6 +309,7 @@ config SCSI_FC_TGT_ATTRS
309config SCSI_ISCSI_ATTRS 309config SCSI_ISCSI_ATTRS
310 tristate "iSCSI Transport Attributes" 310 tristate "iSCSI Transport Attributes"
311 depends on SCSI && NET 311 depends on SCSI && NET
312 select BLK_DEV_BSGLIB
312 help 313 help
313 If you wish to export transport-specific information about 314 If you wish to export transport-specific information about
314 each attached iSCSI device to sysfs, say Y. 315 each attached iSCSI device to sysfs, say Y.
@@ -559,6 +560,15 @@ source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
559source "drivers/scsi/aic94xx/Kconfig" 560source "drivers/scsi/aic94xx/Kconfig"
560source "drivers/scsi/mvsas/Kconfig" 561source "drivers/scsi/mvsas/Kconfig"
561 562
563config SCSI_MVUMI
564 tristate "Marvell UMI driver"
565 depends on SCSI && PCI
566 help
 567	 Module for the Marvell Universal Message Interface (UMI) driver.
568
569 To compile this driver as a module, choose M here: the
570 module will be called mvumi.
571
562config SCSI_DPT_I2O 572config SCSI_DPT_I2O
563 tristate "Adaptec I2O RAID support " 573 tristate "Adaptec I2O RAID support "
564 depends on SCSI && PCI && VIRT_TO_BUS 574 depends on SCSI && PCI && VIRT_TO_BUS
@@ -1872,10 +1882,6 @@ config ZFCP
1872 called zfcp. If you want to compile it as a module, say M here 1882 called zfcp. If you want to compile it as a module, say M here
1873 and read <file:Documentation/kbuild/modules.txt>. 1883 and read <file:Documentation/kbuild/modules.txt>.
1874 1884
1875config ZFCP_DIF
1876 tristate "T10 DIF/DIX support for the zfcp driver (EXPERIMENTAL)"
1877 depends on ZFCP && EXPERIMENTAL
1878
1879config SCSI_PMCRAID 1885config SCSI_PMCRAID
1880 tristate "PMC SIERRA Linux MaxRAID adapter support" 1886 tristate "PMC SIERRA Linux MaxRAID adapter support"
1881 depends on PCI && SCSI && NET 1887 depends on PCI && SCSI && NET
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 6153a66a8a3..2b887498be5 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -134,6 +134,7 @@ obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/
134obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o 134obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
135obj-$(CONFIG_SCSI_STEX) += stex.o 135obj-$(CONFIG_SCSI_STEX) += stex.o
136obj-$(CONFIG_SCSI_MVSAS) += mvsas/ 136obj-$(CONFIG_SCSI_MVSAS) += mvsas/
137obj-$(CONFIG_SCSI_MVUMI) += mvumi.o
137obj-$(CONFIG_PS3_ROM) += ps3rom.o 138obj-$(CONFIG_PS3_ROM) += ps3rom.o
138obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/ 139obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
139obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/ 140obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 3382475dc22..4aa76d6f11d 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -894,16 +894,17 @@ static ssize_t aac_show_serial_number(struct device *device,
894 int len = 0; 894 int len = 0;
895 895
896 if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0) 896 if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
897 len = snprintf(buf, PAGE_SIZE, "%06X\n", 897 len = snprintf(buf, 16, "%06X\n",
898 le32_to_cpu(dev->adapter_info.serial[0])); 898 le32_to_cpu(dev->adapter_info.serial[0]));
899 if (len && 899 if (len &&
900 !memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[ 900 !memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[
901 sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)-len], 901 sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)-len],
902 buf, len-1)) 902 buf, len-1))
903 len = snprintf(buf, PAGE_SIZE, "%.*s\n", 903 len = snprintf(buf, 16, "%.*s\n",
904 (int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo), 904 (int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo),
905 dev->supplement_adapter_info.MfgPcbaSerialNo); 905 dev->supplement_adapter_info.MfgPcbaSerialNo);
906 return len; 906
907 return min(len, 16);
907} 908}
908 909
909static ssize_t aac_show_max_channel(struct device *device, 910static ssize_t aac_show_max_channel(struct device *device,
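
The aacraid hunk leans on a subtlety of snprintf(): it returns the length the formatted string would have had, not how many bytes actually fit, so after lowering the limit to 16 the show routine must also clamp its own return value. A userspace demonstration of the pitfall:

    #include <stdio.h>

    int main(void)
    {
        char buf[16];
        int len;

        /* a 20-character payload against a 16-byte limit */
        len = snprintf(buf, sizeof(buf), "%06X-%06X-%06X",
                       0xABCDEF, 0x123456, 0x789ABC);

        /* snprintf reports the untruncated length... */
        printf("returned %d, buffer holds \"%s\"\n", len, buf);

        /* ...so a sysfs-style show routine must report what actually fits */
        if (len > (int)sizeof(buf))
            len = sizeof(buf);
        printf("reported length %d\n", len);
        return 0;
    }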
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index 29593275201..fdac7c2fef3 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -906,6 +906,7 @@ int asd_control_phy(struct asd_sas_phy *phy, enum phy_func func, void *arg)
906 906
907 switch (func) { 907 switch (func) {
908 case PHY_FUNC_CLEAR_ERROR_LOG: 908 case PHY_FUNC_CLEAR_ERROR_LOG:
909 case PHY_FUNC_GET_EVENTS:
909 return -ENOSYS; 910 return -ENOSYS;
910 case PHY_FUNC_SET_LINK_RATE: 911 case PHY_FUNC_SET_LINK_RATE:
911 rates = arg; 912 rates = arg;
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index b8a82f2c62c..cdb15364bc6 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -660,6 +660,7 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
660 spin_lock(&phba->ctrl.mbox_lock); 660 spin_lock(&phba->ctrl.mbox_lock);
661 ctrl = &phba->ctrl; 661 ctrl = &phba->ctrl;
662 wrb = wrb_from_mbox(&ctrl->mbox_mem); 662 wrb = wrb_from_mbox(&ctrl->mbox_mem);
663 memset(wrb, 0, sizeof(*wrb));
663 req = embedded_payload(wrb); 664 req = embedded_payload(wrb);
664 ctxt = &req->context; 665 ctxt = &req->context;
665 666
@@ -868,3 +869,22 @@ error:
868 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); 869 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
869 return status; 870 return status;
870} 871}
872
873int beiscsi_cmd_reset_function(struct beiscsi_hba *phba)
874{
875 struct be_ctrl_info *ctrl = &phba->ctrl;
876 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
877 struct be_post_sgl_pages_req *req = embedded_payload(wrb);
878 int status;
879
880 spin_lock(&ctrl->mbox_lock);
881
882 req = embedded_payload(wrb);
883 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
884 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
885 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
886 status = be_mbox_notify_wait(phba);
887
888 spin_unlock(&ctrl->mbox_lock);
889 return status;
890}
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 497eb29e5c9..8b40a5b4366 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -561,6 +561,8 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
561 struct be_dma_mem *q_mem, u32 page_offset, 561 struct be_dma_mem *q_mem, u32 page_offset,
562 u32 num_pages); 562 u32 num_pages);
563 563
564int beiscsi_cmd_reset_function(struct beiscsi_hba *phba);
565
564int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem, 566int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
565 struct be_queue_info *wrbq); 567 struct be_queue_info *wrbq);
566 568
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 3cad1060502..8b002f6db6c 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -177,9 +177,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
177{ 177{
178 struct iscsi_conn *conn = cls_conn->dd_data; 178 struct iscsi_conn *conn = cls_conn->dd_data;
179 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 179 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
180 struct Scsi_Host *shost = 180 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
181 (struct Scsi_Host *)iscsi_session_to_shost(cls_session); 181 struct beiscsi_hba *phba = iscsi_host_priv(shost);
182 struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost);
183 struct beiscsi_endpoint *beiscsi_ep; 182 struct beiscsi_endpoint *beiscsi_ep;
184 struct iscsi_endpoint *ep; 183 struct iscsi_endpoint *ep;
185 184
@@ -290,7 +289,7 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
290int beiscsi_get_host_param(struct Scsi_Host *shost, 289int beiscsi_get_host_param(struct Scsi_Host *shost,
291 enum iscsi_host_param param, char *buf) 290 enum iscsi_host_param param, char *buf)
292{ 291{
293 struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost); 292 struct beiscsi_hba *phba = iscsi_host_priv(shost);
294 int status = 0; 293 int status = 0;
295 294
296 SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param); 295 SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param);
@@ -733,3 +732,56 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
733 beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid); 732 beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
734 iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep); 733 iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
735} 734}
735
736mode_t be2iscsi_attr_is_visible(int param_type, int param)
737{
738 switch (param_type) {
739 case ISCSI_HOST_PARAM:
740 switch (param) {
741 case ISCSI_HOST_PARAM_HWADDRESS:
742 case ISCSI_HOST_PARAM_IPADDRESS:
743 case ISCSI_HOST_PARAM_INITIATOR_NAME:
744 return S_IRUGO;
745 default:
746 return 0;
747 }
748 case ISCSI_PARAM:
749 switch (param) {
750 case ISCSI_PARAM_MAX_RECV_DLENGTH:
751 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
752 case ISCSI_PARAM_HDRDGST_EN:
753 case ISCSI_PARAM_DATADGST_EN:
754 case ISCSI_PARAM_CONN_ADDRESS:
755 case ISCSI_PARAM_CONN_PORT:
756 case ISCSI_PARAM_EXP_STATSN:
757 case ISCSI_PARAM_PERSISTENT_ADDRESS:
758 case ISCSI_PARAM_PERSISTENT_PORT:
759 case ISCSI_PARAM_PING_TMO:
760 case ISCSI_PARAM_RECV_TMO:
761 case ISCSI_PARAM_INITIAL_R2T_EN:
762 case ISCSI_PARAM_MAX_R2T:
763 case ISCSI_PARAM_IMM_DATA_EN:
764 case ISCSI_PARAM_FIRST_BURST:
765 case ISCSI_PARAM_MAX_BURST:
766 case ISCSI_PARAM_PDU_INORDER_EN:
767 case ISCSI_PARAM_DATASEQ_INORDER_EN:
768 case ISCSI_PARAM_ERL:
769 case ISCSI_PARAM_TARGET_NAME:
770 case ISCSI_PARAM_TPGT:
771 case ISCSI_PARAM_USERNAME:
772 case ISCSI_PARAM_PASSWORD:
773 case ISCSI_PARAM_USERNAME_IN:
774 case ISCSI_PARAM_PASSWORD_IN:
775 case ISCSI_PARAM_FAST_ABORT:
776 case ISCSI_PARAM_ABORT_TMO:
777 case ISCSI_PARAM_LU_RESET_TMO:
778 case ISCSI_PARAM_IFACE_NAME:
779 case ISCSI_PARAM_INITIATOR_NAME:
780 return S_IRUGO;
781 default:
782 return 0;
783 }
784 }
785
786 return 0;
787}
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index ff60b7fd92d..4a1f2e393f3 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -26,6 +26,8 @@
26#define BE2_IPV4 0x1 26#define BE2_IPV4 0x1
27#define BE2_IPV6 0x10 27#define BE2_IPV6 0x10
28 28
29mode_t be2iscsi_attr_is_visible(int param_type, int param);
30
29void beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, 31void beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
30 struct beiscsi_offload_params *params); 32 struct beiscsi_offload_params *params);
31 33
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 0a9bdfa3d93..7b0a8ab7104 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -822,33 +822,47 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
822 struct hwi_controller *phwi_ctrlr; 822 struct hwi_controller *phwi_ctrlr;
823 struct hwi_context_memory *phwi_context; 823 struct hwi_context_memory *phwi_context;
824 int ret, msix_vec, i, j; 824 int ret, msix_vec, i, j;
825 char desc[32];
826 825
827 phwi_ctrlr = phba->phwi_ctrlr; 826 phwi_ctrlr = phba->phwi_ctrlr;
828 phwi_context = phwi_ctrlr->phwi_ctxt; 827 phwi_context = phwi_ctrlr->phwi_ctxt;
829 828
830 if (phba->msix_enabled) { 829 if (phba->msix_enabled) {
831 for (i = 0; i < phba->num_cpus; i++) { 830 for (i = 0; i < phba->num_cpus; i++) {
832 sprintf(desc, "beiscsi_msix_%04x", i); 831 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
832 GFP_KERNEL);
833 if (!phba->msi_name[i]) {
834 ret = -ENOMEM;
835 goto free_msix_irqs;
836 }
837
838 sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
839 phba->shost->host_no, i);
833 msix_vec = phba->msix_entries[i].vector; 840 msix_vec = phba->msix_entries[i].vector;
834 ret = request_irq(msix_vec, be_isr_msix, 0, desc, 841 ret = request_irq(msix_vec, be_isr_msix, 0,
842 phba->msi_name[i],
835 &phwi_context->be_eq[i]); 843 &phwi_context->be_eq[i]);
836 if (ret) { 844 if (ret) {
837 shost_printk(KERN_ERR, phba->shost, 845 shost_printk(KERN_ERR, phba->shost,
838 "beiscsi_init_irqs-Failed to" 846 "beiscsi_init_irqs-Failed to"
839 "register msix for i = %d\n", i); 847 "register msix for i = %d\n", i);
840 if (!i) 848 kfree(phba->msi_name[i]);
841 return ret;
842 goto free_msix_irqs; 849 goto free_msix_irqs;
843 } 850 }
844 } 851 }
852 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
853 if (!phba->msi_name[i]) {
854 ret = -ENOMEM;
855 goto free_msix_irqs;
856 }
857 sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
858 phba->shost->host_no);
845 msix_vec = phba->msix_entries[i].vector; 859 msix_vec = phba->msix_entries[i].vector;
846 ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc", 860 ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
847 &phwi_context->be_eq[i]); 861 &phwi_context->be_eq[i]);
848 if (ret) { 862 if (ret) {
849 shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-" 863 shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
850 "Failed to register beiscsi_msix_mcc\n"); 864 "Failed to register beiscsi_msix_mcc\n");
851 i++; 865 kfree(phba->msi_name[i]);
852 goto free_msix_irqs; 866 goto free_msix_irqs;
853 } 867 }
854 868
@@ -863,8 +877,11 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba)
863 } 877 }
864 return 0; 878 return 0;
865free_msix_irqs: 879free_msix_irqs:
866 for (j = i - 1; j == 0; j++) 880 for (j = i - 1; j >= 0; j--) {
881 kfree(phba->msi_name[j]);
882 msix_vec = phba->msix_entries[j].vector;
867 free_irq(msix_vec, &phwi_context->be_eq[j]); 883 free_irq(msix_vec, &phwi_context->be_eq[j]);
884 }
868 return ret; 885 return ret;
869} 886}
870 887
@@ -1106,7 +1123,12 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
1106 & SOL_STS_MASK) >> 8); 1123 & SOL_STS_MASK) >> 8);
1107 flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] 1124 flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
1108 & SOL_FLAGS_MASK) >> 24) | 0x80; 1125 & SOL_FLAGS_MASK) >> 24) | 0x80;
1126 if (!task->sc) {
1127 if (io_task->scsi_cmnd)
1128 scsi_dma_unmap(io_task->scsi_cmnd);
1109 1129
1130 return;
1131 }
1110 task->sc->result = (DID_OK << 16) | status; 1132 task->sc->result = (DID_OK << 16) | status;
1111 if (rsp != ISCSI_STATUS_CMD_COMPLETED) { 1133 if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
1112 task->sc->result = DID_ERROR << 16; 1134 task->sc->result = DID_ERROR << 16;
@@ -4027,11 +4049,11 @@ static int beiscsi_mtask(struct iscsi_task *task)
4027 TGT_DM_CMD); 4049 TGT_DM_CMD);
4028 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, 4050 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt,
4029 pwrb, 0); 4051 pwrb, 0);
4030 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); 4052 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
4031 } else { 4053 } else {
4032 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4054 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4033 INI_RD_CMD); 4055 INI_RD_CMD);
4034 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1); 4056 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
4035 } 4057 }
4036 hwi_write_buffer(pwrb, task); 4058 hwi_write_buffer(pwrb, task);
4037 break; 4059 break;
@@ -4102,9 +4124,8 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
4102 return beiscsi_iotask(task, sg, num_sg, xferlen, writedir); 4124 return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
4103} 4125}
4104 4126
4105static void beiscsi_remove(struct pci_dev *pcidev) 4127static void beiscsi_quiesce(struct beiscsi_hba *phba)
4106{ 4128{
4107 struct beiscsi_hba *phba = NULL;
4108 struct hwi_controller *phwi_ctrlr; 4129 struct hwi_controller *phwi_ctrlr;
4109 struct hwi_context_memory *phwi_context; 4130 struct hwi_context_memory *phwi_context;
4110 struct be_eq_obj *pbe_eq; 4131 struct be_eq_obj *pbe_eq;
@@ -4112,12 +4133,6 @@ static void beiscsi_remove(struct pci_dev *pcidev)
4112 u8 *real_offset = 0; 4133 u8 *real_offset = 0;
4113 u32 value = 0; 4134 u32 value = 0;
4114 4135
4115 phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
4116 if (!phba) {
4117 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
4118 return;
4119 }
4120
4121 phwi_ctrlr = phba->phwi_ctrlr; 4136 phwi_ctrlr = phba->phwi_ctrlr;
4122 phwi_context = phwi_ctrlr->phwi_ctxt; 4137 phwi_context = phwi_ctrlr->phwi_ctxt;
4123 hwi_disable_intr(phba); 4138 hwi_disable_intr(phba);
@@ -4125,6 +4140,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
4125 for (i = 0; i <= phba->num_cpus; i++) { 4140 for (i = 0; i <= phba->num_cpus; i++) {
4126 msix_vec = phba->msix_entries[i].vector; 4141 msix_vec = phba->msix_entries[i].vector;
4127 free_irq(msix_vec, &phwi_context->be_eq[i]); 4142 free_irq(msix_vec, &phwi_context->be_eq[i]);
4143 kfree(phba->msi_name[i]);
4128 } 4144 }
4129 } else 4145 } else
4130 if (phba->pcidev->irq) 4146 if (phba->pcidev->irq)
@@ -4152,10 +4168,40 @@ static void beiscsi_remove(struct pci_dev *pcidev)
4152 phba->ctrl.mbox_mem_alloced.size, 4168 phba->ctrl.mbox_mem_alloced.size,
4153 phba->ctrl.mbox_mem_alloced.va, 4169 phba->ctrl.mbox_mem_alloced.va,
4154 phba->ctrl.mbox_mem_alloced.dma); 4170 phba->ctrl.mbox_mem_alloced.dma);
4171}
4172
4173static void beiscsi_remove(struct pci_dev *pcidev)
4174{
4175
4176 struct beiscsi_hba *phba = NULL;
4177
4178 phba = pci_get_drvdata(pcidev);
4179 if (!phba) {
4180 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
4181 return;
4182 }
4183
4184 beiscsi_quiesce(phba);
4155 iscsi_boot_destroy_kset(phba->boot_kset); 4185 iscsi_boot_destroy_kset(phba->boot_kset);
4156 iscsi_host_remove(phba->shost); 4186 iscsi_host_remove(phba->shost);
4157 pci_dev_put(phba->pcidev); 4187 pci_dev_put(phba->pcidev);
4158 iscsi_host_free(phba->shost); 4188 iscsi_host_free(phba->shost);
4189 pci_disable_device(pcidev);
4190}
4191
4192static void beiscsi_shutdown(struct pci_dev *pcidev)
4193{
4194
4195 struct beiscsi_hba *phba = NULL;
4196
4197 phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
4198 if (!phba) {
4199 dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n");
4200 return;
4201 }
4202
4203 beiscsi_quiesce(phba);
4204 pci_disable_device(pcidev);
4159} 4205}
4160 4206
4161static void beiscsi_msix_enable(struct beiscsi_hba *phba) 4207static void beiscsi_msix_enable(struct beiscsi_hba *phba)
@@ -4235,7 +4281,7 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
4235 gcrashmode++; 4281 gcrashmode++;
4236 shost_printk(KERN_ERR, phba->shost, 4282 shost_printk(KERN_ERR, phba->shost,
4237 "Loading Driver in crashdump mode\n"); 4283 "Loading Driver in crashdump mode\n");
4238 ret = beiscsi_pci_soft_reset(phba); 4284 ret = beiscsi_cmd_reset_function(phba);
4239 if (ret) { 4285 if (ret) {
4240 shost_printk(KERN_ERR, phba->shost, 4286 shost_printk(KERN_ERR, phba->shost,
4241 "Reset Failed. Aborting Crashdump\n"); 4287 "Reset Failed. Aborting Crashdump\n");
@@ -4364,37 +4410,12 @@ struct iscsi_transport beiscsi_iscsi_transport = {
4364 .name = DRV_NAME, 4410 .name = DRV_NAME,
4365 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO | 4411 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
4366 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD, 4412 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
4367 .param_mask = ISCSI_MAX_RECV_DLENGTH |
4368 ISCSI_MAX_XMIT_DLENGTH |
4369 ISCSI_HDRDGST_EN |
4370 ISCSI_DATADGST_EN |
4371 ISCSI_INITIAL_R2T_EN |
4372 ISCSI_MAX_R2T |
4373 ISCSI_IMM_DATA_EN |
4374 ISCSI_FIRST_BURST |
4375 ISCSI_MAX_BURST |
4376 ISCSI_PDU_INORDER_EN |
4377 ISCSI_DATASEQ_INORDER_EN |
4378 ISCSI_ERL |
4379 ISCSI_CONN_PORT |
4380 ISCSI_CONN_ADDRESS |
4381 ISCSI_EXP_STATSN |
4382 ISCSI_PERSISTENT_PORT |
4383 ISCSI_PERSISTENT_ADDRESS |
4384 ISCSI_TARGET_NAME | ISCSI_TPGT |
4385 ISCSI_USERNAME | ISCSI_PASSWORD |
4386 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
4387 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
4388 ISCSI_LU_RESET_TMO |
4389 ISCSI_PING_TMO | ISCSI_RECV_TMO |
4390 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
4391 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
4392 ISCSI_HOST_INITIATOR_NAME,
4393 .create_session = beiscsi_session_create, 4413 .create_session = beiscsi_session_create,
4394 .destroy_session = beiscsi_session_destroy, 4414 .destroy_session = beiscsi_session_destroy,
4395 .create_conn = beiscsi_conn_create, 4415 .create_conn = beiscsi_conn_create,
4396 .bind_conn = beiscsi_conn_bind, 4416 .bind_conn = beiscsi_conn_bind,
4397 .destroy_conn = iscsi_conn_teardown, 4417 .destroy_conn = iscsi_conn_teardown,
4418 .attr_is_visible = be2iscsi_attr_is_visible,
4398 .set_param = beiscsi_set_param, 4419 .set_param = beiscsi_set_param,
4399 .get_conn_param = iscsi_conn_get_param, 4420 .get_conn_param = iscsi_conn_get_param,
4400 .get_session_param = iscsi_session_get_param, 4421 .get_session_param = iscsi_session_get_param,
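The large param_mask/host_param_mask initializers are gone because the iscsi class now queries the LLD per attribute through the new .attr_is_visible hook (be2iscsi_attr_is_visible here; its body is outside this excerpt, but the bnx2i and cxgbi versions appear in full further down). A minimal sketch of such a callback, following the same shape:

	static mode_t example_attr_is_visible(int param_type, int param)
	{
		switch (param_type) {
		case ISCSI_HOST_PARAM:
			switch (param) {
			case ISCSI_HOST_PARAM_HWADDRESS:
			case ISCSI_HOST_PARAM_INITIATOR_NAME:
				return S_IRUGO;	/* expose as read-only sysfs */
			default:
				return 0;	/* keep the attribute hidden */
			}
		case ISCSI_PARAM:
			switch (param) {
			case ISCSI_PARAM_TARGET_NAME:
			case ISCSI_PARAM_MAX_RECV_DLENGTH:
				return S_IRUGO;
			default:
				return 0;
			}
		}
		return 0;
	}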
@@ -4418,6 +4439,7 @@ static struct pci_driver beiscsi_pci_driver = {
4418 .name = DRV_NAME, 4439 .name = DRV_NAME,
4419 .probe = beiscsi_dev_probe, 4440 .probe = beiscsi_dev_probe,
4420 .remove = beiscsi_remove, 4441 .remove = beiscsi_remove,
4442 .shutdown = beiscsi_shutdown,
4421 .id_table = beiscsi_pci_id_table 4443 .id_table = beiscsi_pci_id_table
4422}; 4444};
4423 4445
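.shutdown is the standard struct pci_driver hook run at reboot, power-off, and kexec; the core invokes it for every bound device even though .remove is not called, which is exactly the window the new beiscsi_shutdown() covers. The registration is the single initializer added above; in general (hypothetical names):

	static struct pci_driver example_pci_driver = {
		.name	  = "example",
		.probe	  = example_probe,
		.remove	  = example_remove,
		.shutdown = example_shutdown,	/* quiesce DMA/IRQs before reboot */
		.id_table = example_pci_id_table,
	};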
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 5ce5170254c..b4a06d5e5f9 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -34,7 +34,7 @@
34 34
35#include "be.h" 35#include "be.h"
36#define DRV_NAME "be2iscsi" 36#define DRV_NAME "be2iscsi"
37#define BUILD_STR "2.103.298.0" 37#define BUILD_STR "4.1.239.0"
38#define BE_NAME "ServerEngines BladeEngine2" \ 38#define BE_NAME "ServerEngines BladeEngine2" \
39 "Linux iSCSI Driver version" BUILD_STR 39 "Linux iSCSI Driver version" BUILD_STR
40#define DRV_DESC BE_NAME " " "Driver" 40#define DRV_DESC BE_NAME " " "Driver"
@@ -162,6 +162,8 @@ do { \
162#define PAGES_REQUIRED(x) \ 162#define PAGES_REQUIRED(x) \
163 ((x < PAGE_SIZE) ? 1 : ((x + PAGE_SIZE - 1) / PAGE_SIZE)) 163 ((x < PAGE_SIZE) ? 1 : ((x + PAGE_SIZE - 1) / PAGE_SIZE))
164 164
165#define BEISCSI_MSI_NAME 20 /* size of msi_name string */
166
165enum be_mem_enum { 167enum be_mem_enum {
166 HWI_MEM_ADDN_CONTEXT, 168 HWI_MEM_ADDN_CONTEXT,
167 HWI_MEM_WRB, 169 HWI_MEM_WRB,
@@ -287,6 +289,7 @@ struct beiscsi_hba {
287 unsigned int num_cpus; 289 unsigned int num_cpus;
288 unsigned int nxt_cqid; 290 unsigned int nxt_cqid;
289 struct msix_entry msix_entries[MAX_CPUS + 1]; 291 struct msix_entry msix_entries[MAX_CPUS + 1];
292 char *msi_name[MAX_CPUS + 1];
290 bool msix_enabled; 293 bool msix_enabled;
291 struct be_mem_descriptor *init_mem; 294 struct be_mem_descriptor *init_mem;
292 295
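BEISCSI_MSI_NAME and the msi_name[] array give every MSI-X vector its own bounded name buffer, so the entries in /proc/interrupts are distinguishable and each string outlives its request_irq(). A sketch of the allocation these fields support (the matching be_main.c change is outside this excerpt; the handler name, dev_id argument, and vector count are assumptions):

	static int example_setup_msix(struct beiscsi_hba *phba)
	{
		int i, ret;

		/* assumed: num_cpus event queues plus one MCC vector */
		for (i = 0; i <= phba->num_cpus; i++) {
			phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
			if (!phba->msi_name[i]) {
				ret = -ENOMEM;
				goto unwind;
			}
			snprintf(phba->msi_name[i], BEISCSI_MSI_NAME,
				 "beiscsi_msix_%04x", i);
			ret = request_irq(phba->msix_entries[i].vector,
					  be_isr_msix, 0, phba->msi_name[i],
					  phba);
			if (ret) {
				kfree(phba->msi_name[i]);
				goto unwind;
			}
		}
		return 0;

	unwind:
		while (--i >= 0) {
			free_irq(phba->msix_entries[i].vector, phba);
			kfree(phba->msi_name[i]);
		}
		return ret;
	}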
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index dd335a2a797..63de1c7cd0c 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -62,7 +62,7 @@
62#include "bnx2fc_constants.h" 62#include "bnx2fc_constants.h"
63 63
64#define BNX2FC_NAME "bnx2fc" 64#define BNX2FC_NAME "bnx2fc"
65#define BNX2FC_VERSION "1.0.4" 65#define BNX2FC_VERSION "1.0.8"
66 66
67#define PFX "bnx2fc: " 67#define PFX "bnx2fc: "
68 68
@@ -224,6 +224,7 @@ struct bnx2fc_interface {
224 struct fcoe_ctlr ctlr; 224 struct fcoe_ctlr ctlr;
225 u8 vlan_enabled; 225 u8 vlan_enabled;
226 int vlan_id; 226 int vlan_id;
227 bool enabled;
227}; 228};
228 229
229#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_interface, ctlr) 230#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_interface, ctlr)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index d66dcbd0df1..fd382fe33f6 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -391,18 +391,6 @@ void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
391 BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid); 391 BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid);
392 tgt = orig_io_req->tgt; 392 tgt = orig_io_req->tgt;
393 393
394 if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
395 BNX2FC_IO_DBG(rec_req, "completed"
396 "orig_io - 0x%x\n",
397 orig_io_req->xid);
398 goto rec_compl_done;
399 }
400 if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
401 BNX2FC_IO_DBG(rec_req, "abts in prog "
402 "orig_io - 0x%x\n",
403 orig_io_req->xid);
404 goto rec_compl_done;
405 }
406 /* Handle REC timeout case */ 394 /* Handle REC timeout case */
407 if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) { 395 if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
408 BNX2FC_IO_DBG(rec_req, "timed out, abort " 396 BNX2FC_IO_DBG(rec_req, "timed out, abort "
@@ -433,6 +421,20 @@ void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
433 } 421 }
434 goto rec_compl_done; 422 goto rec_compl_done;
435 } 423 }
424
425 if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
426 BNX2FC_IO_DBG(rec_req, "completed"
427 "orig_io - 0x%x\n",
428 orig_io_req->xid);
429 goto rec_compl_done;
430 }
431 if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
432 BNX2FC_IO_DBG(rec_req, "abts in prog "
433 "orig_io - 0x%x\n",
434 orig_io_req->xid);
435 goto rec_compl_done;
436 }
437
436 mp_req = &(rec_req->mp_req); 438 mp_req = &(rec_req->mp_req);
437 fc_hdr = &(mp_req->resp_fc_hdr); 439 fc_hdr = &(mp_req->resp_fc_hdr);
438 resp_len = mp_req->resp_len; 440 resp_len = mp_req->resp_len;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 820a1840c3f..85bcc4b5596 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
22 22
23#define DRV_MODULE_NAME "bnx2fc" 23#define DRV_MODULE_NAME "bnx2fc"
24#define DRV_MODULE_VERSION BNX2FC_VERSION 24#define DRV_MODULE_VERSION BNX2FC_VERSION
25#define DRV_MODULE_RELDATE "Jun 23, 2011" 25#define DRV_MODULE_RELDATE "Oct 02, 2011"
26 26
27 27
28static char version[] __devinitdata = 28static char version[] __devinitdata =
@@ -56,6 +56,7 @@ static struct scsi_host_template bnx2fc_shost_template;
56static struct fc_function_template bnx2fc_transport_function; 56static struct fc_function_template bnx2fc_transport_function;
57static struct fc_function_template bnx2fc_vport_xport_function; 57static struct fc_function_template bnx2fc_vport_xport_function;
58static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode); 58static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode);
59static void __bnx2fc_destroy(struct bnx2fc_interface *interface);
59static int bnx2fc_destroy(struct net_device *net_device); 60static int bnx2fc_destroy(struct net_device *net_device);
60static int bnx2fc_enable(struct net_device *netdev); 61static int bnx2fc_enable(struct net_device *netdev);
61static int bnx2fc_disable(struct net_device *netdev); 62static int bnx2fc_disable(struct net_device *netdev);
@@ -64,7 +65,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb);
64 65
65static void bnx2fc_start_disc(struct bnx2fc_interface *interface); 66static void bnx2fc_start_disc(struct bnx2fc_interface *interface);
66static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev); 67static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev);
67static int bnx2fc_net_config(struct fc_lport *lp);
68static int bnx2fc_lport_config(struct fc_lport *lport); 68static int bnx2fc_lport_config(struct fc_lport *lport);
69static int bnx2fc_em_config(struct fc_lport *lport); 69static int bnx2fc_em_config(struct fc_lport *lport);
70static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba); 70static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba);
@@ -78,6 +78,7 @@ static void bnx2fc_destroy_work(struct work_struct *work);
78static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev); 78static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
79static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device 79static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
80 *phys_dev); 80 *phys_dev);
81static inline void bnx2fc_interface_put(struct bnx2fc_interface *interface);
81static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic); 82static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic);
82 83
83static int bnx2fc_fw_init(struct bnx2fc_hba *hba); 84static int bnx2fc_fw_init(struct bnx2fc_hba *hba);
@@ -98,6 +99,25 @@ static struct notifier_block bnx2fc_cpu_notifier = {
98 .notifier_call = bnx2fc_cpu_callback, 99 .notifier_call = bnx2fc_cpu_callback,
99}; 100};
100 101
102static inline struct net_device *bnx2fc_netdev(const struct fc_lport *lport)
103{
104 return ((struct bnx2fc_interface *)
105 ((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
106}
107
108/**
109 * bnx2fc_get_lesb() - Fill the FCoE Link Error Status Block
110 * @lport: the local port
111 * @fc_lesb: the link error status block
112 */
113static void bnx2fc_get_lesb(struct fc_lport *lport,
114 struct fc_els_lesb *fc_lesb)
115{
116 struct net_device *netdev = bnx2fc_netdev(lport);
117
118 __fcoe_get_lesb(lport, fc_lesb, netdev);
119}
120
101static void bnx2fc_clean_rx_queue(struct fc_lport *lp) 121static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
102{ 122{
103 struct fcoe_percpu_s *bg; 123 struct fcoe_percpu_s *bg;
@@ -545,6 +565,14 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
545 break; 565 break;
546 } 566 }
547 } 567 }
568
569 if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) {
570 /* Drop incoming ABTS */
571 put_cpu();
572 kfree_skb(skb);
573 return;
574 }
575
548 if (le32_to_cpu(fr_crc(fp)) != 576 if (le32_to_cpu(fr_crc(fp)) !=
549 ~crc32(~0, skb->data, fr_len)) { 577 ~crc32(~0, skb->data, fr_len)) {
550 if (stats->InvalidCRCCount < 5) 578 if (stats->InvalidCRCCount < 5)
@@ -727,7 +755,7 @@ void bnx2fc_get_link_state(struct bnx2fc_hba *hba)
727 clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); 755 clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
728} 756}
729 757
730static int bnx2fc_net_config(struct fc_lport *lport) 758static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
731{ 759{
732 struct bnx2fc_hba *hba; 760 struct bnx2fc_hba *hba;
733 struct bnx2fc_interface *interface; 761 struct bnx2fc_interface *interface;
@@ -753,11 +781,16 @@ static int bnx2fc_net_config(struct fc_lport *lport)
753 bnx2fc_link_speed_update(lport); 781 bnx2fc_link_speed_update(lport);
754 782
755 if (!lport->vport) { 783 if (!lport->vport) {
756 wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 1, 0); 784 if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
785 wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr,
786 1, 0);
757 BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn); 787 BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
758 fc_set_wwnn(lport, wwnn); 788 fc_set_wwnn(lport, wwnn);
759 789
760 wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 2, 0); 790 if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
791 wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr,
792 2, 0);
793
761 BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn); 794 BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
762 fc_set_wwpn(lport, wwpn); 795 fc_set_wwpn(lport, wwpn);
763 } 796 }
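bnx2fc_net_config() now takes the netdev explicitly so it can first ask the NIC for factory-programmed world-wide names via fcoe_get_wwn() (which routes to the netdev's ndo_fcoe_get_wwn op and returns non-zero when none is available), and only then falls back to deriving them from the FIP controller MAC as before. The fallback pattern in isolation:

	u64 wwnn, wwpn;

	/* prefer the NIC's own WWNs; synthesize from the MAC otherwise */
	if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
		wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 1, 0);
	if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
		wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 2, 0);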
@@ -769,8 +802,8 @@ static void bnx2fc_destroy_timer(unsigned long data)
769{ 802{
770 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data; 803 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data;
771 804
772 BNX2FC_MISC_DBG("ERROR:bnx2fc_destroy_timer - " 805 printk(KERN_ERR PFX "ERROR:bnx2fc_destroy_timer - "
773 "Destroy compl not received!!\n"); 806 "Destroy compl not received!!\n");
774 set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags); 807 set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
775 wake_up_interruptible(&hba->destroy_wait); 808 wake_up_interruptible(&hba->destroy_wait);
776} 809}
@@ -783,7 +816,7 @@ static void bnx2fc_destroy_timer(unsigned long data)
783 * @vlan_id: vlan id - associated vlan id with this event 816 * @vlan_id: vlan id - associated vlan id with this event
784 * 817 *
785 * Handles NETDEV_UP, NETDEV_DOWN, NETDEV_GOING_DOWN,NETDEV_CHANGE and 818 * Handles NETDEV_UP, NETDEV_DOWN, NETDEV_GOING_DOWN,NETDEV_CHANGE and
786 * NETDEV_CHANGE_MTU events 819 * NETDEV_CHANGE_MTU events. Handle NETDEV_UNREGISTER only for vlans.
787 */ 820 */
788static void bnx2fc_indicate_netevent(void *context, unsigned long event, 821static void bnx2fc_indicate_netevent(void *context, unsigned long event,
789 u16 vlan_id) 822 u16 vlan_id)
@@ -791,12 +824,11 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
791 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context; 824 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
792 struct fc_lport *lport; 825 struct fc_lport *lport;
793 struct fc_lport *vport; 826 struct fc_lport *vport;
794 struct bnx2fc_interface *interface; 827 struct bnx2fc_interface *interface, *tmp;
795 int wait_for_upload = 0; 828 int wait_for_upload = 0;
796 u32 link_possible = 1; 829 u32 link_possible = 1;
797 830
798 /* Ignore vlans for now */ 831 if (vlan_id != 0 && event != NETDEV_UNREGISTER)
799 if (vlan_id != 0)
800 return; 832 return;
801 833
802 switch (event) { 834 switch (event) {
@@ -820,6 +852,18 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
820 case NETDEV_CHANGE: 852 case NETDEV_CHANGE:
821 break; 853 break;
822 854
855 case NETDEV_UNREGISTER:
856 if (!vlan_id)
857 return;
858 mutex_lock(&bnx2fc_dev_lock);
859 list_for_each_entry_safe(interface, tmp, &if_list, list) {
860 if (interface->hba == hba &&
861 interface->vlan_id == (vlan_id & VLAN_VID_MASK))
862 __bnx2fc_destroy(interface);
863 }
864 mutex_unlock(&bnx2fc_dev_lock);
865 return;
866
823 default: 867 default:
824 printk(KERN_ERR PFX "Unknown netevent %ld", event); 868 printk(KERN_ERR PFX "Unknown netevent %ld", event);
825 return; 869 return;
@@ -838,8 +882,15 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
838 bnx2fc_link_speed_update(lport); 882 bnx2fc_link_speed_update(lport);
839 883
840 if (link_possible && !bnx2fc_link_ok(lport)) { 884 if (link_possible && !bnx2fc_link_ok(lport)) {
841 printk(KERN_ERR "indicate_netevent: ctlr_link_up\n"); 885 /* Reset max recv frame size to default */
842 fcoe_ctlr_link_up(&interface->ctlr); 886 fc_set_mfs(lport, BNX2FC_MFS);
887 /*
888 * ctlr link up will only be handled during
889 * enable to avoid sending discovery solicitation
890 * on a stale vlan
891 */
892 if (interface->enabled)
893 fcoe_ctlr_link_up(&interface->ctlr);
843 } else if (fcoe_ctlr_link_down(&interface->ctlr)) { 894 } else if (fcoe_ctlr_link_down(&interface->ctlr)) {
844 mutex_lock(&lport->lp_mutex); 895 mutex_lock(&lport->lp_mutex);
845 list_for_each_entry(vport, &lport->vports, list) 896 list_for_each_entry(vport, &lport->vports, list)
@@ -995,6 +1046,17 @@ static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled)
995 struct bnx2fc_interface *interface = port->priv; 1046 struct bnx2fc_interface *interface = port->priv;
996 struct net_device *netdev = interface->netdev; 1047 struct net_device *netdev = interface->netdev;
997 struct fc_lport *vn_port; 1048 struct fc_lport *vn_port;
1049 int rc;
1050 char buf[32];
1051
1052 rc = fcoe_validate_vport_create(vport);
1053 if (rc) {
1054 fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
1055 printk(KERN_ERR PFX "Failed to create vport, "
1056 "WWPN (0x%s) already exists\n",
1057 buf);
1058 return rc;
1059 }
998 1060
999 if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) { 1061 if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) {
1000 printk(KERN_ERR PFX "vn ports cannot be created on" 1062 printk(KERN_ERR PFX "vn ports cannot be created on"
@@ -1024,16 +1086,46 @@ static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled)
1024 return 0; 1086 return 0;
1025} 1087}
1026 1088
1089static void bnx2fc_free_vport(struct bnx2fc_hba *hba, struct fc_lport *lport)
1090{
1091 struct bnx2fc_lport *blport, *tmp;
1092
1093 spin_lock_bh(&hba->hba_lock);
1094 list_for_each_entry_safe(blport, tmp, &hba->vports, list) {
1095 if (blport->lport == lport) {
1096 list_del(&blport->list);
1097 kfree(blport);
1098 }
1099 }
1100 spin_unlock_bh(&hba->hba_lock);
1101}
1102
1027static int bnx2fc_vport_destroy(struct fc_vport *vport) 1103static int bnx2fc_vport_destroy(struct fc_vport *vport)
1028{ 1104{
1029 struct Scsi_Host *shost = vport_to_shost(vport); 1105 struct Scsi_Host *shost = vport_to_shost(vport);
1030 struct fc_lport *n_port = shost_priv(shost); 1106 struct fc_lport *n_port = shost_priv(shost);
1031 struct fc_lport *vn_port = vport->dd_data; 1107 struct fc_lport *vn_port = vport->dd_data;
1032 struct fcoe_port *port = lport_priv(vn_port); 1108 struct fcoe_port *port = lport_priv(vn_port);
1109 struct bnx2fc_interface *interface = port->priv;
1110 struct fc_lport *v_port;
1111 bool found = false;
1033 1112
1034 mutex_lock(&n_port->lp_mutex); 1113 mutex_lock(&n_port->lp_mutex);
1114 list_for_each_entry(v_port, &n_port->vports, list)
1115 if (v_port->vport == vport) {
1116 found = true;
1117 break;
1118 }
1119
1120 if (!found) {
1121 mutex_unlock(&n_port->lp_mutex);
1122 return -ENOENT;
1123 }
1035 list_del(&vn_port->list); 1124 list_del(&vn_port->list);
1036 mutex_unlock(&n_port->lp_mutex); 1125 mutex_unlock(&n_port->lp_mutex);
1126 bnx2fc_free_vport(interface->hba, port->lport);
1127 bnx2fc_port_shutdown(port->lport);
1128 bnx2fc_interface_put(interface);
1037 queue_work(bnx2fc_wq, &port->destroy_work); 1129 queue_work(bnx2fc_wq, &port->destroy_work);
1038 return 0; 1130 return 0;
1039} 1131}
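bnx2fc_vport_destroy() now refuses to unlink a vport that is no longer on the N_Port's list (returning -ENOENT for a stale or repeated destroy), and the per-hba bnx2fc_lport bookkeeping moves into the new bnx2fc_free_vport() helper. Note the _safe list walk: the safe variant caches the next node in tmp, so the current entry can be unlinked and freed mid-iteration:

	/* inside bnx2fc_free_vport(): delete-while-iterating idiom */
	spin_lock_bh(&hba->hba_lock);
	list_for_each_entry_safe(blport, tmp, &hba->vports, list) {
		if (blport->lport == lport) {
			list_del(&blport->list);	/* tmp already points past us */
			kfree(blport);
		}
	}
	spin_unlock_bh(&hba->hba_lock);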
@@ -1054,7 +1146,7 @@ static int bnx2fc_vport_disable(struct fc_vport *vport, bool disable)
1054} 1146}
1055 1147
1056 1148
1057static int bnx2fc_netdev_setup(struct bnx2fc_interface *interface) 1149static int bnx2fc_interface_setup(struct bnx2fc_interface *interface)
1058{ 1150{
1059 struct net_device *netdev = interface->netdev; 1151 struct net_device *netdev = interface->netdev;
1060 struct net_device *physdev = interface->hba->phys_dev; 1152 struct net_device *physdev = interface->hba->phys_dev;
@@ -1252,7 +1344,7 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
1252 interface->ctlr.get_src_addr = bnx2fc_get_src_mac; 1344 interface->ctlr.get_src_addr = bnx2fc_get_src_mac;
1253 set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags); 1345 set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags);
1254 1346
1255 rc = bnx2fc_netdev_setup(interface); 1347 rc = bnx2fc_interface_setup(interface);
1256 if (!rc) 1348 if (!rc)
1257 return interface; 1349 return interface;
1258 1350
@@ -1318,7 +1410,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
1318 fc_set_wwpn(lport, vport->port_name); 1410 fc_set_wwpn(lport, vport->port_name);
1319 } 1411 }
1320 /* Configure netdev and networking properties of the lport */ 1412 /* Configure netdev and networking properties of the lport */
1321 rc = bnx2fc_net_config(lport); 1413 rc = bnx2fc_net_config(lport, interface->netdev);
1322 if (rc) { 1414 if (rc) {
1323 printk(KERN_ERR PFX "Error on bnx2fc_net_config\n"); 1415 printk(KERN_ERR PFX "Error on bnx2fc_net_config\n");
1324 goto lp_config_err; 1416 goto lp_config_err;
@@ -1372,7 +1464,7 @@ free_blport:
1372 return NULL; 1464 return NULL;
1373} 1465}
1374 1466
1375static void bnx2fc_netdev_cleanup(struct bnx2fc_interface *interface) 1467static void bnx2fc_net_cleanup(struct bnx2fc_interface *interface)
1376{ 1468{
1377 /* Dont listen for Ethernet packets anymore */ 1469 /* Dont listen for Ethernet packets anymore */
1378 __dev_remove_pack(&interface->fcoe_packet_type); 1470 __dev_remove_pack(&interface->fcoe_packet_type);
@@ -1380,10 +1472,11 @@ static void bnx2fc_netdev_cleanup(struct bnx2fc_interface *interface)
1380 synchronize_net(); 1472 synchronize_net();
1381} 1473}
1382 1474
1383static void bnx2fc_if_destroy(struct fc_lport *lport, struct bnx2fc_hba *hba) 1475static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface)
1384{ 1476{
1477 struct fc_lport *lport = interface->ctlr.lp;
1385 struct fcoe_port *port = lport_priv(lport); 1478 struct fcoe_port *port = lport_priv(lport);
1386 struct bnx2fc_lport *blport, *tmp; 1479 struct bnx2fc_hba *hba = interface->hba;
1387 1480
1388 /* Stop the transmit retry timer */ 1481 /* Stop the transmit retry timer */
1389 del_timer_sync(&port->timer); 1482 del_timer_sync(&port->timer);
@@ -1391,6 +1484,14 @@ static void bnx2fc_if_destroy(struct fc_lport *lport, struct bnx2fc_hba *hba)
1391 /* Free existing transmit skbs */ 1484 /* Free existing transmit skbs */
1392 fcoe_clean_pending_queue(lport); 1485 fcoe_clean_pending_queue(lport);
1393 1486
1487 bnx2fc_net_cleanup(interface);
1488
1489 bnx2fc_free_vport(hba, lport);
1490}
1491
1492static void bnx2fc_if_destroy(struct fc_lport *lport)
1493{
1494
1394 /* Free queued packets for the receive thread */ 1495 /* Free queued packets for the receive thread */
1395 bnx2fc_clean_rx_queue(lport); 1496 bnx2fc_clean_rx_queue(lport);
1396 1497
@@ -1407,19 +1508,22 @@ static void bnx2fc_if_destroy(struct fc_lport *lport, struct bnx2fc_hba *hba)
1407 /* Free memory used by statistical counters */ 1508 /* Free memory used by statistical counters */
1408 fc_lport_free_stats(lport); 1509 fc_lport_free_stats(lport);
1409 1510
1410 spin_lock_bh(&hba->hba_lock);
1411 list_for_each_entry_safe(blport, tmp, &hba->vports, list) {
1412 if (blport->lport == lport) {
1413 list_del(&blport->list);
1414 kfree(blport);
1415 }
1416 }
1417 spin_unlock_bh(&hba->hba_lock);
1418
1419 /* Release Scsi_Host */ 1511 /* Release Scsi_Host */
1420 scsi_host_put(lport->host); 1512 scsi_host_put(lport->host);
1421} 1513}
1422 1514
1515static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
1516{
1517 struct fc_lport *lport = interface->ctlr.lp;
1518 struct fcoe_port *port = lport_priv(lport);
1519
1520 bnx2fc_interface_cleanup(interface);
1521 bnx2fc_stop(interface);
1522 list_del(&interface->list);
1523 bnx2fc_interface_put(interface);
1524 queue_work(bnx2fc_wq, &port->destroy_work);
1525}
1526
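__bnx2fc_destroy() consolidates teardown that was previously open-coded in bnx2fc_destroy() and bnx2fc_ulp_exit() (see the hunks below) and is also what the NETDEV_UNREGISTER handler above uses; the expensive bnx2fc_if_destroy() step is deferred to destroy_work so no caller runs it under rtnl or bnx2fc_dev_lock. Annotated restatement of the new helper:

	static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
	{
		struct fc_lport *lport = interface->ctlr.lp;
		struct fcoe_port *port = lport_priv(lport);

		bnx2fc_interface_cleanup(interface);	/* timers, pending skbs, vport bookkeeping */
		bnx2fc_stop(interface);			/* take the lport down                     */
		list_del(&interface->list);		/* unlink from the global if_list          */
		bnx2fc_interface_put(interface);	/* drop the creation reference             */
		queue_work(bnx2fc_wq, &port->destroy_work); /* bnx2fc_if_destroy() runs later      */
	}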
1423/** 1527/**
1424 * bnx2fc_destroy - Destroy a bnx2fc FCoE interface 1528 * bnx2fc_destroy - Destroy a bnx2fc FCoE interface
1425 * 1529 *
@@ -1433,8 +1537,6 @@ static void bnx2fc_if_destroy(struct fc_lport *lport, struct bnx2fc_hba *hba)
1433static int bnx2fc_destroy(struct net_device *netdev) 1537static int bnx2fc_destroy(struct net_device *netdev)
1434{ 1538{
1435 struct bnx2fc_interface *interface = NULL; 1539 struct bnx2fc_interface *interface = NULL;
1436 struct bnx2fc_hba *hba;
1437 struct fc_lport *lport;
1438 int rc = 0; 1540 int rc = 0;
1439 1541
1440 rtnl_lock(); 1542 rtnl_lock();
@@ -1447,15 +1549,9 @@ static int bnx2fc_destroy(struct net_device *netdev)
1447 goto netdev_err; 1549 goto netdev_err;
1448 } 1550 }
1449 1551
1450 hba = interface->hba;
1451 1552
1452 bnx2fc_netdev_cleanup(interface);
1453 lport = interface->ctlr.lp;
1454 bnx2fc_stop(interface);
1455 list_del(&interface->list);
1456 destroy_workqueue(interface->timer_work_queue); 1553 destroy_workqueue(interface->timer_work_queue);
1457 bnx2fc_interface_put(interface); 1554 __bnx2fc_destroy(interface);
1458 bnx2fc_if_destroy(lport, hba);
1459 1555
1460netdev_err: 1556netdev_err:
1461 mutex_unlock(&bnx2fc_dev_lock); 1557 mutex_unlock(&bnx2fc_dev_lock);
@@ -1467,22 +1563,13 @@ static void bnx2fc_destroy_work(struct work_struct *work)
1467{ 1563{
1468 struct fcoe_port *port; 1564 struct fcoe_port *port;
1469 struct fc_lport *lport; 1565 struct fc_lport *lport;
1470 struct bnx2fc_interface *interface;
1471 struct bnx2fc_hba *hba;
1472 1566
1473 port = container_of(work, struct fcoe_port, destroy_work); 1567 port = container_of(work, struct fcoe_port, destroy_work);
1474 lport = port->lport; 1568 lport = port->lport;
1475 interface = port->priv;
1476 hba = interface->hba;
1477 1569
1478 BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n"); 1570 BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
1479 1571
1480 bnx2fc_port_shutdown(lport); 1572 bnx2fc_if_destroy(lport);
1481 rtnl_lock();
1482 mutex_lock(&bnx2fc_dev_lock);
1483 bnx2fc_if_destroy(lport, hba);
1484 mutex_unlock(&bnx2fc_dev_lock);
1485 rtnl_unlock();
1486} 1573}
1487 1574
1488static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba) 1575static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba)
@@ -1661,6 +1748,7 @@ static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba)
1661 wait_event_interruptible(hba->destroy_wait, 1748 wait_event_interruptible(hba->destroy_wait,
1662 test_bit(BNX2FC_FLAG_DESTROY_CMPL, 1749 test_bit(BNX2FC_FLAG_DESTROY_CMPL,
1663 &hba->flags)); 1750 &hba->flags));
1751 clear_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
1664 /* This should never happen */ 1752 /* This should never happen */
1665 if (signal_pending(current)) 1753 if (signal_pending(current))
1666 flush_signals(current); 1754 flush_signals(current);
@@ -1723,7 +1811,7 @@ static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
1723 lport = interface->ctlr.lp; 1811 lport = interface->ctlr.lp;
1724 BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n"); 1812 BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
1725 1813
1726 if (!bnx2fc_link_ok(lport)) { 1814 if (!bnx2fc_link_ok(lport) && interface->enabled) {
1727 BNX2FC_HBA_DBG(lport, "ctlr_link_up\n"); 1815 BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
1728 fcoe_ctlr_link_up(&interface->ctlr); 1816 fcoe_ctlr_link_up(&interface->ctlr);
1729 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; 1817 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
@@ -1737,6 +1825,11 @@ static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
1737 if (++wait_cnt > 12) 1825 if (++wait_cnt > 12)
1738 break; 1826 break;
1739 } 1827 }
1828
1829 /* Reset max receive frame size to default */
1830 if (fc_set_mfs(lport, BNX2FC_MFS))
1831 return;
1832
1740 fc_lport_init(lport); 1833 fc_lport_init(lport);
1741 fc_fabric_login(lport); 1834 fc_fabric_login(lport);
1742} 1835}
@@ -1800,6 +1893,7 @@ static int bnx2fc_disable(struct net_device *netdev)
1800 rc = -ENODEV; 1893 rc = -ENODEV;
1801 printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n"); 1894 printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n");
1802 } else { 1895 } else {
1896 interface->enabled = false;
1803 fcoe_ctlr_link_down(&interface->ctlr); 1897 fcoe_ctlr_link_down(&interface->ctlr);
1804 fcoe_clean_pending_queue(interface->ctlr.lp); 1898 fcoe_clean_pending_queue(interface->ctlr.lp);
1805 } 1899 }
@@ -1822,8 +1916,10 @@ static int bnx2fc_enable(struct net_device *netdev)
1822 if (!interface || !interface->ctlr.lp) { 1916 if (!interface || !interface->ctlr.lp) {
1823 rc = -ENODEV; 1917 rc = -ENODEV;
1824 printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n"); 1918 printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n");
1825 } else if (!bnx2fc_link_ok(interface->ctlr.lp)) 1919 } else if (!bnx2fc_link_ok(interface->ctlr.lp)) {
1826 fcoe_ctlr_link_up(&interface->ctlr); 1920 fcoe_ctlr_link_up(&interface->ctlr);
1921 interface->enabled = true;
1922 }
1827 1923
1828 mutex_unlock(&bnx2fc_dev_lock); 1924 mutex_unlock(&bnx2fc_dev_lock);
1829 rtnl_unlock(); 1925 rtnl_unlock();
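Together with the new bool enabled member in struct bnx2fc_interface (bnx2fc.h hunk above), these two functions and the netevent/start_disc changes form one small state machine: enabled is set by create and enable, cleared by disable, and fcoe_ctlr_link_up() is only ever issued while it is set, so FIP discovery solicitations cannot go out on a stale vlan. The gate as it now reads in the link-event path:

	if (link_possible && !bnx2fc_link_ok(lport)) {
		fc_set_mfs(lport, BNX2FC_MFS);	/* back to the default MFS      */
		if (interface->enabled)		/* link-up deferred to enable() */
			fcoe_ctlr_link_up(&interface->ctlr);
	}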
@@ -1923,7 +2019,6 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
1923 if (!lport) { 2019 if (!lport) {
1924 printk(KERN_ERR PFX "Failed to create interface (%s)\n", 2020 printk(KERN_ERR PFX "Failed to create interface (%s)\n",
1925 netdev->name); 2021 netdev->name);
1926 bnx2fc_netdev_cleanup(interface);
1927 rc = -EINVAL; 2022 rc = -EINVAL;
1928 goto if_create_err; 2023 goto if_create_err;
1929 } 2024 }
@@ -1936,8 +2031,15 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
1936 /* Make this master N_port */ 2031 /* Make this master N_port */
1937 interface->ctlr.lp = lport; 2032 interface->ctlr.lp = lport;
1938 2033
2034 if (!bnx2fc_link_ok(lport)) {
2035 fcoe_ctlr_link_up(&interface->ctlr);
2036 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
2037 set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
2038 }
2039
1939 BNX2FC_HBA_DBG(lport, "create: START DISC\n"); 2040 BNX2FC_HBA_DBG(lport, "create: START DISC\n");
1940 bnx2fc_start_disc(interface); 2041 bnx2fc_start_disc(interface);
2042 interface->enabled = true;
1941 /* 2043 /*
1942 * Release from kref_init in bnx2fc_interface_setup, on success 2044 * Release from kref_init in bnx2fc_interface_setup, on success
1943 * lport should be holding a reference taken in bnx2fc_if_create 2045 * lport should be holding a reference taken in bnx2fc_if_create
@@ -1951,6 +2053,7 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
1951if_create_err: 2053if_create_err:
1952 destroy_workqueue(interface->timer_work_queue); 2054 destroy_workqueue(interface->timer_work_queue);
1953ifput_err: 2055ifput_err:
2056 bnx2fc_net_cleanup(interface);
1954 bnx2fc_interface_put(interface); 2057 bnx2fc_interface_put(interface);
1955netdev_err: 2058netdev_err:
1956 module_put(THIS_MODULE); 2059 module_put(THIS_MODULE);
@@ -2017,7 +2120,6 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
2017{ 2120{
2018 struct bnx2fc_hba *hba; 2121 struct bnx2fc_hba *hba;
2019 struct bnx2fc_interface *interface, *tmp; 2122 struct bnx2fc_interface *interface, *tmp;
2020 struct fc_lport *lport;
2021 2123
2022 BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n"); 2124 BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n");
2023 2125
@@ -2039,18 +2141,10 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
2039 list_del_init(&hba->list); 2141 list_del_init(&hba->list);
2040 adapter_count--; 2142 adapter_count--;
2041 2143
2042 list_for_each_entry_safe(interface, tmp, &if_list, list) { 2144 list_for_each_entry_safe(interface, tmp, &if_list, list)
2043 /* destroy not called yet, move to quiesced list */ 2145 /* destroy not called yet, move to quiesced list */
2044 if (interface->hba == hba) { 2146 if (interface->hba == hba)
2045 bnx2fc_netdev_cleanup(interface); 2147 __bnx2fc_destroy(interface);
2046 bnx2fc_stop(interface);
2047
2048 list_del(&interface->list);
2049 lport = interface->ctlr.lp;
2050 bnx2fc_interface_put(interface);
2051 bnx2fc_if_destroy(lport, hba);
2052 }
2053 }
2054 mutex_unlock(&bnx2fc_dev_lock); 2148 mutex_unlock(&bnx2fc_dev_lock);
2055 2149
2056 bnx2fc_ulp_stop(hba); 2150 bnx2fc_ulp_stop(hba);
@@ -2119,7 +2213,7 @@ static void bnx2fc_percpu_thread_create(unsigned int cpu)
2119 (void *)p, 2213 (void *)p,
2120 "bnx2fc_thread/%d", cpu); 2214 "bnx2fc_thread/%d", cpu);
2121 /* bind thread to the cpu */ 2215 /* bind thread to the cpu */
2122 if (likely(!IS_ERR(p->iothread))) { 2216 if (likely(!IS_ERR(thread))) {
2123 kthread_bind(thread, cpu); 2217 kthread_bind(thread, cpu);
2124 p->iothread = thread; 2218 p->iothread = thread;
2125 wake_up_process(thread); 2219 wake_up_process(thread);
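This hunk is a straight bug fix: the success check tested p->iothread, which is stale (or NULL) at this point, rather than the task_struct just returned, so an ERR_PTR from kthread_create() could reach kthread_bind(). The corrected create-bind-wake sequence (thread function name assumed, as the call opens above this excerpt):

	struct task_struct *thread;

	thread = kthread_create(bnx2fc_percpu_io_thread, (void *)p,
				"bnx2fc_thread/%d", cpu);
	if (likely(!IS_ERR(thread))) {		/* validate the value just returned */
		kthread_bind(thread, cpu);	/* pin to this CPU                  */
		p->iothread = thread;
		wake_up_process(thread);
	}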
@@ -2131,7 +2225,6 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
2131 struct bnx2fc_percpu_s *p; 2225 struct bnx2fc_percpu_s *p;
2132 struct task_struct *thread; 2226 struct task_struct *thread;
2133 struct bnx2fc_work *work, *tmp; 2227 struct bnx2fc_work *work, *tmp;
2134 LIST_HEAD(work_list);
2135 2228
2136 BNX2FC_MISC_DBG("destroying io thread for CPU %d\n", cpu); 2229 BNX2FC_MISC_DBG("destroying io thread for CPU %d\n", cpu);
2137 2230
@@ -2143,7 +2236,7 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
2143 2236
2144 2237
2145 /* Free all work in the list */ 2238 /* Free all work in the list */
2146 list_for_each_entry_safe(work, tmp, &work_list, list) { 2239 list_for_each_entry_safe(work, tmp, &p->work_list, list) {
2147 list_del_init(&work->list); 2240 list_del_init(&work->list);
2148 bnx2fc_process_cq_compl(work->tgt, work->wqe); 2241 bnx2fc_process_cq_compl(work->tgt, work->wqe);
2149 kfree(work); 2242 kfree(work);
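A companion fix in the destroy path: the old code declared a local LIST_HEAD(work_list) and then iterated it, i.e. it always walked an empty list and leaked every queued item. The loop now drains the per-CPU p->work_list itself, completing and freeing each pending CQE before the io thread goes away:

	/* drain the real queue, not an empty local list */
	list_for_each_entry_safe(work, tmp, &p->work_list, list) {
		list_del_init(&work->list);
		bnx2fc_process_cq_compl(work->tgt, work->wqe);
		kfree(work);
	}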
@@ -2376,6 +2469,7 @@ static struct fc_function_template bnx2fc_transport_function = {
2376 .vport_create = bnx2fc_vport_create, 2469 .vport_create = bnx2fc_vport_create,
2377 .vport_delete = bnx2fc_vport_destroy, 2470 .vport_delete = bnx2fc_vport_destroy,
2378 .vport_disable = bnx2fc_vport_disable, 2471 .vport_disable = bnx2fc_vport_disable,
2472 .bsg_request = fc_lport_bsg_request,
2379}; 2473};
2380 2474
2381static struct fc_function_template bnx2fc_vport_xport_function = { 2475static struct fc_function_template bnx2fc_vport_xport_function = {
@@ -2409,6 +2503,7 @@ static struct fc_function_template bnx2fc_vport_xport_function = {
2409 .get_fc_host_stats = fc_get_host_stats, 2503 .get_fc_host_stats = fc_get_host_stats,
2410 .issue_fc_host_lip = bnx2fc_fcoe_reset, 2504 .issue_fc_host_lip = bnx2fc_fcoe_reset,
2411 .terminate_rport_io = fc_rport_terminate_io, 2505 .terminate_rport_io = fc_rport_terminate_io,
2506 .bsg_request = fc_lport_bsg_request,
2412}; 2507};
2413 2508
2414/** 2509/**
@@ -2438,6 +2533,7 @@ static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
2438 .elsct_send = bnx2fc_elsct_send, 2533 .elsct_send = bnx2fc_elsct_send,
2439 .fcp_abort_io = bnx2fc_abort_io, 2534 .fcp_abort_io = bnx2fc_abort_io,
2440 .fcp_cleanup = bnx2fc_cleanup, 2535 .fcp_cleanup = bnx2fc_cleanup,
2536 .get_lesb = bnx2fc_get_lesb,
2441 .rport_event_callback = bnx2fc_rport_event_handler, 2537 .rport_event_callback = bnx2fc_rport_event_handler,
2442}; 2538};
2443 2539
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 72cfb14acd3..1923a25cb6a 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1009,6 +1009,7 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
1009 u32 cq_cons; 1009 u32 cq_cons;
1010 struct fcoe_cqe *cqe; 1010 struct fcoe_cqe *cqe;
1011 u32 num_free_sqes = 0; 1011 u32 num_free_sqes = 0;
1012 u32 num_cqes = 0;
1012 u16 wqe; 1013 u16 wqe;
1013 1014
1014 /* 1015 /*
@@ -1058,10 +1059,11 @@ unlock:
1058 wake_up_process(fps->iothread); 1059 wake_up_process(fps->iothread);
1059 else 1060 else
1060 bnx2fc_process_cq_compl(tgt, wqe); 1061 bnx2fc_process_cq_compl(tgt, wqe);
1062 num_free_sqes++;
1061 } 1063 }
1062 cqe++; 1064 cqe++;
1063 tgt->cq_cons_idx++; 1065 tgt->cq_cons_idx++;
1064 num_free_sqes++; 1066 num_cqes++;
1065 1067
1066 if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) { 1068 if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
1067 tgt->cq_cons_idx = 0; 1069 tgt->cq_cons_idx = 0;
@@ -1070,8 +1072,10 @@ unlock:
1070 1 - tgt->cq_curr_toggle_bit; 1072 1 - tgt->cq_curr_toggle_bit;
1071 } 1073 }
1072 } 1074 }
1073 if (num_free_sqes) { 1075 if (num_cqes) {
1074 bnx2fc_arm_cq(tgt); 1076 /* Arm CQ only if doorbell is mapped */
1077 if (tgt->ctx_base)
1078 bnx2fc_arm_cq(tgt);
1075 atomic_add(num_free_sqes, &tgt->free_sqes); 1079 atomic_add(num_free_sqes, &tgt->free_sqes);
1076 } 1080 }
1077 spin_unlock_bh(&tgt->cq_lock); 1081 spin_unlock_bh(&tgt->cq_lock);
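The completion loop now keeps two counters where it used to conflate them: num_cqes counts every CQE consumed and decides whether the CQ must be re-armed, while num_free_sqes counts only the completions that actually return an SQ slot and is what gets credited back to free_sqes. The doorbell is also left untouched when ctx_base is unmapped, closing a race with session teardown (compare the bnx2fc_free_session_resc() hunk near the end, which NULLs ctx_base under this same cq_lock):

	if (num_cqes) {
		/* arm the CQ only while the doorbell window is mapped */
		if (tgt->ctx_base)
			bnx2fc_arm_cq(tgt);
		atomic_add(num_free_sqes, &tgt->free_sqes);
	}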
@@ -1739,11 +1743,13 @@ void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1739 /* Init state to NORMAL */ 1743 /* Init state to NORMAL */
1740 task->txwr_rxrd.const_ctx.init_flags |= task_type << 1744 task->txwr_rxrd.const_ctx.init_flags |= task_type <<
1741 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; 1745 FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
1742 if (dev_type == TYPE_TAPE) 1746 if (dev_type == TYPE_TAPE) {
1743 task->txwr_rxrd.const_ctx.init_flags |= 1747 task->txwr_rxrd.const_ctx.init_flags |=
1744 FCOE_TASK_DEV_TYPE_TAPE << 1748 FCOE_TASK_DEV_TYPE_TAPE <<
1745 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; 1749 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
1746 else 1750 io_req->rec_retry = 0;
1751 io_req->rec_retry = 0;
1752 } else
1747 task->txwr_rxrd.const_ctx.init_flags |= 1753 task->txwr_rxrd.const_ctx.init_flags |=
1748 FCOE_TASK_DEV_TYPE_DISK << 1754 FCOE_TASK_DEV_TYPE_DISK <<
1749 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; 1755 FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 6cc3789075b..0c64d184d73 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -17,7 +17,7 @@
17static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, 17static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
18 int bd_index); 18 int bd_index);
19static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req); 19static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
20static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req); 20static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
21static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req); 21static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
22static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req); 22static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
23static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, 23static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
@@ -1251,7 +1251,6 @@ void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req,
1251 seq_clnp_req->xid); 1251 seq_clnp_req->xid);
1252 goto free_cb_arg; 1252 goto free_cb_arg;
1253 } 1253 }
1254 kref_get(&orig_io_req->refcount);
1255 1254
1256 spin_unlock_bh(&tgt->tgt_lock); 1255 spin_unlock_bh(&tgt->tgt_lock);
1257 rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl); 1256 rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
@@ -1569,6 +1568,8 @@ static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
1569 1568
1570static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req) 1569static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
1571{ 1570{
1571 struct bnx2fc_interface *interface = io_req->port->priv;
1572 struct bnx2fc_hba *hba = interface->hba;
1572 struct scsi_cmnd *sc = io_req->sc_cmd; 1573 struct scsi_cmnd *sc = io_req->sc_cmd;
1573 struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl; 1574 struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
1574 struct scatterlist *sg; 1575 struct scatterlist *sg;
@@ -1580,7 +1581,8 @@ static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
1580 u64 addr; 1581 u64 addr;
1581 int i; 1582 int i;
1582 1583
1583 sg_count = scsi_dma_map(sc); 1584 sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc),
1585 scsi_sg_count(sc), sc->sc_data_direction);
1584 scsi_for_each_sg(sc, sg, sg_count, i) { 1586 scsi_for_each_sg(sc, sg, sg_count, i) {
1585 sg_len = sg_dma_len(sg); 1587 sg_len = sg_dma_len(sg);
1586 addr = sg_dma_address(sg); 1588 addr = sg_dma_address(sg);
@@ -1605,20 +1607,24 @@ static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
1605 return bd_count; 1607 return bd_count;
1606} 1608}
1607 1609
1608static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req) 1610static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
1609{ 1611{
1610 struct scsi_cmnd *sc = io_req->sc_cmd; 1612 struct scsi_cmnd *sc = io_req->sc_cmd;
1611 struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl; 1613 struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
1612 int bd_count; 1614 int bd_count;
1613 1615
1614 if (scsi_sg_count(sc)) 1616 if (scsi_sg_count(sc)) {
1615 bd_count = bnx2fc_map_sg(io_req); 1617 bd_count = bnx2fc_map_sg(io_req);
1616 else { 1618 if (bd_count == 0)
1619 return -ENOMEM;
1620 } else {
1617 bd_count = 0; 1621 bd_count = 0;
1618 bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0; 1622 bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0;
1619 bd[0].buf_len = bd[0].flags = 0; 1623 bd[0].buf_len = bd[0].flags = 0;
1620 } 1624 }
1621 io_req->bd_tbl->bd_valid = bd_count; 1625 io_req->bd_tbl->bd_valid = bd_count;
1626
1627 return 0;
1622} 1628}
1623 1629
1624static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req) 1630static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
@@ -1790,12 +1796,6 @@ int bnx2fc_queuecommand(struct Scsi_Host *host,
1790 tgt = (struct bnx2fc_rport *)&rp[1]; 1796 tgt = (struct bnx2fc_rport *)&rp[1];
1791 1797
1792 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { 1798 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
1793 if (test_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags)) {
1794 sc_cmd->result = DID_NO_CONNECT << 16;
1795 sc_cmd->scsi_done(sc_cmd);
1796 return 0;
1797
1798 }
1799 /* 1799 /*
1800 * Session is not offloaded yet. Let SCSI-ml retry 1800 * Session is not offloaded yet. Let SCSI-ml retry
1801 * the command. 1801 * the command.
@@ -1946,7 +1946,13 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
1946 xid = io_req->xid; 1946 xid = io_req->xid;
1947 1947
1948 /* Build buffer descriptor list for firmware from sg list */ 1948 /* Build buffer descriptor list for firmware from sg list */
1949 bnx2fc_build_bd_list_from_sg(io_req); 1949 if (bnx2fc_build_bd_list_from_sg(io_req)) {
1950 printk(KERN_ERR PFX "BD list creation failed\n");
1951 spin_lock_bh(&tgt->tgt_lock);
1952 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1953 spin_unlock_bh(&tgt->tgt_lock);
1954 return -EAGAIN;
1955 }
1950 1956
1951 task_idx = xid / BNX2FC_TASKS_PER_PAGE; 1957 task_idx = xid / BNX2FC_TASKS_PER_PAGE;
1952 index = xid % BNX2FC_TASKS_PER_PAGE; 1958 index = xid % BNX2FC_TASKS_PER_PAGE;
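bnx2fc_build_bd_list_from_sg() now returns an error instead of silently posting an I/O with zero buffer descriptors when the DMA mapping fails, and the mapping itself switches from scsi_dma_map() to a bare dma_map_sg() against hba->pcidev (presumably because the Scsi_Host here is not parented to the PCI function, so scsi_dma_map() would pick the wrong device). The propagation, condensed:

	/* bnx2fc_map_sg(): dma_map_sg() returns 0 on failure           */
	sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc),
			      scsi_sg_count(sc), sc->sc_data_direction);

	/* bnx2fc_build_bd_list_from_sg(): turn that into -ENOMEM       */
	if (bd_count == 0)
		return -ENOMEM;

	/* bnx2fc_post_io_req(): drop the ref and let SCSI-ml retry     */
	spin_lock_bh(&tgt->tgt_lock);
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	spin_unlock_bh(&tgt->tgt_lock);
	return -EAGAIN;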
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index d5311b577cc..c1800b53127 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -76,7 +76,7 @@ static void bnx2fc_offload_session(struct fcoe_port *port,
76 if (rval) { 76 if (rval) {
77 printk(KERN_ERR PFX "Failed to allocate conn id for " 77 printk(KERN_ERR PFX "Failed to allocate conn id for "
78 "port_id (%6x)\n", rport->port_id); 78 "port_id (%6x)\n", rport->port_id);
79 goto ofld_err; 79 goto tgt_init_err;
80 } 80 }
81 81
82 /* Allocate session resources */ 82 /* Allocate session resources */
@@ -134,18 +134,17 @@ retry_ofld:
134 /* upload will take care of cleaning up sess resc */ 134 /* upload will take care of cleaning up sess resc */
135 lport->tt.rport_logoff(rdata); 135 lport->tt.rport_logoff(rdata);
136 } 136 }
137 /* Arm CQ */
138 bnx2fc_arm_cq(tgt);
139 return; 137 return;
140 138
141ofld_err: 139ofld_err:
142 /* couldn't offload the session. log off from this rport */ 140 /* couldn't offload the session. log off from this rport */
143 BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n"); 141 BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n");
144 lport->tt.rport_logoff(rdata);
145 /* Free session resources */ 142 /* Free session resources */
146 bnx2fc_free_session_resc(hba, tgt); 143 bnx2fc_free_session_resc(hba, tgt);
144tgt_init_err:
147 if (tgt->fcoe_conn_id != -1) 145 if (tgt->fcoe_conn_id != -1)
148 bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); 146 bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
147 lport->tt.rport_logoff(rdata);
149} 148}
150 149
151void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt) 150void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
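The offload error handling is rebuilt as the standard layered-goto unwind: a conn-id allocation failure jumps to the new tgt_init_err label and so no longer calls bnx2fc_free_session_resc() for resources that were never allocated, rport_logoff() moves to the very end, and (in the bnx2fc_alloc_session_resc() hunk below) the allocator stops freeing on its own failure path, which previously produced a double free. The shape, with hypothetical helper names:

	rval = alloc_conn_id(hba, tgt);
	if (rval)
		goto tgt_init_err;		/* nothing to undo yet   */

	rval = alloc_session_resc(hba, tgt);
	if (rval)
		goto ofld_err;
	return;					/* success               */

	ofld_err:
		free_session_resc(hba, tgt);	/* reverse-order unwind  */
	tgt_init_err:
		if (tgt->fcoe_conn_id != -1)
			free_conn_id(hba, tgt->fcoe_conn_id);
		logoff_rport(rdata);		/* always last           */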
@@ -624,7 +623,6 @@ static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id)
624 /* called with hba mutex held */ 623 /* called with hba mutex held */
625 spin_lock_bh(&hba->hba_lock); 624 spin_lock_bh(&hba->hba_lock);
626 hba->tgt_ofld_list[conn_id] = NULL; 625 hba->tgt_ofld_list[conn_id] = NULL;
627 hba->next_conn_id = conn_id;
628 spin_unlock_bh(&hba->hba_lock); 626 spin_unlock_bh(&hba->hba_lock);
629} 627}
630 628
@@ -791,8 +789,6 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
791 return 0; 789 return 0;
792 790
793mem_alloc_failure: 791mem_alloc_failure:
794 bnx2fc_free_session_resc(hba, tgt);
795 bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
796 return -ENOMEM; 792 return -ENOMEM;
797} 793}
798 794
@@ -807,14 +803,14 @@ mem_alloc_failure:
807static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, 803static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
808 struct bnx2fc_rport *tgt) 804 struct bnx2fc_rport *tgt)
809{ 805{
810 BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n"); 806 void __iomem *ctx_base_ptr;
811 807
812 if (tgt->ctx_base) { 808 BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n");
813 iounmap(tgt->ctx_base);
814 tgt->ctx_base = NULL;
815 }
816 809
817 spin_lock_bh(&tgt->cq_lock); 810 spin_lock_bh(&tgt->cq_lock);
811 ctx_base_ptr = tgt->ctx_base;
812 tgt->ctx_base = NULL;
813
818 /* Free LCQ */ 814 /* Free LCQ */
819 if (tgt->lcq) { 815 if (tgt->lcq) {
820 dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, 816 dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
@@ -868,4 +864,7 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
868 tgt->sq = NULL; 864 tgt->sq = NULL;
869 } 865 }
870 spin_unlock_bh(&tgt->cq_lock); 866 spin_unlock_bh(&tgt->cq_lock);
867
868 if (ctx_base_ptr)
869 iounmap(ctx_base_ptr);
871} 870}
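iounmap() may sleep, so it cannot run under the cq_lock spinlock (taken with spin_lock_bh). The fix snapshots ctx_base and NULLs it while the lock is held, which simultaneously hides the doorbell from the re-arm check added in bnx2fc_process_new_cqes() above, and only unmaps after the unlock:

	void __iomem *ctx_base_ptr;

	spin_lock_bh(&tgt->cq_lock);
	ctx_base_ptr = tgt->ctx_base;	/* snapshot...                       */
	tgt->ctx_base = NULL;		/* ...and hide it from bnx2fc_arm_cq */
	/* free LCQ/RQ/CQ/SQ DMA rings here */
	spin_unlock_bh(&tgt->cq_lock);

	if (ctx_base_ptr)
		iounmap(ctx_base_ptr);	/* safe only outside the lock */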
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index cffd4d75df5..d1e69719097 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -2177,6 +2177,59 @@ static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
2177 return 0; 2177 return 0;
2178} 2178}
2179 2179
2180static mode_t bnx2i_attr_is_visible(int param_type, int param)
2181{
2182 switch (param_type) {
2183 case ISCSI_HOST_PARAM:
2184 switch (param) {
2185 case ISCSI_HOST_PARAM_NETDEV_NAME:
2186 case ISCSI_HOST_PARAM_HWADDRESS:
2187 case ISCSI_HOST_PARAM_IPADDRESS:
2188 return S_IRUGO;
2189 default:
2190 return 0;
2191 }
2192 case ISCSI_PARAM:
2193 switch (param) {
2194 case ISCSI_PARAM_MAX_RECV_DLENGTH:
2195 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
2196 case ISCSI_PARAM_HDRDGST_EN:
2197 case ISCSI_PARAM_DATADGST_EN:
2198 case ISCSI_PARAM_CONN_ADDRESS:
2199 case ISCSI_PARAM_CONN_PORT:
2200 case ISCSI_PARAM_EXP_STATSN:
2201 case ISCSI_PARAM_PERSISTENT_ADDRESS:
2202 case ISCSI_PARAM_PERSISTENT_PORT:
2203 case ISCSI_PARAM_PING_TMO:
2204 case ISCSI_PARAM_RECV_TMO:
2205 case ISCSI_PARAM_INITIAL_R2T_EN:
2206 case ISCSI_PARAM_MAX_R2T:
2207 case ISCSI_PARAM_IMM_DATA_EN:
2208 case ISCSI_PARAM_FIRST_BURST:
2209 case ISCSI_PARAM_MAX_BURST:
2210 case ISCSI_PARAM_PDU_INORDER_EN:
2211 case ISCSI_PARAM_DATASEQ_INORDER_EN:
2212 case ISCSI_PARAM_ERL:
2213 case ISCSI_PARAM_TARGET_NAME:
2214 case ISCSI_PARAM_TPGT:
2215 case ISCSI_PARAM_USERNAME:
2216 case ISCSI_PARAM_PASSWORD:
2217 case ISCSI_PARAM_USERNAME_IN:
2218 case ISCSI_PARAM_PASSWORD_IN:
2219 case ISCSI_PARAM_FAST_ABORT:
2220 case ISCSI_PARAM_ABORT_TMO:
2221 case ISCSI_PARAM_LU_RESET_TMO:
2222 case ISCSI_PARAM_TGT_RESET_TMO:
2223 case ISCSI_PARAM_IFACE_NAME:
2224 case ISCSI_PARAM_INITIATOR_NAME:
2225 return S_IRUGO;
2226 default:
2227 return 0;
2228 }
2229 }
2230
2231 return 0;
2232}
2180 2233
2181/* 2234/*
2182 * 'Scsi_Host_Template' structure and 'iscsi_transport' structure template 2235 * 'Scsi_Host_Template' structure and 'iscsi_transport' structure template
@@ -2207,37 +2260,12 @@ struct iscsi_transport bnx2i_iscsi_transport = {
2207 CAP_MULTI_R2T | CAP_DATADGST | 2260 CAP_MULTI_R2T | CAP_DATADGST |
2208 CAP_DATA_PATH_OFFLOAD | 2261 CAP_DATA_PATH_OFFLOAD |
2209 CAP_TEXT_NEGO, 2262 CAP_TEXT_NEGO,
2210 .param_mask = ISCSI_MAX_RECV_DLENGTH |
2211 ISCSI_MAX_XMIT_DLENGTH |
2212 ISCSI_HDRDGST_EN |
2213 ISCSI_DATADGST_EN |
2214 ISCSI_INITIAL_R2T_EN |
2215 ISCSI_MAX_R2T |
2216 ISCSI_IMM_DATA_EN |
2217 ISCSI_FIRST_BURST |
2218 ISCSI_MAX_BURST |
2219 ISCSI_PDU_INORDER_EN |
2220 ISCSI_DATASEQ_INORDER_EN |
2221 ISCSI_ERL |
2222 ISCSI_CONN_PORT |
2223 ISCSI_CONN_ADDRESS |
2224 ISCSI_EXP_STATSN |
2225 ISCSI_PERSISTENT_PORT |
2226 ISCSI_PERSISTENT_ADDRESS |
2227 ISCSI_TARGET_NAME | ISCSI_TPGT |
2228 ISCSI_USERNAME | ISCSI_PASSWORD |
2229 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
2230 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
2231 ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
2232 ISCSI_PING_TMO | ISCSI_RECV_TMO |
2233 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
2234 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
2235 ISCSI_HOST_NETDEV_NAME,
2236 .create_session = bnx2i_session_create, 2263 .create_session = bnx2i_session_create,
2237 .destroy_session = bnx2i_session_destroy, 2264 .destroy_session = bnx2i_session_destroy,
2238 .create_conn = bnx2i_conn_create, 2265 .create_conn = bnx2i_conn_create,
2239 .bind_conn = bnx2i_conn_bind, 2266 .bind_conn = bnx2i_conn_bind,
2240 .destroy_conn = bnx2i_conn_destroy, 2267 .destroy_conn = bnx2i_conn_destroy,
2268 .attr_is_visible = bnx2i_attr_is_visible,
2241 .set_param = iscsi_set_param, 2269 .set_param = iscsi_set_param,
2242 .get_conn_param = iscsi_conn_get_param, 2270 .get_conn_param = iscsi_conn_get_param,
2243 .get_session_param = iscsi_session_get_param, 2271 .get_session_param = iscsi_session_get_param,
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 1242c7c04a0..000294a9df8 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -105,25 +105,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
105 .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST 105 .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
106 | CAP_DATADGST | CAP_DIGEST_OFFLOAD | 106 | CAP_DATADGST | CAP_DIGEST_OFFLOAD |
107 CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO, 107 CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
108 .param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH | 108 .attr_is_visible = cxgbi_attr_is_visible,
109 ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN |
110 ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T |
111 ISCSI_IMM_DATA_EN | ISCSI_FIRST_BURST |
112 ISCSI_MAX_BURST | ISCSI_PDU_INORDER_EN |
113 ISCSI_DATASEQ_INORDER_EN | ISCSI_ERL |
114 ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS |
115 ISCSI_EXP_STATSN | ISCSI_PERSISTENT_PORT |
116 ISCSI_PERSISTENT_ADDRESS |
117 ISCSI_TARGET_NAME | ISCSI_TPGT |
118 ISCSI_USERNAME | ISCSI_PASSWORD |
119 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
120 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
121 ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
122 ISCSI_PING_TMO | ISCSI_RECV_TMO |
123 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
124 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
125 ISCSI_HOST_INITIATOR_NAME |
126 ISCSI_HOST_NETDEV_NAME,
127 .get_host_param = cxgbi_get_host_param, 109 .get_host_param = cxgbi_get_host_param,
128 .set_host_param = cxgbi_set_host_param, 110 .set_host_param = cxgbi_set_host_param,
129 /* session management */ 111 /* session management */
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 31c79bde697..ac7a9b1e3e2 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -106,25 +106,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
106 .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST | 106 .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST |
107 CAP_DATADGST | CAP_DIGEST_OFFLOAD | 107 CAP_DATADGST | CAP_DIGEST_OFFLOAD |
108 CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO, 108 CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO,
109 .param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH | 109 .attr_is_visible = cxgbi_attr_is_visible,
110 ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN |
111 ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T |
112 ISCSI_IMM_DATA_EN | ISCSI_FIRST_BURST |
113 ISCSI_MAX_BURST | ISCSI_PDU_INORDER_EN |
114 ISCSI_DATASEQ_INORDER_EN | ISCSI_ERL |
115 ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS |
116 ISCSI_EXP_STATSN | ISCSI_PERSISTENT_PORT |
117 ISCSI_PERSISTENT_ADDRESS |
118 ISCSI_TARGET_NAME | ISCSI_TPGT |
119 ISCSI_USERNAME | ISCSI_PASSWORD |
120 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
121 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
122 ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
123 ISCSI_PING_TMO | ISCSI_RECV_TMO |
124 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
125 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
126 ISCSI_HOST_INITIATOR_NAME |
127 ISCSI_HOST_NETDEV_NAME,
128 .get_host_param = cxgbi_get_host_param, 110 .get_host_param = cxgbi_get_host_param,
129 .set_host_param = cxgbi_set_host_param, 111 .set_host_param = cxgbi_set_host_param,
130 /* session management */ 112 /* session management */
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 1c1329bc77c..c363a4b260f 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -2568,6 +2568,62 @@ void cxgbi_iscsi_cleanup(struct iscsi_transport *itp,
2568} 2568}
2569EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup); 2569EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup);
2570 2570
2571mode_t cxgbi_attr_is_visible(int param_type, int param)
2572{
2573 switch (param_type) {
2574 case ISCSI_HOST_PARAM:
2575 switch (param) {
2576 case ISCSI_HOST_PARAM_NETDEV_NAME:
2577 case ISCSI_HOST_PARAM_HWADDRESS:
2578 case ISCSI_HOST_PARAM_IPADDRESS:
2579 case ISCSI_HOST_PARAM_INITIATOR_NAME:
2580 return S_IRUGO;
2581 default:
2582 return 0;
2583 }
2584 case ISCSI_PARAM:
2585 switch (param) {
2586 case ISCSI_PARAM_MAX_RECV_DLENGTH:
2587 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
2588 case ISCSI_PARAM_HDRDGST_EN:
2589 case ISCSI_PARAM_DATADGST_EN:
2590 case ISCSI_PARAM_CONN_ADDRESS:
2591 case ISCSI_PARAM_CONN_PORT:
2592 case ISCSI_PARAM_EXP_STATSN:
2593 case ISCSI_PARAM_PERSISTENT_ADDRESS:
2594 case ISCSI_PARAM_PERSISTENT_PORT:
2595 case ISCSI_PARAM_PING_TMO:
2596 case ISCSI_PARAM_RECV_TMO:
2597 case ISCSI_PARAM_INITIAL_R2T_EN:
2598 case ISCSI_PARAM_MAX_R2T:
2599 case ISCSI_PARAM_IMM_DATA_EN:
2600 case ISCSI_PARAM_FIRST_BURST:
2601 case ISCSI_PARAM_MAX_BURST:
2602 case ISCSI_PARAM_PDU_INORDER_EN:
2603 case ISCSI_PARAM_DATASEQ_INORDER_EN:
2604 case ISCSI_PARAM_ERL:
2605 case ISCSI_PARAM_TARGET_NAME:
2606 case ISCSI_PARAM_TPGT:
2607 case ISCSI_PARAM_USERNAME:
2608 case ISCSI_PARAM_PASSWORD:
2609 case ISCSI_PARAM_USERNAME_IN:
2610 case ISCSI_PARAM_PASSWORD_IN:
2611 case ISCSI_PARAM_FAST_ABORT:
2612 case ISCSI_PARAM_ABORT_TMO:
2613 case ISCSI_PARAM_LU_RESET_TMO:
2614 case ISCSI_PARAM_TGT_RESET_TMO:
2615 case ISCSI_PARAM_IFACE_NAME:
2616 case ISCSI_PARAM_INITIATOR_NAME:
2617 return S_IRUGO;
2618 default:
2619 return 0;
2620 }
2621 }
2622
2623 return 0;
2624}
2625EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible);
2626
2571static int __init libcxgbi_init_module(void) 2627static int __init libcxgbi_init_module(void)
2572{ 2628{
2573 sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1; 2629 sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1;
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 3a25b1187c1..20c88279c7a 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -709,6 +709,7 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *);
709 709
710void cxgbi_cleanup_task(struct iscsi_task *task); 710void cxgbi_cleanup_task(struct iscsi_task *task);
711 711
712mode_t cxgbi_attr_is_visible(int param_type, int param);
712void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *); 713void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *);
713int cxgbi_set_conn_param(struct iscsi_cls_conn *, 714int cxgbi_set_conn_param(struct iscsi_cls_conn *,
714 enum iscsi_param, char *, int); 715 enum iscsi_param, char *, int);
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index 0119b814779..7c05fd9dccf 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -60,6 +60,46 @@ static struct scsi_device_handler *get_device_handler_by_idx(int idx)
60} 60}
61 61
62/* 62/*
63 * device_handler_match_function - Match a device handler to a device
64 * @sdev - SCSI device to be tested
65 *
66 * Tests @sdev against the match function of all registered device_handler.
67 * Returns the found device handler or NULL if not found.
68 */
69static struct scsi_device_handler *
70device_handler_match_function(struct scsi_device *sdev)
71{
72 struct scsi_device_handler *tmp_dh, *found_dh = NULL;
73
74 spin_lock(&list_lock);
75 list_for_each_entry(tmp_dh, &scsi_dh_list, list) {
76 if (tmp_dh->match && tmp_dh->match(sdev)) {
77 found_dh = tmp_dh;
78 break;
79 }
80 }
81 spin_unlock(&list_lock);
82 return found_dh;
83}
84
85/*
86 * device_handler_match_devlist - Match a device handler to a device
87 * @sdev - SCSI device to be tested
88 *
89 * Tests @sdev against all device_handler registered in the devlist.
90 * Returns the found device handler or NULL if not found.
91 */
92static struct scsi_device_handler *
93device_handler_match_devlist(struct scsi_device *sdev)
94{
95 int idx;
96
97 idx = scsi_get_device_flags_keyed(sdev, sdev->vendor, sdev->model,
98 SCSI_DEVINFO_DH);
99 return get_device_handler_by_idx(idx);
100}
101
102/*
63 * device_handler_match - Attach a device handler to a device 103 * device_handler_match - Attach a device handler to a device
64 * @scsi_dh - The device handler to match against or NULL 104 * @scsi_dh - The device handler to match against or NULL
65 * @sdev - SCSI device to be tested against @scsi_dh 105 * @sdev - SCSI device to be tested against @scsi_dh
@@ -72,12 +112,11 @@ static struct scsi_device_handler *
72device_handler_match(struct scsi_device_handler *scsi_dh, 112device_handler_match(struct scsi_device_handler *scsi_dh,
73 struct scsi_device *sdev) 113 struct scsi_device *sdev)
74{ 114{
75 struct scsi_device_handler *found_dh = NULL; 115 struct scsi_device_handler *found_dh;
76 int idx;
77 116
78 idx = scsi_get_device_flags_keyed(sdev, sdev->vendor, sdev->model, 117 found_dh = device_handler_match_function(sdev);
79 SCSI_DEVINFO_DH); 118 if (!found_dh)
80 found_dh = get_device_handler_by_idx(idx); 119 found_dh = device_handler_match_devlist(sdev);
81 120
82 if (scsi_dh && found_dh != scsi_dh) 121 if (scsi_dh && found_dh != scsi_dh)
83 found_dh = NULL; 122 found_dh = NULL;
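scsi_dh gains a second way to claim devices: before consulting the static vendor/model devlist, it now walks the registered handlers and calls their optional match() hook, and (per the NULL-devlist guards added below) a handler may register with no devlist at all and rely on match() alone. A hypothetical handler using the new hook, with the callback signature inferred from the call site above:

	static bool example_dh_match(struct scsi_device *sdev)
	{
		/* decide from live device properties, not vendor/model */
		return sdev->vendor && !strncmp(sdev->vendor, "ACME    ", 8);
	}

	static struct scsi_device_handler example_dh = {
		.name	 = "example_dh",
		.devlist = NULL,		/* match() makes this optional */
		.match	 = example_dh_match,
		/* .attach/.detach/.activate etc. omitted in this sketch */
	};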
@@ -151,6 +190,10 @@ store_dh_state(struct device *dev, struct device_attribute *attr,
151 struct scsi_device_handler *scsi_dh; 190 struct scsi_device_handler *scsi_dh;
152 int err = -EINVAL; 191 int err = -EINVAL;
153 192
193 if (sdev->sdev_state == SDEV_CANCEL ||
194 sdev->sdev_state == SDEV_DEL)
195 return -ENODEV;
196
154 if (!sdev->scsi_dh_data) { 197 if (!sdev->scsi_dh_data) {
155 /* 198 /*
156 * Attach to a device handler 199 * Attach to a device handler
@@ -327,7 +370,7 @@ int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
327 list_add(&scsi_dh->list, &scsi_dh_list); 370 list_add(&scsi_dh->list, &scsi_dh_list);
328 spin_unlock(&list_lock); 371 spin_unlock(&list_lock);
329 372
330 for (i = 0; scsi_dh->devlist[i].vendor; i++) { 373 for (i = 0; scsi_dh->devlist && scsi_dh->devlist[i].vendor; i++) {
331 scsi_dev_info_list_add_keyed(0, 374 scsi_dev_info_list_add_keyed(0,
332 scsi_dh->devlist[i].vendor, 375 scsi_dh->devlist[i].vendor,
333 scsi_dh->devlist[i].model, 376 scsi_dh->devlist[i].model,
@@ -360,7 +403,7 @@ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
360 bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, 403 bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
361 scsi_dh_notifier_remove); 404 scsi_dh_notifier_remove);
362 405
363 for (i = 0; scsi_dh->devlist[i].vendor; i++) { 406 for (i = 0; scsi_dh->devlist && scsi_dh->devlist[i].vendor; i++) {
364 scsi_dev_info_list_del_keyed(scsi_dh->devlist[i].vendor, 407 scsi_dev_info_list_del_keyed(scsi_dh->devlist[i].vendor,
365 scsi_dh->devlist[i].model, 408 scsi_dh->devlist[i].model,
366 SCSI_DEVINFO_DH); 409 SCSI_DEVINFO_DH);
@@ -468,7 +511,7 @@ int scsi_dh_handler_exist(const char *name)
468EXPORT_SYMBOL_GPL(scsi_dh_handler_exist); 511EXPORT_SYMBOL_GPL(scsi_dh_handler_exist);
469 512
470/* 513/*
471 * scsi_dh_handler_attach - Attach device handler 514 * scsi_dh_attach - Attach device handler
472 * @sdev - sdev the handler should be attached to 515 * @sdev - sdev the handler should be attached to
473 * @name - name of the handler to attach 516 * @name - name of the handler to attach
474 */ 517 */
@@ -498,7 +541,7 @@ int scsi_dh_attach(struct request_queue *q, const char *name)
498EXPORT_SYMBOL_GPL(scsi_dh_attach); 541EXPORT_SYMBOL_GPL(scsi_dh_attach);
499 542
500/* 543/*
501 * scsi_dh_handler_detach - Detach device handler 544 * scsi_dh_detach - Detach device handler
502 * @sdev - sdev the handler should be detached from 545 * @sdev - sdev the handler should be detached from
503 * 546 *
504 * This function will detach the device handler only 547 * This function will detach the device handler only
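
The net effect of the scsi_dh changes above is a two-stage handler lookup: a handler that supplies a ->match() callback is probed first, and only on a miss does the core fall back to the static vendor/model devlist (which may now be NULL, hence the added guards in the register/unregister loops). A minimal userspace sketch of that lookup order, using hypothetical stand-in types rather than the kernel's:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for scsi_device / scsi_device_handler. */
struct device { const char *vendor, *model; bool tpgs; };
struct devid { const char *vendor, *model; };
struct handler {
	const char *name;
	bool (*match)(const struct device *);	/* preferred: capability probe */
	const struct devid *devlist;		/* fallback: static id table */
};

static bool alua_match(const struct device *dev) { return dev->tpgs; }

static const struct devid rdac_ids[] = { {"IBM", "3526"}, {"SGI", "TP9700"}, {0, 0} };

static struct handler handlers[] = {
	{ "alua", alua_match, 0 },
	{ "rdac", 0, rdac_ids },
};

/* Two-stage lookup mirroring device_handler_match(): ask each handler's
 * match() first, then scan the static devlists on a miss. */
static const struct handler *find_handler(const struct device *dev)
{
	for (size_t i = 0; i < sizeof(handlers)/sizeof(handlers[0]); i++)
		if (handlers[i].match && handlers[i].match(dev))
			return &handlers[i];
	for (size_t i = 0; i < sizeof(handlers)/sizeof(handlers[0]); i++)
		for (const struct devid *id = handlers[i].devlist; id && id->vendor; id++)
			if (!strcmp(id->vendor, dev->vendor) && !strcmp(id->model, dev->model))
				return &handlers[i];
	return 0;
}

int main(void)
{
	struct device d1 = { "NETAPP", "LUN", true };
	struct device d2 = { "SGI", "TP9700", false };
	printf("%s\n", find_handler(&d1)->name);	/* alua (via match) */
	printf("%s\n", find_handler(&d2)->name);	/* rdac (via devlist) */
}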
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 6fec9fe5dc3..627f4b5e517 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -128,43 +128,6 @@ static struct request *get_alua_req(struct scsi_device *sdev,
 }
 
 /*
- * submit_std_inquiry - Issue a standard INQUIRY command
- * @sdev: sdev the command should be send to
- */
-static int submit_std_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
-{
-	struct request *rq;
-	int err = SCSI_DH_RES_TEMP_UNAVAIL;
-
-	rq = get_alua_req(sdev, h->inq, ALUA_INQUIRY_SIZE, READ);
-	if (!rq)
-		goto done;
-
-	/* Prepare the command. */
-	rq->cmd[0] = INQUIRY;
-	rq->cmd[1] = 0;
-	rq->cmd[2] = 0;
-	rq->cmd[4] = ALUA_INQUIRY_SIZE;
-	rq->cmd_len = COMMAND_SIZE(INQUIRY);
-
-	rq->sense = h->sense;
-	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
-	rq->sense_len = h->senselen = 0;
-
-	err = blk_execute_rq(rq->q, NULL, rq, 1);
-	if (err == -EIO) {
-		sdev_printk(KERN_INFO, sdev,
-			    "%s: std inquiry failed with %x\n",
-			    ALUA_DH_NAME, rq->errors);
-		h->senselen = rq->sense_len;
-		err = SCSI_DH_IO;
-	}
-	blk_put_request(rq);
-done:
-	return err;
-}
-
-/*
  * submit_vpd_inquiry - Issue an INQUIRY VPD page 0x83 command
  * @sdev: sdev the command should be sent to
  */
@@ -338,23 +301,17 @@ static unsigned submit_stpg(struct alua_dh_data *h)
 }
 
 /*
- * alua_std_inquiry - Evaluate standard INQUIRY command
+ * alua_check_tpgs - Evaluate TPGS setting
  * @sdev: device to be checked
  *
- * Just extract the TPGS setting to find out if ALUA
+ * Examine the TPGS setting of the sdev to find out if ALUA
  * is supported.
  */
-static int alua_std_inquiry(struct scsi_device *sdev, struct alua_dh_data *h)
+static int alua_check_tpgs(struct scsi_device *sdev, struct alua_dh_data *h)
 {
-	int err;
-
-	err = submit_std_inquiry(sdev, h);
-
-	if (err != SCSI_DH_OK)
-		return err;
+	int err = SCSI_DH_OK;
 
-	/* Check TPGS setting */
-	h->tpgs = (h->inq[5] >> 4) & 0x3;
+	h->tpgs = scsi_device_tpgs(sdev);
 	switch (h->tpgs) {
 	case TPGS_MODE_EXPLICIT|TPGS_MODE_IMPLICIT:
 		sdev_printk(KERN_INFO, sdev,
@@ -508,27 +465,28 @@ static int alua_check_sense(struct scsi_device *sdev,
 			 * Power On, Reset, or Bus Device Reset, just retry.
 			 */
 			return ADD_TO_MLQUEUE;
-		if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) {
+		if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06)
 			/*
 			 * ALUA state changed
 			 */
 			return ADD_TO_MLQUEUE;
-		}
-		if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) {
+		if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07)
 			/*
 			 * Implicit ALUA state transition failed
 			 */
 			return ADD_TO_MLQUEUE;
-		}
-		if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x0e) {
+		if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x03)
+			/*
+			 * Inquiry data has changed
+			 */
+			return ADD_TO_MLQUEUE;
+		if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x0e)
 			/*
 			 * REPORTED_LUNS_DATA_HAS_CHANGED is reported
 			 * when switching controllers on targets like
 			 * Intel Multi-Flex. We can just retry.
 			 */
 			return ADD_TO_MLQUEUE;
-		}
-
 		break;
 	}
 
@@ -547,9 +505,9 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
 {
 	struct scsi_sense_hdr sense_hdr;
 	int len, k, off, valid_states = 0;
-	char *ucp;
+	unsigned char *ucp;
 	unsigned err;
-	unsigned long expiry, interval = 10;
+	unsigned long expiry, interval = 1;
 
 	expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT);
  retry:
@@ -610,7 +568,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
 	case TPGS_STATE_TRANSITIONING:
 		if (time_before(jiffies, expiry)) {
 			/* State transition, retry */
-			interval *= 10;
+			interval *= 2;
 			msleep(interval);
 			goto retry;
 		}
@@ -642,7 +600,7 @@ static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
 {
 	int err;
 
-	err = alua_std_inquiry(sdev, h);
+	err = alua_check_tpgs(sdev, h);
 	if (err != SCSI_DH_OK)
 		goto out;
 
@@ -674,11 +632,9 @@ static int alua_activate(struct scsi_device *sdev,
 	struct alua_dh_data *h = get_alua_data(sdev);
 	int err = SCSI_DH_OK;
 
-	if (h->group_id != -1) {
-		err = alua_rtpg(sdev, h);
-		if (err != SCSI_DH_OK)
-			goto out;
-	}
+	err = alua_rtpg(sdev, h);
+	if (err != SCSI_DH_OK)
+		goto out;
 
 	if (h->tpgs & TPGS_MODE_EXPLICIT &&
 	    h->state != TPGS_STATE_OPTIMIZED &&
@@ -720,23 +676,10 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
 
 }
 
-static const struct scsi_dh_devlist alua_dev_list[] = {
-	{"HP", "MSA VOLUME" },
-	{"HP", "HSV101" },
-	{"HP", "HSV111" },
-	{"HP", "HSV200" },
-	{"HP", "HSV210" },
-	{"HP", "HSV300" },
-	{"IBM", "2107900" },
-	{"IBM", "2145" },
-	{"Pillar", "Axiom" },
-	{"Intel", "Multi-Flex"},
-	{"NETAPP", "LUN"},
-	{"NETAPP", "LUN C-Mode"},
-	{"AIX", "NVDISK"},
-	{"Promise", "VTrak"},
-	{NULL, NULL}
-};
+static bool alua_match(struct scsi_device *sdev)
+{
+	return (scsi_device_tpgs(sdev) != 0);
+}
 
 static int alua_bus_attach(struct scsi_device *sdev);
 static void alua_bus_detach(struct scsi_device *sdev);
@@ -744,12 +687,12 @@ static void alua_bus_detach(struct scsi_device *sdev);
 static struct scsi_device_handler alua_dh = {
 	.name = ALUA_DH_NAME,
 	.module = THIS_MODULE,
-	.devlist = alua_dev_list,
 	.attach = alua_bus_attach,
 	.detach = alua_bus_detach,
 	.prep_fn = alua_prep_fn,
 	.check_sense = alua_check_sense,
 	.activate = alua_activate,
+	.match = alua_match,
 };
 
 /*
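
Two behavioural changes above are worth calling out: alua_check_sense now also retries on "inquiry data has changed" (ASC/ASCQ 0x3f/0x03), and alua_rtpg polls a transitioning target with a gentler backoff, starting at 1 ms and doubling instead of starting at 10 and multiplying by 10, still bounded by ALUA_FAILOVER_TIMEOUT. A standalone sketch of the new retry schedule; the 60 s bound is an assumed stand-in for the kernel constant:

#include <stdio.h>

#define FAILOVER_TIMEOUT_MS 60000	/* stand-in for ALUA_FAILOVER_TIMEOUT */

int main(void)
{
	unsigned long elapsed = 0, interval = 1;

	/* after the patch: sleeps of 2, 4, 8, 16, ... ms between RTPG
	 * retries while the target reports TPGS_STATE_TRANSITIONING */
	while (elapsed < FAILOVER_TIMEOUT_MS && interval <= 1024) {
		interval *= 2;
		elapsed += interval;
		printf("sleep %4lu ms (elapsed %5lu ms)\n", interval, elapsed);
	}
	/* before the patch the schedule was 100 ms, 1 s, 10 s, ... --
	 * the very first retry already stalled the caller for 100 ms */
	return 0;
}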
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 27c9d65d54a..82d612f0c49 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -1,5 +1,5 @@
 /*
- * Engenio/LSI RDAC SCSI Device Handler
+ * LSI/Engenio/NetApp E-Series RDAC SCSI Device Handler
  *
  * Copyright (C) 2005 Mike Christie. All rights reserved.
  * Copyright (C) Chandra Seetharaman, IBM Corp. 2007
@@ -795,6 +795,7 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
 	{"IBM", "3526"},
 	{"SGI", "TP9400"},
 	{"SGI", "TP9500"},
+	{"SGI", "TP9700"},
 	{"SGI", "IS"},
 	{"STK", "OPENstorage D280"},
 	{"SUN", "CSM200_R"},
@@ -814,6 +815,7 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
 	{"SUN", "CSM100_R_FC"},
 	{"SUN", "STK6580_6780"},
 	{"SUN", "SUN_6180"},
+	{"SUN", "ArrayStorage"},
 	{NULL, NULL},
 };
 
@@ -945,7 +947,7 @@ static void __exit rdac_exit(void)
 module_init(rdac_init);
 module_exit(rdac_exit);
 
-MODULE_DESCRIPTION("Multipath LSI/Engenio RDAC driver");
+MODULE_DESCRIPTION("Multipath LSI/Engenio/NetApp E-Series RDAC driver");
 MODULE_AUTHOR("Mike Christie, Chandra Seetharaman");
 MODULE_VERSION("01.00.0000.0000");
 MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index a1c0ddd53aa..61384ee4049 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -51,7 +51,7 @@ MODULE_DESCRIPTION("FCoE");
 MODULE_LICENSE("GPL v2");
 
 /* Performance tuning parameters for fcoe */
-static unsigned int fcoe_ddp_min;
+static unsigned int fcoe_ddp_min = 4096;
 module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \
		 "Direct Data Placement (DDP).");
@@ -137,7 +137,6 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
 static int fcoe_vport_disable(struct fc_vport *, bool disable);
 static void fcoe_set_vport_symbolic_name(struct fc_vport *);
 static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
-static int fcoe_validate_vport_create(struct fc_vport *);
 
 static struct libfc_function_template fcoe_libfc_fcn_templ = {
 	.frame_send = fcoe_xmit,
@@ -280,6 +279,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
 	 * use the first one for SPMA */
 	real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ?
 		vlan_dev_real_dev(netdev) : netdev;
+	fcoe->realdev = real_dev;
 	rcu_read_lock();
 	for_each_dev_addr(real_dev, ha) {
 		if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
@@ -580,23 +580,6 @@ static int fcoe_lport_config(struct fc_lport *lport)
 }
 
 /**
- * fcoe_get_wwn() - Get the world wide name from LLD if it supports it
- * @netdev: the associated net device
- * @wwn: the output WWN
- * @type: the type of WWN (WWPN or WWNN)
- *
- * Returns: 0 for success
- */
-static int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
-{
-	const struct net_device_ops *ops = netdev->netdev_ops;
-
-	if (ops->ndo_fcoe_get_wwn)
-		return ops->ndo_fcoe_get_wwn(netdev, wwn, type);
-	return -EINVAL;
-}
-
-/**
  * fcoe_netdev_features_change - Updates the lport's offload flags based
  * on the LLD netdev's FCoE feature flags
  */
@@ -1134,8 +1117,9 @@ static void fcoe_percpu_thread_create(unsigned int cpu)
 
 	p = &per_cpu(fcoe_percpu, cpu);
 
-	thread = kthread_create(fcoe_percpu_receive_thread,
-				(void *)p, "fcoethread/%d", cpu);
+	thread = kthread_create_on_node(fcoe_percpu_receive_thread,
+					(void *)p, cpu_to_node(cpu),
+					"fcoethread/%d", cpu);
 
 	if (likely(!IS_ERR(thread))) {
 		kthread_bind(thread, cpu);
@@ -1538,7 +1522,13 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
 	skb_reset_network_header(skb);
 	skb->mac_len = elen;
 	skb->protocol = htons(ETH_P_FCOE);
-	skb->dev = fcoe->netdev;
+	if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
+	    fcoe->realdev->features & NETIF_F_HW_VLAN_TX) {
+		skb->vlan_tci = VLAN_TAG_PRESENT |
+				vlan_dev_vlan_id(fcoe->netdev);
+		skb->dev = fcoe->realdev;
+	} else
+		skb->dev = fcoe->netdev;
 
 	/* fill up mac and fcoe headers */
 	eh = eth_hdr(skb);
@@ -2446,7 +2436,7 @@ static int fcoe_vport_create(struct fc_vport *vport, bool disabled)
 
 	rc = fcoe_validate_vport_create(vport);
 	if (rc) {
-		wwn_to_str(vport->port_name, buf, sizeof(buf));
+		fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
 		printk(KERN_ERR "fcoe: Failed to create vport, "
 			"WWPN (0x%s) already exists\n",
 			buf);
@@ -2555,28 +2545,9 @@ static void fcoe_set_vport_symbolic_name(struct fc_vport *vport)
 static void fcoe_get_lesb(struct fc_lport *lport,
			  struct fc_els_lesb *fc_lesb)
 {
-	unsigned int cpu;
-	u32 lfc, vlfc, mdac;
-	struct fcoe_dev_stats *devst;
-	struct fcoe_fc_els_lesb *lesb;
-	struct rtnl_link_stats64 temp;
 	struct net_device *netdev = fcoe_netdev(lport);
 
-	lfc = 0;
-	vlfc = 0;
-	mdac = 0;
-	lesb = (struct fcoe_fc_els_lesb *)fc_lesb;
-	memset(lesb, 0, sizeof(*lesb));
-	for_each_possible_cpu(cpu) {
-		devst = per_cpu_ptr(lport->dev_stats, cpu);
-		lfc += devst->LinkFailureCount;
-		vlfc += devst->VLinkFailureCount;
-		mdac += devst->MissDiscAdvCount;
-	}
-	lesb->lesb_link_fail = htonl(lfc);
-	lesb->lesb_vlink_fail = htonl(vlfc);
-	lesb->lesb_miss_fka = htonl(mdac);
-	lesb->lesb_fcs_error = htonl(dev_get_stats(netdev, &temp)->rx_crc_errors);
+	__fcoe_get_lesb(lport, fc_lesb, netdev);
 }
 
 /**
@@ -2600,49 +2571,3 @@ static void fcoe_set_port_id(struct fc_lport *lport,
 	if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
 		fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
 }
-
-/**
- * fcoe_validate_vport_create() - Validate a vport before creating it
- * @vport: NPIV port to be created
- *
- * This routine is meant to add validation for a vport before creating it
- * via fcoe_vport_create().
- * Current validations are:
- *	- WWPN supplied is unique for given lport
- *
- *
-*/
-static int fcoe_validate_vport_create(struct fc_vport *vport)
-{
-	struct Scsi_Host *shost = vport_to_shost(vport);
-	struct fc_lport *n_port = shost_priv(shost);
-	struct fc_lport *vn_port;
-	int rc = 0;
-	char buf[32];
-
-	mutex_lock(&n_port->lp_mutex);
-
-	wwn_to_str(vport->port_name, buf, sizeof(buf));
-	/* Check if the wwpn is not same as that of the lport */
-	if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) {
-		FCOE_DBG("vport WWPN 0x%s is same as that of the "
-			"base port WWPN\n", buf);
-		rc = -EINVAL;
-		goto out;
-	}
-
-	/* Check if there is any existing vport with same wwpn */
-	list_for_each_entry(vn_port, &n_port->vports, list) {
-		if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) {
-			FCOE_DBG("vport with given WWPN 0x%s already "
-				"exists\n", buf);
-			rc = -EINVAL;
-			break;
-		}
-	}
-
-out:
-	mutex_unlock(&n_port->lp_mutex);
-
-	return rc;
-}
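
The fcoe_xmit() hunk above is the substantive change here: when the FCoE netdev is a VLAN device on a NIC that can insert VLAN tags in hardware (NETIF_F_HW_VLAN_TX), the frame is tagged via skb->vlan_tci and transmitted directly on the cached fcoe->realdev, bypassing the software VLAN path. A hypothetical model of just that egress decision; the names and the tag-present flag value are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VLAN_TAG_PRESENT 0x1000	/* illustrative flag bit */

struct netdev { bool is_vlan; bool hw_vlan_tx; uint16_t vlan_id; };

/* If the FCoE device is a VLAN and the underlying NIC offloads TX
 * tagging, set the tag on the frame and send on the real device;
 * otherwise let the VLAN device insert the tag in software. */
static void pick_egress(const struct netdev *fcoe_dev,
			const struct netdev *real_dev,
			uint16_t *vlan_tci, const struct netdev **tx_dev)
{
	if (fcoe_dev->is_vlan && real_dev->hw_vlan_tx) {
		*vlan_tci = VLAN_TAG_PRESENT | fcoe_dev->vlan_id;
		*tx_dev = real_dev;	/* skip the vlan device entirely */
	} else {
		*vlan_tci = 0;
		*tx_dev = fcoe_dev;	/* vlan device tags the frame */
	}
}

int main(void)
{
	struct netdev eth = { false, true, 0 }, vlan = { true, false, 101 };
	uint16_t tci;
	const struct netdev *out;

	pick_egress(&vlan, &eth, &tci, &out);
	printf("tci=0x%x via %s\n", tci, out == &eth ? "real dev" : "vlan dev");
}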
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index c4a93993c0c..6c6884bcf84 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -80,6 +80,7 @@ do { \
 struct fcoe_interface {
 	struct list_head list;
 	struct net_device *netdev;
+	struct net_device *realdev;
 	struct packet_type fcoe_packet_type;
 	struct packet_type fip_packet_type;
 	struct fcoe_ctlr ctlr;
@@ -99,14 +100,4 @@ static inline struct net_device *fcoe_netdev(const struct fc_lport *lport)
 			((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
 }
 
-static inline void wwn_to_str(u64 wwn, char *buf, int len)
-{
-	u8 wwpn[8];
-
-	u64_to_wwn(wwn, wwpn);
-	snprintf(buf, len, "%02x%02x%02x%02x%02x%02x%02x%02x",
-		 wwpn[0], wwpn[1], wwpn[2], wwpn[3],
-		 wwpn[4], wwpn[5], wwpn[6], wwpn[7]);
-}
-
 #endif /* _FCOE_H_ */
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index dac8e39a518..bd97b2273f2 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -83,6 +83,107 @@ static struct notifier_block libfcoe_notifier = {
 	.notifier_call = libfcoe_device_notification,
 };
 
+void __fcoe_get_lesb(struct fc_lport *lport,
+		     struct fc_els_lesb *fc_lesb,
+		     struct net_device *netdev)
+{
+	unsigned int cpu;
+	u32 lfc, vlfc, mdac;
+	struct fcoe_dev_stats *devst;
+	struct fcoe_fc_els_lesb *lesb;
+	struct rtnl_link_stats64 temp;
+
+	lfc = 0;
+	vlfc = 0;
+	mdac = 0;
+	lesb = (struct fcoe_fc_els_lesb *)fc_lesb;
+	memset(lesb, 0, sizeof(*lesb));
+	for_each_possible_cpu(cpu) {
+		devst = per_cpu_ptr(lport->dev_stats, cpu);
+		lfc += devst->LinkFailureCount;
+		vlfc += devst->VLinkFailureCount;
+		mdac += devst->MissDiscAdvCount;
+	}
+	lesb->lesb_link_fail = htonl(lfc);
+	lesb->lesb_vlink_fail = htonl(vlfc);
+	lesb->lesb_miss_fka = htonl(mdac);
+	lesb->lesb_fcs_error =
+			htonl(dev_get_stats(netdev, &temp)->rx_crc_errors);
+}
+EXPORT_SYMBOL_GPL(__fcoe_get_lesb);
+
+void fcoe_wwn_to_str(u64 wwn, char *buf, int len)
+{
+	u8 wwpn[8];
+
+	u64_to_wwn(wwn, wwpn);
+	snprintf(buf, len, "%02x%02x%02x%02x%02x%02x%02x%02x",
+		 wwpn[0], wwpn[1], wwpn[2], wwpn[3],
+		 wwpn[4], wwpn[5], wwpn[6], wwpn[7]);
+}
+EXPORT_SYMBOL_GPL(fcoe_wwn_to_str);
+
+/**
+ * fcoe_validate_vport_create() - Validate a vport before creating it
+ * @vport: NPIV port to be created
+ *
+ * This routine is meant to add validation for a vport before creating it
+ * via fcoe_vport_create().
+ * Current validations are:
+ *	- WWPN supplied is unique for given lport
+ */
+int fcoe_validate_vport_create(struct fc_vport *vport)
+{
+	struct Scsi_Host *shost = vport_to_shost(vport);
+	struct fc_lport *n_port = shost_priv(shost);
+	struct fc_lport *vn_port;
+	int rc = 0;
+	char buf[32];
+
+	mutex_lock(&n_port->lp_mutex);
+
+	fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf));
+	/* Check if the wwpn is not same as that of the lport */
+	if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) {
+		LIBFCOE_TRANSPORT_DBG("vport WWPN 0x%s is same as that of the "
+				      "base port WWPN\n", buf);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* Check if there is any existing vport with same wwpn */
+	list_for_each_entry(vn_port, &n_port->vports, list) {
+		if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) {
+			LIBFCOE_TRANSPORT_DBG("vport with given WWPN 0x%s "
+					      "already exists\n", buf);
+			rc = -EINVAL;
+			break;
+		}
+	}
+out:
+	mutex_unlock(&n_port->lp_mutex);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(fcoe_validate_vport_create);
+
+/**
+ * fcoe_get_wwn() - Get the world wide name from LLD if it supports it
+ * @netdev: the associated net device
+ * @wwn: the output WWN
+ * @type: the type of WWN (WWPN or WWNN)
+ *
+ * Returns: 0 for success
+ */
+int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
+{
+	const struct net_device_ops *ops = netdev->netdev_ops;
+
+	if (ops->ndo_fcoe_get_wwn)
+		return ops->ndo_fcoe_get_wwn(netdev, wwn, type);
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(fcoe_get_wwn);
+
 /**
  * fcoe_fc_crc() - Calculates the CRC for a given frame
  * @fp: The frame to be checksumed
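
fcoe_wwn_to_str() is a straight move of the old fcoe.h helper into libfcoe: it splits a 64-bit WWN into eight bytes, most significant first, and prints them as 16 hex digits. A userspace equivalent, assuming the same byte order as u64_to_wwn():

#include <stdint.h>
#include <stdio.h>

/* Print a 64-bit WWN as 16 hex digits, most significant byte first. */
static void wwn_to_str(uint64_t wwn, char *buf, int len)
{
	uint8_t b[8];

	for (int i = 0; i < 8; i++)
		b[i] = (uint8_t)(wwn >> (56 - 8 * i));
	snprintf(buf, len, "%02x%02x%02x%02x%02x%02x%02x%02x",
		 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
}

int main(void)
{
	char buf[32];

	wwn_to_str(0x50060b0000c26204ULL, buf, sizeof(buf));
	printf("0x%s\n", buf);	/* 0x50060b0000c26204 */
}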
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index b200b736b00..9825ecf3495 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -3438,10 +3438,8 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
 	} else {
 		use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
 		if (use_doorbell) {
-			dev_warn(&pdev->dev, "Controller claims that "
-				"'Bit 2 doorbell reset' is "
-				"supported, but not 'bit 5 doorbell reset'. "
-				"Firmware update is recommended.\n");
+			dev_warn(&pdev->dev, "Soft reset not supported. "
+				"Firmware update is required.\n");
 			rc = -ENOTSUPP; /* try soft reset */
 			goto unmap_cfgtable;
 		}
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 8d636301e32..73e24b48dce 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2901,7 +2901,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 
-	if (ioa_cfg->sdt_state != GET_DUMP) {
+	if (ioa_cfg->sdt_state != READ_DUMP) {
 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 		return;
 	}
@@ -3097,7 +3097,7 @@ static void ipr_worker_thread(struct work_struct *work)
 	ENTER;
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 
-	if (ioa_cfg->sdt_state == GET_DUMP) {
+	if (ioa_cfg->sdt_state == READ_DUMP) {
 		dump = ioa_cfg->dump;
 		if (!dump) {
 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -3109,7 +3109,7 @@ static void ipr_worker_thread(struct work_struct *work)
 		kref_put(&dump->kref, ipr_release_dump);
 
 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
+		if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 		return;
@@ -3751,14 +3751,6 @@ static ssize_t ipr_store_update_fw(struct device *dev,
 
 	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
 
-	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
-	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
-	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
-		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
-		release_firmware(fw_entry);
-		return -EINVAL;
-	}
-
 	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
 	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
 	sglist = ipr_alloc_ucode_buffer(dnld_size);
@@ -3777,6 +3769,8 @@ static ssize_t ipr_store_update_fw(struct device *dev,
 		goto out;
 	}
 
+	ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
+
 	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
 
 	if (!result)
@@ -7449,8 +7443,11 @@ static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 
 	if (ioa_cfg->sdt_state == GET_DUMP)
+		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
+	else if (ioa_cfg->sdt_state == READ_DUMP)
 		ioa_cfg->sdt_state = ABORT_DUMP;
 
+	ioa_cfg->dump_timeout = 1;
 	ipr_cmd->job_step = ipr_reset_alert;
 
 	return IPR_RC_JOB_CONTINUE;
@@ -7614,6 +7611,8 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
 	ipr_cmd->job_step = ipr_reset_enable_ioa;
 
 	if (GET_DUMP == ioa_cfg->sdt_state) {
+		ioa_cfg->sdt_state = READ_DUMP;
+		ioa_cfg->dump_timeout = 0;
 		if (ioa_cfg->sis64)
 			ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
 		else
@@ -8003,8 +8002,12 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
 	if (ioa_cfg->ioa_is_dead)
 		return;
 
-	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
-		ioa_cfg->sdt_state = ABORT_DUMP;
+	if (ioa_cfg->in_reset_reload) {
+		if (ioa_cfg->sdt_state == GET_DUMP)
+			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
+		else if (ioa_cfg->sdt_state == READ_DUMP)
+			ioa_cfg->sdt_state = ABORT_DUMP;
+	}
 
 	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
 		dev_err(&ioa_cfg->pdev->dev,
@@ -8812,7 +8815,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
 	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
 	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
 		ioa_cfg->needs_hard_reset = 1;
-	if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
+	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
 		ioa_cfg->needs_hard_reset = 1;
 	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
 		ioa_cfg->ioa_unit_checked = 1;
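
The ipr changes refine the dump state machine: GET_DUMP now only means a dump was requested, the new READ_DUMP means the adapter is ready and the dump is being read, and on a timeout the driver defers (WAIT_FOR_DUMP) or aborts (ABORT_DUMP) depending on how far things got, recording dump_timeout so a timed-out dump no longer triggers an extra adapter reset. A condensed, hypothetical model of the timeout transition in ipr_reset_wait_for_dump():

#include <stdio.h>

/* Mirrors enum ipr_sdt_state after the patch (READ_DUMP added). */
enum sdt_state { INACTIVE, WAIT_FOR_DUMP, GET_DUMP, READ_DUMP, ABORT_DUMP, DUMP_OBTAINED };

/* On a dump timeout: a dump that was merely requested is deferred,
 * one already being read is aborted; either way mark the timeout. */
static enum sdt_state on_dump_timeout(enum sdt_state s, int *dump_timeout)
{
	*dump_timeout = 1;
	if (s == GET_DUMP)
		return WAIT_FOR_DUMP;
	if (s == READ_DUMP)
		return ABORT_DUMP;
	return s;
}

int main(void)
{
	int to = 0;

	printf("%d\n", on_dump_timeout(GET_DUMP, &to));	/* 1 == WAIT_FOR_DUMP */
	printf("%d\n", on_dump_timeout(READ_DUMP, &to));	/* 4 == ABORT_DUMP */
}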
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index f93f8637c5a..6d257e0dd6a 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -208,7 +208,7 @@
 #define IPR_CANCEL_ALL_TIMEOUT		(ipr_fastfail ? 10 * HZ : 30 * HZ)
 #define IPR_ABORT_TASK_TIMEOUT		(ipr_fastfail ? 10 * HZ : 30 * HZ)
 #define IPR_INTERNAL_TIMEOUT		(ipr_fastfail ? 10 * HZ : 30 * HZ)
-#define IPR_WRITE_BUFFER_TIMEOUT	(10 * 60 * HZ)
+#define IPR_WRITE_BUFFER_TIMEOUT	(30 * 60 * HZ)
 #define IPR_SET_SUP_DEVICE_TIMEOUT	(2 * 60 * HZ)
 #define IPR_REQUEST_SENSE_TIMEOUT	(10 * HZ)
 #define IPR_OPERATIONAL_TIMEOUT		(5 * 60)
@@ -1360,6 +1360,7 @@ enum ipr_sdt_state {
 	INACTIVE,
 	WAIT_FOR_DUMP,
 	GET_DUMP,
+	READ_DUMP,
 	ABORT_DUMP,
 	DUMP_OBTAINED
 };
@@ -1384,6 +1385,7 @@ struct ipr_ioa_cfg {
 	u8 needs_warm_reset:1;
 	u8 msi_received:1;
 	u8 sis64:1;
+	u8 dump_timeout:1;
 
 	u8 revid;
 
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 6981b773a88..f07f30fada1 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -1263,6 +1263,10 @@ void isci_host_deinit(struct isci_host *ihost)
 {
 	int i;
 
+	/* disable output data selects */
+	for (i = 0; i < isci_gpio_count(ihost); i++)
+		writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
+
 	isci_host_change_state(ihost, isci_stopping);
 	for (i = 0; i < SCI_MAX_PORTS; i++) {
 		struct isci_port *iport = &ihost->ports[i];
@@ -1281,6 +1285,12 @@ void isci_host_deinit(struct isci_host *ihost)
 	spin_unlock_irq(&ihost->scic_lock);
 
 	wait_for_stop(ihost);
+
+	/* disable sgpio: where the above wait should give time for the
+	 * enclosure to sample the gpios going inactive
+	 */
+	writel(0, &ihost->scu_registers->peg0.sgpio.interface_control);
+
 	sci_controller_reset(ihost);
 
 	/* Cancel any/all outstanding port timers */
@@ -2365,6 +2375,12 @@ int isci_host_init(struct isci_host *ihost)
 	for (i = 0; i < SCI_MAX_PHYS; i++)
 		isci_phy_init(&ihost->phys[i], ihost, i);
 
+	/* enable sgpio */
+	writel(1, &ihost->scu_registers->peg0.sgpio.interface_control);
+	for (i = 0; i < isci_gpio_count(ihost); i++)
+		writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]);
+	writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code);
+
 	for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
 		struct isci_remote_device *idev = &ihost->devices[i];
 
@@ -2760,3 +2776,56 @@ enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
 
 	return status;
 }
+
+static int sci_write_gpio_tx_gp(struct isci_host *ihost, u8 reg_index, u8 reg_count, u8 *write_data)
+{
+	int d;
+
+	/* no support for TX_GP_CFG */
+	if (reg_index == 0)
+		return -EINVAL;
+
+	for (d = 0; d < isci_gpio_count(ihost); d++) {
+		u32 val = 0x444; /* all ODx.n clear */
+		int i;
+
+		for (i = 0; i < 3; i++) {
+			int bit = (i << 2) + 2;
+
+			bit = try_test_sas_gpio_gp_bit(to_sas_gpio_od(d, i),
+						       write_data, reg_index,
+						       reg_count);
+			if (bit < 0)
+				break;
+
+			/* if od is set, clear the 'invert' bit */
+			val &= ~(bit << ((i << 2) + 2));
+		}
+
+		if (i < 3)
+			break;
+		writel(val, &ihost->scu_registers->peg0.sgpio.output_data_select[d]);
+	}
+
+	/* unless reg_index is > 1, we should always be able to write at
+	 * least one register
+	 */
+	return d > 0;
+}
+
+int isci_gpio_write(struct sas_ha_struct *sas_ha, u8 reg_type, u8 reg_index,
+		    u8 reg_count, u8 *write_data)
+{
+	struct isci_host *ihost = sas_ha->lldd_ha;
+	int written;
+
+	switch (reg_type) {
+	case SAS_GPIO_REG_TX_GP:
+		written = sci_write_gpio_tx_gp(ihost, reg_index, reg_count, write_data);
+		break;
+	default:
+		written = -EINVAL;
+	}
+
+	return written;
+}
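
In sci_write_gpio_tx_gp() each output_data_select register packs three LED outputs for one drive slot; bit (i*4 + 2) inverts output i, so 0x444 parks all three inactive and clearing an invert bit turns the corresponding LED on. A standalone sketch of that intended packing (note the kernel loop reuses its 'bit' variable for the tested OD value, so this is the arithmetic the code aims at rather than a line-for-line copy):

#include <stdint.h>
#include <stdio.h>

/* od[i] is the requested on/off state for output i of one drive slot. */
static uint32_t pack_od_word(const int od[3])
{
	uint32_t val = 0x444;	/* all ODx.n invert bits set: LEDs off */

	for (int i = 0; i < 3; i++)
		if (od[i])
			val &= ~(1u << ((i << 2) + 2));	/* clear invert: LED on */
	return val;
}

int main(void)
{
	int od[3] = { 1, 0, 1 };	/* activity on, locate off, error on */

	printf("0x%03x\n", pack_od_word(od));	/* 0x040 */
}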
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 9f33831a2f0..646051afd3c 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -440,6 +440,18 @@ static inline bool is_c0(struct pci_dev *pdev)
 	return false;
 }
 
+/* set hw control for 'activity', even though active enclosures seem to drive
+ * the activity led on their own. Skip setting FSENG control on 'status' due
+ * to unexpected operation and 'error' due to not being a supported automatic
+ * FSENG output
+ */
+#define SGPIO_HW_CONTROL 0x00000443
+
+static inline int isci_gpio_count(struct isci_host *ihost)
+{
+	return ARRAY_SIZE(ihost->scu_registers->peg0.sgpio.output_data_select);
+}
+
 void sci_controller_post_request(struct isci_host *ihost,
 				 u32 request);
 void sci_controller_release_frame(struct isci_host *ihost,
@@ -542,4 +554,7 @@ void sci_port_configuration_agent_construct(
 enum sci_status sci_port_configuration_agent_initialize(
 	struct isci_host *ihost,
 	struct sci_port_configuration_agent *port_agent);
+
+int isci_gpio_write(struct sas_ha_struct *, u8 reg_type, u8 reg_index,
+		    u8 reg_count, u8 *write_data);
 #endif
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 29aa34efb0f..43fe840fbe9 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -192,6 +192,9 @@ static struct sas_domain_function_template isci_transport_ops = {
 
 	/* Phy management */
 	.lldd_control_phy = isci_phy_control,
+
+	/* GPIO support */
+	.lldd_write_gpio = isci_gpio_write,
 };
 
 
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
index d1de63312e7..8efeb6b0832 100644
--- a/drivers/scsi/isci/isci.h
+++ b/drivers/scsi/isci/isci.h
@@ -97,7 +97,7 @@
 #define SCU_MAX_COMPLETION_QUEUE_SHIFT	(ilog2(SCU_MAX_COMPLETION_QUEUE_ENTRIES))
 
 #define SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES	(4096)
-#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE	(1024)
+#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE	(1024U)
 #define SCU_INVALID_FRAME_INDEX			(0xFFFF)
 
 #define SCU_IO_REQUEST_MAX_SGE_SIZE		(0x00FFFFFF)
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 09e61134037..35f50c2183e 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -1313,6 +1313,17 @@ int isci_phy_control(struct asd_sas_phy *sas_phy,
 		ret = isci_port_perform_hard_reset(ihost, iport, iphy);
 
 		break;
+	case PHY_FUNC_GET_EVENTS: {
+		struct scu_link_layer_registers __iomem *r;
+		struct sas_phy *phy = sas_phy->phy;
+
+		r = iphy->link_layer_registers;
+		phy->running_disparity_error_count = readl(&r->running_disparity_error_count);
+		phy->loss_of_dword_sync_count = readl(&r->loss_of_sync_error_count);
+		phy->phy_reset_problem_count = readl(&r->phy_reset_problem_count);
+		phy->invalid_dword_count = readl(&r->invalid_dword_counter);
+		break;
+	}
 
 	default:
 		dev_dbg(&ihost->pdev->dev,
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index 8f6f9b77e41..8e59c8865dc 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -294,8 +294,8 @@ static void isci_port_link_down(struct isci_host *isci_host,
 					 __func__, isci_device);
 				set_bit(IDEV_GONE, &isci_device->flags);
 			}
+			isci_port_change_state(isci_port, isci_stopping);
 		}
-		isci_port_change_state(isci_port, isci_stopping);
 	}
 
 	/* Notify libsas of the borken link, this will trigger calls to our
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index 486b113c634..38a99d28114 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -678,7 +678,7 @@ static void apc_agent_timeout(unsigned long data)
 	configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
 
 	if (!configure_phy_mask)
-		return;
+		goto done;
 
 	for (index = 0; index < SCI_MAX_PHYS; index++) {
 		if ((configure_phy_mask & (1 << index)) == 0)
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
index 00afc738bbe..eaa541afc75 100644
--- a/drivers/scsi/isci/registers.h
+++ b/drivers/scsi/isci/registers.h
@@ -875,122 +875,6 @@ struct scu_iit_entry {
 #define SCU_PTSxSR_GEN_BIT(name) \
 	SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ ## name)
 
-
-/*
- * *****************************************************************************
- * * SGPIO Register shift and mask values
- * ***************************************************************************** */
-#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_SHIFT (0)
-#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_MASK (0x00000001)
-#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_SHIFT (1)
-#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_MASK (0x00000002)
-#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_SHIFT (2)
-#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_MASK (0x00000004)
-#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_SHIFT (15)
-#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_MASK (0x00008000)
-#define SCU_SGPIO_CONTROL_SGPIO_RESERVED_MASK (0xFFFF7FF8)
-
-#define SCU_SGICRx_GEN_BIT(name) \
-	SCU_GEN_BIT(SCU_SGPIO_CONTROL_SGPIO_ ## name)
-
-#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_SHIFT (0)
-#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_MASK (0x0000000F)
-#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_SHIFT (4)
-#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_MASK (0x000000F0)
-#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_SHIFT (8)
-#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_MASK (0x00000F00)
-#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_SHIFT (12)
-#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_MASK (0x0000F000)
-#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_RESERVED_MASK (0xFFFF0000)
-
-#define SCU_SGPBRx_GEN_VAL(name, value) \
-	SCU_GEN_VALUE(SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_ ## name, value)
-
-#define SCU_SGPIO_START_DRIVE_LOWER_R0_SHIFT (0)
-#define SCU_SGPIO_START_DRIVE_LOWER_R0_MASK (0x00000003)
-#define SCU_SGPIO_START_DRIVE_LOWER_R1_SHIFT (4)
-#define SCU_SGPIO_START_DRIVE_LOWER_R1_MASK (0x00000030)
-#define SCU_SGPIO_START_DRIVE_LOWER_R2_SHIFT (8)
-#define SCU_SGPIO_START_DRIVE_LOWER_R2_MASK (0x00000300)
-#define SCU_SGPIO_START_DRIVE_LOWER_R3_SHIFT (12)
-#define SCU_SGPIO_START_DRIVE_LOWER_R3_MASK (0x00003000)
-#define SCU_SGPIO_START_DRIVE_LOWER_RESERVED_MASK (0xFFFF8888)
-
-#define SCU_SGSDLRx_GEN_VAL(name, value) \
-	SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_LOWER_ ## name, value)
-
-#define SCU_SGPIO_START_DRIVE_UPPER_R0_SHIFT (0)
-#define SCU_SGPIO_START_DRIVE_UPPER_R0_MASK (0x00000003)
-#define SCU_SGPIO_START_DRIVE_UPPER_R1_SHIFT (4)
-#define SCU_SGPIO_START_DRIVE_UPPER_R1_MASK (0x00000030)
-#define SCU_SGPIO_START_DRIVE_UPPER_R2_SHIFT (8)
-#define SCU_SGPIO_START_DRIVE_UPPER_R2_MASK (0x00000300)
-#define SCU_SGPIO_START_DRIVE_UPPER_R3_SHIFT (12)
-#define SCU_SGPIO_START_DRIVE_UPPER_R3_MASK (0x00003000)
-#define SCU_SGPIO_START_DRIVE_UPPER_RESERVED_MASK (0xFFFF8888)
-
-#define SCU_SGSDURx_GEN_VAL(name, value) \
-	SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_LOWER_ ## name, value)
-
-#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_SHIFT (0)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_MASK (0x00000003)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_SHIFT (4)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_MASK (0x00000030)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_SHIFT (8)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_MASK (0x00000300)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_SHIFT (12)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_MASK (0x00003000)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_RESERVED_MASK (0xFFFF8888)
-
-#define SCU_SGSIDLRx_GEN_VAL(name, value) \
-	SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_ ## name, value)
-
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_SHIFT (0)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_MASK (0x00000003)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_SHIFT (4)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_MASK (0x00000030)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_SHIFT (8)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_MASK (0x00000300)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_SHIFT (12)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_MASK (0x00003000)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_RESERVED_MASK (0xFFFF8888)
-
-#define SCU_SGSIDURx_GEN_VAL(name, value) \
-	SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_ ## name, value)
-
-#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_SHIFT (0)
-#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_MASK (0x0000000F)
-#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_RESERVED_MASK (0xFFFFFFF0)
-
-#define SCU_SGVSCR_GEN_VAL(value) \
-	SCU_GEN_VALUE(SCU_SGPIO_VENDOR_SPECIFIC_CODE ## name, value)
-
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA0_SHIFT (0)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA0_MASK (0x00000003)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA0_SHIFT (2)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA0_MASK (0x00000004)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA0_SHIFT (3)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA0_MASK (0x00000008)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA1_SHIFT (4)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA1_MASK (0x00000030)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA1_SHIFT (6)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA1_MASK (0x00000040)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA1_SHIFT (7)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA1_MASK (0x00000080)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA2_SHIFT (8)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA2_MASK (0x00000300)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA2_SHIFT (10)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA2_MASK (0x00000400)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA2_SHIFT (11)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA2_MASK (0x00000800)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_RESERVED_MASK (0xFFFFF000)
-
-#define SCU_SGODSR_GEN_VAL(name, value) \
-	SCU_GEN_VALUE(SCU_SGPIO_OUPUT_DATA_SELECT_ ## name, value)
-
-#define SCU_SGODSR_GEN_BIT(name) \
-	SCU_GEN_BIT(SCU_SGPIO_OUPUT_DATA_SELECT_ ## name)
-
 /*
  * *****************************************************************************
  * * SMU Registers
@@ -1529,10 +1413,12 @@ struct scu_sgpio_registers {
 	u32 serial_input_upper;
 /* 0x0018 SGPIO_SGVSCR */
 	u32 vendor_specific_code;
+/* 0x001C Reserved */
+	u32 reserved_001c;
 /* 0x0020 SGPIO_SGODSR */
-	u32 ouput_data_select[8];
+	u32 output_data_select[8];
 /* Remainder of memory space 256 bytes */
-	u32 reserved_1444_14ff[0x31];
+	u32 reserved_1444_14ff[0x30];
 
 };
 
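
The struct scu_sgpio_registers fix is pure layout arithmetic: inserting the explicit reserved_001c pad moves output_data_select from 0x1c to its documented 0x20 offset, and shrinking the trailing reserved array from 0x31 to 0x30 words keeps the overall block size unchanged. A compile-time check of that reasoning over a condensed model of the struct (the field names before vendor_specific_code are stand-ins):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sgpio_regs {
	uint32_t interface_control;	/* 0x0000 */
	uint32_t blink_rate;		/* 0x0004 (stand-in name) */
	uint32_t start_drive_lower;	/* 0x0008 */
	uint32_t start_drive_upper;	/* 0x000c */
	uint32_t serial_input_lower;	/* 0x0010 */
	uint32_t serial_input_upper;	/* 0x0014 */
	uint32_t vendor_specific_code;	/* 0x0018 */
	uint32_t reserved_001c;		/* 0x001c: the newly added pad */
	uint32_t output_data_select[8];	/* 0x0020: SGODSR, now correctly placed */
	uint32_t reserved_tail[0x30];	/* was 0x31; one u32 moved to the pad */
};

int main(void)
{
	/* Without reserved_001c the array would have started at 0x1c. */
	static_assert(offsetof(struct sgpio_regs, output_data_select) == 0x20,
		      "SGODSR offset");
	printf("size = 0x%zx\n", sizeof(struct sgpio_regs));	/* 0x100 */
}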
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index b6e6368c266..fbf9ce28c3f 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -386,6 +386,18 @@ static bool is_remote_device_ready(struct isci_remote_device *idev)
 	}
 }
 
+/*
+ * called once the remote node context has transisitioned to a ready
+ * state (after suspending RX and/or TX due to early D2H fis)
+ */
+static void atapi_remote_device_resume_done(void *_dev)
+{
+	struct isci_remote_device *idev = _dev;
+	struct isci_request *ireq = idev->working_request;
+
+	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+}
+
 enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
 						u32 event_code)
 {
@@ -432,6 +444,16 @@ enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
 	if (status != SCI_SUCCESS)
 		return status;
 
+	if (state == SCI_STP_DEV_ATAPI_ERROR) {
+		/* For ATAPI error state resume the RNC right away. */
+		if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
+		    scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) {
+			return sci_remote_node_context_resume(&idev->rnc,
+							      atapi_remote_device_resume_done,
+							      idev);
+		}
+	}
+
 	if (state == SCI_STP_DEV_IDLE) {
 
 		/* We pick up suspension events to handle specifically to this
@@ -625,6 +647,7 @@ enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
 	case SCI_STP_DEV_CMD:
 	case SCI_STP_DEV_NCQ:
 	case SCI_STP_DEV_NCQ_ERROR:
+	case SCI_STP_DEV_ATAPI_ERROR:
 		status = common_complete_io(iport, idev, ireq);
 		if (status != SCI_SUCCESS)
 			break;
@@ -1020,6 +1043,7 @@ static const struct sci_base_state sci_remote_device_state_table[] = {
 	[SCI_STP_DEV_NCQ_ERROR] = {
 		.enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
 	},
+	[SCI_STP_DEV_ATAPI_ERROR] = { },
 	[SCI_STP_DEV_AWAIT_RESET] = { },
 	[SCI_SMP_DEV_IDLE] = {
 		.enter_state = sci_smp_remote_device_ready_idle_substate_enter,
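
The new SCI_STP_DEV_ATAPI_ERROR substate exists to handle an ATAPI device that returns an error status FIS with no data: the silicon suspends the remote node context, and the event handler above resumes it immediately, completing the failed request from the resume callback. A hypothetical reduction of that dispatch:

#include <stdio.h>

enum ev { RNC_SUSPEND_TX, RNC_SUSPEND_TX_RX, OTHER_EVENT };
enum st { DEV_CMD, DEV_ATAPI_ERROR };

/* In the ATAPI error substate a suspension event triggers an immediate
 * RNC resume whose completion callback finishes the failed request;
 * everything else takes the pre-existing paths. */
static const char *handle_event(enum st state, enum ev event)
{
	if (state == DEV_ATAPI_ERROR &&
	    (event == RNC_SUSPEND_TX || event == RNC_SUSPEND_TX_RX))
		return "resume rnc -> complete request";
	return "default handling";
}

int main(void)
{
	printf("%s\n", handle_event(DEV_ATAPI_ERROR, RNC_SUSPEND_TX));
	printf("%s\n", handle_event(DEV_CMD, RNC_SUSPEND_TX));
}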
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 57ccfc3d6ad..e1747ea0d0e 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -244,6 +244,15 @@ enum sci_remote_device_states {
 	SCI_STP_DEV_NCQ_ERROR,
 
 	/**
+	 * This is the ATAPI error state for the STP ATAPI remote device.
+	 * This state is entered when ATAPI device sends error status FIS
+	 * without data while the device object is in CMD state.
+	 * A suspension event is expected in this state.
+	 * The device object will resume right away.
+	 */
+	SCI_STP_DEV_ATAPI_ERROR,
+
+	/**
 	 * This is the READY substate indicates the device is waiting for the RESET task
 	 * coming to be recovered from certain hardware specific error.
 	 */
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index b5d3a8c4d32..565a9f0a9bc 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -481,7 +481,29 @@ static void sci_stp_optimized_request_construct(struct isci_request *ireq,
 	}
 }
 
+static void sci_atapi_construct(struct isci_request *ireq)
+{
+	struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd;
+	struct sas_task *task;
+
+	/* To simplify the implementation we take advantage of the
+	 * silicon's partial acceleration of atapi protocol (dma data
+	 * transfers), so we promote all commands to dma protocol. This
+	 * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives.
+	 */
+	h2d_fis->features |= ATAPI_PKT_DMA;
 
+	scu_stp_raw_request_construct_task_context(ireq);
+
+	task = isci_request_access_task(ireq);
+	if (task->data_dir == DMA_NONE)
+		task->total_xfer_len = 0;
+
+	/* clear the response so we can detect arrivial of an
+	 * unsolicited h2d fis
+	 */
+	ireq->stp.rsp.fis_type = 0;
+}
 
 static enum sci_status
 sci_io_request_construct_sata(struct isci_request *ireq,
@@ -491,6 +513,7 @@ sci_io_request_construct_sata(struct isci_request *ireq,
 {
 	enum sci_status status = SCI_SUCCESS;
 	struct sas_task *task = isci_request_access_task(ireq);
+	struct domain_device *dev = ireq->target_device->domain_dev;
 
 	/* check for management protocols */
 	if (ireq->ttype == tmf_task) {
@@ -519,6 +542,13 @@ sci_io_request_construct_sata(struct isci_request *ireq,
 
 	}
 
+	/* ATAPI */
+	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET &&
+	    task->ata_task.fis.command == ATA_CMD_PACKET) {
+		sci_atapi_construct(ireq);
+		return SCI_SUCCESS;
+	}
+
 	/* non data */
 	if (task->data_dir == DMA_NONE) {
 		scu_stp_raw_request_construct_task_context(ireq);
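
sci_atapi_construct() above leans on the silicon's DMA acceleration by promoting every ATAPI PACKET command to the DMA protocol (setting ATAPI_PKT_DMA in the H2D FIS features), which is why the comment notes incompatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives. A small model of the added dispatch; the two #define values match the ATA spec, the rest is illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ATA_CMD_PACKET 0xa0	/* PACKET command opcode */
#define ATAPI_PKT_DMA  0x01	/* features bit 0: DMA data transfer */

struct h2d_fis { uint8_t command; uint8_t features; };

/* ATAPI PACKET commands take the new construction path, which forces
 * the DMA protocol bit; everything else falls through as before. */
static bool construct(struct h2d_fis *fis, bool atapi_command_set)
{
	if (atapi_command_set && fis->command == ATA_CMD_PACKET) {
		fis->features |= ATAPI_PKT_DMA;	/* promote to dma protocol */
		return true;	/* handled by the atapi path */
	}
	return false;		/* non-data / pio / dma paths */
}

int main(void)
{
	struct h2d_fis fis = { ATA_CMD_PACKET, 0 };

	printf("atapi=%d features=0x%02x\n", construct(&fis, true), fis.features);
}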
@@ -627,7 +657,7 @@ enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
627 657
628/** 658/**
629 * sci_req_tx_bytes - bytes transferred when reply underruns request 659 * sci_req_tx_bytes - bytes transferred when reply underruns request
630 * @sci_req: request that was terminated early 660 * @ireq: request that was terminated early
631 */ 661 */
632#define SCU_TASK_CONTEXT_SRAM 0x200000 662#define SCU_TASK_CONTEXT_SRAM 0x200000
633static u32 sci_req_tx_bytes(struct isci_request *ireq) 663static u32 sci_req_tx_bytes(struct isci_request *ireq)
@@ -729,6 +759,10 @@ sci_io_request_terminate(struct isci_request *ireq)
729 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED: 759 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
730 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG: 760 case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
731 case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: 761 case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
762 case SCI_REQ_ATAPI_WAIT_H2D:
763 case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
764 case SCI_REQ_ATAPI_WAIT_D2H:
765 case SCI_REQ_ATAPI_WAIT_TC_COMP:
732 sci_change_state(&ireq->sm, SCI_REQ_ABORTING); 766 sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
733 return SCI_SUCCESS; 767 return SCI_SUCCESS;
734 case SCI_REQ_TASK_WAIT_TC_RESP: 768 case SCI_REQ_TASK_WAIT_TC_RESP:
@@ -1194,8 +1228,8 @@ static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_re
1194{ 1228{
1195 struct isci_stp_request *stp_req = &ireq->stp.req; 1229 struct isci_stp_request *stp_req = &ireq->stp.req;
1196 struct scu_sgl_element_pair *sgl_pair; 1230 struct scu_sgl_element_pair *sgl_pair;
1231 enum sci_status status = SCI_SUCCESS;
1197 struct scu_sgl_element *sgl; 1232 struct scu_sgl_element *sgl;
1198 enum sci_status status;
1199 u32 offset; 1233 u32 offset;
1200 u32 len = 0; 1234 u32 len = 0;
1201 1235
@@ -1249,7 +1283,7 @@ static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_re
1249 */ 1283 */
1250static enum sci_status 1284static enum sci_status
1251sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req, 1285sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
1252 u8 *data_buf, u32 len) 1286 u8 *data_buf, u32 len)
1253{ 1287{
1254 struct isci_request *ireq; 1288 struct isci_request *ireq;
1255 u8 *src_addr; 1289 u8 *src_addr;
@@ -1423,6 +1457,128 @@ static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_re
1423 return status; 1457 return status;
1424} 1458}
1425 1459
1460static enum sci_status process_unsolicited_fis(struct isci_request *ireq,
1461 u32 frame_index)
1462{
1463 struct isci_host *ihost = ireq->owning_controller;
1464 enum sci_status status;
1465 struct dev_to_host_fis *frame_header;
1466 u32 *frame_buffer;
1467
1468 status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1469 frame_index,
1470 (void **)&frame_header);
1471
1472 if (status != SCI_SUCCESS)
1473 return status;
1474
1475 if (frame_header->fis_type != FIS_REGD2H) {
1476 dev_err(&ireq->isci_host->pdev->dev,
1477 "%s ERROR: invalid fis type 0x%X\n",
1478 __func__, frame_header->fis_type);
1479 return SCI_FAILURE;
1480 }
1481
1482 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1483 frame_index,
1484 (void **)&frame_buffer);
1485
1486 sci_controller_copy_sata_response(&ireq->stp.rsp,
1487 (u32 *)frame_header,
1488 frame_buffer);
1489
 1490 /* Frame has been decoded; return it to the controller */
1491 sci_controller_release_frame(ihost, frame_index);
1492
1493 return status;
1494}
1495
1496static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
1497 u32 frame_index)
1498{
1499 struct sas_task *task = isci_request_access_task(ireq);
1500 enum sci_status status;
1501
1502 status = process_unsolicited_fis(ireq, frame_index);
1503
1504 if (status == SCI_SUCCESS) {
1505 if (ireq->stp.rsp.status & ATA_ERR)
1506 status = SCI_IO_FAILURE_RESPONSE_VALID;
1507 } else {
1508 status = SCI_IO_FAILURE_RESPONSE_VALID;
1509 }
1510
1511 if (status != SCI_SUCCESS) {
1512 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
1513 ireq->sci_status = status;
1514 } else {
1515 ireq->scu_status = SCU_TASK_DONE_GOOD;
1516 ireq->sci_status = SCI_SUCCESS;
1517 }
1518
 1519 /* the d2h unsolicited frame is the end of non-data commands */
1520 if (task->data_dir == DMA_NONE)
1521 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1522
1523 return status;
1524}
1525
1526static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq)
1527{
1528 struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
1529 void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet;
1530 struct scu_task_context *task_context = ireq->tc;
1531
 1532 /* fill in the SCU Task Context for a DATA fis containing the CDB in Raw
 1533 * Frame type. The TC for the previous Packet fis is already in place; we
 1534 * only need to change the H2D fis content.
1535 */
1536 memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis));
1537 memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN);
1538 memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context));
1539 task_context->type.stp.fis_type = FIS_DATA;
1540 task_context->transfer_length_bytes = dev->cdb_len;
1541}
1542
1543static void scu_atapi_construct_task_context(struct isci_request *ireq)
1544{
1545 struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
1546 struct sas_task *task = isci_request_access_task(ireq);
1547 struct scu_task_context *task_context = ireq->tc;
1548 int cdb_len = dev->cdb_len;
1549
1550 /* reference: SSTL 1.13.4.2
1551 * task_type, sata_direction
1552 */
1553 if (task->data_dir == DMA_TO_DEVICE) {
1554 task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT;
1555 task_context->sata_direction = 0;
1556 } else {
 1557 /* todo: for a NO_DATA command, we need to send out a raw frame. */
1558 task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN;
1559 task_context->sata_direction = 1;
1560 }
1561
1562 memset(&task_context->type.stp, 0, sizeof(task_context->type.stp));
1563 task_context->type.stp.fis_type = FIS_DATA;
1564
1565 memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
1566 memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len);
1567 task_context->ssp_command_iu_length = cdb_len / sizeof(u32);
1568
1569 /* task phase is set to TX_CMD */
1570 task_context->task_phase = 0x1;
1571
1572 /* retry counter */
1573 task_context->stp_retry_count = 0;
1574
1575 /* data transfer size. */
1576 task_context->transfer_length_bytes = task->total_xfer_len;
1577
1578 /* setup sgl */
1579 sci_request_build_sgl(ireq);
1580}
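Both CDB copies above land the packet at byte offset 4 of the H2D FIS buffer: &ireq->stp.cmd.lbal and (u8 *)&ireq->stp.cmd + sizeof(u32) name the same byte, since fis_type, flags, command and features fill bytes 0-3. A function-scope assertion of that layout assumption would read (sketch only):

	BUILD_BUG_ON(offsetof(struct host_to_dev_fis, lbal) != sizeof(u32));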
1581
1426enum sci_status 1582enum sci_status
1427sci_io_request_frame_handler(struct isci_request *ireq, 1583sci_io_request_frame_handler(struct isci_request *ireq,
1428 u32 frame_index) 1584 u32 frame_index)
@@ -1490,29 +1646,30 @@ sci_io_request_frame_handler(struct isci_request *ireq,
1490 return SCI_SUCCESS; 1646 return SCI_SUCCESS;
1491 1647
1492 case SCI_REQ_SMP_WAIT_RESP: { 1648 case SCI_REQ_SMP_WAIT_RESP: {
1493 struct smp_resp *rsp_hdr = &ireq->smp.rsp; 1649 struct sas_task *task = isci_request_access_task(ireq);
1494 void *frame_header; 1650 struct scatterlist *sg = &task->smp_task.smp_resp;
1651 void *frame_header, *kaddr;
1652 u8 *rsp;
1495 1653
1496 sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1654 sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1497 frame_index, 1655 frame_index,
1498 &frame_header); 1656 &frame_header);
1499 1657 kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
1500 /* byte swap the header. */ 1658 rsp = kaddr + sg->offset;
1501 word_cnt = SMP_RESP_HDR_SZ / sizeof(u32); 1659 sci_swab32_cpy(rsp, frame_header, 1);
1502 sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);
1503 1660
1504 if (rsp_hdr->frame_type == SMP_RESPONSE) { 1661 if (rsp[0] == SMP_RESPONSE) {
1505 void *smp_resp; 1662 void *smp_resp;
1506 1663
1507 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control, 1664 sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
1508 frame_index, 1665 frame_index,
1509 &smp_resp); 1666 &smp_resp);
1510
1511 word_cnt = (sizeof(struct smp_resp) - SMP_RESP_HDR_SZ) /
1512 sizeof(u32);
1513 1667
1514 sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ, 1668 word_cnt = (sg->length/4)-1;
1515 smp_resp, word_cnt); 1669 if (word_cnt > 0)
1670 word_cnt = min_t(unsigned int, word_cnt,
1671 SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4);
1672 sci_swab32_cpy(rsp + 4, smp_resp, word_cnt);
1516 1673
1517 ireq->scu_status = SCU_TASK_DONE_GOOD; 1674 ireq->scu_status = SCU_TASK_DONE_GOOD;
1518 ireq->sci_status = SCI_SUCCESS; 1675 ireq->sci_status = SCI_SUCCESS;
@@ -1528,12 +1685,13 @@ sci_io_request_frame_handler(struct isci_request *ireq,
1528 __func__, 1685 __func__,
1529 ireq, 1686 ireq,
1530 frame_index, 1687 frame_index,
1531 rsp_hdr->frame_type); 1688 rsp[0]);
1532 1689
1533 ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR; 1690 ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
1534 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1691 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1535 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1692 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1536 } 1693 }
1694 kunmap_atomic(kaddr, KM_IRQ0);
1537 1695
1538 sci_controller_release_frame(ihost, frame_index); 1696 sci_controller_release_frame(ihost, frame_index);
1539 1697
@@ -1833,6 +1991,24 @@ sci_io_request_frame_handler(struct isci_request *ireq,
1833 1991
1834 return status; 1992 return status;
1835 } 1993 }
1994 case SCI_REQ_ATAPI_WAIT_PIO_SETUP: {
1995 struct sas_task *task = isci_request_access_task(ireq);
1996
1997 sci_controller_release_frame(ihost, frame_index);
1998 ireq->target_device->working_request = ireq;
1999 if (task->data_dir == DMA_NONE) {
2000 sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP);
2001 scu_atapi_reconstruct_raw_frame_task_context(ireq);
2002 } else {
2003 sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
2004 scu_atapi_construct_task_context(ireq);
2005 }
2006
2007 sci_controller_continue_io(ireq);
2008 return SCI_SUCCESS;
2009 }
2010 case SCI_REQ_ATAPI_WAIT_D2H:
2011 return atapi_d2h_reg_frame_handler(ireq, frame_index);
1836 case SCI_REQ_ABORTING: 2012 case SCI_REQ_ABORTING:
1837 /* 2013 /*
1838 * TODO: Is it even possible to get an unsolicited frame in the 2014 * TODO: Is it even possible to get an unsolicited frame in the
@@ -1898,10 +2074,9 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
1898 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): 2074 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
1899 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR): 2075 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
1900 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR): 2076 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
1901 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
1902 sci_remote_device_suspend(ireq->target_device, 2077 sci_remote_device_suspend(ireq->target_device,
1903 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); 2078 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
1904 /* Fall through to the default case */ 2079 /* Fall through to the default case */
1905 default: 2080 default:
1906 /* All other completion status cause the IO to be complete. */ 2081 /* All other completion status cause the IO to be complete. */
1907 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); 2082 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
@@ -1964,6 +2139,112 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
1964 return SCI_SUCCESS; 2139 return SCI_SUCCESS;
1965} 2140}
1966 2141
2142static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
2143 enum sci_base_request_states next)
2144{
2145 enum sci_status status = SCI_SUCCESS;
2146
2147 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2148 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2149 ireq->scu_status = SCU_TASK_DONE_GOOD;
2150 ireq->sci_status = SCI_SUCCESS;
2151 sci_change_state(&ireq->sm, next);
2152 break;
2153 default:
2154 /* All other completion status cause the IO to be complete.
2155 * If a NAK was received, then it is up to the user to retry
2156 * the request.
2157 */
2158 ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
2159 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
2160
2161 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2162 break;
2163 }
2164
2165 return status;
2166}
2167
2168static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq,
2169 u32 completion_code)
2170{
2171 struct isci_remote_device *idev = ireq->target_device;
2172 struct dev_to_host_fis *d2h = &ireq->stp.rsp;
2173 enum sci_status status = SCI_SUCCESS;
2174
2175 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2176 case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
2177 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2178 break;
2179
2180 case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): {
2181 u16 len = sci_req_tx_bytes(ireq);
2182
 2183 /* likely non-error data underrun, workaround missing
2184 * d2h frame from the controller
2185 */
2186 if (d2h->fis_type != FIS_REGD2H) {
2187 d2h->fis_type = FIS_REGD2H;
2188 d2h->flags = (1 << 6);
2189 d2h->status = 0x50;
2190 d2h->error = 0;
2191 d2h->lbal = 0;
2192 d2h->byte_count_low = len & 0xff;
2193 d2h->byte_count_high = len >> 8;
2194 d2h->device = 0xa0;
2195 d2h->lbal_exp = 0;
2196 d2h->lbam_exp = 0;
2197 d2h->lbah_exp = 0;
2198 d2h->_r_a = 0;
2199 d2h->sector_count = 0x3;
2200 d2h->sector_count_exp = 0;
2201 d2h->_r_b = 0;
2202 d2h->_r_c = 0;
2203 d2h->_r_d = 0;
2204 }
2205
2206 ireq->scu_status = SCU_TASK_DONE_GOOD;
2207 ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
2208 status = ireq->sci_status;
2209
2210 /* the hw will have suspended the rnc, so complete the
2211 * request upon pending resume
2212 */
2213 sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
2214 break;
2215 }
2216 case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT):
2217 /* In this case, there is no UF coming after.
 2218 * complete the IO now.
2219 */
2220 ireq->scu_status = SCU_TASK_DONE_GOOD;
2221 ireq->sci_status = SCI_SUCCESS;
2222 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2223 break;
2224
2225 default:
2226 if (d2h->fis_type == FIS_REGD2H) {
2227 /* UF received change the device state to ATAPI_ERROR */
2228 status = ireq->sci_status;
2229 sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
2230 } else {
2231 /* If receiving any non-sucess TC status, no UF
2232 * received yet, then an UF for the status fis
2233 * is coming after (XXX: suspect this is
2234 * actually a protocol error or a bug like the
2235 * DONE_UNEXP_FIS case)
2236 */
2237 ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
2238 ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
2239
2240 sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
2241 }
2242 break;
2243 }
2244
2245 return status;
2246}
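For reference, the register FIS synthesized in the DONE_UNEXP_FIS branch above, spelled with the <linux/ata.h> constants (a sketch; the patch itself uses the raw values):

	d2h->flags  = 1 << 6;              /* the D2H FIS interrupt ('I') bit */
	d2h->status = ATA_DRDY | ATA_DSC;  /* 0x50: device ready, no error */
	d2h->device = 0xa0;                /* obsolete bits ATAPI devices set */

In other words, it mimics a successful, interrupt-raising status FIS so that completion can proceed despite the missing D2H frame.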
2247
1967enum sci_status 2248enum sci_status
1968sci_io_request_tc_completion(struct isci_request *ireq, 2249sci_io_request_tc_completion(struct isci_request *ireq,
1969 u32 completion_code) 2250 u32 completion_code)
@@ -2015,6 +2296,17 @@ sci_io_request_tc_completion(struct isci_request *ireq,
2015 return request_aborting_state_tc_event(ireq, 2296 return request_aborting_state_tc_event(ireq,
2016 completion_code); 2297 completion_code);
2017 2298
2299 case SCI_REQ_ATAPI_WAIT_H2D:
2300 return atapi_raw_completion(ireq, completion_code,
2301 SCI_REQ_ATAPI_WAIT_PIO_SETUP);
2302
2303 case SCI_REQ_ATAPI_WAIT_TC_COMP:
2304 return atapi_raw_completion(ireq, completion_code,
2305 SCI_REQ_ATAPI_WAIT_D2H);
2306
2307 case SCI_REQ_ATAPI_WAIT_D2H:
2308 return atapi_data_tc_completion_handler(ireq, completion_code);
2309
2018 default: 2310 default:
2019 dev_warn(&ihost->pdev->dev, 2311 dev_warn(&ihost->pdev->dev,
2020 "%s: SCIC IO Request given task completion " 2312 "%s: SCIC IO Request given task completion "
@@ -2421,6 +2713,8 @@ static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_
2421 */ 2713 */
2422 if (fis->status & ATA_DF) 2714 if (fis->status & ATA_DF)
2423 ts->stat = SAS_PROTO_RESPONSE; 2715 ts->stat = SAS_PROTO_RESPONSE;
2716 else if (fis->status & ATA_ERR)
2717 ts->stat = SAM_STAT_CHECK_CONDITION;
2424 else 2718 else
2425 ts->stat = SAM_STAT_GOOD; 2719 ts->stat = SAM_STAT_GOOD;
2426 2720
@@ -2603,18 +2897,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
2603 status = SAM_STAT_GOOD; 2897 status = SAM_STAT_GOOD;
2604 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); 2898 set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2605 2899
2606 if (task->task_proto == SAS_PROTOCOL_SMP) { 2900 if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) {
2607 void *rsp = &request->smp.rsp;
2608
2609 dev_dbg(&ihost->pdev->dev,
2610 "%s: SMP protocol completion\n",
2611 __func__);
2612
2613 sg_copy_from_buffer(
2614 &task->smp_task.smp_resp, 1,
2615 rsp, sizeof(struct smp_resp));
2616 } else if (completion_status
2617 == SCI_IO_SUCCESS_IO_DONE_EARLY) {
2618 2901
2619 /* This was an SSP / STP / SATA transfer. 2902 /* This was an SSP / STP / SATA transfer.
2620 * There is a possibility that less data than 2903 * There is a possibility that less data than
@@ -2791,6 +3074,7 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
2791{ 3074{
2792 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); 3075 struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
2793 struct domain_device *dev = ireq->target_device->domain_dev; 3076 struct domain_device *dev = ireq->target_device->domain_dev;
3077 enum sci_base_request_states state;
2794 struct sas_task *task; 3078 struct sas_task *task;
2795 3079
2796 /* XXX as hch said always creating an internal sas_task for tmf 3080 /* XXX as hch said always creating an internal sas_task for tmf
@@ -2802,26 +3086,30 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
2802 * substates 3086 * substates
2803 */ 3087 */
2804 if (!task && dev->dev_type == SAS_END_DEV) { 3088 if (!task && dev->dev_type == SAS_END_DEV) {
2805 sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP); 3089 state = SCI_REQ_TASK_WAIT_TC_COMP;
2806 } else if (!task && 3090 } else if (!task &&
2807 (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high || 3091 (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
2808 isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) { 3092 isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
2809 sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED); 3093 state = SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED;
2810 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { 3094 } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
2811 sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP); 3095 state = SCI_REQ_SMP_WAIT_RESP;
2812 } else if (task && sas_protocol_ata(task->task_proto) && 3096 } else if (task && sas_protocol_ata(task->task_proto) &&
2813 !task->ata_task.use_ncq) { 3097 !task->ata_task.use_ncq) {
2814 u32 state; 3098 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET &&
2815 3099 task->ata_task.fis.command == ATA_CMD_PACKET) {
2816 if (task->data_dir == DMA_NONE) 3100 state = SCI_REQ_ATAPI_WAIT_H2D;
3101 } else if (task->data_dir == DMA_NONE) {
2817 state = SCI_REQ_STP_NON_DATA_WAIT_H2D; 3102 state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
2818 else if (task->ata_task.dma_xfer) 3103 } else if (task->ata_task.dma_xfer) {
2819 state = SCI_REQ_STP_UDMA_WAIT_TC_COMP; 3104 state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
2820 else /* PIO */ 3105 } else /* PIO */ {
2821 state = SCI_REQ_STP_PIO_WAIT_H2D; 3106 state = SCI_REQ_STP_PIO_WAIT_H2D;
2822 3107 }
2823 sci_change_state(sm, state); 3108 } else {
3109 /* SSP or NCQ are fully accelerated, no substates */
3110 return;
2824 } 3111 }
3112 sci_change_state(sm, state);
2825} 3113}
2826 3114
2827static void sci_request_completed_state_enter(struct sci_base_state_machine *sm) 3115static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
@@ -2913,6 +3201,10 @@ static const struct sci_base_state sci_request_state_table[] = {
2913 [SCI_REQ_TASK_WAIT_TC_RESP] = { }, 3201 [SCI_REQ_TASK_WAIT_TC_RESP] = { },
2914 [SCI_REQ_SMP_WAIT_RESP] = { }, 3202 [SCI_REQ_SMP_WAIT_RESP] = { },
2915 [SCI_REQ_SMP_WAIT_TC_COMP] = { }, 3203 [SCI_REQ_SMP_WAIT_TC_COMP] = { },
3204 [SCI_REQ_ATAPI_WAIT_H2D] = { },
3205 [SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { },
3206 [SCI_REQ_ATAPI_WAIT_D2H] = { },
3207 [SCI_REQ_ATAPI_WAIT_TC_COMP] = { },
2916 [SCI_REQ_COMPLETED] = { 3208 [SCI_REQ_COMPLETED] = {
2917 .enter_state = sci_request_completed_state_enter, 3209 .enter_state = sci_request_completed_state_enter,
2918 }, 3210 },
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
index 7a1d5a9778e..f720b97b7bb 100644
--- a/drivers/scsi/isci/request.h
+++ b/drivers/scsi/isci/request.h
@@ -96,7 +96,6 @@ enum sci_request_protocol {
96 * to wait for another fis or if the transfer is complete. Upon 96 * to wait for another fis or if the transfer is complete. Upon
97 * receipt of a d2h fis this will be the status field of that fis. 97 * receipt of a d2h fis this will be the status field of that fis.
98 * @sgl - track pio transfer progress as we iterate through the sgl 98 * @sgl - track pio transfer progress as we iterate through the sgl
99 * @device_cdb_len - atapi device advertises it's transfer constraints at setup
100 */ 99 */
101struct isci_stp_request { 100struct isci_stp_request {
102 u32 pio_len; 101 u32 pio_len;
@@ -107,7 +106,6 @@ struct isci_stp_request {
107 u8 set; 106 u8 set;
108 u32 offset; 107 u32 offset;
109 } sgl; 108 } sgl;
110 u32 device_cdb_len;
111}; 109};
112 110
113struct isci_request { 111struct isci_request {
@@ -174,9 +172,6 @@ struct isci_request {
174 }; 172 };
175 } ssp; 173 } ssp;
176 struct { 174 struct {
177 struct smp_resp rsp;
178 } smp;
179 struct {
180 struct isci_stp_request req; 175 struct isci_stp_request req;
181 struct host_to_dev_fis cmd; 176 struct host_to_dev_fis cmd;
182 struct dev_to_host_fis rsp; 177 struct dev_to_host_fis rsp;
@@ -252,6 +247,32 @@ enum sci_base_request_states {
252 SCI_REQ_STP_PIO_DATA_OUT, 247 SCI_REQ_STP_PIO_DATA_OUT,
253 248
254 /* 249 /*
250 * While in this state the IO request object is waiting for the TC
 251 * completion notification for the H2D Register FIS.
252 */
253 SCI_REQ_ATAPI_WAIT_H2D,
254
255 /*
 256 * While in this state the IO request object is waiting for a
 257 * PIO Setup frame.
258 */
259 SCI_REQ_ATAPI_WAIT_PIO_SETUP,
260
261 /*
 262 * A non-data IO transitions to this state after receiving the TC
 263 * completion. While in this state the IO request object is waiting
 264 * for the D2H status frame as an unsolicited frame.
265 */
266 SCI_REQ_ATAPI_WAIT_D2H,
267
268 /*
 269 * When transmitting raw frames, the hardware reports a task context
 270 * completion after every frame submission, so in the non-accelerated
 271 * case we need to expect the completion for the "cdb" frame.
272 */
273 SCI_REQ_ATAPI_WAIT_TC_COMP,
274
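	/*
	 * Illustrative flow through the four states above, following the
	 * tc-completion and frame handlers added in request.c:
	 *
	 *   data:     WAIT_H2D -TC-> WAIT_PIO_SETUP -UF-> WAIT_D2H -> COMPLETED
	 *   non-data: WAIT_H2D -TC-> WAIT_PIO_SETUP -UF-> WAIT_TC_COMP
	 *             -TC-> WAIT_D2H -UF-> COMPLETED
	 */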
275 /*
255 * The AWAIT_TC_COMPLETION sub-state indicates that the started raw 276 * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
256 * task management request is waiting for the transmission of the 277 * task management request is waiting for the transmission of the
257 * initial frame (i.e. command, task, etc.). 278 * initial frame (i.e. command, task, etc.).
diff --git a/drivers/scsi/isci/sas.h b/drivers/scsi/isci/sas.h
index 462b15174d3..dc26b4aea99 100644
--- a/drivers/scsi/isci/sas.h
+++ b/drivers/scsi/isci/sas.h
@@ -204,8 +204,6 @@ struct smp_req {
204 u8 req_data[0]; 204 u8 req_data[0];
205} __packed; 205} __packed;
206 206
207#define SMP_RESP_HDR_SZ 4
208
209/* 207/*
210 * struct sci_sas_address - This structure depicts how a SAS address is 208 * struct sci_sas_address - This structure depicts how a SAS address is
211 * represented by SCI. 209 * represented by SCI.
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index d6bcdd013dc..e2d9418683c 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -1345,29 +1345,6 @@ static void isci_smp_task_done(struct sas_task *task)
1345 complete(&task->completion); 1345 complete(&task->completion);
1346} 1346}
1347 1347
1348static struct sas_task *isci_alloc_task(void)
1349{
1350 struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL);
1351
1352 if (task) {
1353 INIT_LIST_HEAD(&task->list);
1354 spin_lock_init(&task->task_state_lock);
1355 task->task_state_flags = SAS_TASK_STATE_PENDING;
1356 init_timer(&task->timer);
1357 init_completion(&task->completion);
1358 }
1359
1360 return task;
1361}
1362
1363static void isci_free_task(struct isci_host *ihost, struct sas_task *task)
1364{
1365 if (task) {
1366 BUG_ON(!list_empty(&task->list));
1367 kfree(task);
1368 }
1369}
1370
1371static int isci_smp_execute_task(struct isci_host *ihost, 1348static int isci_smp_execute_task(struct isci_host *ihost,
1372 struct domain_device *dev, void *req, 1349 struct domain_device *dev, void *req,
1373 int req_size, void *resp, int resp_size) 1350 int req_size, void *resp, int resp_size)
@@ -1376,7 +1353,7 @@ static int isci_smp_execute_task(struct isci_host *ihost,
1376 struct sas_task *task = NULL; 1353 struct sas_task *task = NULL;
1377 1354
1378 for (retry = 0; retry < 3; retry++) { 1355 for (retry = 0; retry < 3; retry++) {
1379 task = isci_alloc_task(); 1356 task = sas_alloc_task(GFP_KERNEL);
1380 if (!task) 1357 if (!task)
1381 return -ENOMEM; 1358 return -ENOMEM;
1382 1359
@@ -1439,13 +1416,13 @@ static int isci_smp_execute_task(struct isci_host *ihost,
1439 SAS_ADDR(dev->sas_addr), 1416 SAS_ADDR(dev->sas_addr),
1440 task->task_status.resp, 1417 task->task_status.resp,
1441 task->task_status.stat); 1418 task->task_status.stat);
1442 isci_free_task(ihost, task); 1419 sas_free_task(task);
1443 task = NULL; 1420 task = NULL;
1444 } 1421 }
1445 } 1422 }
1446ex_err: 1423ex_err:
1447 BUG_ON(retry == 3 && task != NULL); 1424 BUG_ON(retry == 3 && task != NULL);
1448 isci_free_task(ihost, task); 1425 sas_free_task(task);
1449 return res; 1426 return res;
1450} 1427}
1451 1428
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index 4a7fa90287e..15b18d15899 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -286,6 +286,25 @@ isci_task_set_completion_status(
286 task->task_status.resp = response; 286 task->task_status.resp = response;
287 task->task_status.stat = status; 287 task->task_status.stat = status;
288 288
289 switch (task->task_proto) {
290
291 case SAS_PROTOCOL_SATA:
292 case SAS_PROTOCOL_STP:
293 case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
294
295 if (task_notification_selection
296 == isci_perform_error_io_completion) {
 297 /* SATA/STP I/O has its own means of scheduling device
298 * error handling on the normal path.
299 */
300 task_notification_selection
301 = isci_perform_normal_io_completion;
302 }
303 break;
304 default:
305 break;
306 }
307
289 switch (task_notification_selection) { 308 switch (task_notification_selection) {
290 309
291 case isci_perform_error_io_completion: 310 case isci_perform_error_io_completion:
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 7724414588f..23e706673d0 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -872,6 +872,61 @@ static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
872 iscsi_host_free(shost); 872 iscsi_host_free(shost);
873} 873}
874 874
875static mode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
876{
877 switch (param_type) {
878 case ISCSI_HOST_PARAM:
879 switch (param) {
880 case ISCSI_HOST_PARAM_NETDEV_NAME:
881 case ISCSI_HOST_PARAM_HWADDRESS:
882 case ISCSI_HOST_PARAM_IPADDRESS:
883 case ISCSI_HOST_PARAM_INITIATOR_NAME:
884 return S_IRUGO;
885 default:
886 return 0;
887 }
888 case ISCSI_PARAM:
889 switch (param) {
890 case ISCSI_PARAM_MAX_RECV_DLENGTH:
891 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
892 case ISCSI_PARAM_HDRDGST_EN:
893 case ISCSI_PARAM_DATADGST_EN:
894 case ISCSI_PARAM_CONN_ADDRESS:
895 case ISCSI_PARAM_CONN_PORT:
896 case ISCSI_PARAM_EXP_STATSN:
897 case ISCSI_PARAM_PERSISTENT_ADDRESS:
898 case ISCSI_PARAM_PERSISTENT_PORT:
899 case ISCSI_PARAM_PING_TMO:
900 case ISCSI_PARAM_RECV_TMO:
901 case ISCSI_PARAM_INITIAL_R2T_EN:
902 case ISCSI_PARAM_MAX_R2T:
903 case ISCSI_PARAM_IMM_DATA_EN:
904 case ISCSI_PARAM_FIRST_BURST:
905 case ISCSI_PARAM_MAX_BURST:
906 case ISCSI_PARAM_PDU_INORDER_EN:
907 case ISCSI_PARAM_DATASEQ_INORDER_EN:
908 case ISCSI_PARAM_ERL:
909 case ISCSI_PARAM_TARGET_NAME:
910 case ISCSI_PARAM_TPGT:
911 case ISCSI_PARAM_USERNAME:
912 case ISCSI_PARAM_PASSWORD:
913 case ISCSI_PARAM_USERNAME_IN:
914 case ISCSI_PARAM_PASSWORD_IN:
915 case ISCSI_PARAM_FAST_ABORT:
916 case ISCSI_PARAM_ABORT_TMO:
917 case ISCSI_PARAM_LU_RESET_TMO:
918 case ISCSI_PARAM_TGT_RESET_TMO:
919 case ISCSI_PARAM_IFACE_NAME:
920 case ISCSI_PARAM_INITIATOR_NAME:
921 return S_IRUGO;
922 default:
923 return 0;
924 }
925 }
926
927 return 0;
928}
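The new callback replaces the static param_mask/host_param_mask bitmaps removed below; a sketch of how a caller would consult it (illustrative, not the exact scsi_transport_iscsi call site):

	mode_t mode = iscsi_sw_tcp_attr_is_visible(ISCSI_PARAM,
						   ISCSI_PARAM_PING_TMO);

	if (mode & S_IRUGO)
		/* create the sysfs attribute read-only */;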
929
875static int iscsi_sw_tcp_slave_alloc(struct scsi_device *sdev) 930static int iscsi_sw_tcp_slave_alloc(struct scsi_device *sdev)
876{ 931{
877 set_bit(QUEUE_FLAG_BIDI, &sdev->request_queue->queue_flags); 932 set_bit(QUEUE_FLAG_BIDI, &sdev->request_queue->queue_flags);
@@ -910,33 +965,6 @@ static struct iscsi_transport iscsi_sw_tcp_transport = {
910 .name = "tcp", 965 .name = "tcp",
911 .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST 966 .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
912 | CAP_DATADGST, 967 | CAP_DATADGST,
913 .param_mask = ISCSI_MAX_RECV_DLENGTH |
914 ISCSI_MAX_XMIT_DLENGTH |
915 ISCSI_HDRDGST_EN |
916 ISCSI_DATADGST_EN |
917 ISCSI_INITIAL_R2T_EN |
918 ISCSI_MAX_R2T |
919 ISCSI_IMM_DATA_EN |
920 ISCSI_FIRST_BURST |
921 ISCSI_MAX_BURST |
922 ISCSI_PDU_INORDER_EN |
923 ISCSI_DATASEQ_INORDER_EN |
924 ISCSI_ERL |
925 ISCSI_CONN_PORT |
926 ISCSI_CONN_ADDRESS |
927 ISCSI_EXP_STATSN |
928 ISCSI_PERSISTENT_PORT |
929 ISCSI_PERSISTENT_ADDRESS |
930 ISCSI_TARGET_NAME | ISCSI_TPGT |
931 ISCSI_USERNAME | ISCSI_PASSWORD |
932 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
933 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
934 ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
935 ISCSI_PING_TMO | ISCSI_RECV_TMO |
936 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
937 .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
938 ISCSI_HOST_INITIATOR_NAME |
939 ISCSI_HOST_NETDEV_NAME,
940 /* session management */ 968 /* session management */
941 .create_session = iscsi_sw_tcp_session_create, 969 .create_session = iscsi_sw_tcp_session_create,
942 .destroy_session = iscsi_sw_tcp_session_destroy, 970 .destroy_session = iscsi_sw_tcp_session_destroy,
@@ -944,6 +972,7 @@ static struct iscsi_transport iscsi_sw_tcp_transport = {
944 .create_conn = iscsi_sw_tcp_conn_create, 972 .create_conn = iscsi_sw_tcp_conn_create,
945 .bind_conn = iscsi_sw_tcp_conn_bind, 973 .bind_conn = iscsi_sw_tcp_conn_bind,
946 .destroy_conn = iscsi_sw_tcp_conn_destroy, 974 .destroy_conn = iscsi_sw_tcp_conn_destroy,
975 .attr_is_visible = iscsi_sw_tcp_attr_is_visible,
947 .set_param = iscsi_sw_tcp_conn_set_param, 976 .set_param = iscsi_sw_tcp_conn_set_param,
948 .get_conn_param = iscsi_sw_tcp_conn_get_param, 977 .get_conn_param = iscsi_sw_tcp_conn_get_param,
949 .get_session_param = iscsi_session_get_param, 978 .get_session_param = iscsi_session_get_param,
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index d261e982a2f..7c055fdca45 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -65,16 +65,15 @@ static struct workqueue_struct *fc_exch_workqueue;
65 * assigned range of exchanges to per cpu pool. 65 * assigned range of exchanges to per cpu pool.
66 */ 66 */
67struct fc_exch_pool { 67struct fc_exch_pool {
68 spinlock_t lock;
69 struct list_head ex_list;
68 u16 next_index; 70 u16 next_index;
69 u16 total_exches; 71 u16 total_exches;
70 72
 71 /* two caches of free slots in the exch array */ 73 /* two caches of free slots in the exch array */
72 u16 left; 74 u16 left;
73 u16 right; 75 u16 right;
74 76} ____cacheline_aligned_in_smp;
75 spinlock_t lock;
76 struct list_head ex_list;
77};
78 77
79/** 78/**
80 * struct fc_exch_mgr - The Exchange Manager (EM). 79 * struct fc_exch_mgr - The Exchange Manager (EM).
@@ -91,13 +90,13 @@ struct fc_exch_pool {
91 * It manages the allocation of exchange IDs. 90 * It manages the allocation of exchange IDs.
92 */ 91 */
93struct fc_exch_mgr { 92struct fc_exch_mgr {
93 struct fc_exch_pool *pool;
94 mempool_t *ep_pool;
94 enum fc_class class; 95 enum fc_class class;
95 struct kref kref; 96 struct kref kref;
96 u16 min_xid; 97 u16 min_xid;
97 u16 max_xid; 98 u16 max_xid;
98 mempool_t *ep_pool;
99 u16 pool_max_index; 99 u16 pool_max_index;
100 struct fc_exch_pool *pool;
101 100
102 /* 101 /*
103 * currently exchange mgr stats are updated but not used. 102 * currently exchange mgr stats are updated but not used.
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 4c41ee816f0..221875ec3d7 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -759,7 +759,6 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
759 goto out; 759 goto out;
760 if (fc_fcp_lock_pkt(fsp)) 760 if (fc_fcp_lock_pkt(fsp))
761 goto out; 761 goto out;
762 fsp->last_pkt_time = jiffies;
763 762
764 if (fh->fh_type == FC_TYPE_BLS) { 763 if (fh->fh_type == FC_TYPE_BLS) {
765 fc_fcp_abts_resp(fsp, fp); 764 fc_fcp_abts_resp(fsp, fp);
@@ -1148,7 +1147,6 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
1148 rc = -1; 1147 rc = -1;
1149 goto unlock; 1148 goto unlock;
1150 } 1149 }
1151 fsp->last_pkt_time = jiffies;
1152 fsp->seq_ptr = seq; 1150 fsp->seq_ptr = seq;
1153 fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */ 1151 fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */
1154 1152
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 256a999d010..d7c76f2eb63 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -3163,7 +3163,6 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
3163{ 3163{
3164 struct iscsi_conn *conn = cls_conn->dd_data; 3164 struct iscsi_conn *conn = cls_conn->dd_data;
3165 struct iscsi_session *session = conn->session; 3165 struct iscsi_session *session = conn->session;
3166 uint32_t value;
3167 3166
3168 switch(param) { 3167 switch(param) {
3169 case ISCSI_PARAM_FAST_ABORT: 3168 case ISCSI_PARAM_FAST_ABORT:
@@ -3220,14 +3219,6 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
3220 case ISCSI_PARAM_ERL: 3219 case ISCSI_PARAM_ERL:
3221 sscanf(buf, "%d", &session->erl); 3220 sscanf(buf, "%d", &session->erl);
3222 break; 3221 break;
3223 case ISCSI_PARAM_IFMARKER_EN:
3224 sscanf(buf, "%d", &value);
3225 BUG_ON(value);
3226 break;
3227 case ISCSI_PARAM_OFMARKER_EN:
3228 sscanf(buf, "%d", &value);
3229 BUG_ON(value);
3230 break;
3231 case ISCSI_PARAM_EXP_STATSN: 3222 case ISCSI_PARAM_EXP_STATSN:
3232 sscanf(buf, "%u", &conn->exp_statsn); 3223 sscanf(buf, "%u", &conn->exp_statsn);
3233 break; 3224 break;
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index f5831930df9..54a5199ceb5 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -219,17 +219,20 @@ out_err2:
219 219
220/* ---------- Device registration and unregistration ---------- */ 220/* ---------- Device registration and unregistration ---------- */
221 221
222static inline void sas_unregister_common_dev(struct domain_device *dev) 222static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_device *dev)
223{ 223{
224 sas_notify_lldd_dev_gone(dev); 224 sas_notify_lldd_dev_gone(dev);
225 if (!dev->parent) 225 if (!dev->parent)
226 dev->port->port_dev = NULL; 226 dev->port->port_dev = NULL;
227 else 227 else
228 list_del_init(&dev->siblings); 228 list_del_init(&dev->siblings);
229
230 spin_lock_irq(&port->dev_list_lock);
229 list_del_init(&dev->dev_list_node); 231 list_del_init(&dev->dev_list_node);
232 spin_unlock_irq(&port->dev_list_lock);
230} 233}
231 234
232void sas_unregister_dev(struct domain_device *dev) 235void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
233{ 236{
234 if (dev->rphy) { 237 if (dev->rphy) {
235 sas_remove_children(&dev->rphy->dev); 238 sas_remove_children(&dev->rphy->dev);
@@ -241,15 +244,15 @@ void sas_unregister_dev(struct domain_device *dev)
241 kfree(dev->ex_dev.ex_phy); 244 kfree(dev->ex_dev.ex_phy);
242 dev->ex_dev.ex_phy = NULL; 245 dev->ex_dev.ex_phy = NULL;
243 } 246 }
244 sas_unregister_common_dev(dev); 247 sas_unregister_common_dev(port, dev);
245} 248}
246 249
247void sas_unregister_domain_devices(struct asd_sas_port *port) 250void sas_unregister_domain_devices(struct asd_sas_port *port)
248{ 251{
249 struct domain_device *dev, *n; 252 struct domain_device *dev, *n;
250 253
251 list_for_each_entry_safe_reverse(dev,n,&port->dev_list,dev_list_node) 254 list_for_each_entry_safe_reverse(dev, n, &port->dev_list, dev_list_node)
252 sas_unregister_dev(dev); 255 sas_unregister_dev(port, dev);
253 256
254 port->port->rphy = NULL; 257 port->port->rphy = NULL;
255 258
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 16ad97df5ba..1b831c55ec6 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -199,6 +199,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
199 phy->virtual = dr->virtual; 199 phy->virtual = dr->virtual;
200 phy->last_da_index = -1; 200 phy->last_da_index = -1;
201 201
202 phy->phy->identify.sas_address = SAS_ADDR(phy->attached_sas_addr);
203 phy->phy->identify.device_type = phy->attached_dev_type;
202 phy->phy->identify.initiator_port_protocols = phy->attached_iproto; 204 phy->phy->identify.initiator_port_protocols = phy->attached_iproto;
203 phy->phy->identify.target_port_protocols = phy->attached_tproto; 205 phy->phy->identify.target_port_protocols = phy->attached_tproto;
204 phy->phy->identify.phy_identifier = phy_id; 206 phy->phy->identify.phy_identifier = phy_id;
@@ -329,6 +331,7 @@ static void ex_assign_report_general(struct domain_device *dev,
329 dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count); 331 dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count);
330 dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes); 332 dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes);
331 dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS); 333 dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS);
334 dev->ex_dev.t2t_supp = rg->t2t_supp;
332 dev->ex_dev.conf_route_table = rg->conf_route_table; 335 dev->ex_dev.conf_route_table = rg->conf_route_table;
333 dev->ex_dev.configuring = rg->configuring; 336 dev->ex_dev.configuring = rg->configuring;
334 memcpy(dev->ex_dev.enclosure_logical_id, rg->enclosure_logical_id, 8); 337 memcpy(dev->ex_dev.enclosure_logical_id, rg->enclosure_logical_id, 8);
@@ -751,7 +754,10 @@ static struct domain_device *sas_ex_discover_end_dev(
751 out_list_del: 754 out_list_del:
752 sas_rphy_free(child->rphy); 755 sas_rphy_free(child->rphy);
753 child->rphy = NULL; 756 child->rphy = NULL;
757
758 spin_lock_irq(&parent->port->dev_list_lock);
754 list_del(&child->dev_list_node); 759 list_del(&child->dev_list_node);
760 spin_unlock_irq(&parent->port->dev_list_lock);
755 out_free: 761 out_free:
756 sas_port_delete(phy->port); 762 sas_port_delete(phy->port);
757 out_err: 763 out_err:
@@ -1133,15 +1139,17 @@ static void sas_print_parent_topology_bug(struct domain_device *child,
1133 }; 1139 };
1134 struct domain_device *parent = child->parent; 1140 struct domain_device *parent = child->parent;
1135 1141
1136 sas_printk("%s ex %016llx phy 0x%x <--> %s ex %016llx phy 0x%x " 1142 sas_printk("%s ex %016llx (T2T supp:%d) phy 0x%x <--> %s ex %016llx "
1137 "has %c:%c routing link!\n", 1143 "(T2T supp:%d) phy 0x%x has %c:%c routing link!\n",
1138 1144
1139 ex_type[parent->dev_type], 1145 ex_type[parent->dev_type],
1140 SAS_ADDR(parent->sas_addr), 1146 SAS_ADDR(parent->sas_addr),
1147 parent->ex_dev.t2t_supp,
1141 parent_phy->phy_id, 1148 parent_phy->phy_id,
1142 1149
1143 ex_type[child->dev_type], 1150 ex_type[child->dev_type],
1144 SAS_ADDR(child->sas_addr), 1151 SAS_ADDR(child->sas_addr),
1152 child->ex_dev.t2t_supp,
1145 child_phy->phy_id, 1153 child_phy->phy_id,
1146 1154
1147 ra_char[parent_phy->routing_attr], 1155 ra_char[parent_phy->routing_attr],
@@ -1238,10 +1246,15 @@ static int sas_check_parent_topology(struct domain_device *child)
1238 sas_print_parent_topology_bug(child, parent_phy, child_phy); 1246 sas_print_parent_topology_bug(child, parent_phy, child_phy);
1239 res = -ENODEV; 1247 res = -ENODEV;
1240 } 1248 }
1241 } else if (parent_phy->routing_attr == TABLE_ROUTING && 1249 } else if (parent_phy->routing_attr == TABLE_ROUTING) {
1242 child_phy->routing_attr != SUBTRACTIVE_ROUTING) { 1250 if (child_phy->routing_attr == SUBTRACTIVE_ROUTING ||
1243 sas_print_parent_topology_bug(child, parent_phy, child_phy); 1251 (child_phy->routing_attr == TABLE_ROUTING &&
1244 res = -ENODEV; 1252 child_ex->t2t_supp && parent_ex->t2t_supp)) {
1253 /* All good */;
1254 } else {
1255 sas_print_parent_topology_bug(child, parent_phy, child_phy);
1256 res = -ENODEV;
1257 }
1245 } 1258 }
1246 break; 1259 break;
1247 case FANOUT_DEV: 1260 case FANOUT_DEV:
@@ -1729,7 +1742,7 @@ out:
1729 return res; 1742 return res;
1730} 1743}
1731 1744
1732static void sas_unregister_ex_tree(struct domain_device *dev) 1745static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_device *dev)
1733{ 1746{
1734 struct expander_device *ex = &dev->ex_dev; 1747 struct expander_device *ex = &dev->ex_dev;
1735 struct domain_device *child, *n; 1748 struct domain_device *child, *n;
@@ -1738,11 +1751,11 @@ static void sas_unregister_ex_tree(struct domain_device *dev)
1738 child->gone = 1; 1751 child->gone = 1;
1739 if (child->dev_type == EDGE_DEV || 1752 if (child->dev_type == EDGE_DEV ||
1740 child->dev_type == FANOUT_DEV) 1753 child->dev_type == FANOUT_DEV)
1741 sas_unregister_ex_tree(child); 1754 sas_unregister_ex_tree(port, child);
1742 else 1755 else
1743 sas_unregister_dev(child); 1756 sas_unregister_dev(port, child);
1744 } 1757 }
1745 sas_unregister_dev(dev); 1758 sas_unregister_dev(port, dev);
1746} 1759}
1747 1760
1748static void sas_unregister_devs_sas_addr(struct domain_device *parent, 1761static void sas_unregister_devs_sas_addr(struct domain_device *parent,
@@ -1759,9 +1772,9 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
1759 child->gone = 1; 1772 child->gone = 1;
1760 if (child->dev_type == EDGE_DEV || 1773 if (child->dev_type == EDGE_DEV ||
1761 child->dev_type == FANOUT_DEV) 1774 child->dev_type == FANOUT_DEV)
1762 sas_unregister_ex_tree(child); 1775 sas_unregister_ex_tree(parent->port, child);
1763 else 1776 else
1764 sas_unregister_dev(child); 1777 sas_unregister_dev(parent->port, child);
1765 break; 1778 break;
1766 } 1779 }
1767 } 1780 }
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index 04ad8dd1a74..e1aa17840c5 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -51,6 +51,91 @@ static void sas_host_smp_discover(struct sas_ha_struct *sas_ha, u8 *resp_data,
51 resp_data[15] = rphy->identify.target_port_protocols; 51 resp_data[15] = rphy->identify.target_port_protocols;
52} 52}
53 53
54/**
55 * to_sas_gpio_gp_bit - given the gpio frame data find the byte/bit position of 'od'
56 * @od: od bit to find
57 * @data: incoming bitstream (from frame)
58 * @index: requested data register index (from frame)
59 * @count: total number of registers in the bitstream (from frame)
60 * @bit: bit position of 'od' in the returned byte
61 *
62 * returns NULL if 'od' is not in 'data'
63 *
64 * From SFF-8485 v0.7:
65 * "In GPIO_TX[1], bit 0 of byte 3 contains the first bit (i.e., OD0.0)
66 * and bit 7 of byte 0 contains the 32nd bit (i.e., OD10.1).
67 *
68 * In GPIO_TX[2], bit 0 of byte 3 contains the 33rd bit (i.e., OD10.2)
69 * and bit 7 of byte 0 contains the 64th bit (i.e., OD21.0)."
70 *
71 * The general-purpose (raw-bitstream) RX registers have the same layout
72 * although 'od' is renamed 'id' for 'input data'.
73 *
74 * SFF-8489 defines the behavior of the LEDs in response to the 'od' values.
75 */
76static u8 *to_sas_gpio_gp_bit(unsigned int od, u8 *data, u8 index, u8 count, u8 *bit)
77{
78 unsigned int reg;
79 u8 byte;
80
81 /* gp registers start at index 1 */
82 if (index == 0)
83 return NULL;
84
85 index--; /* make index 0-based */
86 if (od < index * 32)
87 return NULL;
88
89 od -= index * 32;
90 reg = od >> 5;
91
92 if (reg >= count)
93 return NULL;
94
95 od &= (1 << 5) - 1;
96 byte = 3 - (od >> 3);
97 *bit = od & ((1 << 3) - 1);
98
99 return &data[reg * 4 + byte];
100}
101
102int try_test_sas_gpio_gp_bit(unsigned int od, u8 *data, u8 index, u8 count)
103{
104 u8 *byte;
105 u8 bit;
106
107 byte = to_sas_gpio_gp_bit(od, data, index, count, &bit);
108 if (!byte)
109 return -1;
110
111 return (*byte >> bit) & 1;
112}
113EXPORT_SYMBOL(try_test_sas_gpio_gp_bit);
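A worked example of the bit math above: od 32 is the 33rd bit, which the quoted SFF-8485 text places at bit 0 of byte 3 of GPIO_TX[2]. With index = 1 and count = 2, reg = 32 >> 5 = 1, byte = 3 - ((32 & 31) >> 3) = 3, and bit = 0, so the lookup resolves to data[1 * 4 + 3] (a sketch, using the exported helper):

	u8 data[8] = { 0 };

	data[7] = 0x01;					/* drive OD10.2 */
	try_test_sas_gpio_gp_bit(32, data, 1, 2);	/* reads back 1 */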
114
115static int sas_host_smp_write_gpio(struct sas_ha_struct *sas_ha, u8 *resp_data,
116 u8 reg_type, u8 reg_index, u8 reg_count,
117 u8 *req_data)
118{
119 struct sas_internal *i = to_sas_internal(sas_ha->core.shost->transportt);
120 int written;
121
122 if (i->dft->lldd_write_gpio == NULL) {
123 resp_data[2] = SMP_RESP_FUNC_UNK;
124 return 0;
125 }
126
127 written = i->dft->lldd_write_gpio(sas_ha, reg_type, reg_index,
128 reg_count, req_data);
129
130 if (written < 0) {
131 resp_data[2] = SMP_RESP_FUNC_FAILED;
132 written = 0;
133 } else
134 resp_data[2] = SMP_RESP_FUNC_ACC;
135
136 return written;
137}
138
54static void sas_report_phy_sata(struct sas_ha_struct *sas_ha, u8 *resp_data, 139static void sas_report_phy_sata(struct sas_ha_struct *sas_ha, u8 *resp_data,
55 u8 phy_id) 140 u8 phy_id)
56{ 141{
@@ -230,9 +315,23 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
230 /* Can't implement; hosts have no routes */ 315 /* Can't implement; hosts have no routes */
231 break; 316 break;
232 317
233 case SMP_WRITE_GPIO_REG: 318 case SMP_WRITE_GPIO_REG: {
234 /* FIXME: need GPIO support in the transport class */ 319 /* SFF-8485 v0.7 */
320 const int base_frame_size = 11;
321 int to_write = req_data[4];
322
323 if (blk_rq_bytes(req) < base_frame_size + to_write * 4 ||
324 req->resid_len < base_frame_size + to_write * 4) {
325 resp_data[2] = SMP_RESP_INV_FRM_LEN;
326 break;
327 }
328
329 to_write = sas_host_smp_write_gpio(sas_ha, resp_data, req_data[2],
330 req_data[3], to_write, &req_data[8]);
331 req->resid_len -= base_frame_size + to_write * 4;
332 rsp->resid_len -= 8;
235 break; 333 break;
334 }
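A worked size check for the handler above: writing two GPIO registers means to_write = 2, so the request must carry at least base_frame_size + 2 * 4 = 19 bytes; anything shorter is answered with SMP_RESP_INV_FRM_LEN before sas_host_smp_write_gpio() is reached.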
236 335
237 case SMP_CONF_ROUTE_INFO: 336 case SMP_CONF_ROUTE_INFO:
238 /* Can't implement; hosts have no routes */ 337 /* Can't implement; hosts have no routes */
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 2dc55343f67..d81c3b1989f 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -37,7 +37,32 @@
37 37
38#include "../scsi_sas_internal.h" 38#include "../scsi_sas_internal.h"
39 39
40struct kmem_cache *sas_task_cache; 40static struct kmem_cache *sas_task_cache;
41
42struct sas_task *sas_alloc_task(gfp_t flags)
43{
44 struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags);
45
46 if (task) {
47 INIT_LIST_HEAD(&task->list);
48 spin_lock_init(&task->task_state_lock);
49 task->task_state_flags = SAS_TASK_STATE_PENDING;
50 init_timer(&task->timer);
51 init_completion(&task->completion);
52 }
53
54 return task;
55}
56EXPORT_SYMBOL_GPL(sas_alloc_task);
57
58void sas_free_task(struct sas_task *task)
59{
60 if (task) {
61 BUG_ON(!list_empty(&task->list));
62 kmem_cache_free(sas_task_cache, task);
63 }
64}
65EXPORT_SYMBOL_GPL(sas_free_task);
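A minimal caller sketch for the newly exported pair (it mirrors the isci_smp_execute_task() conversion later in this patch):

	struct sas_task *task = sas_alloc_task(GFP_KERNEL);

	if (!task)
		return -ENOMEM;
	/* ... fill in task->dev, task->task_proto and data buffers ... */
	sas_free_task(task);	/* BUG_ON()s if the task is still on a list */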
41 66
42/*------------ SAS addr hash -----------*/ 67/*------------ SAS addr hash -----------*/
43void sas_hash_addr(u8 *hashed, const u8 *sas_addr) 68void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
@@ -152,10 +177,15 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha)
152 177
153static int sas_get_linkerrors(struct sas_phy *phy) 178static int sas_get_linkerrors(struct sas_phy *phy)
154{ 179{
155 if (scsi_is_sas_phy_local(phy)) 180 if (scsi_is_sas_phy_local(phy)) {
156 /* FIXME: we have no local phy stats 181 struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
157 * gathering at this time */ 182 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
158 return -EINVAL; 183 struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
184 struct sas_internal *i =
185 to_sas_internal(sas_ha->core.shost->transportt);
186
187 return i->dft->lldd_control_phy(asd_phy, PHY_FUNC_GET_EVENTS, NULL);
188 }
159 189
160 return sas_smp_get_phy_events(phy); 190 return sas_smp_get_phy_events(phy);
161} 191}
@@ -293,8 +323,7 @@ EXPORT_SYMBOL_GPL(sas_domain_release_transport);
293 323
294static int __init sas_class_init(void) 324static int __init sas_class_init(void)
295{ 325{
296 sas_task_cache = kmem_cache_create("sas_task", sizeof(struct sas_task), 326 sas_task_cache = KMEM_CACHE(sas_task, SLAB_HWCACHE_ALIGN);
297 0, SLAB_HWCACHE_ALIGN, NULL);
298 if (!sas_task_cache) 327 if (!sas_task_cache)
299 return -ENOMEM; 328 return -ENOMEM;
300 329
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index eeba76cdf77..b2c4a773165 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -182,79 +182,56 @@ int sas_queue_up(struct sas_task *task)
182 return 0; 182 return 0;
183} 183}
184 184
185/** 185int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
186 * sas_queuecommand -- Enqueue a command for processing
187 * @parameters: See SCSI Core documentation
188 *
189 * Note: XXX: Remove the host unlock/lock pair when SCSI Core can
190 * call us without holding an IRQ spinlock...
191 */
192static int sas_queuecommand_lck(struct scsi_cmnd *cmd,
193 void (*scsi_done)(struct scsi_cmnd *))
194 __releases(host->host_lock)
195 __acquires(dev->sata_dev.ap->lock)
196 __releases(dev->sata_dev.ap->lock)
197 __acquires(host->host_lock)
198{ 186{
199 int res = 0;
200 struct domain_device *dev = cmd_to_domain_dev(cmd);
201 struct Scsi_Host *host = cmd->device->host;
202 struct sas_internal *i = to_sas_internal(host->transportt); 187 struct sas_internal *i = to_sas_internal(host->transportt);
188 struct domain_device *dev = cmd_to_domain_dev(cmd);
189 struct sas_ha_struct *sas_ha = dev->port->ha;
190 struct sas_task *task;
191 int res = 0;
203 192
204 spin_unlock_irq(host->host_lock); 193 /* If the device fell off, no sense in issuing commands */
194 if (dev->gone) {
195 cmd->result = DID_BAD_TARGET << 16;
196 goto out_done;
197 }
205 198
206 { 199 if (dev_is_sata(dev)) {
207 struct sas_ha_struct *sas_ha = dev->port->ha; 200 unsigned long flags;
208 struct sas_task *task;
209
210 /* If the device fell off, no sense in issuing commands */
211 if (dev->gone) {
212 cmd->result = DID_BAD_TARGET << 16;
213 scsi_done(cmd);
214 goto out;
215 }
216 201
217 if (dev_is_sata(dev)) { 202 spin_lock_irqsave(dev->sata_dev.ap->lock, flags);
218 unsigned long flags; 203 res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
204 spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags);
205 return res;
206 }
219 207
220 spin_lock_irqsave(dev->sata_dev.ap->lock, flags); 208 task = sas_create_task(cmd, dev, GFP_ATOMIC);
221 res = ata_sas_queuecmd(cmd, dev->sata_dev.ap); 209 if (!task)
222 spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags); 210 return SCSI_MLQUEUE_HOST_BUSY;
223 goto out;
224 }
225 211
226 res = -ENOMEM; 212 /* Queue up, Direct Mode or Task Collector Mode. */
227 task = sas_create_task(cmd, dev, GFP_ATOMIC); 213 if (sas_ha->lldd_max_execute_num < 2)
228 if (!task) 214 res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
229 goto out; 215 else
216 res = sas_queue_up(task);
230 217
231 cmd->scsi_done = scsi_done; 218 if (res)
232 /* Queue up, Direct Mode or Task Collector Mode. */ 219 goto out_free_task;
233 if (sas_ha->lldd_max_execute_num < 2) 220 return 0;
234 res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
235 else
236 res = sas_queue_up(task);
237 221
238 /* Examine */ 222out_free_task:
239 if (res) { 223 SAS_DPRINTK("lldd_execute_task returned: %d\n", res);
240 SAS_DPRINTK("lldd_execute_task returned: %d\n", res); 224 ASSIGN_SAS_TASK(cmd, NULL);
241 ASSIGN_SAS_TASK(cmd, NULL); 225 sas_free_task(task);
242 sas_free_task(task); 226 if (res == -SAS_QUEUE_FULL)
243 if (res == -SAS_QUEUE_FULL) { 227 cmd->result = DID_SOFT_ERROR << 16; /* retry */
244 cmd->result = DID_SOFT_ERROR << 16; /* retry */ 228 else
245 res = 0; 229 cmd->result = DID_ERROR << 16;
246 scsi_done(cmd); 230out_done:
247 } 231 cmd->scsi_done(cmd);
248 goto out; 232 return 0;
249 }
250 }
251out:
252 spin_lock_irq(host->host_lock);
253 return res;
254} 233}
255 234
256DEF_SCSI_QCMD(sas_queuecommand)
257
258static void sas_eh_finish_cmd(struct scsi_cmnd *cmd) 235static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
259{ 236{
260 struct sas_task *task = TO_SAS_TASK(cmd); 237 struct sas_task *task = TO_SAS_TASK(cmd);
@@ -784,8 +761,7 @@ int sas_target_alloc(struct scsi_target *starget)
784 return 0; 761 return 0;
785} 762}
786 763
787#define SAS_DEF_QD 32 764#define SAS_DEF_QD 256
788#define SAS_MAX_QD 64
789 765
790int sas_slave_configure(struct scsi_device *scsi_dev) 766int sas_slave_configure(struct scsi_device *scsi_dev)
791{ 767{
@@ -825,34 +801,41 @@ void sas_slave_destroy(struct scsi_device *scsi_dev)
825 struct domain_device *dev = sdev_to_domain_dev(scsi_dev); 801 struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
826 802
827 if (dev_is_sata(dev)) 803 if (dev_is_sata(dev))
828 dev->sata_dev.ap->link.device[0].class = ATA_DEV_NONE; 804 sas_to_ata_dev(dev)->class = ATA_DEV_NONE;
829} 805}
830 806
831int sas_change_queue_depth(struct scsi_device *scsi_dev, int new_depth, 807int sas_change_queue_depth(struct scsi_device *sdev, int depth, int reason)
832 int reason)
833{ 808{
834 int res = min(new_depth, SAS_MAX_QD); 809 struct domain_device *dev = sdev_to_domain_dev(sdev);
835 810
836 if (reason != SCSI_QDEPTH_DEFAULT) 811 if (dev_is_sata(dev))
812 return __ata_change_queue_depth(dev->sata_dev.ap, sdev, depth,
813 reason);
814
815 switch (reason) {
816 case SCSI_QDEPTH_DEFAULT:
817 case SCSI_QDEPTH_RAMP_UP:
818 if (!sdev->tagged_supported)
819 depth = 1;
820 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
821 break;
822 case SCSI_QDEPTH_QFULL:
823 scsi_track_queue_full(sdev, depth);
824 break;
825 default:
837 return -EOPNOTSUPP; 826 return -EOPNOTSUPP;
838
839 if (scsi_dev->tagged_supported)
840 scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev),
841 res);
842 else {
843 struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
844 sas_printk("device %llx LUN %x queue depth changed to 1\n",
845 SAS_ADDR(dev->sas_addr),
846 scsi_dev->lun);
847 scsi_adjust_queue_depth(scsi_dev, 0, 1);
848 res = 1;
849 } 827 }
850 828
851 return res; 829 return depth;
852} 830}
853 831
854int sas_change_queue_type(struct scsi_device *scsi_dev, int qt) 832int sas_change_queue_type(struct scsi_device *scsi_dev, int qt)
855{ 833{
834 struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
835
836 if (dev_is_sata(dev))
837 return -EINVAL;
838
856 if (!scsi_dev->tagged_supported) 839 if (!scsi_dev->tagged_supported)
857 return 0; 840 return 0;
858 841
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index c088a36d1f3..bb4c8e0584e 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -846,8 +846,24 @@ struct lpfc_hba {
846 struct dentry *debug_hbqinfo; 846 struct dentry *debug_hbqinfo;
847 struct dentry *debug_dumpHostSlim; 847 struct dentry *debug_dumpHostSlim;
848 struct dentry *debug_dumpHBASlim; 848 struct dentry *debug_dumpHBASlim;
849 struct dentry *debug_dumpData; /* BlockGuard BPL*/ 849 struct dentry *debug_dumpData; /* BlockGuard BPL */
850 struct dentry *debug_dumpDif; /* BlockGuard BPL*/ 850 struct dentry *debug_dumpDif; /* BlockGuard BPL */
851 struct dentry *debug_InjErrLBA; /* LBA to inject errors at */
852 struct dentry *debug_writeGuard; /* inject write guard_tag errors */
853 struct dentry *debug_writeApp; /* inject write app_tag errors */
854 struct dentry *debug_writeRef; /* inject write ref_tag errors */
855 struct dentry *debug_readApp; /* inject read app_tag errors */
856 struct dentry *debug_readRef; /* inject read ref_tag errors */
857
858 /* T10 DIF error injection */
859 uint32_t lpfc_injerr_wgrd_cnt;
860 uint32_t lpfc_injerr_wapp_cnt;
861 uint32_t lpfc_injerr_wref_cnt;
862 uint32_t lpfc_injerr_rapp_cnt;
863 uint32_t lpfc_injerr_rref_cnt;
864 sector_t lpfc_injerr_lba;
865#define LPFC_INJERR_LBA_OFF (sector_t)0xffffffffffffffff
866
851 struct dentry *debug_slow_ring_trc; 867 struct dentry *debug_slow_ring_trc;
852 struct lpfc_debugfs_trc *slow_ring_trc; 868 struct lpfc_debugfs_trc *slow_ring_trc;
853 atomic_t slow_ring_trc_cnt; 869 atomic_t slow_ring_trc_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 2542f1f8bf8..4b0333ee2d9 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -52,6 +52,13 @@
52#define LPFC_MIN_DEVLOSS_TMO 1 52#define LPFC_MIN_DEVLOSS_TMO 1
53#define LPFC_MAX_DEVLOSS_TMO 255 53#define LPFC_MAX_DEVLOSS_TMO 255
54 54
55/*
 56 * The write key size should be a multiple of 4. If the write key is
 57 * changed, make sure that the library write key is also changed.
58 */
59#define LPFC_REG_WRITE_KEY_SIZE 4
60#define LPFC_REG_WRITE_KEY "EMLX"
61
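With the key in place, sysfs_ctlreg_write() below only accepts payloads that lead with these four bytes; a hypothetical caller-side layout for a single 32-bit register write, where val is the register value (illustrative only):

	u8 buf[LPFC_REG_WRITE_KEY_SIZE + sizeof(u32)];

	memcpy(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE);
	memcpy(buf + LPFC_REG_WRITE_KEY_SIZE, &val, sizeof(u32));
	/* write buf to the ctlreg attribute; a bad key is rejected with -EINVAL */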
55/** 62/**
56 * lpfc_jedec_to_ascii - Hex to ascii convertor according to JEDEC rules 63 * lpfc_jedec_to_ascii - Hex to ascii convertor according to JEDEC rules
57 * @incr: integer to convert. 64 * @incr: integer to convert.
@@ -693,7 +700,7 @@ lpfc_selective_reset(struct lpfc_hba *phba)
693 int rc; 700 int rc;
694 701
695 if (!phba->cfg_enable_hba_reset) 702 if (!phba->cfg_enable_hba_reset)
696 return -EIO; 703 return -EACCES;
697 704
698 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); 705 status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
699 706
@@ -742,9 +749,11 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
742 struct Scsi_Host *shost = class_to_shost(dev); 749 struct Scsi_Host *shost = class_to_shost(dev);
743 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 750 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
744 struct lpfc_hba *phba = vport->phba; 751 struct lpfc_hba *phba = vport->phba;
745
746 int status = -EINVAL; 752 int status = -EINVAL;
747 753
754 if (!phba->cfg_enable_hba_reset)
755 return -EACCES;
756
748 if (strncmp(buf, "selective", sizeof("selective") - 1) == 0) 757 if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
749 status = phba->lpfc_selective_reset(phba); 758 status = phba->lpfc_selective_reset(phba);
750 759
@@ -765,16 +774,21 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
765 * Returns: 774 * Returns:
766 * zero for success 775 * zero for success
767 **/ 776 **/
768static int 777int
769lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba) 778lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
770{ 779{
771 struct lpfc_register portstat_reg; 780 struct lpfc_register portstat_reg = {0};
772 int i; 781 int i;
773 782
774 783 msleep(100);
775 lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, 784 lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
776 &portstat_reg.word0); 785 &portstat_reg.word0);
777 786
 787 /* verify if privileged for the requested operation */
788 if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
789 !bf_get(lpfc_sliport_status_err, &portstat_reg))
790 return -EPERM;
791
778 /* wait for the SLI port firmware ready after firmware reset */ 792 /* wait for the SLI port firmware ready after firmware reset */
779 for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) { 793 for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
780 msleep(10); 794 msleep(10);
@@ -816,16 +830,13 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
816 int rc; 830 int rc;
817 831
818 if (!phba->cfg_enable_hba_reset) 832 if (!phba->cfg_enable_hba_reset)
819 return -EIO; 833 return -EACCES;
820 834
821 if ((phba->sli_rev < LPFC_SLI_REV4) || 835 if ((phba->sli_rev < LPFC_SLI_REV4) ||
822 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 836 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
823 LPFC_SLI_INTF_IF_TYPE_2)) 837 LPFC_SLI_INTF_IF_TYPE_2))
824 return -EPERM; 838 return -EPERM;
825 839
826 if (!pdev->is_physfn)
827 return -EPERM;
828
829 /* Disable SR-IOV virtual functions if enabled */ 840 /* Disable SR-IOV virtual functions if enabled */
830 if (phba->cfg_sriov_nr_virtfn) { 841 if (phba->cfg_sriov_nr_virtfn) {
831 pci_disable_sriov(pdev); 842 pci_disable_sriov(pdev);
@@ -858,7 +869,7 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
858 rc = lpfc_sli4_pdev_status_reg_wait(phba); 869 rc = lpfc_sli4_pdev_status_reg_wait(phba);
859 870
860 if (rc) 871 if (rc)
861 return -EIO; 872 return rc;
862 873
863 init_completion(&online_compl); 874 init_completion(&online_compl);
864 rc = lpfc_workq_post_event(phba, &status, &online_compl, 875 rc = lpfc_workq_post_event(phba, &status, &online_compl,
@@ -984,7 +995,7 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
984 if (!status) 995 if (!status)
985 return strlen(buf); 996 return strlen(buf);
986 else 997 else
987 return -EIO; 998 return status;
988} 999}
989 1000
990/** 1001/**
@@ -3885,18 +3896,23 @@ sysfs_ctlreg_write(struct file *filp, struct kobject *kobj,
3885 if ((off + count) > FF_REG_AREA_SIZE) 3896 if ((off + count) > FF_REG_AREA_SIZE)
3886 return -ERANGE; 3897 return -ERANGE;
3887 3898
3888 if (count == 0) return 0; 3899 if (count <= LPFC_REG_WRITE_KEY_SIZE)
3900 return 0;
3889 3901
3890 if (off % 4 || count % 4 || (unsigned long)buf % 4) 3902 if (off % 4 || count % 4 || (unsigned long)buf % 4)
3891 return -EINVAL; 3903 return -EINVAL;
3892 3904
3893 if (!(vport->fc_flag & FC_OFFLINE_MODE)) { 3905 /* This is to protect HBA registers from accidental writes. */
3906 if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE))
3907 return -EINVAL;
3908
3909 if (!(vport->fc_flag & FC_OFFLINE_MODE))
3894 return -EPERM; 3910 return -EPERM;
3895 }
3896 3911
3897 spin_lock_irq(&phba->hbalock); 3912 spin_lock_irq(&phba->hbalock);
3898 for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) 3913 for (buf_off = 0; buf_off < count - LPFC_REG_WRITE_KEY_SIZE;
3899 writel(*((uint32_t *)(buf + buf_off)), 3914 buf_off += sizeof(uint32_t))
3915 writel(*((uint32_t *)(buf + buf_off + LPFC_REG_WRITE_KEY_SIZE)),
3900 phba->ctrl_regs_memmap_p + off + buf_off); 3916 phba->ctrl_regs_memmap_p + off + buf_off);
3901 3917
3902 spin_unlock_irq(&phba->hbalock); 3918 spin_unlock_irq(&phba->hbalock);
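The net effect of this hunk: every write to the ctlreg attribute must now begin with the 4-byte key "EMLX", which is stripped before the remaining words reach the register window. A userspace sketch of a conforming write follows; the sysfs path is caller-supplied and an assumption here, and off must stay 4-byte aligned as checked above:

    #include <fcntl.h>
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>

    /* Write one 32-bit register value through the guarded attribute.
     * The total count (key + payload) must be a multiple of 4. */
    int write_ctlreg(const char *sysfs_path, off_t reg_off, uint32_t val)
    {
            char buf[8];
            int fd = open(sysfs_path, O_WRONLY);

            if (fd < 0)
                    return -1;
            memcpy(buf, "EMLX", 4);         /* LPFC_REG_WRITE_KEY */
            memcpy(buf + 4, &val, 4);       /* 4-byte payload */
            if (pwrite(fd, buf, sizeof(buf), reg_off) != (ssize_t)sizeof(buf)) {
                    close(fd);
                    return -1;
            }
            return close(fd);
    }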
@@ -4097,8 +4113,10 @@ sysfs_mbox_read(struct file *filp, struct kobject *kobj,
4097 struct Scsi_Host *shost = class_to_shost(dev); 4113 struct Scsi_Host *shost = class_to_shost(dev);
4098 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; 4114 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
4099 struct lpfc_hba *phba = vport->phba; 4115 struct lpfc_hba *phba = vport->phba;
4100 int rc; 4116 LPFC_MBOXQ_t *mboxq;
4101 MAILBOX_t *pmb; 4117 MAILBOX_t *pmb;
4118 uint32_t mbox_tmo;
4119 int rc;
4102 4120
4103 if (off > MAILBOX_CMD_SIZE) 4121 if (off > MAILBOX_CMD_SIZE)
4104 return -ERANGE; 4122 return -ERANGE;
@@ -4123,7 +4141,8 @@ sysfs_mbox_read(struct file *filp, struct kobject *kobj,
4123 if (off == 0 && 4141 if (off == 0 &&
4124 phba->sysfs_mbox.state == SMBOX_WRITING && 4142 phba->sysfs_mbox.state == SMBOX_WRITING &&
4125 phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) { 4143 phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
4126 pmb = &phba->sysfs_mbox.mbox->u.mb; 4144 mboxq = (LPFC_MBOXQ_t *)&phba->sysfs_mbox.mbox;
4145 pmb = &mboxq->u.mb;
4127 switch (pmb->mbxCommand) { 4146 switch (pmb->mbxCommand) {
4128 /* Offline only */ 4147 /* Offline only */
4129 case MBX_INIT_LINK: 4148 case MBX_INIT_LINK:
@@ -4233,9 +4252,8 @@ sysfs_mbox_read(struct file *filp, struct kobject *kobj,
4233 4252
4234 } else { 4253 } else {
4235 spin_unlock_irq(&phba->hbalock); 4254 spin_unlock_irq(&phba->hbalock);
4236 rc = lpfc_sli_issue_mbox_wait (phba, 4255 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
4237 phba->sysfs_mbox.mbox, 4256 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
4238 lpfc_mbox_tmo_val(phba, pmb->mbxCommand) * HZ);
4239 spin_lock_irq(&phba->hbalock); 4257 spin_lock_irq(&phba->hbalock);
4240 } 4258 }
4241 4259
@@ -4480,9 +4498,10 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost)
4480 4498
4481 spin_lock_irq(shost->host_lock); 4499 spin_lock_irq(shost->host_lock);
4482 4500
4483 if ((vport->fc_flag & FC_FABRIC) || 4501 if ((vport->port_state > LPFC_FLOGI) &&
4484 ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) && 4502 ((vport->fc_flag & FC_FABRIC) ||
4485 (vport->fc_flag & FC_PUBLIC_LOOP))) 4503 ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
4504 (vport->fc_flag & FC_PUBLIC_LOOP))))
4486 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn); 4505 node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn);
4487 else 4506 else
4488 /* fabric is local port if there is no F/FL_Port */ 4507 /* fabric is local port if there is no F/FL_Port */
@@ -4555,9 +4574,17 @@ lpfc_get_stats(struct Scsi_Host *shost)
4555 memset(hs, 0, sizeof (struct fc_host_statistics)); 4574 memset(hs, 0, sizeof (struct fc_host_statistics));
4556 4575
4557 hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt; 4576 hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt;
4558 hs->tx_words = (pmb->un.varRdStatus.xmitByteCnt * 256); 4577 /*
4578 * The MBX_READ_STATUS returns tx_k_bytes which has to
 4579 * be converted to words
4580 */
4581 hs->tx_words = (uint64_t)
4582 ((uint64_t)pmb->un.varRdStatus.xmitByteCnt
4583 * (uint64_t)256);
4559 hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt; 4584 hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt;
4560 hs->rx_words = (pmb->un.varRdStatus.rcvByteCnt * 256); 4585 hs->rx_words = (uint64_t)
4586 ((uint64_t)pmb->un.varRdStatus.rcvByteCnt
4587 * (uint64_t)256);
4561 4588
4562 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); 4589 memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
4563 pmb->mbxCommand = MBX_READ_LNK_STAT; 4590 pmb->mbxCommand = MBX_READ_LNK_STAT;
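The widening matters because MBX_READ_STATUS reports kilobytes while fc_host statistics want 4-byte words: 1 KB = 1024 bytes = 256 words, and a busy port overflows 32 bits quickly. A standalone illustration of the overflow the casts above prevent:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t xmit_kbytes = 20u * 1024 * 1024;       /* ~20 GB, in KB */
            uint64_t wrapped = xmit_kbytes * 256;           /* 32-bit multiply wraps */
            uint64_t correct = (uint64_t)xmit_kbytes * 256; /* widened first */

            printf("wrapped: %llu\ncorrect: %llu\n",
                   (unsigned long long)wrapped,
                   (unsigned long long)correct);
            return 0;
    }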
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index a6db6aef133..60f95347bab 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -209,7 +209,7 @@ void __lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
209void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *); 209void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
210int lpfc_mbox_cmd_check(struct lpfc_hba *, LPFC_MBOXQ_t *); 210int lpfc_mbox_cmd_check(struct lpfc_hba *, LPFC_MBOXQ_t *);
211int lpfc_mbox_dev_check(struct lpfc_hba *); 211int lpfc_mbox_dev_check(struct lpfc_hba *);
212int lpfc_mbox_tmo_val(struct lpfc_hba *, int); 212int lpfc_mbox_tmo_val(struct lpfc_hba *, LPFC_MBOXQ_t *);
213void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *); 213void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
214void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t); 214void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
215void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t); 215void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t);
@@ -451,3 +451,5 @@ int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *);
451/* functions to support SR-IOV */ 451/* functions to support SR-IOV */
452int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int); 452int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
453uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *); 453uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *);
454int lpfc_sli4_queue_create(struct lpfc_hba *);
455void lpfc_sli4_queue_destroy(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 779b88e1469..707081d0a22 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1856,6 +1856,9 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
1856 case 2: 1856 case 2:
1857 c = 'B'; 1857 c = 'B';
1858 break; 1858 break;
1859 case 3:
1860 c = 'X';
1861 break;
1859 default: 1862 default:
1860 c = 0; 1863 c = 0;
1861 break; 1864 break;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index a0424dd90e4..2cd844f7058 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -996,6 +996,85 @@ lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf,
996 return nbytes; 996 return nbytes;
997} 997}
998 998
999static int
1000lpfc_debugfs_dif_err_open(struct inode *inode, struct file *file)
1001{
1002 file->private_data = inode->i_private;
1003 return 0;
1004}
1005
1006static ssize_t
1007lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
1008 size_t nbytes, loff_t *ppos)
1009{
1010 struct dentry *dent = file->f_dentry;
1011 struct lpfc_hba *phba = file->private_data;
1012 char cbuf[16];
1013 int cnt = 0;
1014
1015 if (dent == phba->debug_writeGuard)
1016 cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wgrd_cnt);
1017 else if (dent == phba->debug_writeApp)
1018 cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wapp_cnt);
1019 else if (dent == phba->debug_writeRef)
1020 cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wref_cnt);
1021 else if (dent == phba->debug_readApp)
1022 cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rapp_cnt);
1023 else if (dent == phba->debug_readRef)
1024 cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rref_cnt);
1025 else if (dent == phba->debug_InjErrLBA)
1026 cnt = snprintf(cbuf, 16, "0x%lx\n",
1027 (unsigned long) phba->lpfc_injerr_lba);
1028 else
1029 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1030 "0547 Unknown debugfs error injection entry\n");
1031
1032 return simple_read_from_buffer(buf, nbytes, ppos, &cbuf, cnt);
1033}
1034
1035static ssize_t
1036lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
1037 size_t nbytes, loff_t *ppos)
1038{
1039 struct dentry *dent = file->f_dentry;
1040 struct lpfc_hba *phba = file->private_data;
1041 char dstbuf[32];
1042 unsigned long tmp;
1043 int size;
1044
1045 memset(dstbuf, 0, 32);
1046 size = (nbytes < 32) ? nbytes : 32;
1047 if (copy_from_user(dstbuf, buf, size))
1048 return 0;
1049
1050 if (strict_strtoul(dstbuf, 0, &tmp))
1051 return 0;
1052
1053 if (dent == phba->debug_writeGuard)
1054 phba->lpfc_injerr_wgrd_cnt = (uint32_t)tmp;
1055 else if (dent == phba->debug_writeApp)
1056 phba->lpfc_injerr_wapp_cnt = (uint32_t)tmp;
1057 else if (dent == phba->debug_writeRef)
1058 phba->lpfc_injerr_wref_cnt = (uint32_t)tmp;
1059 else if (dent == phba->debug_readApp)
1060 phba->lpfc_injerr_rapp_cnt = (uint32_t)tmp;
1061 else if (dent == phba->debug_readRef)
1062 phba->lpfc_injerr_rref_cnt = (uint32_t)tmp;
1063 else if (dent == phba->debug_InjErrLBA)
1064 phba->lpfc_injerr_lba = (sector_t)tmp;
1065 else
1066 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1067 "0548 Unknown debugfs error injection entry\n");
1068
1069 return nbytes;
1070}
1071
1072static int
1073lpfc_debugfs_dif_err_release(struct inode *inode, struct file *file)
1074{
1075 return 0;
1076}
1077
999/** 1078/**
1000 * lpfc_debugfs_nodelist_open - Open the nodelist debugfs file 1079 * lpfc_debugfs_nodelist_open - Open the nodelist debugfs file
1001 * @inode: The inode pointer that contains a vport pointer. 1080 * @inode: The inode pointer that contains a vport pointer.
@@ -3380,6 +3459,16 @@ static const struct file_operations lpfc_debugfs_op_dumpDif = {
3380 .release = lpfc_debugfs_dumpDataDif_release, 3459 .release = lpfc_debugfs_dumpDataDif_release,
3381}; 3460};
3382 3461
3462#undef lpfc_debugfs_op_dif_err
3463static const struct file_operations lpfc_debugfs_op_dif_err = {
3464 .owner = THIS_MODULE,
3465 .open = lpfc_debugfs_dif_err_open,
3466 .llseek = lpfc_debugfs_lseek,
3467 .read = lpfc_debugfs_dif_err_read,
3468 .write = lpfc_debugfs_dif_err_write,
3469 .release = lpfc_debugfs_dif_err_release,
3470};
3471
3383#undef lpfc_debugfs_op_slow_ring_trc 3472#undef lpfc_debugfs_op_slow_ring_trc
3384static const struct file_operations lpfc_debugfs_op_slow_ring_trc = { 3473static const struct file_operations lpfc_debugfs_op_slow_ring_trc = {
3385 .owner = THIS_MODULE, 3474 .owner = THIS_MODULE,
@@ -3788,6 +3877,74 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
3788 goto debug_failed; 3877 goto debug_failed;
3789 } 3878 }
3790 3879
3880 /* Setup DIF Error Injections */
3881 snprintf(name, sizeof(name), "InjErrLBA");
3882 phba->debug_InjErrLBA =
3883 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
3884 phba->hba_debugfs_root,
3885 phba, &lpfc_debugfs_op_dif_err);
3886 if (!phba->debug_InjErrLBA) {
3887 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3888 "0807 Cannot create debugfs InjErrLBA\n");
3889 goto debug_failed;
3890 }
3891 phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
3892
3893 snprintf(name, sizeof(name), "writeGuardInjErr");
3894 phba->debug_writeGuard =
3895 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
3896 phba->hba_debugfs_root,
3897 phba, &lpfc_debugfs_op_dif_err);
3898 if (!phba->debug_writeGuard) {
3899 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3900 "0802 Cannot create debugfs writeGuard\n");
3901 goto debug_failed;
3902 }
3903
3904 snprintf(name, sizeof(name), "writeAppInjErr");
3905 phba->debug_writeApp =
3906 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
3907 phba->hba_debugfs_root,
3908 phba, &lpfc_debugfs_op_dif_err);
3909 if (!phba->debug_writeApp) {
3910 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3911 "0803 Cannot create debugfs writeApp\n");
3912 goto debug_failed;
3913 }
3914
3915 snprintf(name, sizeof(name), "writeRefInjErr");
3916 phba->debug_writeRef =
3917 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
3918 phba->hba_debugfs_root,
3919 phba, &lpfc_debugfs_op_dif_err);
3920 if (!phba->debug_writeRef) {
3921 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3922 "0804 Cannot create debugfs writeRef\n");
3923 goto debug_failed;
3924 }
3925
3926 snprintf(name, sizeof(name), "readAppInjErr");
3927 phba->debug_readApp =
3928 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
3929 phba->hba_debugfs_root,
3930 phba, &lpfc_debugfs_op_dif_err);
3931 if (!phba->debug_readApp) {
3932 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3933 "0805 Cannot create debugfs readApp\n");
3934 goto debug_failed;
3935 }
3936
3937 snprintf(name, sizeof(name), "readRefInjErr");
3938 phba->debug_readRef =
3939 debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
3940 phba->hba_debugfs_root,
3941 phba, &lpfc_debugfs_op_dif_err);
3942 if (!phba->debug_readRef) {
3943 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
3944 "0806 Cannot create debugfs readApp\n");
3945 goto debug_failed;
3946 }
3947
3791 /* Setup slow ring trace */ 3948 /* Setup slow ring trace */
3792 if (lpfc_debugfs_max_slow_ring_trc) { 3949 if (lpfc_debugfs_max_slow_ring_trc) {
3793 num = lpfc_debugfs_max_slow_ring_trc - 1; 3950 num = lpfc_debugfs_max_slow_ring_trc - 1;
@@ -4090,6 +4247,30 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
4090 debugfs_remove(phba->debug_dumpDif); /* dumpDif */ 4247 debugfs_remove(phba->debug_dumpDif); /* dumpDif */
4091 phba->debug_dumpDif = NULL; 4248 phba->debug_dumpDif = NULL;
4092 } 4249 }
4250 if (phba->debug_InjErrLBA) {
4251 debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */
4252 phba->debug_InjErrLBA = NULL;
4253 }
4254 if (phba->debug_writeGuard) {
4255 debugfs_remove(phba->debug_writeGuard); /* writeGuard */
4256 phba->debug_writeGuard = NULL;
4257 }
4258 if (phba->debug_writeApp) {
4259 debugfs_remove(phba->debug_writeApp); /* writeApp */
4260 phba->debug_writeApp = NULL;
4261 }
4262 if (phba->debug_writeRef) {
4263 debugfs_remove(phba->debug_writeRef); /* writeRef */
4264 phba->debug_writeRef = NULL;
4265 }
4266 if (phba->debug_readApp) {
4267 debugfs_remove(phba->debug_readApp); /* readApp */
4268 phba->debug_readApp = NULL;
4269 }
4270 if (phba->debug_readRef) {
4271 debugfs_remove(phba->debug_readRef); /* readRef */
4272 phba->debug_readRef = NULL;
4273 }
4093 4274
4094 if (phba->slow_ring_trc) { 4275 if (phba->slow_ring_trc) {
4095 kfree(phba->slow_ring_trc); 4276 kfree(phba->slow_ring_trc);
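Taken together, the debugfs additions form a small user-driven fault-injection panel. A sketch of driving it from userspace; the debugfs mount point and per-HBA directory name are assumptions, while the file names and numeric formats ("%u" counters, "0x%lx" LBA) come from the code above:

    #include <stdio.h>

    int main(void)
    {
            /* Directory is an assumption; adjust to the HBA's debugfs node. */
            const char *dir = "/sys/kernel/debug/lpfc/fn0";
            char path[256];
            FILE *f;

            /* Arm one write app_tag error, restricted to LBA 0x1234. */
            snprintf(path, sizeof(path), "%s/InjErrLBA", dir);
            f = fopen(path, "w");
            if (!f)
                    return 1;
            fprintf(f, "0x1234\n");
            fclose(f);

            snprintf(path, sizeof(path), "%s/writeAppInjErr", dir);
            f = fopen(path, "w");
            if (!f)
                    return 1;
            fprintf(f, "1\n");
            return fclose(f) ? 1 : 0;
    }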
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 023da0e00d3..445826a4c98 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -3386,7 +3386,14 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3386 cmdiocb->context1 = NULL; 3386 cmdiocb->context1 = NULL;
3387 } 3387 }
3388 } 3388 }
3389
3390 /*
3391 * The driver received a LOGO from the rport and has ACK'd it.
3392 * At this point, the driver is done so release the IOCB and
3393 * remove the ndlp reference.
3394 */
3389 lpfc_els_free_iocb(phba, cmdiocb); 3395 lpfc_els_free_iocb(phba, cmdiocb);
3396 lpfc_nlp_put(ndlp);
3390 return; 3397 return;
3391} 3398}
3392 3399
@@ -4082,9 +4089,6 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
4082 4089
4083 phba->fc_stat.elsXmitACC++; 4090 phba->fc_stat.elsXmitACC++;
4084 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4091 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4085 lpfc_nlp_put(ndlp);
4086 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
4087 * it could be freed */
4088 4092
4089 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4093 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4090 if (rc == IOCB_ERROR) { 4094 if (rc == IOCB_ERROR) {
@@ -4166,6 +4170,11 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
4166 psli = &phba->sli; 4170 psli = &phba->sli;
4167 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; 4171 cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
4168 4172
4173 /* The accumulated length can exceed the BPL_SIZE. For
4174 * now, use this as the limit
4175 */
4176 if (cmdsize > LPFC_BPL_SIZE)
4177 cmdsize = LPFC_BPL_SIZE;
4169 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, 4178 elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4170 ndlp->nlp_DID, ELS_CMD_ACC); 4179 ndlp->nlp_DID, ELS_CMD_ACC);
4171 if (!elsiocb) 4180 if (!elsiocb)
@@ -4189,9 +4198,6 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
4189 4198
4190 phba->fc_stat.elsXmitACC++; 4199 phba->fc_stat.elsXmitACC++;
4191 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; 4200 elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4192 lpfc_nlp_put(ndlp);
4193 elsiocb->context1 = NULL; /* Don't need ndlp for cmpl,
4194 * it could be freed */
4195 4201
4196 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); 4202 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
4197 if (rc == IOCB_ERROR) { 4203 if (rc == IOCB_ERROR) {
@@ -7258,16 +7264,11 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
7258 icmd->un.elsreq64.myID = 0; 7264 icmd->un.elsreq64.myID = 0;
7259 icmd->un.elsreq64.fl = 1; 7265 icmd->un.elsreq64.fl = 1;
7260 7266
7261 if ((phba->sli_rev == LPFC_SLI_REV4) && 7267 /*
7262 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == 7268 * SLI3 ports require a different context type value than SLI4.
7263 LPFC_SLI_INTF_IF_TYPE_0)) { 7269 * Catch SLI3 ports here and override the prep.
7264 /* FDISC needs to be 1 for WQE VPI */ 7270 */
7265 elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1; 7271 if (phba->sli_rev == LPFC_SLI_REV3) {
7266 elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ;
7267 /* Set the ulpContext to the vpi */
7268 elsiocb->iocb.ulpContext = phba->vpi_ids[vport->vpi];
7269 } else {
7270 /* For FDISC, Let FDISC rsp set the NPortID for this VPI */
7271 icmd->ulpCt_h = 1; 7272 icmd->ulpCt_h = 1;
7272 icmd->ulpCt_l = 0; 7273 icmd->ulpCt_l = 0;
7273 } 7274 }
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 0b47adf9fee..091f68e5cb7 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1412,7 +1412,7 @@ lpfc_register_fcf(struct lpfc_hba *phba)
1412 if (phba->pport->port_state != LPFC_FLOGI) { 1412 if (phba->pport->port_state != LPFC_FLOGI) {
1413 phba->hba_flag |= FCF_RR_INPROG; 1413 phba->hba_flag |= FCF_RR_INPROG;
1414 spin_unlock_irq(&phba->hbalock); 1414 spin_unlock_irq(&phba->hbalock);
1415 lpfc_issue_init_vfi(phba->pport); 1415 lpfc_initial_flogi(phba->pport);
1416 return; 1416 return;
1417 } 1417 }
1418 spin_unlock_irq(&phba->hbalock); 1418 spin_unlock_irq(&phba->hbalock);
@@ -2646,7 +2646,9 @@ lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2646{ 2646{
2647 struct lpfc_vport *vport = mboxq->vport; 2647 struct lpfc_vport *vport = mboxq->vport;
2648 2648
2649 if (mboxq->u.mb.mbxStatus && (mboxq->u.mb.mbxStatus != 0x4002)) { 2649 /* VFI not supported on interface type 0, just do the flogi */
2650 if (mboxq->u.mb.mbxStatus && (bf_get(lpfc_sli_intf_if_type,
2651 &phba->sli4_hba.sli_intf) != LPFC_SLI_INTF_IF_TYPE_0)) {
2650 lpfc_printf_vlog(vport, KERN_ERR, 2652 lpfc_printf_vlog(vport, KERN_ERR,
2651 LOG_MBOX, 2653 LOG_MBOX,
2652 "2891 Init VFI mailbox failed 0x%x\n", 2654 "2891 Init VFI mailbox failed 0x%x\n",
@@ -2655,6 +2657,7 @@ lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2655 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2657 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2656 return; 2658 return;
2657 } 2659 }
2660
2658 lpfc_initial_flogi(vport); 2661 lpfc_initial_flogi(vport);
2659 mempool_free(mboxq, phba->mbox_mem_pool); 2662 mempool_free(mboxq, phba->mbox_mem_pool);
2660 return; 2663 return;
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 7f8003b5181..98d21521f53 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -41,6 +41,8 @@
41 * Or clear that bit field: 41 * Or clear that bit field:
42 * bf_set(example_bit_field, &t1, 0); 42 * bf_set(example_bit_field, &t1, 0);
43 */ 43 */
44#define bf_get_be32(name, ptr) \
45 ((be32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
44#define bf_get_le32(name, ptr) \ 46#define bf_get_le32(name, ptr) \
45 ((le32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK) 47 ((le32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)
46#define bf_get(name, ptr) \ 48#define bf_get(name, ptr) \
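bf_get_be32() follows the driver's existing SHIFT/MASK/WORD convention but byte-swaps from big-endian before extracting. A standalone rendition of the pattern, with ntohl()/htonl() standing in for the kernel's be32 helpers:

    #include <arpa/inet.h>  /* ntohl(), htonl() stand in for be32_to_cpu() */
    #include <stdint.h>
    #include <stdio.h>

    struct example {
            uint32_t word0;
    };

    #define ex_field_SHIFT  8
    #define ex_field_MASK   0x000000FF
    #define ex_field_WORD   word0

    #define bf_get_be32(name, ptr) \
            ((ntohl((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK)

    int main(void)
    {
            struct example ex = { .word0 = htonl(0x00ABCD00) };

            printf("field = 0x%x\n", bf_get_be32(ex_field, &ex));  /* 0xcd */
            return 0;
    }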
@@ -678,7 +680,6 @@ struct lpfc_register {
678#define lpfc_rq_doorbell_num_posted_SHIFT 16 680#define lpfc_rq_doorbell_num_posted_SHIFT 16
679#define lpfc_rq_doorbell_num_posted_MASK 0x3FFF 681#define lpfc_rq_doorbell_num_posted_MASK 0x3FFF
680#define lpfc_rq_doorbell_num_posted_WORD word0 682#define lpfc_rq_doorbell_num_posted_WORD word0
681#define LPFC_RQ_POST_BATCH 8 /* RQEs to post at one time */
682#define lpfc_rq_doorbell_id_SHIFT 0 683#define lpfc_rq_doorbell_id_SHIFT 0
683#define lpfc_rq_doorbell_id_MASK 0xFFFF 684#define lpfc_rq_doorbell_id_MASK 0xFFFF
684#define lpfc_rq_doorbell_id_WORD word0 685#define lpfc_rq_doorbell_id_WORD word0
@@ -784,6 +785,8 @@ union lpfc_sli4_cfg_shdr {
784#define LPFC_Q_CREATE_VERSION_2 2 785#define LPFC_Q_CREATE_VERSION_2 2
785#define LPFC_Q_CREATE_VERSION_1 1 786#define LPFC_Q_CREATE_VERSION_1 1
786#define LPFC_Q_CREATE_VERSION_0 0 787#define LPFC_Q_CREATE_VERSION_0 0
788#define LPFC_OPCODE_VERSION_0 0
789#define LPFC_OPCODE_VERSION_1 1
787 } request; 790 } request;
788 struct { 791 struct {
789 uint32_t word6; 792 uint32_t word6;
@@ -825,6 +828,7 @@ struct mbox_header {
825#define LPFC_EXTENT_VERSION_DEFAULT 0 828#define LPFC_EXTENT_VERSION_DEFAULT 0
826 829
827/* Subsystem Definitions */ 830/* Subsystem Definitions */
831#define LPFC_MBOX_SUBSYSTEM_NA 0x0
828#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1 832#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1
829#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC 833#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC
830 834
@@ -835,25 +839,34 @@ struct mbox_header {
835#define HOST_ENDIAN_HIGH_WORD1 0xFF7856FF 839#define HOST_ENDIAN_HIGH_WORD1 0xFF7856FF
836 840
837/* Common Opcodes */ 841/* Common Opcodes */
838#define LPFC_MBOX_OPCODE_CQ_CREATE 0x0C 842#define LPFC_MBOX_OPCODE_NA 0x00
839#define LPFC_MBOX_OPCODE_EQ_CREATE 0x0D 843#define LPFC_MBOX_OPCODE_CQ_CREATE 0x0C
840#define LPFC_MBOX_OPCODE_MQ_CREATE 0x15 844#define LPFC_MBOX_OPCODE_EQ_CREATE 0x0D
841#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 0x20 845#define LPFC_MBOX_OPCODE_MQ_CREATE 0x15
842#define LPFC_MBOX_OPCODE_NOP 0x21 846#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 0x20
843#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35 847#define LPFC_MBOX_OPCODE_NOP 0x21
844#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36 848#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35
845#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37 849#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36
846#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A 850#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37
847#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D 851#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A
848#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A 852#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D
849#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A 853#define LPFC_MBOX_OPCODE_GET_PORT_NAME 0x4D
850#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B 854#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A
851#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C 855#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A
852#define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT 0x9D 856#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B
853#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG 0xA0 857#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C
854#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG 0xA4 858#define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT 0x9D
855#define LPFC_MBOX_OPCODE_WRITE_OBJECT 0xAC 859#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG 0xA0
856#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5 860#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG 0xA4
861#define LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG 0xA5
862#define LPFC_MBOX_OPCODE_GET_PROFILE_LIST 0xA6
863#define LPFC_MBOX_OPCODE_SET_ACT_PROFILE 0xA8
864#define LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG 0xA9
865#define LPFC_MBOX_OPCODE_READ_OBJECT 0xAB
866#define LPFC_MBOX_OPCODE_WRITE_OBJECT 0xAC
867#define LPFC_MBOX_OPCODE_READ_OBJECT_LIST 0xAD
868#define LPFC_MBOX_OPCODE_DELETE_OBJECT 0xAE
869#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5
857 870
858/* FCoE Opcodes */ 871/* FCoE Opcodes */
859#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01 872#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01
@@ -867,6 +880,7 @@ struct mbox_header {
867#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A 880#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A
868#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B 881#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B
869#define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10 882#define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10
883#define LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS 0x21
870#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE 0x22 884#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE 0x22
871#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK 0x23 885#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK 0x23
872 886
@@ -1470,16 +1484,81 @@ struct sli4_sge { /* SLI-4 */
1470 uint32_t addr_lo; 1484 uint32_t addr_lo;
1471 1485
1472 uint32_t word2; 1486 uint32_t word2;
1473#define lpfc_sli4_sge_offset_SHIFT 0 /* Offset of buffer - Not used*/ 1487#define lpfc_sli4_sge_offset_SHIFT 0
1474#define lpfc_sli4_sge_offset_MASK 0x1FFFFFFF 1488#define lpfc_sli4_sge_offset_MASK 0x07FFFFFF
1475#define lpfc_sli4_sge_offset_WORD word2 1489#define lpfc_sli4_sge_offset_WORD word2
1476#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets 1490#define lpfc_sli4_sge_type_SHIFT 27
1477 this flag !! */ 1491#define lpfc_sli4_sge_type_MASK 0x0000000F
1492#define lpfc_sli4_sge_type_WORD word2
1493#define LPFC_SGE_TYPE_DATA 0x0
1494#define LPFC_SGE_TYPE_DIF 0x4
1495#define LPFC_SGE_TYPE_LSP 0x5
1496#define LPFC_SGE_TYPE_PEDIF 0x6
1497#define LPFC_SGE_TYPE_PESEED 0x7
1498#define LPFC_SGE_TYPE_DISEED 0x8
1499#define LPFC_SGE_TYPE_ENC 0x9
1500#define LPFC_SGE_TYPE_ATM 0xA
1501#define LPFC_SGE_TYPE_SKIP 0xC
1502#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets it */
1478#define lpfc_sli4_sge_last_MASK 0x00000001 1503#define lpfc_sli4_sge_last_MASK 0x00000001
1479#define lpfc_sli4_sge_last_WORD word2 1504#define lpfc_sli4_sge_last_WORD word2
1480 uint32_t sge_len; 1505 uint32_t sge_len;
1481}; 1506};
1482 1507
1508struct sli4_sge_diseed { /* SLI-4 */
1509 uint32_t ref_tag;
1510 uint32_t ref_tag_tran;
1511
1512 uint32_t word2;
1513#define lpfc_sli4_sge_dif_apptran_SHIFT 0
1514#define lpfc_sli4_sge_dif_apptran_MASK 0x0000FFFF
1515#define lpfc_sli4_sge_dif_apptran_WORD word2
1516#define lpfc_sli4_sge_dif_af_SHIFT 24
1517#define lpfc_sli4_sge_dif_af_MASK 0x00000001
1518#define lpfc_sli4_sge_dif_af_WORD word2
1519#define lpfc_sli4_sge_dif_na_SHIFT 25
1520#define lpfc_sli4_sge_dif_na_MASK 0x00000001
1521#define lpfc_sli4_sge_dif_na_WORD word2
1522#define lpfc_sli4_sge_dif_hi_SHIFT 26
1523#define lpfc_sli4_sge_dif_hi_MASK 0x00000001
1524#define lpfc_sli4_sge_dif_hi_WORD word2
1525#define lpfc_sli4_sge_dif_type_SHIFT 27
1526#define lpfc_sli4_sge_dif_type_MASK 0x0000000F
1527#define lpfc_sli4_sge_dif_type_WORD word2
1528#define lpfc_sli4_sge_dif_last_SHIFT 31 /* Last SEG in the SGL sets it */
1529#define lpfc_sli4_sge_dif_last_MASK 0x00000001
1530#define lpfc_sli4_sge_dif_last_WORD word2
1531 uint32_t word3;
1532#define lpfc_sli4_sge_dif_apptag_SHIFT 0
1533#define lpfc_sli4_sge_dif_apptag_MASK 0x0000FFFF
1534#define lpfc_sli4_sge_dif_apptag_WORD word3
1535#define lpfc_sli4_sge_dif_bs_SHIFT 16
1536#define lpfc_sli4_sge_dif_bs_MASK 0x00000007
1537#define lpfc_sli4_sge_dif_bs_WORD word3
1538#define lpfc_sli4_sge_dif_ai_SHIFT 19
1539#define lpfc_sli4_sge_dif_ai_MASK 0x00000001
1540#define lpfc_sli4_sge_dif_ai_WORD word3
1541#define lpfc_sli4_sge_dif_me_SHIFT 20
1542#define lpfc_sli4_sge_dif_me_MASK 0x00000001
1543#define lpfc_sli4_sge_dif_me_WORD word3
1544#define lpfc_sli4_sge_dif_re_SHIFT 21
1545#define lpfc_sli4_sge_dif_re_MASK 0x00000001
1546#define lpfc_sli4_sge_dif_re_WORD word3
1547#define lpfc_sli4_sge_dif_ce_SHIFT 22
1548#define lpfc_sli4_sge_dif_ce_MASK 0x00000001
1549#define lpfc_sli4_sge_dif_ce_WORD word3
1550#define lpfc_sli4_sge_dif_nr_SHIFT 23
1551#define lpfc_sli4_sge_dif_nr_MASK 0x00000001
1552#define lpfc_sli4_sge_dif_nr_WORD word3
1553#define lpfc_sli4_sge_dif_oprx_SHIFT 24
1554#define lpfc_sli4_sge_dif_oprx_MASK 0x0000000F
1555#define lpfc_sli4_sge_dif_oprx_WORD word3
1556#define lpfc_sli4_sge_dif_optx_SHIFT 28
1557#define lpfc_sli4_sge_dif_optx_MASK 0x0000000F
1558#define lpfc_sli4_sge_dif_optx_WORD word3
1559/* optx and oprx use BG_OP_IN defines in lpfc_hw.h */
1560};
1561
1483struct fcf_record { 1562struct fcf_record {
1484 uint32_t max_rcv_size; 1563 uint32_t max_rcv_size;
1485 uint32_t fka_adv_period; 1564 uint32_t fka_adv_period;
@@ -2019,6 +2098,15 @@ struct lpfc_mbx_read_config {
2019#define lpfc_mbx_rd_conf_extnts_inuse_MASK 0x00000001 2098#define lpfc_mbx_rd_conf_extnts_inuse_MASK 0x00000001
2020#define lpfc_mbx_rd_conf_extnts_inuse_WORD word1 2099#define lpfc_mbx_rd_conf_extnts_inuse_WORD word1
2021 uint32_t word2; 2100 uint32_t word2;
2101#define lpfc_mbx_rd_conf_lnk_numb_SHIFT 0
2102#define lpfc_mbx_rd_conf_lnk_numb_MASK 0x0000003F
2103#define lpfc_mbx_rd_conf_lnk_numb_WORD word2
2104#define lpfc_mbx_rd_conf_lnk_type_SHIFT 6
2105#define lpfc_mbx_rd_conf_lnk_type_MASK 0x00000003
2106#define lpfc_mbx_rd_conf_lnk_type_WORD word2
2107#define lpfc_mbx_rd_conf_lnk_ldv_SHIFT 8
2108#define lpfc_mbx_rd_conf_lnk_ldv_MASK 0x00000001
2109#define lpfc_mbx_rd_conf_lnk_ldv_WORD word2
2022#define lpfc_mbx_rd_conf_topology_SHIFT 24 2110#define lpfc_mbx_rd_conf_topology_SHIFT 24
2023#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF 2111#define lpfc_mbx_rd_conf_topology_MASK 0x000000FF
2024#define lpfc_mbx_rd_conf_topology_WORD word2 2112#define lpfc_mbx_rd_conf_topology_WORD word2
@@ -2552,8 +2640,152 @@ struct lpfc_mbx_get_prof_cfg {
2552 } u; 2640 } u;
2553}; 2641};
2554 2642
2643struct lpfc_controller_attribute {
2644 uint32_t version_string[8];
2645 uint32_t manufacturer_name[8];
2646 uint32_t supported_modes;
2647 uint32_t word17;
2648#define lpfc_cntl_attr_eprom_ver_lo_SHIFT 0
2649#define lpfc_cntl_attr_eprom_ver_lo_MASK 0x000000ff
2650#define lpfc_cntl_attr_eprom_ver_lo_WORD word17
2651#define lpfc_cntl_attr_eprom_ver_hi_SHIFT 8
2652#define lpfc_cntl_attr_eprom_ver_hi_MASK 0x000000ff
2653#define lpfc_cntl_attr_eprom_ver_hi_WORD word17
2654 uint32_t mbx_da_struct_ver;
2655 uint32_t ep_fw_da_struct_ver;
2656 uint32_t ncsi_ver_str[3];
2657 uint32_t dflt_ext_timeout;
2658 uint32_t model_number[8];
2659 uint32_t description[16];
2660 uint32_t serial_number[8];
2661 uint32_t ip_ver_str[8];
2662 uint32_t fw_ver_str[8];
2663 uint32_t bios_ver_str[8];
2664 uint32_t redboot_ver_str[8];
2665 uint32_t driver_ver_str[8];
2666 uint32_t flash_fw_ver_str[8];
2667 uint32_t functionality;
2668 uint32_t word105;
2669#define lpfc_cntl_attr_max_cbd_len_SHIFT 0
2670#define lpfc_cntl_attr_max_cbd_len_MASK 0x0000ffff
2671#define lpfc_cntl_attr_max_cbd_len_WORD word105
2672#define lpfc_cntl_attr_asic_rev_SHIFT 16
2673#define lpfc_cntl_attr_asic_rev_MASK 0x000000ff
2674#define lpfc_cntl_attr_asic_rev_WORD word105
2675#define lpfc_cntl_attr_gen_guid0_SHIFT 24
2676#define lpfc_cntl_attr_gen_guid0_MASK 0x000000ff
2677#define lpfc_cntl_attr_gen_guid0_WORD word105
2678 uint32_t gen_guid1_12[3];
2679 uint32_t word109;
2680#define lpfc_cntl_attr_gen_guid13_14_SHIFT 0
2681#define lpfc_cntl_attr_gen_guid13_14_MASK 0x0000ffff
2682#define lpfc_cntl_attr_gen_guid13_14_WORD word109
2683#define lpfc_cntl_attr_gen_guid15_SHIFT 16
2684#define lpfc_cntl_attr_gen_guid15_MASK 0x000000ff
2685#define lpfc_cntl_attr_gen_guid15_WORD word109
2686#define lpfc_cntl_attr_hba_port_cnt_SHIFT 24
2687#define lpfc_cntl_attr_hba_port_cnt_MASK 0x000000ff
2688#define lpfc_cntl_attr_hba_port_cnt_WORD word109
2689 uint32_t word110;
2690#define lpfc_cntl_attr_dflt_lnk_tmo_SHIFT 0
2691#define lpfc_cntl_attr_dflt_lnk_tmo_MASK 0x0000ffff
2692#define lpfc_cntl_attr_dflt_lnk_tmo_WORD word110
2693#define lpfc_cntl_attr_multi_func_dev_SHIFT 24
2694#define lpfc_cntl_attr_multi_func_dev_MASK 0x000000ff
2695#define lpfc_cntl_attr_multi_func_dev_WORD word110
2696 uint32_t word111;
2697#define lpfc_cntl_attr_cache_valid_SHIFT 0
2698#define lpfc_cntl_attr_cache_valid_MASK 0x000000ff
2699#define lpfc_cntl_attr_cache_valid_WORD word111
2700#define lpfc_cntl_attr_hba_status_SHIFT 8
2701#define lpfc_cntl_attr_hba_status_MASK 0x000000ff
2702#define lpfc_cntl_attr_hba_status_WORD word111
2703#define lpfc_cntl_attr_max_domain_SHIFT 16
2704#define lpfc_cntl_attr_max_domain_MASK 0x000000ff
2705#define lpfc_cntl_attr_max_domain_WORD word111
2706#define lpfc_cntl_attr_lnk_numb_SHIFT 24
2707#define lpfc_cntl_attr_lnk_numb_MASK 0x0000003f
2708#define lpfc_cntl_attr_lnk_numb_WORD word111
2709#define lpfc_cntl_attr_lnk_type_SHIFT 30
2710#define lpfc_cntl_attr_lnk_type_MASK 0x00000003
2711#define lpfc_cntl_attr_lnk_type_WORD word111
2712 uint32_t fw_post_status;
2713 uint32_t hba_mtu[8];
2714 uint32_t word121;
2715 uint32_t reserved1[3];
2716 uint32_t word125;
2717#define lpfc_cntl_attr_pci_vendor_id_SHIFT 0
2718#define lpfc_cntl_attr_pci_vendor_id_MASK 0x0000ffff
2719#define lpfc_cntl_attr_pci_vendor_id_WORD word125
2720#define lpfc_cntl_attr_pci_device_id_SHIFT 16
2721#define lpfc_cntl_attr_pci_device_id_MASK 0x0000ffff
2722#define lpfc_cntl_attr_pci_device_id_WORD word125
2723 uint32_t word126;
2724#define lpfc_cntl_attr_pci_subvdr_id_SHIFT 0
2725#define lpfc_cntl_attr_pci_subvdr_id_MASK 0x0000ffff
2726#define lpfc_cntl_attr_pci_subvdr_id_WORD word126
2727#define lpfc_cntl_attr_pci_subsys_id_SHIFT 16
2728#define lpfc_cntl_attr_pci_subsys_id_MASK 0x0000ffff
2729#define lpfc_cntl_attr_pci_subsys_id_WORD word126
2730 uint32_t word127;
2731#define lpfc_cntl_attr_pci_bus_num_SHIFT 0
2732#define lpfc_cntl_attr_pci_bus_num_MASK 0x000000ff
2733#define lpfc_cntl_attr_pci_bus_num_WORD word127
2734#define lpfc_cntl_attr_pci_dev_num_SHIFT 8
2735#define lpfc_cntl_attr_pci_dev_num_MASK 0x000000ff
2736#define lpfc_cntl_attr_pci_dev_num_WORD word127
2737#define lpfc_cntl_attr_pci_fnc_num_SHIFT 16
2738#define lpfc_cntl_attr_pci_fnc_num_MASK 0x000000ff
2739#define lpfc_cntl_attr_pci_fnc_num_WORD word127
2740#define lpfc_cntl_attr_inf_type_SHIFT 24
2741#define lpfc_cntl_attr_inf_type_MASK 0x000000ff
2742#define lpfc_cntl_attr_inf_type_WORD word127
2743 uint32_t unique_id[2];
2744 uint32_t word130;
2745#define lpfc_cntl_attr_num_netfil_SHIFT 0
2746#define lpfc_cntl_attr_num_netfil_MASK 0x000000ff
2747#define lpfc_cntl_attr_num_netfil_WORD word130
2748 uint32_t reserved2[4];
2749};
2750
2751struct lpfc_mbx_get_cntl_attributes {
2752 union lpfc_sli4_cfg_shdr cfg_shdr;
2753 struct lpfc_controller_attribute cntl_attr;
2754};
2755
2756struct lpfc_mbx_get_port_name {
2757 struct mbox_header header;
2758 union {
2759 struct {
2760 uint32_t word4;
2761#define lpfc_mbx_get_port_name_lnk_type_SHIFT 0
2762#define lpfc_mbx_get_port_name_lnk_type_MASK 0x00000003
2763#define lpfc_mbx_get_port_name_lnk_type_WORD word4
2764 } request;
2765 struct {
2766 uint32_t word4;
2767#define lpfc_mbx_get_port_name_name0_SHIFT 0
2768#define lpfc_mbx_get_port_name_name0_MASK 0x000000FF
2769#define lpfc_mbx_get_port_name_name0_WORD word4
2770#define lpfc_mbx_get_port_name_name1_SHIFT 8
2771#define lpfc_mbx_get_port_name_name1_MASK 0x000000FF
2772#define lpfc_mbx_get_port_name_name1_WORD word4
2773#define lpfc_mbx_get_port_name_name2_SHIFT 16
2774#define lpfc_mbx_get_port_name_name2_MASK 0x000000FF
2775#define lpfc_mbx_get_port_name_name2_WORD word4
2776#define lpfc_mbx_get_port_name_name3_SHIFT 24
2777#define lpfc_mbx_get_port_name_name3_MASK 0x000000FF
2778#define lpfc_mbx_get_port_name_name3_WORD word4
2779#define LPFC_LINK_NUMBER_0 0
2780#define LPFC_LINK_NUMBER_1 1
2781#define LPFC_LINK_NUMBER_2 2
2782#define LPFC_LINK_NUMBER_3 3
2783 } response;
2784 } u;
2785};
2786
2555/* Mailbox Completion Queue Error Messages */ 2787/* Mailbox Completion Queue Error Messages */
2556#define MB_CQE_STATUS_SUCCESS 0x0 2788#define MB_CQE_STATUS_SUCCESS 0x0
2557#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1 2789#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1
2558#define MB_CQE_STATUS_INVALID_PARAMETER 0x2 2790#define MB_CQE_STATUS_INVALID_PARAMETER 0x2
2559#define MB_CQE_STATUS_INSUFFICIENT_RESOURCES 0x3 2791#define MB_CQE_STATUS_INSUFFICIENT_RESOURCES 0x3
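GET_PORT_NAME packs four one-byte port names into the response's word4, one per link number. A driver-side fragment showing how the name for a given link could be selected with the macros above; this is a sketch, not code from this patch, and the mqe and link_number locals are assumed:

    uint8_t port_name;

    switch (link_number) {          /* e.g. from lpfc_mbx_rd_conf_lnk_numb */
    case LPFC_LINK_NUMBER_0:
            port_name = bf_get(lpfc_mbx_get_port_name_name0,
                               &mqe->un.get_port_name.u.response);
            break;
    case LPFC_LINK_NUMBER_1:
            port_name = bf_get(lpfc_mbx_get_port_name_name1,
                               &mqe->un.get_port_name.u.response);
            break;
    /* name2/name3 follow the same pattern for links 2 and 3 */
    }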
@@ -2637,8 +2869,9 @@ struct lpfc_mqe {
2637 struct lpfc_mbx_run_link_diag_test link_diag_test; 2869 struct lpfc_mbx_run_link_diag_test link_diag_test;
2638 struct lpfc_mbx_get_func_cfg get_func_cfg; 2870 struct lpfc_mbx_get_func_cfg get_func_cfg;
2639 struct lpfc_mbx_get_prof_cfg get_prof_cfg; 2871 struct lpfc_mbx_get_prof_cfg get_prof_cfg;
2640 struct lpfc_mbx_nop nop;
2641 struct lpfc_mbx_wr_object wr_object; 2872 struct lpfc_mbx_wr_object wr_object;
2873 struct lpfc_mbx_get_port_name get_port_name;
2874 struct lpfc_mbx_nop nop;
2642 } un; 2875 } un;
2643}; 2876};
2644 2877
@@ -2855,6 +3088,9 @@ struct wqe_common {
2855#define wqe_ctxt_tag_MASK 0x0000FFFF 3088#define wqe_ctxt_tag_MASK 0x0000FFFF
2856#define wqe_ctxt_tag_WORD word6 3089#define wqe_ctxt_tag_WORD word6
2857 uint32_t word7; 3090 uint32_t word7;
3091#define wqe_dif_SHIFT 0
3092#define wqe_dif_MASK 0x00000003
3093#define wqe_dif_WORD word7
2858#define wqe_ct_SHIFT 2 3094#define wqe_ct_SHIFT 2
2859#define wqe_ct_MASK 0x00000003 3095#define wqe_ct_MASK 0x00000003
2860#define wqe_ct_WORD word7 3096#define wqe_ct_WORD word7
@@ -2867,12 +3103,21 @@ struct wqe_common {
2867#define wqe_class_SHIFT 16 3103#define wqe_class_SHIFT 16
2868#define wqe_class_MASK 0x00000007 3104#define wqe_class_MASK 0x00000007
2869#define wqe_class_WORD word7 3105#define wqe_class_WORD word7
3106#define wqe_ar_SHIFT 19
3107#define wqe_ar_MASK 0x00000001
3108#define wqe_ar_WORD word7
3109#define wqe_ag_SHIFT wqe_ar_SHIFT
3110#define wqe_ag_MASK wqe_ar_MASK
3111#define wqe_ag_WORD wqe_ar_WORD
2870#define wqe_pu_SHIFT 20 3112#define wqe_pu_SHIFT 20
2871#define wqe_pu_MASK 0x00000003 3113#define wqe_pu_MASK 0x00000003
2872#define wqe_pu_WORD word7 3114#define wqe_pu_WORD word7
2873#define wqe_erp_SHIFT 22 3115#define wqe_erp_SHIFT 22
2874#define wqe_erp_MASK 0x00000001 3116#define wqe_erp_MASK 0x00000001
2875#define wqe_erp_WORD word7 3117#define wqe_erp_WORD word7
3118#define wqe_conf_SHIFT wqe_erp_SHIFT
3119#define wqe_conf_MASK wqe_erp_MASK
3120#define wqe_conf_WORD wqe_erp_WORD
2876#define wqe_lnk_SHIFT 23 3121#define wqe_lnk_SHIFT 23
2877#define wqe_lnk_MASK 0x00000001 3122#define wqe_lnk_MASK 0x00000001
2878#define wqe_lnk_WORD word7 3123#define wqe_lnk_WORD word7
@@ -2931,6 +3176,9 @@ struct wqe_common {
2931#define wqe_xc_SHIFT 21 3176#define wqe_xc_SHIFT 21
2932#define wqe_xc_MASK 0x00000001 3177#define wqe_xc_MASK 0x00000001
2933#define wqe_xc_WORD word10 3178#define wqe_xc_WORD word10
3179#define wqe_sr_SHIFT 22
3180#define wqe_sr_MASK 0x00000001
3181#define wqe_sr_WORD word10
2934#define wqe_ccpe_SHIFT 23 3182#define wqe_ccpe_SHIFT 23
2935#define wqe_ccpe_MASK 0x00000001 3183#define wqe_ccpe_MASK 0x00000001
2936#define wqe_ccpe_WORD word10 3184#define wqe_ccpe_WORD word10
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index a3c820083c3..907c94b9245 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -58,8 +58,7 @@ spinlock_t _dump_buf_lock;
58 58
59static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); 59static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
60static int lpfc_post_rcv_buf(struct lpfc_hba *); 60static int lpfc_post_rcv_buf(struct lpfc_hba *);
61static int lpfc_sli4_queue_create(struct lpfc_hba *); 61static int lpfc_sli4_queue_verify(struct lpfc_hba *);
62static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
63static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); 62static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
64static int lpfc_setup_endian_order(struct lpfc_hba *); 63static int lpfc_setup_endian_order(struct lpfc_hba *);
65static int lpfc_sli4_read_config(struct lpfc_hba *); 64static int lpfc_sli4_read_config(struct lpfc_hba *);
@@ -1438,6 +1437,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1438 struct Scsi_Host *shost; 1437 struct Scsi_Host *shost;
1439 uint32_t if_type; 1438 uint32_t if_type;
1440 struct lpfc_register portstat_reg; 1439 struct lpfc_register portstat_reg;
1440 int rc;
1441 1441
1442 /* If the pci channel is offline, ignore possible errors, since 1442 /* If the pci channel is offline, ignore possible errors, since
1443 * we cannot communicate with the pci card anyway. 1443 * we cannot communicate with the pci card anyway.
@@ -1480,16 +1480,24 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1480 lpfc_sli4_offline_eratt(phba); 1480 lpfc_sli4_offline_eratt(phba);
1481 return; 1481 return;
1482 } 1482 }
1483 if (bf_get(lpfc_sliport_status_rn, &portstat_reg)) { 1483 /*
 1484 /* 1484 * On error status condition, driver needs to wait for port
1485 * TODO: Attempt port recovery via a port reset. 1485 * ready before performing reset.
1486 * When fully implemented, the driver should 1486 */
1487 * attempt to recover the port here and return. 1487 rc = lpfc_sli4_pdev_status_reg_wait(phba);
1488 * For now, log an error and take the port offline. 1488 if (!rc) {
 1489 */ 1489 /* need reset: attempt port recovery */
1490 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 1490 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1491 "2887 Port Error: Attempting " 1491 "2887 Port Error: Attempting "
1492 "Port Recovery\n"); 1492 "Port Recovery\n");
1493 lpfc_offline_prep(phba);
1494 lpfc_offline(phba);
1495 lpfc_sli_brdrestart(phba);
1496 if (lpfc_online(phba) == 0) {
1497 lpfc_unblock_mgmt_io(phba);
1498 return;
1499 }
1500 /* fall through for not able to recover */
1493 } 1501 }
1494 lpfc_sli4_offline_eratt(phba); 1502 lpfc_sli4_offline_eratt(phba);
1495 break; 1503 break;
@@ -1724,11 +1732,20 @@ lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1724 j = 0; 1732 j = 0;
1725 Length -= (3+i); 1733 Length -= (3+i);
1726 while(i--) { 1734 while(i--) {
1727 phba->Port[j++] = vpd[index++]; 1735 if ((phba->sli_rev == LPFC_SLI_REV4) &&
1728 if (j == 19) 1736 (phba->sli4_hba.pport_name_sta ==
1729 break; 1737 LPFC_SLI4_PPNAME_GET)) {
1738 j++;
1739 index++;
1740 } else
1741 phba->Port[j++] = vpd[index++];
1742 if (j == 19)
1743 break;
1730 } 1744 }
1731 phba->Port[j] = 0; 1745 if ((phba->sli_rev != LPFC_SLI_REV4) ||
1746 (phba->sli4_hba.pport_name_sta ==
1747 LPFC_SLI4_PPNAME_NON))
1748 phba->Port[j] = 0;
1732 continue; 1749 continue;
1733 } 1750 }
1734 else { 1751 else {
@@ -1958,7 +1975,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1958 case PCI_DEVICE_ID_LANCER_FCOE: 1975 case PCI_DEVICE_ID_LANCER_FCOE:
1959 case PCI_DEVICE_ID_LANCER_FCOE_VF: 1976 case PCI_DEVICE_ID_LANCER_FCOE_VF:
1960 oneConnect = 1; 1977 oneConnect = 1;
1961 m = (typeof(m)){"OCe50100", "PCIe", "FCoE"}; 1978 m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
1962 break; 1979 break;
1963 default: 1980 default:
1964 m = (typeof(m)){"Unknown", "", ""}; 1981 m = (typeof(m)){"Unknown", "", ""};
@@ -2432,17 +2449,19 @@ lpfc_block_mgmt_io(struct lpfc_hba * phba)
2432 uint8_t actcmd = MBX_HEARTBEAT; 2449 uint8_t actcmd = MBX_HEARTBEAT;
2433 unsigned long timeout; 2450 unsigned long timeout;
2434 2451
2435 2452 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
2436 spin_lock_irqsave(&phba->hbalock, iflag); 2453 spin_lock_irqsave(&phba->hbalock, iflag);
2437 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; 2454 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
2438 if (phba->sli.mbox_active) 2455 if (phba->sli.mbox_active) {
2439 actcmd = phba->sli.mbox_active->u.mb.mbxCommand; 2456 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
2457 /* Determine how long we might wait for the active mailbox
2458 * command to be gracefully completed by firmware.
2459 */
2460 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
2461 phba->sli.mbox_active) * 1000) + jiffies;
2462 }
2440 spin_unlock_irqrestore(&phba->hbalock, iflag); 2463 spin_unlock_irqrestore(&phba->hbalock, iflag);
2441 /* Determine how long we might wait for the active mailbox 2464
2442 * command to be gracefully completed by firmware.
2443 */
2444 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
2445 jiffies;
2446 /* Wait for the outstnading mailbox command to complete */ 2465 /* Wait for the outstnading mailbox command to complete */
2447 while (phba->sli.mbox_active) { 2466 while (phba->sli.mbox_active) {
2448 /* Check active mailbox complete status every 2ms */ 2467 /* Check active mailbox complete status every 2ms */
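The point of the move is that the deadline is now derived from the actual active mailbox command (via lpfc_mbox_tmo_val) rather than a fixed guess taken before the lock. A userspace analog of the compute-deadline-then-poll pattern, with illustrative names only:

    #include <stdbool.h>
    #include <time.h>

    /* Poll done() every 2 ms until it succeeds or tmo_ms elapses. */
    static bool wait_done(bool (*done)(void), int tmo_ms)
    {
            struct timespec now, deadline;
            const struct timespec poll = { 0, 2 * 1000 * 1000 };    /* 2 ms */

            clock_gettime(CLOCK_MONOTONIC, &deadline);
            deadline.tv_sec += tmo_ms / 1000;
            deadline.tv_nsec += (long)(tmo_ms % 1000) * 1000000L;
            if (deadline.tv_nsec >= 1000000000L) {
                    deadline.tv_sec++;
                    deadline.tv_nsec -= 1000000000L;
            }
            while (!done()) {
                    nanosleep(&poll, NULL);
                    clock_gettime(CLOCK_MONOTONIC, &now);
                    if (now.tv_sec > deadline.tv_sec ||
                        (now.tv_sec == deadline.tv_sec &&
                         now.tv_nsec >= deadline.tv_nsec))
                            return false;   /* timed out */
            }
            return true;
    }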
@@ -3949,7 +3968,7 @@ static int
3949lpfc_enable_pci_dev(struct lpfc_hba *phba) 3968lpfc_enable_pci_dev(struct lpfc_hba *phba)
3950{ 3969{
3951 struct pci_dev *pdev; 3970 struct pci_dev *pdev;
3952 int bars; 3971 int bars = 0;
3953 3972
3954 /* Obtain PCI device reference */ 3973 /* Obtain PCI device reference */
3955 if (!phba->pcidev) 3974 if (!phba->pcidev)
@@ -3978,6 +3997,8 @@ lpfc_enable_pci_dev(struct lpfc_hba *phba)
3978out_disable_device: 3997out_disable_device:
3979 pci_disable_device(pdev); 3998 pci_disable_device(pdev);
3980out_error: 3999out_error:
4000 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4001 "1401 Failed to enable pci device, bars:x%x\n", bars);
3981 return -ENODEV; 4002 return -ENODEV;
3982} 4003}
3983 4004
@@ -4051,9 +4072,6 @@ lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
4051 uint16_t nr_virtfn; 4072 uint16_t nr_virtfn;
4052 int pos; 4073 int pos;
4053 4074
4054 if (!pdev->is_physfn)
4055 return 0;
4056
4057 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); 4075 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4058 if (pos == 0) 4076 if (pos == 0)
4059 return 0; 4077 return 0;
@@ -4474,15 +4492,15 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4474 } 4492 }
4475 } 4493 }
4476 mempool_free(mboxq, phba->mbox_mem_pool); 4494 mempool_free(mboxq, phba->mbox_mem_pool);
4477 /* Create all the SLI4 queues */ 4495 /* Verify all the SLI4 queues */
4478 rc = lpfc_sli4_queue_create(phba); 4496 rc = lpfc_sli4_queue_verify(phba);
4479 if (rc) 4497 if (rc)
4480 goto out_free_bsmbx; 4498 goto out_free_bsmbx;
4481 4499
4482 /* Create driver internal CQE event pool */ 4500 /* Create driver internal CQE event pool */
4483 rc = lpfc_sli4_cq_event_pool_create(phba); 4501 rc = lpfc_sli4_cq_event_pool_create(phba);
4484 if (rc) 4502 if (rc)
4485 goto out_destroy_queue; 4503 goto out_free_bsmbx;
4486 4504
4487 /* Initialize and populate the iocb list per host */ 4505 /* Initialize and populate the iocb list per host */
4488 rc = lpfc_init_sgl_list(phba); 4506 rc = lpfc_init_sgl_list(phba);
@@ -4516,14 +4534,21 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
4516 goto out_remove_rpi_hdrs; 4534 goto out_remove_rpi_hdrs;
4517 } 4535 }
4518 4536
4519 phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * 4537 /*
4538 * The cfg_fcp_eq_count can be zero whenever there is exactly one
4539 * interrupt vector. This is not an error
4540 */
4541 if (phba->cfg_fcp_eq_count) {
4542 phba->sli4_hba.fcp_eq_hdl =
4543 kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
4520 phba->cfg_fcp_eq_count), GFP_KERNEL); 4544 phba->cfg_fcp_eq_count), GFP_KERNEL);
4521 if (!phba->sli4_hba.fcp_eq_hdl) { 4545 if (!phba->sli4_hba.fcp_eq_hdl) {
4522 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 4546 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4523 "2572 Failed allocate memory for fast-path " 4547 "2572 Failed allocate memory for "
4524 "per-EQ handle array\n"); 4548 "fast-path per-EQ handle array\n");
4525 rc = -ENOMEM; 4549 rc = -ENOMEM;
4526 goto out_free_fcf_rr_bmask; 4550 goto out_free_fcf_rr_bmask;
4551 }
4527 } 4552 }
4528 4553
4529 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * 4554 phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
@@ -4567,8 +4592,6 @@ out_free_sgl_list:
4567 lpfc_free_sgl_list(phba); 4592 lpfc_free_sgl_list(phba);
4568out_destroy_cq_event_pool: 4593out_destroy_cq_event_pool:
4569 lpfc_sli4_cq_event_pool_destroy(phba); 4594 lpfc_sli4_cq_event_pool_destroy(phba);
4570out_destroy_queue:
4571 lpfc_sli4_queue_destroy(phba);
4572out_free_bsmbx: 4595out_free_bsmbx:
4573 lpfc_destroy_bootstrap_mbox(phba); 4596 lpfc_destroy_bootstrap_mbox(phba);
4574out_free_mem: 4597out_free_mem:
@@ -4608,9 +4631,6 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
4608 /* Free the SCSI sgl management array */ 4631 /* Free the SCSI sgl management array */
4609 kfree(phba->sli4_hba.lpfc_scsi_psb_array); 4632 kfree(phba->sli4_hba.lpfc_scsi_psb_array);
4610 4633
4611 /* Free the SLI4 queues */
4612 lpfc_sli4_queue_destroy(phba);
4613
4614 /* Free the completion queue EQ event pool */ 4634 /* Free the completion queue EQ event pool */
4615 lpfc_sli4_cq_event_release_all(phba); 4635 lpfc_sli4_cq_event_release_all(phba);
4616 lpfc_sli4_cq_event_pool_destroy(phba); 4636 lpfc_sli4_cq_event_pool_destroy(phba);
@@ -6139,24 +6159,21 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
6139} 6159}
6140 6160
6141/** 6161/**
6142 * lpfc_sli4_queue_create - Create all the SLI4 queues 6162 * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts
6143 * @phba: pointer to lpfc hba data structure. 6163 * @phba: pointer to lpfc hba data structure.
6144 * 6164 *
6145 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA 6165 * This routine is invoked to check the user settable queue counts for EQs and
6146 * operation. For each SLI4 queue type, the parameters such as queue entry 6166 * CQs. after this routine is called the counts will be set to valid values that
6147 * count (queue depth) shall be taken from the module parameter. For now, 6167 * adhere to the constraints of the system's interrupt vectors and the port's
6148 * we just use some constant number as place holder. 6168 * queue resources.
6149 * 6169 *
6150 * Return codes 6170 * Return codes
6151 * 0 - successful 6171 * 0 - successful
6152 * -ENOMEM - No available memory 6172 * -ENOMEM - No available memory
6153 * -EIO - The mailbox failed to complete successfully.
6154 **/ 6173 **/
6155static int 6174static int
6156lpfc_sli4_queue_create(struct lpfc_hba *phba) 6175lpfc_sli4_queue_verify(struct lpfc_hba *phba)
6157{ 6176{
6158 struct lpfc_queue *qdesc;
6159 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
6160 int cfg_fcp_wq_count; 6177 int cfg_fcp_wq_count;
6161 int cfg_fcp_eq_count; 6178 int cfg_fcp_eq_count;
6162 6179
@@ -6229,14 +6246,43 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
6229 /* The overall number of event queues used */ 6246 /* The overall number of event queues used */
6230 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF; 6247 phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
6231 6248
6232 /*
6233 * Create Event Queues (EQs)
6234 */
6235
6236 /* Get EQ depth from module parameter, fake the default for now */ 6249 /* Get EQ depth from module parameter, fake the default for now */
6237 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; 6250 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
6238 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; 6251 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
6239 6252
6253 /* Get CQ depth from module parameter, fake the default for now */
6254 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
6255 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
6256
6257 return 0;
6258out_error:
6259 return -ENOMEM;
6260}
6261
6262/**
6263 * lpfc_sli4_queue_create - Create all the SLI4 queues
6264 * @phba: pointer to lpfc hba data structure.
6265 *
6266 * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
6267 * operation. For each SLI4 queue type, the parameters such as queue entry
6268 * count (queue depth) shall be taken from the module parameter. For now,
6269 * we just use some constant number as place holder.
6270 *
6271 * Return codes
 6272 * 0 - successful
 6273 * -ENOMEM - No available memory
6274 * -EIO - The mailbox failed to complete successfully.
6275 **/
6276int
6277lpfc_sli4_queue_create(struct lpfc_hba *phba)
6278{
6279 struct lpfc_queue *qdesc;
6280 int fcp_eqidx, fcp_cqidx, fcp_wqidx;
6281
6282 /*
6283 * Create Event Queues (EQs)
6284 */
6285
6240 /* Create slow path event queue */ 6286 /* Create slow path event queue */
6241 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 6287 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
6242 phba->sli4_hba.eq_ecount); 6288 phba->sli4_hba.eq_ecount);
@@ -6247,14 +6293,20 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
6247 } 6293 }
6248 phba->sli4_hba.sp_eq = qdesc; 6294 phba->sli4_hba.sp_eq = qdesc;
6249 6295
6250 /* Create fast-path FCP Event Queue(s) */ 6296 /*
6251 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) * 6297 * Create fast-path FCP Event Queue(s). The cfg_fcp_eq_count can be
6252 phba->cfg_fcp_eq_count), GFP_KERNEL); 6298 * zero whenever there is exactly one interrupt vector. This is not
6253 if (!phba->sli4_hba.fp_eq) { 6299 * an error.
6254 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6300 */
6255 "2576 Failed allocate memory for fast-path " 6301 if (phba->cfg_fcp_eq_count) {
6256 "EQ record array\n"); 6302 phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
6257 goto out_free_sp_eq; 6303 phba->cfg_fcp_eq_count), GFP_KERNEL);
6304 if (!phba->sli4_hba.fp_eq) {
6305 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6306 "2576 Failed allocate memory for "
6307 "fast-path EQ record array\n");
6308 goto out_free_sp_eq;
6309 }
6258 } 6310 }
6259 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { 6311 for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
6260 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, 6312 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
@@ -6271,10 +6323,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
6271 * Create Complete Queues (CQs) 6323 * Create Complete Queues (CQs)
6272 */ 6324 */
6273 6325
6274 /* Get CQ depth from module parameter, fake the default for now */
6275 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
6276 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
6277
6278 /* Create slow-path Mailbox Command Complete Queue */ 6326 /* Create slow-path Mailbox Command Complete Queue */
6279 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 6327 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6280 phba->sli4_hba.cq_ecount); 6328 phba->sli4_hba.cq_ecount);
@@ -6296,16 +6344,25 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
6296 phba->sli4_hba.els_cq = qdesc; 6344 phba->sli4_hba.els_cq = qdesc;
6297 6345
6298 6346
6299 /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ 6347 /*
6300 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * 6348 * Create fast-path FCP Completion Queue(s), one-to-one with FCP EQs.
6301 phba->cfg_fcp_eq_count), GFP_KERNEL); 6349 * If there are no FCP EQs then create exactly one FCP CQ.
6350 */
6351 if (phba->cfg_fcp_eq_count)
6352 phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
6353 phba->cfg_fcp_eq_count),
6354 GFP_KERNEL);
6355 else
6356 phba->sli4_hba.fcp_cq = kzalloc(sizeof(struct lpfc_queue *),
6357 GFP_KERNEL);
6302 if (!phba->sli4_hba.fcp_cq) { 6358 if (!phba->sli4_hba.fcp_cq) {
6303 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6359 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6304 "2577 Failed allocate memory for fast-path " 6360 "2577 Failed allocate memory for fast-path "
6305 "CQ record array\n"); 6361 "CQ record array\n");
6306 goto out_free_els_cq; 6362 goto out_free_els_cq;
6307 } 6363 }
6308 for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { 6364 fcp_cqidx = 0;
6365 do {
6309 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, 6366 qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
6310 phba->sli4_hba.cq_ecount); 6367 phba->sli4_hba.cq_ecount);
6311 if (!qdesc) { 6368 if (!qdesc) {
@@ -6315,7 +6372,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
6315 goto out_free_fcp_cq; 6372 goto out_free_fcp_cq;
6316 } 6373 }
6317 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc; 6374 phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
6318 } 6375 } while (++fcp_cqidx < phba->cfg_fcp_eq_count);
6319 6376
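Converting the for loop into a do/while is what makes the "exactly one FCP CQ when there are no FCP EQs" comment hold: a for loop would not execute at all when cfg_fcp_eq_count is 0. A tiny standalone program showing the difference (names are illustrative, not driver code):

    #include <stdio.h>

    int main(void)
    {
        int cfg_fcp_eq_count = 0;   /* single interrupt vector case */
        int fcp_cqidx = 0;

        do {    /* runs at least once, unlike for (; idx < count;) */
            printf("allocate fcp_cq[%d]\n", fcp_cqidx);
        } while (++fcp_cqidx < cfg_fcp_eq_count);
        return 0;
    }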
6320 /* Create Mailbox Command Queue */ 6377 /* Create Mailbox Command Queue */
6321 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; 6378 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
@@ -6447,7 +6504,7 @@ out_error:
6447 * -ENOMEM - No available memory 6504 * -ENOMEM - No available memory
6448 * -EIO - The mailbox failed to complete successfully. 6505 * -EIO - The mailbox failed to complete successfully.
6449 **/ 6506 **/
6450static void 6507void
6451lpfc_sli4_queue_destroy(struct lpfc_hba *phba) 6508lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
6452{ 6509{
6453 int fcp_qidx; 6510 int fcp_qidx;
@@ -6723,6 +6780,10 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6723 "0540 Receive Queue not allocated\n"); 6780 "0540 Receive Queue not allocated\n");
6724 goto out_destroy_fcp_wq; 6781 goto out_destroy_fcp_wq;
6725 } 6782 }
6783
6784 lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
6785 lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ);
6786
6726 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, 6787 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
6727 phba->sli4_hba.els_cq, LPFC_USOL); 6788 phba->sli4_hba.els_cq, LPFC_USOL);
6728 if (rc) { 6789 if (rc) {
@@ -6731,6 +6792,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
6731 "rc = 0x%x\n", rc); 6792 "rc = 0x%x\n", rc);
6732 goto out_destroy_fcp_wq; 6793 goto out_destroy_fcp_wq;
6733 } 6794 }
6795
6734 lpfc_printf_log(phba, KERN_INFO, LOG_INIT, 6796 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6735 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " 6797 "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
6736 "parent cq-id=%d\n", 6798 "parent cq-id=%d\n",
@@ -6790,8 +6852,10 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
6790 /* Unset ELS complete queue */ 6852 /* Unset ELS complete queue */
6791 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); 6853 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
6792 /* Unset FCP response complete queue */ 6854 /* Unset FCP response complete queue */
6793 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6855 fcp_qidx = 0;
6856 do {
6794 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); 6857 lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
6858 } while (++fcp_qidx < phba->cfg_fcp_eq_count);
6795 /* Unset fast-path event queue */ 6859 /* Unset fast-path event queue */
6796 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) 6860 for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
6797 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); 6861 lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
@@ -7040,10 +7104,11 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
7040 * the loop again. 7104 * the loop again.
7041 */ 7105 */
7042 for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) { 7106 for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
7107 msleep(10);
7043 if (lpfc_readl(phba->sli4_hba.u.if_type2. 7108 if (lpfc_readl(phba->sli4_hba.u.if_type2.
7044 STATUSregaddr, &reg_data.word0)) { 7109 STATUSregaddr, &reg_data.word0)) {
7045 rc = -ENODEV; 7110 rc = -ENODEV;
7046 break; 7111 goto out;
7047 } 7112 }
7048 if (bf_get(lpfc_sliport_status_rdy, &reg_data)) 7113 if (bf_get(lpfc_sliport_status_rdy, &reg_data))
7049 break; 7114 break;
@@ -7051,7 +7116,6 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
7051 reset_again++; 7116 reset_again++;
7052 break; 7117 break;
7053 } 7118 }
7054 msleep(10);
7055 } 7119 }
7056 7120
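Two things change in the ready-poll above: the 10ms delay moves to the top of the loop, so the port always gets a settling period before the first STATUS read, and a failed register read now leaves through the common "out:" label instead of merely breaking the scan. A hedged sketch of the resulting shape — read_port_status and port_ready are invented stand-ins for lpfc_readl()/bf_get():

    #include <stdbool.h>

    extern void msleep(unsigned int ms);
    extern int read_port_status(unsigned int *word);
    extern bool port_ready(unsigned int word);

    static int wait_port_ready(void)
    {
        unsigned int word;
        int rdy_chk;

        for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
            msleep(10);                 /* settle before reading */
            if (read_port_status(&word))
                goto out;               /* dead register: bail out */
            if (port_ready(word))
                return 0;
        }
    out:
        return -19;                     /* -ENODEV */
    }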
7057 /* 7121 /*
@@ -7065,11 +7129,6 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
7065 } 7129 }
7066 7130
7067 /* Detect any port errors. */ 7131 /* Detect any port errors. */
7068 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
7069 &reg_data.word0)) {
7070 rc = -ENODEV;
7071 break;
7072 }
7073 if ((bf_get(lpfc_sliport_status_err, &reg_data)) || 7132 if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
7074 (rdy_chk >= 1000)) { 7133 (rdy_chk >= 1000)) {
7075 phba->work_status[0] = readl( 7134 phba->work_status[0] = readl(
@@ -7102,6 +7161,7 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
7102 break; 7161 break;
7103 } 7162 }
7104 7163
7164out:
7105 /* Catch the not-ready port failure after a port reset. */ 7165 /* Catch the not-ready port failure after a port reset. */
7106 if (num_resets >= MAX_IF_TYPE_2_RESETS) 7166 if (num_resets >= MAX_IF_TYPE_2_RESETS)
7107 rc = -ENODEV; 7167 rc = -ENODEV;
@@ -7149,12 +7209,13 @@ lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
7149 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, 7209 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
7150 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED); 7210 LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
7151 7211
7152 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
7153 for (cmdsent = 0; cmdsent < cnt; cmdsent++) { 7212 for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
7154 if (!phba->sli4_hba.intr_enable) 7213 if (!phba->sli4_hba.intr_enable)
7155 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 7214 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7156 else 7215 else {
7216 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
7157 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 7217 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
7218 }
7158 if (rc == MBX_TIMEOUT) 7219 if (rc == MBX_TIMEOUT)
7159 break; 7220 break;
7160 /* Check return status */ 7221 /* Check return status */
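Because lpfc_mbox_tmo_val() now takes the mailbox object rather than a bare command code (see the lpfc_mbox.c hunk further down), the timeout lookup can no longer be hoisted above the loop; it moves next to the wait call. A compilable sketch of the pattern with invented stand-in names (issue_poll, issue_wait, tmo_for):

    extern int issue_poll(void *mboxq);
    extern int issue_wait(void *mboxq, unsigned int tmo);
    extern unsigned int tmo_for(void *mboxq);   /* per-object timeout */

    static int send_nops(void *mboxq, int cnt, int intr_enable)
    {
        int cmdsent, rc = 0;

        for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
            if (!intr_enable)
                rc = issue_poll(mboxq);
            else    /* timeout may differ per mailbox contents */
                rc = issue_wait(mboxq, tmo_for(mboxq));
            if (rc)
                break;
        }
        return rc;
    }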
@@ -7974,6 +8035,7 @@ lpfc_sli4_unset_hba(struct lpfc_hba *phba)
7974 8035
7975 /* Reset SLI4 HBA FCoE function */ 8036 /* Reset SLI4 HBA FCoE function */
7976 lpfc_pci_function_reset(phba); 8037 lpfc_pci_function_reset(phba);
8038 lpfc_sli4_queue_destroy(phba);
7977 8039
7978 return; 8040 return;
7979} 8041}
@@ -8087,6 +8149,7 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
8087 8149
8088 /* Reset SLI4 HBA FCoE function */ 8150 /* Reset SLI4 HBA FCoE function */
8089 lpfc_pci_function_reset(phba); 8151 lpfc_pci_function_reset(phba);
8152 lpfc_sli4_queue_destroy(phba);
8090 8153
8091 /* Stop the SLI4 device port */ 8154 /* Stop the SLI4 device port */
8092 phba->pport->work_port_events = 0; 8155 phba->pport->work_port_events = 0;
@@ -8120,7 +8183,7 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8120 if (!phba->sli4_hba.intr_enable) 8183 if (!phba->sli4_hba.intr_enable)
8121 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8184 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8122 else { 8185 else {
8123 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES); 8186 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
8124 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 8187 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
8125 } 8188 }
8126 8189
@@ -8182,6 +8245,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8182 int rc; 8245 int rc;
8183 struct lpfc_mqe *mqe = &mboxq->u.mqe; 8246 struct lpfc_mqe *mqe = &mboxq->u.mqe;
8184 struct lpfc_pc_sli4_params *sli4_params; 8247 struct lpfc_pc_sli4_params *sli4_params;
8248 uint32_t mbox_tmo;
8185 int length; 8249 int length;
8186 struct lpfc_sli4_parameters *mbx_sli4_parameters; 8250 struct lpfc_sli4_parameters *mbx_sli4_parameters;
8187 8251
@@ -8200,9 +8264,10 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
8200 length, LPFC_SLI4_MBX_EMBED); 8264 length, LPFC_SLI4_MBX_EMBED);
8201 if (!phba->sli4_hba.intr_enable) 8265 if (!phba->sli4_hba.intr_enable)
8202 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); 8266 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8203 else 8267 else {
8204 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, 8268 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
8205 lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG)); 8269 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
8270 }
8206 if (unlikely(rc)) 8271 if (unlikely(rc))
8207 return rc; 8272 return rc;
8208 sli4_params = &phba->sli4_hba.pc_sli4_params; 8273 sli4_params = &phba->sli4_hba.pc_sli4_params;
@@ -8271,11 +8336,8 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
8271 8336
8272 /* Perform generic PCI device enabling operation */ 8337 /* Perform generic PCI device enabling operation */
8273 error = lpfc_enable_pci_dev(phba); 8338 error = lpfc_enable_pci_dev(phba);
8274 if (error) { 8339 if (error)
8275 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8276 "1401 Failed to enable pci device.\n");
8277 goto out_free_phba; 8340 goto out_free_phba;
8278 }
8279 8341
8280 /* Set up SLI API function jump table for PCI-device group-0 HBAs */ 8342 /* Set up SLI API function jump table for PCI-device group-0 HBAs */
8281 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); 8343 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
@@ -8322,6 +8384,9 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
8322 goto out_free_iocb_list; 8384 goto out_free_iocb_list;
8323 } 8385 }
8324 8386
8387 /* Get the default values for Model Name and Description */
8388 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
8389
8325 /* Create SCSI host to the physical port */ 8390 /* Create SCSI host to the physical port */
8326 error = lpfc_create_shost(phba); 8391 error = lpfc_create_shost(phba);
8327 if (error) { 8392 if (error) {
@@ -8885,16 +8950,17 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
8885 uint32_t offset = 0, temp_offset = 0; 8950 uint32_t offset = 0, temp_offset = 0;
8886 8951
8887 INIT_LIST_HEAD(&dma_buffer_list); 8952 INIT_LIST_HEAD(&dma_buffer_list);
8888 if ((image->magic_number != LPFC_GROUP_OJECT_MAGIC_NUM) || 8953 if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) ||
8889 (bf_get(lpfc_grp_hdr_file_type, image) != LPFC_FILE_TYPE_GROUP) || 8954 (bf_get_be32(lpfc_grp_hdr_file_type, image) !=
8890 (bf_get(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) || 8955 LPFC_FILE_TYPE_GROUP) ||
8891 (image->size != fw->size)) { 8956 (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) ||
8957 (be32_to_cpu(image->size) != fw->size)) {
8892 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 8958 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8893 "3022 Invalid FW image found. " 8959 "3022 Invalid FW image found. "
8894 "Magic:%d Type:%x ID:%x\n", 8960 "Magic:%x Type:%x ID:%x\n",
8895 image->magic_number, 8961 be32_to_cpu(image->magic_number),
8896 bf_get(lpfc_grp_hdr_file_type, image), 8962 bf_get_be32(lpfc_grp_hdr_file_type, image),
8897 bf_get(lpfc_grp_hdr_id, image)); 8963 bf_get_be32(lpfc_grp_hdr_id, image));
8898 return -EINVAL; 8964 return -EINVAL;
8899 } 8965 }
8900 lpfc_decode_firmware_rev(phba, fwrev, 1); 8966 lpfc_decode_firmware_rev(phba, fwrev, 1);
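The header fields in the firmware file are big-endian, so comparing them raw only happened to work on big-endian hosts; the fix above byte-swaps each word before the check. A standalone illustration of the idea — the struct layout and names here are simplified, not the driver's actual header type:

    #include <stdint.h>
    #include <arpa/inet.h>   /* ntohl() as a portable be32-to-cpu stand-in */

    struct grp_hdr { uint32_t size; uint32_t magic_number; };

    static int image_valid(const struct grp_hdr *h,
                           uint32_t fw_size, uint32_t magic)
    {
        /* swap the stored big-endian words before comparing */
        return ntohl(h->magic_number) == magic &&
               ntohl(h->size) == fw_size;
    }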
@@ -8924,11 +8990,11 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw)
8924 while (offset < fw->size) { 8990 while (offset < fw->size) {
8925 temp_offset = offset; 8991 temp_offset = offset;
8926 list_for_each_entry(dmabuf, &dma_buffer_list, list) { 8992 list_for_each_entry(dmabuf, &dma_buffer_list, list) {
8927 if (offset + SLI4_PAGE_SIZE > fw->size) { 8993 if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
8928 temp_offset += fw->size - offset;
8929 memcpy(dmabuf->virt, 8994 memcpy(dmabuf->virt,
8930 fw->data + temp_offset, 8995 fw->data + temp_offset,
8931 fw->size - offset); 8996 fw->size - temp_offset);
8997 temp_offset = fw->size;
8932 break; 8998 break;
8933 } 8999 }
8934 memcpy(dmabuf->virt, fw->data + temp_offset, 9000 memcpy(dmabuf->virt, fw->data + temp_offset,
@@ -8984,7 +9050,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8984 uint32_t cfg_mode, intr_mode; 9050 uint32_t cfg_mode, intr_mode;
8985 int mcnt; 9051 int mcnt;
8986 int adjusted_fcp_eq_count; 9052 int adjusted_fcp_eq_count;
8987 int fcp_qidx;
8988 const struct firmware *fw; 9053 const struct firmware *fw;
8989 uint8_t file_name[16]; 9054 uint8_t file_name[16];
8990 9055
@@ -8995,11 +9060,8 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
8995 9060
8996 /* Perform generic PCI device enabling operation */ 9061 /* Perform generic PCI device enabling operation */
8997 error = lpfc_enable_pci_dev(phba); 9062 error = lpfc_enable_pci_dev(phba);
8998 if (error) { 9063 if (error)
8999 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9000 "1409 Failed to enable pci device.\n");
9001 goto out_free_phba; 9064 goto out_free_phba;
9002 }
9003 9065
9004 /* Set up SLI API function jump table for PCI-device group-1 HBAs */ 9066 /* Set up SLI API function jump table for PCI-device group-1 HBAs */
9005 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); 9067 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
@@ -9054,6 +9116,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9054 goto out_free_iocb_list; 9116 goto out_free_iocb_list;
9055 } 9117 }
9056 9118
9119 /* Get the default values for Model Name and Description */
9120 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9121
9057 /* Create SCSI host to the physical port */ 9122 /* Create SCSI host to the physical port */
9058 error = lpfc_create_shost(phba); 9123 error = lpfc_create_shost(phba);
9059 if (error) { 9124 if (error) {
@@ -9093,16 +9158,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
9093 adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1; 9158 adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1;
9094 else 9159 else
9095 adjusted_fcp_eq_count = phba->cfg_fcp_eq_count; 9160 adjusted_fcp_eq_count = phba->cfg_fcp_eq_count;
9096 /* Free unused EQs */
9097 for (fcp_qidx = adjusted_fcp_eq_count;
9098 fcp_qidx < phba->cfg_fcp_eq_count;
9099 fcp_qidx++) {
9100 lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
9101 /* do not delete the first fcp_cq */
9102 if (fcp_qidx)
9103 lpfc_sli4_queue_free(
9104 phba->sli4_hba.fcp_cq[fcp_qidx]);
9105 }
9106 phba->cfg_fcp_eq_count = adjusted_fcp_eq_count; 9161 phba->cfg_fcp_eq_count = adjusted_fcp_eq_count;
9107 /* Set up SLI-4 HBA */ 9162 /* Set up SLI-4 HBA */
9108 if (lpfc_sli4_hba_setup(phba)) { 9163 if (lpfc_sli4_hba_setup(phba)) {
@@ -9285,6 +9340,7 @@ lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
9285 9340
9286 /* Disable interrupt from device */ 9341 /* Disable interrupt from device */
9287 lpfc_sli4_disable_intr(phba); 9342 lpfc_sli4_disable_intr(phba);
9343 lpfc_sli4_queue_destroy(phba);
9288 9344
9289 /* Save device state to PCI config space */ 9345 /* Save device state to PCI config space */
9290 pci_save_state(pdev); 9346 pci_save_state(pdev);
@@ -9414,6 +9470,7 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
9414 9470
9415 /* Disable interrupt and pci device */ 9471 /* Disable interrupt and pci device */
9416 lpfc_sli4_disable_intr(phba); 9472 lpfc_sli4_disable_intr(phba);
9473 lpfc_sli4_queue_destroy(phba);
9417 pci_disable_device(phba->pcidev); 9474 pci_disable_device(phba->pcidev);
9418 9475
9419 /* Flush all driver's outstanding SCSI I/Os as we are to reset */ 9476 /* Flush all driver's outstanding SCSI I/Os as we are to reset */
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index e3b790e5915..baf53e6c2bd 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -36,6 +36,7 @@
36#define LOG_SECURITY 0x00008000 /* Security events */ 36#define LOG_SECURITY 0x00008000 /* Security events */
37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ 37#define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */
38#define LOG_FIP 0x00020000 /* FIP events */ 38#define LOG_FIP 0x00020000 /* FIP events */
 39#define LOG_FCP_UNDER 0x00040000 /* FCP underrun errors */
39#define LOG_ALL_MSG 0xffffffff /* LOG all messages */ 40#define LOG_ALL_MSG 0xffffffff /* LOG all messages */
40 41
41#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ 42#define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 83450cc5c4d..2ebc7d2540c 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1598,9 +1598,12 @@ lpfc_mbox_dev_check(struct lpfc_hba *phba)
1598 * Timeout value to be used for the given mailbox command 1598 * Timeout value to be used for the given mailbox command
1599 **/ 1599 **/
1600int 1600int
1601lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd) 1601lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
1602{ 1602{
1603 switch (cmd) { 1603 MAILBOX_t *mbox = &mboxq->u.mb;
1604 uint8_t subsys, opcode;
1605
1606 switch (mbox->mbxCommand) {
1604 case MBX_WRITE_NV: /* 0x03 */ 1607 case MBX_WRITE_NV: /* 0x03 */
1605 case MBX_UPDATE_CFG: /* 0x1B */ 1608 case MBX_UPDATE_CFG: /* 0x1B */
1606 case MBX_DOWN_LOAD: /* 0x1C */ 1609 case MBX_DOWN_LOAD: /* 0x1C */
@@ -1610,6 +1613,28 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
1610 case MBX_LOAD_EXP_ROM: /* 0x9C */ 1613 case MBX_LOAD_EXP_ROM: /* 0x9C */
1611 return LPFC_MBOX_TMO_FLASH_CMD; 1614 return LPFC_MBOX_TMO_FLASH_CMD;
1612 case MBX_SLI4_CONFIG: /* 0x9b */ 1615 case MBX_SLI4_CONFIG: /* 0x9b */
1616 subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq);
1617 opcode = lpfc_sli_config_mbox_opcode_get(phba, mboxq);
1618 if (subsys == LPFC_MBOX_SUBSYSTEM_COMMON) {
1619 switch (opcode) {
1620 case LPFC_MBOX_OPCODE_READ_OBJECT:
1621 case LPFC_MBOX_OPCODE_WRITE_OBJECT:
1622 case LPFC_MBOX_OPCODE_READ_OBJECT_LIST:
1623 case LPFC_MBOX_OPCODE_DELETE_OBJECT:
1624 case LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG:
1625 case LPFC_MBOX_OPCODE_GET_PROFILE_LIST:
1626 case LPFC_MBOX_OPCODE_SET_ACT_PROFILE:
1627 case LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG:
1628 case LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG:
1629 return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
1630 }
1631 }
1632 if (subsys == LPFC_MBOX_SUBSYSTEM_FCOE) {
1633 switch (opcode) {
1634 case LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS:
1635 return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
1636 }
1637 }
1613 return LPFC_MBOX_SLI4_CONFIG_TMO; 1638 return LPFC_MBOX_SLI4_CONFIG_TMO;
1614 } 1639 }
1615 return LPFC_MBOX_TMO; 1640 return LPFC_MBOX_TMO;
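For callers the visible change is just the argument: the whole mailbox object goes in, so SLI_CONFIG sub-commands such as WRITE_OBJECT can be steered to the extended timeout by subsystem and opcode. The call pattern, as it appears throughout this series:

    /* before: mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); */
    mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
    rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);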
@@ -1859,7 +1884,7 @@ lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1859 } 1884 }
1860 1885
1861 /* Complete the initialization for the particular Opcode. */ 1886 /* Complete the initialization for the particular Opcode. */
1862 opcode = lpfc_sli4_mbox_opcode_get(phba, mbox); 1887 opcode = lpfc_sli_config_mbox_opcode_get(phba, mbox);
1863 switch (opcode) { 1888 switch (opcode) {
1864 case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT: 1889 case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT:
1865 if (emb == LPFC_SLI4_MBX_EMBED) 1890 if (emb == LPFC_SLI4_MBX_EMBED)
@@ -1886,23 +1911,56 @@ lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
1886} 1911}
1887 1912
1888/** 1913/**
1889 * lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command 1914 * lpfc_sli_config_mbox_subsys_get - Get subsystem from a sli_config mbox cmd
1890 * @phba: pointer to lpfc hba data structure. 1915 * @phba: pointer to lpfc hba data structure.
1891 * @mbox: pointer to lpfc mbox command. 1916 * @mbox: pointer to lpfc mbox command queue entry.
1917 *
1918 * This routine gets the subsystem from a SLI4 specific SLI_CONFIG mailbox
1919 * command. If the mailbox command is not MBX_SLI4_CONFIG (0x9B) or if the
1920 * sub-header is not present, subsystem LPFC_MBOX_SUBSYSTEM_NA (0x0) shall
1921 * be returned.
1922 **/
1923uint8_t
1924lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
1925{
1926 struct lpfc_mbx_sli4_config *sli4_cfg;
1927 union lpfc_sli4_cfg_shdr *cfg_shdr;
1928
1929 if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
1930 return LPFC_MBOX_SUBSYSTEM_NA;
1931 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1932
1933 /* For embedded mbox command, get opcode from embedded sub-header*/
1934 if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
1935 cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
1936 return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
1937 }
1938
1939 /* For non-embedded mbox command, get opcode from first dma page */
1940 if (unlikely(!mbox->sge_array))
1941 return LPFC_MBOX_SUBSYSTEM_NA;
1942 cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
1943 return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
1944}
1945
1946/**
1947 * lpfc_sli_config_mbox_opcode_get - Get opcode from a sli_config mbox cmd
1948 * @phba: pointer to lpfc hba data structure.
1949 * @mbox: pointer to lpfc mbox command queue entry.
1892 * 1950 *
1893 * This routine gets the opcode from a SLI4 specific mailbox command for 1951 * This routine gets the opcode from a SLI4 specific SLI_CONFIG mailbox
1894 * sending IOCTL command. If the mailbox command is not MBX_SLI4_CONFIG 1952 * command. If the mailbox command is not MBX_SLI4_CONFIG (0x9B) or if
 1895 * (0x9B) or if the IOCTL sub-header is not present, opcode 0x0 shall be 1953 * the sub-header is not present, opcode LPFC_MBOX_OPCODE_NA (0x0) shall be
1896 * returned. 1954 * returned.
1897 **/ 1955 **/
1898uint8_t 1956uint8_t
1899lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox) 1957lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
1900{ 1958{
1901 struct lpfc_mbx_sli4_config *sli4_cfg; 1959 struct lpfc_mbx_sli4_config *sli4_cfg;
1902 union lpfc_sli4_cfg_shdr *cfg_shdr; 1960 union lpfc_sli4_cfg_shdr *cfg_shdr;
1903 1961
1904 if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG) 1962 if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
1905 return 0; 1963 return LPFC_MBOX_OPCODE_NA;
1906 sli4_cfg = &mbox->u.mqe.un.sli4_config; 1964 sli4_cfg = &mbox->u.mqe.un.sli4_config;
1907 1965
1908 /* For embedded mbox command, get opcode from embedded sub-header*/ 1966 /* For embedded mbox command, get opcode from embedded sub-header*/
@@ -1913,7 +1971,7 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
1913 1971
1914 /* For non-embedded mbox command, get opcode from first dma page */ 1972 /* For non-embedded mbox command, get opcode from first dma page */
1915 if (unlikely(!mbox->sge_array)) 1973 if (unlikely(!mbox->sge_array))
1916 return 0; 1974 return LPFC_MBOX_OPCODE_NA;
1917 cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0]; 1975 cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
1918 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request); 1976 return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
1919} 1977}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index eadd241eeff..5b8790b3cf4 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -58,6 +58,13 @@ static char *dif_op_str[] = {
58 "SCSI_PROT_READ_PASS", 58 "SCSI_PROT_READ_PASS",
59 "SCSI_PROT_WRITE_PASS", 59 "SCSI_PROT_WRITE_PASS",
60}; 60};
61
62struct scsi_dif_tuple {
63 __be16 guard_tag; /* Checksum */
64 __be16 app_tag; /* Opaque storage */
65 __be32 ref_tag; /* Target LBA or indirect LBA */
66};
67
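The struct above is the 8-byte DIF tuple that accompanies each data block on the wire; the __be16/__be32 annotations record that all three fields are big-endian. A standalone sanity check of the layout — plain uint types stand in for the kernel's __be16/__be32:

    #include <stdint.h>
    #include <assert.h>

    struct dif_tuple {
        uint16_t guard_tag;   /* CRC over the data block */
        uint16_t app_tag;     /* opaque storage */
        uint32_t ref_tag;     /* low 32 bits of the target LBA */
    };

    int main(void)
    {
        assert(sizeof(struct dif_tuple) == 8);  /* one tuple per block */
        return 0;
    }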
61static void 68static void
62lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); 69lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
63static void 70static void
@@ -1263,6 +1270,174 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
1263 return 0; 1270 return 0;
1264} 1271}
1265 1272
1273static inline unsigned
1274lpfc_cmd_blksize(struct scsi_cmnd *sc)
1275{
1276 return sc->device->sector_size;
1277}
1278
1279#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1280/*
1281 * Given a scsi cmnd, determine the BlockGuard tags to be used with it
1282 * @sc: The SCSI command to examine
1283 * @reftag: (out) BlockGuard reference tag for transmitted data
1284 * @apptag: (out) BlockGuard application tag for transmitted data
 1285 * @new_guard: (in) Value to replace the CRC with if needed
1286 *
1287 * Returns (1) if error injection was performed, (0) otherwise
1288 */
1289static int
1290lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1291 uint32_t *reftag, uint16_t *apptag, uint32_t new_guard)
1292{
1293 struct scatterlist *sgpe; /* s/g prot entry */
1294 struct scatterlist *sgde; /* s/g data entry */
1295 struct scsi_dif_tuple *src;
1296 uint32_t op = scsi_get_prot_op(sc);
1297 uint32_t blksize;
1298 uint32_t numblks;
1299 sector_t lba;
1300 int rc = 0;
1301
1302 if (op == SCSI_PROT_NORMAL)
1303 return 0;
1304
1305 lba = scsi_get_lba(sc);
1306 if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
1307 blksize = lpfc_cmd_blksize(sc);
1308 numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
1309
1310 /* Make sure we have the right LBA if one is specified */
1311 if ((phba->lpfc_injerr_lba < lba) ||
1312 (phba->lpfc_injerr_lba >= (lba + numblks)))
1313 return 0;
1314 }
1315
1316 sgpe = scsi_prot_sglist(sc);
1317 sgde = scsi_sglist(sc);
1318
1319 /* Should we change the Reference Tag */
1320 if (reftag) {
1321 /*
1322 * If we are SCSI_PROT_WRITE_STRIP, the protection data is
1323 * being stripped from the wire, thus it doesn't matter.
1324 */
1325 if ((op == SCSI_PROT_WRITE_PASS) ||
1326 (op == SCSI_PROT_WRITE_INSERT)) {
1327 if (phba->lpfc_injerr_wref_cnt) {
1328
1329 /* DEADBEEF will be the reftag on the wire */
1330 *reftag = 0xDEADBEEF;
1331 phba->lpfc_injerr_wref_cnt--;
1332 phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
1333 rc = 1;
1334
1335 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1336 "9081 BLKGRD: Injecting reftag error: "
1337 "write lba x%lx\n", (unsigned long)lba);
1338 }
1339 } else {
1340 if (phba->lpfc_injerr_rref_cnt) {
1341 *reftag = 0xDEADBEEF;
1342 phba->lpfc_injerr_rref_cnt--;
1343 phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
1344 rc = 1;
1345
1346 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1347 "9076 BLKGRD: Injecting reftag error: "
1348 "read lba x%lx\n", (unsigned long)lba);
1349 }
1350 }
1351 }
1352
1353 /* Should we change the Application Tag */
1354 if (apptag) {
1355 /*
1356 * If we are SCSI_PROT_WRITE_STRIP, the protection data is
1357 * being stripped from the wire, thus it doesn't matter.
1358 */
1359 if ((op == SCSI_PROT_WRITE_PASS) ||
1360 (op == SCSI_PROT_WRITE_INSERT)) {
1361 if (phba->lpfc_injerr_wapp_cnt) {
1362
1363 /* DEAD will be the apptag on the wire */
1364 *apptag = 0xDEAD;
1365 phba->lpfc_injerr_wapp_cnt--;
1366 phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
1367 rc = 1;
1368
1369 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1370 "9077 BLKGRD: Injecting apptag error: "
1371 "write lba x%lx\n", (unsigned long)lba);
1372 }
1373 } else {
1374 if (phba->lpfc_injerr_rapp_cnt) {
1375 *apptag = 0xDEAD;
1376 phba->lpfc_injerr_rapp_cnt--;
1377 phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
1378 rc = 1;
1379
1380 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1381 "9078 BLKGRD: Injecting apptag error: "
1382 "read lba x%lx\n", (unsigned long)lba);
1383 }
1384 }
1385 }
1386
1387 /* Should we change the Guard Tag */
1388
1389 /*
 1390 * If we are SCSI_PROT_WRITE_INSERT, the protection data
 1391 * on the wire is fully generated by the HBA.
1392 * The host cannot change it or force an error.
1393 */
1394 if (((op == SCSI_PROT_WRITE_STRIP) ||
1395 (op == SCSI_PROT_WRITE_PASS)) &&
1396 phba->lpfc_injerr_wgrd_cnt) {
1397 if (sgpe) {
1398 src = (struct scsi_dif_tuple *)sg_virt(sgpe);
1399 /*
1400 * Just inject an error in the first
1401 * prot block.
1402 */
1403 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1404 "9079 BLKGRD: Injecting guard error: "
1405 "write lba x%lx oldGuard x%x refTag x%x\n",
1406 (unsigned long)lba, src->guard_tag,
1407 src->ref_tag);
1408
1409 src->guard_tag = (uint16_t)new_guard;
1410 phba->lpfc_injerr_wgrd_cnt--;
1411 phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
1412 rc = 1;
1413
1414 } else {
1415 blksize = lpfc_cmd_blksize(sc);
1416 /*
1417 * Jump past the first data block
1418 * and inject an error in the
1419 * prot data. The prot data is already
1420 * embedded after the regular data.
1421 */
1422 src = (struct scsi_dif_tuple *)
1423 (sg_virt(sgde) + blksize);
1424
1425 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
1426 "9080 BLKGRD: Injecting guard error: "
1427 "write lba x%lx oldGuard x%x refTag x%x\n",
1428 (unsigned long)lba, src->guard_tag,
1429 src->ref_tag);
1430
1431 src->guard_tag = (uint16_t)new_guard;
1432 phba->lpfc_injerr_wgrd_cnt--;
1433 phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
1434 rc = 1;
1435 }
1436 }
1437 return rc;
1438}
1439#endif
1440
1266/* 1441/*
1267 * Given a scsi cmnd, determine the BlockGuard opcodes to be used with it 1442 * Given a scsi cmnd, determine the BlockGuard opcodes to be used with it
1268 * @sc: The SCSI command to examine 1443 * @sc: The SCSI command to examine
@@ -1341,18 +1516,6 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1341 return ret; 1516 return ret;
1342} 1517}
1343 1518
1344struct scsi_dif_tuple {
1345 __be16 guard_tag; /* Checksum */
1346 __be16 app_tag; /* Opaque storage */
1347 __be32 ref_tag; /* Target LBA or indirect LBA */
1348};
1349
1350static inline unsigned
1351lpfc_cmd_blksize(struct scsi_cmnd *sc)
1352{
1353 return sc->device->sector_size;
1354}
1355
1356/* 1519/*
1357 * This function sets up buffer list for protection groups of 1520 * This function sets up buffer list for protection groups of
1358 * type LPFC_PG_TYPE_NO_DIF 1521 * type LPFC_PG_TYPE_NO_DIF
@@ -1401,6 +1564,11 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1401 blksize = lpfc_cmd_blksize(sc); 1564 blksize = lpfc_cmd_blksize(sc);
1402 reftag = scsi_get_lba(sc) & 0xffffffff; 1565 reftag = scsi_get_lba(sc) & 0xffffffff;
1403 1566
1567#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1568 /* reftag is the only error we can inject here */
1569 lpfc_bg_err_inject(phba, sc, &reftag, 0, 0);
1570#endif
1571
1404 /* setup PDE5 with what we have */ 1572 /* setup PDE5 with what we have */
1405 pde5 = (struct lpfc_pde5 *) bpl; 1573 pde5 = (struct lpfc_pde5 *) bpl;
1406 memset(pde5, 0, sizeof(struct lpfc_pde5)); 1574 memset(pde5, 0, sizeof(struct lpfc_pde5));
@@ -1532,6 +1700,11 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1532 blksize = lpfc_cmd_blksize(sc); 1700 blksize = lpfc_cmd_blksize(sc);
1533 reftag = scsi_get_lba(sc) & 0xffffffff; 1701 reftag = scsi_get_lba(sc) & 0xffffffff;
1534 1702
1703#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1704 /* reftag / guard tag are the only errors we can inject here */
1705 lpfc_bg_err_inject(phba, sc, &reftag, 0, 0xDEAD);
1706#endif
1707
1535 split_offset = 0; 1708 split_offset = 0;
1536 do { 1709 do {
1537 /* setup PDE5 with what we have */ 1710 /* setup PDE5 with what we have */
@@ -1671,7 +1844,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1671 } 1844 }
1672 1845
1673 } while (!alldone); 1846 } while (!alldone);
1674
1675out: 1847out:
1676 1848
1677 return num_bde; 1849 return num_bde;
@@ -2075,6 +2247,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
2075 else 2247 else
2076 bf_set(lpfc_sli4_sge_last, sgl, 0); 2248 bf_set(lpfc_sli4_sge_last, sgl, 0);
2077 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); 2249 bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
2250 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2078 sgl->word2 = cpu_to_le32(sgl->word2); 2251 sgl->word2 = cpu_to_le32(sgl->word2);
2079 sgl->sge_len = cpu_to_le32(dma_len); 2252 sgl->sge_len = cpu_to_le32(dma_len);
2080 dma_offset += dma_len; 2253 dma_offset += dma_len;
@@ -2325,8 +2498,9 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2325 } 2498 }
2326 lp = (uint32_t *)cmnd->sense_buffer; 2499 lp = (uint32_t *)cmnd->sense_buffer;
2327 2500
2328 if (!scsi_status && (resp_info & RESID_UNDER)) 2501 if (!scsi_status && (resp_info & RESID_UNDER) &&
2329 logit = LOG_FCP; 2502 vport->cfg_log_verbose & LOG_FCP_UNDER)
2503 logit = LOG_FCP_UNDER;
2330 2504
2331 lpfc_printf_vlog(vport, KERN_WARNING, logit, 2505 lpfc_printf_vlog(vport, KERN_WARNING, logit,
2332 "9024 FCP command x%x failed: x%x SNS x%x x%x " 2506 "9024 FCP command x%x failed: x%x SNS x%x x%x "
@@ -2342,7 +2516,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
2342 if (resp_info & RESID_UNDER) { 2516 if (resp_info & RESID_UNDER) {
2343 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId)); 2517 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
2344 2518
2345 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, 2519 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER,
2346 "9025 FCP Read Underrun, expected %d, " 2520 "9025 FCP Read Underrun, expected %d, "
2347 "residual %d Data: x%x x%x x%x\n", 2521 "residual %d Data: x%x x%x x%x\n",
2348 be32_to_cpu(fcpcmd->fcpDl), 2522 be32_to_cpu(fcpcmd->fcpDl),
@@ -2449,6 +2623,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
2449 struct lpfc_fast_path_event *fast_path_evt; 2623 struct lpfc_fast_path_event *fast_path_evt;
2450 struct Scsi_Host *shost; 2624 struct Scsi_Host *shost;
2451 uint32_t queue_depth, scsi_id; 2625 uint32_t queue_depth, scsi_id;
2626 uint32_t logit = LOG_FCP;
2452 2627
2453 /* Sanity check on return of outstanding command */ 2628 /* Sanity check on return of outstanding command */
2454 if (!(lpfc_cmd->pCmd)) 2629 if (!(lpfc_cmd->pCmd))
@@ -2470,16 +2645,22 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
2470 lpfc_cmd->status = IOSTAT_DRIVER_REJECT; 2645 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
2471 else if (lpfc_cmd->status >= IOSTAT_CNT) 2646 else if (lpfc_cmd->status >= IOSTAT_CNT)
2472 lpfc_cmd->status = IOSTAT_DEFAULT; 2647 lpfc_cmd->status = IOSTAT_DEFAULT;
2473 2648 if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR
2474 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, 2649 && !lpfc_cmd->fcp_rsp->rspStatus3
2475 "9030 FCP cmd x%x failed <%d/%d> " 2650 && (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER)
2476 "status: x%x result: x%x Data: x%x x%x\n", 2651 && !(phba->cfg_log_verbose & LOG_FCP_UNDER))
2477 cmd->cmnd[0], 2652 logit = 0;
2478 cmd->device ? cmd->device->id : 0xffff, 2653 else
2479 cmd->device ? cmd->device->lun : 0xffff, 2654 logit = LOG_FCP | LOG_FCP_UNDER;
2480 lpfc_cmd->status, lpfc_cmd->result, 2655 lpfc_printf_vlog(vport, KERN_WARNING, logit,
2481 pIocbOut->iocb.ulpContext, 2656 "9030 FCP cmd x%x failed <%d/%d> "
2482 lpfc_cmd->cur_iocbq.iocb.ulpIoTag); 2657 "status: x%x result: x%x Data: x%x x%x\n",
2658 cmd->cmnd[0],
2659 cmd->device ? cmd->device->id : 0xffff,
2660 cmd->device ? cmd->device->lun : 0xffff,
2661 lpfc_cmd->status, lpfc_cmd->result,
2662 pIocbOut->iocb.ulpContext,
2663 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
2483 2664
2484 switch (lpfc_cmd->status) { 2665 switch (lpfc_cmd->status) {
2485 case IOSTAT_FCP_RSP_ERROR: 2666 case IOSTAT_FCP_RSP_ERROR:
@@ -3056,8 +3237,9 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
3056 } 3237 }
3057 ndlp = rdata->pnode; 3238 ndlp = rdata->pnode;
3058 3239
3059 if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && 3240 if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
3060 scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { 3241 (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) ||
3242 (phba->sli_rev == LPFC_SLI_REV4))) {
3061 3243
3062 lpfc_printf_log(phba, KERN_ERR, LOG_BG, 3244 lpfc_printf_log(phba, KERN_ERR, LOG_BG,
3063 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x" 3245 "9058 BLKGRD: ERROR: rcvd protected cmd:%02x"
@@ -3691,9 +3873,9 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
3691 fc_host_post_vendor_event(shost, fc_get_event_number(), 3873 fc_host_post_vendor_event(shost, fc_get_event_number(),
3692 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); 3874 sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID);
3693 3875
3694 ret = fc_block_scsi_eh(cmnd); 3876 status = fc_block_scsi_eh(cmnd);
3695 if (ret) 3877 if (status)
3696 return ret; 3878 return status;
3697 3879
3698 /* 3880 /*
3699 * Since the driver manages a single bus device, reset all 3881 * Since the driver manages a single bus device, reset all
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 8b799f047a9..4d4104f38c9 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -379,10 +379,10 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
379 dq->host_index = ((dq->host_index + 1) % dq->entry_count); 379 dq->host_index = ((dq->host_index + 1) % dq->entry_count);
380 380
381 /* Ring The Header Receive Queue Doorbell */ 381 /* Ring The Header Receive Queue Doorbell */
382 if (!(hq->host_index % LPFC_RQ_POST_BATCH)) { 382 if (!(hq->host_index % hq->entry_repost)) {
383 doorbell.word0 = 0; 383 doorbell.word0 = 0;
384 bf_set(lpfc_rq_doorbell_num_posted, &doorbell, 384 bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
385 LPFC_RQ_POST_BATCH); 385 hq->entry_repost);
386 bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id); 386 bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
387 writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr); 387 writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
388 } 388 }
@@ -1864,7 +1864,7 @@ lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
1864{ 1864{
1865 if (phba->sli_rev == LPFC_SLI_REV4) 1865 if (phba->sli_rev == LPFC_SLI_REV4)
1866 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1866 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1867 lpfc_hbq_defs[qno]->entry_count); 1867 lpfc_hbq_defs[qno]->entry_count);
1868 else 1868 else
1869 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, 1869 return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1870 lpfc_hbq_defs[qno]->init_count); 1870 lpfc_hbq_defs[qno]->init_count);
@@ -2200,10 +2200,13 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2200 /* Unknown mailbox command compl */ 2200 /* Unknown mailbox command compl */
2201 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 2201 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2202 "(%d):0323 Unknown Mailbox command " 2202 "(%d):0323 Unknown Mailbox command "
2203 "x%x (x%x) Cmpl\n", 2203 "x%x (x%x/x%x) Cmpl\n",
2204 pmb->vport ? pmb->vport->vpi : 0, 2204 pmb->vport ? pmb->vport->vpi : 0,
2205 pmbox->mbxCommand, 2205 pmbox->mbxCommand,
2206 lpfc_sli4_mbox_opcode_get(phba, pmb)); 2206 lpfc_sli_config_mbox_subsys_get(phba,
2207 pmb),
2208 lpfc_sli_config_mbox_opcode_get(phba,
2209 pmb));
2207 phba->link_state = LPFC_HBA_ERROR; 2210 phba->link_state = LPFC_HBA_ERROR;
2208 phba->work_hs = HS_FFER3; 2211 phba->work_hs = HS_FFER3;
2209 lpfc_handle_eratt(phba); 2212 lpfc_handle_eratt(phba);
@@ -2215,17 +2218,19 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2215 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) { 2218 if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2216 /* Mbox cmd cmpl error - RETRYing */ 2219 /* Mbox cmd cmpl error - RETRYing */
2217 lpfc_printf_log(phba, KERN_INFO, 2220 lpfc_printf_log(phba, KERN_INFO,
2218 LOG_MBOX | LOG_SLI, 2221 LOG_MBOX | LOG_SLI,
2219 "(%d):0305 Mbox cmd cmpl " 2222 "(%d):0305 Mbox cmd cmpl "
2220 "error - RETRYing Data: x%x " 2223 "error - RETRYing Data: x%x "
2221 "(x%x) x%x x%x x%x\n", 2224 "(x%x/x%x) x%x x%x x%x\n",
2222 pmb->vport ? pmb->vport->vpi :0, 2225 pmb->vport ? pmb->vport->vpi : 0,
2223 pmbox->mbxCommand, 2226 pmbox->mbxCommand,
2224 lpfc_sli4_mbox_opcode_get(phba, 2227 lpfc_sli_config_mbox_subsys_get(phba,
2225 pmb), 2228 pmb),
2226 pmbox->mbxStatus, 2229 lpfc_sli_config_mbox_opcode_get(phba,
2227 pmbox->un.varWords[0], 2230 pmb),
2228 pmb->vport->port_state); 2231 pmbox->mbxStatus,
2232 pmbox->un.varWords[0],
2233 pmb->vport->port_state);
2229 pmbox->mbxStatus = 0; 2234 pmbox->mbxStatus = 0;
2230 pmbox->mbxOwner = OWN_HOST; 2235 pmbox->mbxOwner = OWN_HOST;
2231 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); 2236 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
@@ -2236,11 +2241,12 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2236 2241
2237 /* Mailbox cmd <cmd> Cmpl <cmpl> */ 2242 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2238 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 2243 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2239 "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p " 2244 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
2240 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", 2245 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
2241 pmb->vport ? pmb->vport->vpi : 0, 2246 pmb->vport ? pmb->vport->vpi : 0,
2242 pmbox->mbxCommand, 2247 pmbox->mbxCommand,
2243 lpfc_sli4_mbox_opcode_get(phba, pmb), 2248 lpfc_sli_config_mbox_subsys_get(phba, pmb),
2249 lpfc_sli_config_mbox_opcode_get(phba, pmb),
2244 pmb->mbox_cmpl, 2250 pmb->mbox_cmpl,
2245 *((uint32_t *) pmbox), 2251 *((uint32_t *) pmbox),
2246 pmbox->un.varWords[0], 2252 pmbox->un.varWords[0],
@@ -4686,6 +4692,175 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
4686} 4692}
4687 4693
4688/** 4694/**
4695 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
4696 * @phba: pointer to lpfc hba data structure.
4697 *
 4698 * This routine retrieves the physical port name of the SLI4 device
 4699 * port that this PCI function is attached to.
4700 *
4701 * Return codes
 4702 * 0 - successful
4703 * otherwise - failed to retrieve physical port name
4704 **/
4705static int
4706lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
4707{
4708 LPFC_MBOXQ_t *mboxq;
4709 struct lpfc_mbx_read_config *rd_config;
4710 struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
4711 struct lpfc_controller_attribute *cntl_attr;
4712 struct lpfc_mbx_get_port_name *get_port_name;
4713 void *virtaddr = NULL;
4714 uint32_t alloclen, reqlen;
4715 uint32_t shdr_status, shdr_add_status;
4716 union lpfc_sli4_cfg_shdr *shdr;
4717 char cport_name = 0;
4718 int rc;
4719
4720 /* We assume nothing at this point */
4721 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
4722 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
4723
4724 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4725 if (!mboxq)
4726 return -ENOMEM;
4727
4728 /* obtain link type and link number via READ_CONFIG */
4729 lpfc_read_config(phba, mboxq);
4730 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4731 if (rc == MBX_SUCCESS) {
4732 rd_config = &mboxq->u.mqe.un.rd_config;
4733 if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
4734 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
4735 phba->sli4_hba.lnk_info.lnk_tp =
4736 bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
4737 phba->sli4_hba.lnk_info.lnk_no =
4738 bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
4739 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4740 "3081 lnk_type:%d, lnk_numb:%d\n",
4741 phba->sli4_hba.lnk_info.lnk_tp,
4742 phba->sli4_hba.lnk_info.lnk_no);
4743 goto retrieve_ppname;
4744 } else
4745 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4746 "3082 Mailbox (x%x) returned ldv:x0\n",
4747 bf_get(lpfc_mqe_command,
4748 &mboxq->u.mqe));
4749 } else
4750 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4751 "3083 Mailbox (x%x) failed, status:x%x\n",
4752 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4753 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
4754
4755 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
4756 reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
4757 alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
4758 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
4759 LPFC_SLI4_MBX_NEMBED);
4760 if (alloclen < reqlen) {
4761 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4762 "3084 Allocated DMA memory size (%d) is "
4763 "less than the requested DMA memory size "
4764 "(%d)\n", alloclen, reqlen);
4765 rc = -ENOMEM;
4766 goto out_free_mboxq;
4767 }
4768 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4769 virtaddr = mboxq->sge_array->addr[0];
4770 mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
4771 shdr = &mbx_cntl_attr->cfg_shdr;
4772 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
4773 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
4774 if (shdr_status || shdr_add_status || rc) {
4775 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4776 "3085 Mailbox x%x (x%x/x%x) failed, "
4777 "rc:x%x, status:x%x, add_status:x%x\n",
4778 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4779 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
4780 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
4781 rc, shdr_status, shdr_add_status);
4782 rc = -ENXIO;
4783 goto out_free_mboxq;
4784 }
4785 cntl_attr = &mbx_cntl_attr->cntl_attr;
4786 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
4787 phba->sli4_hba.lnk_info.lnk_tp =
4788 bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
4789 phba->sli4_hba.lnk_info.lnk_no =
4790 bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
4791 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4792 "3086 lnk_type:%d, lnk_numb:%d\n",
4793 phba->sli4_hba.lnk_info.lnk_tp,
4794 phba->sli4_hba.lnk_info.lnk_no);
4795
4796retrieve_ppname:
4797 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
4798 LPFC_MBOX_OPCODE_GET_PORT_NAME,
4799 sizeof(struct lpfc_mbx_get_port_name) -
4800 sizeof(struct lpfc_sli4_cfg_mhdr),
4801 LPFC_SLI4_MBX_EMBED);
4802 get_port_name = &mboxq->u.mqe.un.get_port_name;
4803 shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
4804 bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
4805 bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
4806 phba->sli4_hba.lnk_info.lnk_tp);
4807 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4808 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
4809 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
4810 if (shdr_status || shdr_add_status || rc) {
4811 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4812 "3087 Mailbox x%x (x%x/x%x) failed: "
4813 "rc:x%x, status:x%x, add_status:x%x\n",
4814 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
4815 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
4816 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
4817 rc, shdr_status, shdr_add_status);
4818 rc = -ENXIO;
4819 goto out_free_mboxq;
4820 }
4821 switch (phba->sli4_hba.lnk_info.lnk_no) {
4822 case LPFC_LINK_NUMBER_0:
4823 cport_name = bf_get(lpfc_mbx_get_port_name_name0,
4824 &get_port_name->u.response);
4825 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4826 break;
4827 case LPFC_LINK_NUMBER_1:
4828 cport_name = bf_get(lpfc_mbx_get_port_name_name1,
4829 &get_port_name->u.response);
4830 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4831 break;
4832 case LPFC_LINK_NUMBER_2:
4833 cport_name = bf_get(lpfc_mbx_get_port_name_name2,
4834 &get_port_name->u.response);
4835 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4836 break;
4837 case LPFC_LINK_NUMBER_3:
4838 cport_name = bf_get(lpfc_mbx_get_port_name_name3,
4839 &get_port_name->u.response);
4840 phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
4841 break;
4842 default:
4843 break;
4844 }
4845
4846 if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
4847 phba->Port[0] = cport_name;
4848 phba->Port[1] = '\0';
4849 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4850 "3091 SLI get port name: %s\n", phba->Port);
4851 }
4852
4853out_free_mboxq:
4854 if (rc != MBX_TIMEOUT) {
4855 if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
4856 lpfc_sli4_mbox_cmd_free(phba, mboxq);
4857 else
4858 mempool_free(mboxq, phba->mbox_mem_pool);
4859 }
4860 return rc;
4861}
4862
4863/**
4689 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues 4864 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
4690 * @phba: pointer to lpfc hba data structure. 4865 * @phba: pointer to lpfc hba data structure.
4691 * 4866 *
@@ -4754,7 +4929,7 @@ lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
4754 if (!phba->sli4_hba.intr_enable) 4929 if (!phba->sli4_hba.intr_enable)
4755 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 4930 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
4756 else { 4931 else {
4757 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 4932 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
4758 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 4933 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
4759 } 4934 }
4760 if (unlikely(rc)) { 4935 if (unlikely(rc)) {
@@ -4911,7 +5086,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
4911 if (!phba->sli4_hba.intr_enable) 5086 if (!phba->sli4_hba.intr_enable)
4912 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5087 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
4913 else { 5088 else {
4914 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 5089 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
4915 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5090 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
4916 } 5091 }
4917 5092
@@ -5194,7 +5369,7 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5194 if (!phba->sli4_hba.intr_enable) 5369 if (!phba->sli4_hba.intr_enable)
5195 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5370 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5196 else { 5371 else {
5197 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox_tmo); 5372 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5198 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5373 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5199 } 5374 }
5200 if (unlikely(rc)) { 5375 if (unlikely(rc)) {
@@ -5619,7 +5794,7 @@ lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
5619 if (!phba->sli4_hba.intr_enable) 5794 if (!phba->sli4_hba.intr_enable)
5620 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 5795 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5621 else { 5796 else {
5622 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 5797 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5623 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 5798 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5624 } 5799 }
5625 5800
@@ -5748,6 +5923,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
5748 kfree(vpd); 5923 kfree(vpd);
5749 goto out_free_mbox; 5924 goto out_free_mbox;
5750 } 5925 }
5926
5927 /*
5928 * Retrieve sli4 device physical port name, failure of doing it
5929 * is considered as non-fatal.
5930 */
5931 rc = lpfc_sli4_retrieve_pport_name(phba);
5932 if (!rc)
5933 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5934 "3080 Successful retrieving SLI4 device "
5935 "physical port name: %s.\n", phba->Port);
5936
5751 /* 5937 /*
5752 * Evaluate the read rev and vpd data. Populate the driver 5938 * Evaluate the read rev and vpd data. Populate the driver
5753 * state with the results. If this routine fails, the failure 5939 * state with the results. If this routine fails, the failure
@@ -5818,9 +6004,13 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
5818 * then turn off the global config parameters to disable the 6004 * then turn off the global config parameters to disable the
5819 * feature in the driver. This is not a fatal error. 6005 * feature in the driver. This is not a fatal error.
5820 */ 6006 */
5821 if ((phba->cfg_enable_bg) && 6007 phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5822 !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) 6008 if (phba->cfg_enable_bg) {
5823 ftr_rsp++; 6009 if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
6010 phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
6011 else
6012 ftr_rsp++;
6013 }
5824 6014
5825 if (phba->max_vpi && phba->cfg_enable_npiv && 6015 if (phba->max_vpi && phba->cfg_enable_npiv &&
5826 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) 6016 !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
@@ -5937,12 +6127,20 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
5937 goto out_free_mbox; 6127 goto out_free_mbox;
5938 } 6128 }
5939 6129
6130 /* Create all the SLI4 queues */
6131 rc = lpfc_sli4_queue_create(phba);
6132 if (rc) {
6133 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6134 "3089 Failed to allocate queues\n");
6135 rc = -ENODEV;
6136 goto out_stop_timers;
6137 }
5940 /* Set up all the queues to the device */ 6138 /* Set up all the queues to the device */
5941 rc = lpfc_sli4_queue_setup(phba); 6139 rc = lpfc_sli4_queue_setup(phba);
5942 if (unlikely(rc)) { 6140 if (unlikely(rc)) {
5943 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6141 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
5944 "0381 Error %d during queue setup.\n ", rc); 6142 "0381 Error %d during queue setup.\n ", rc);
5945 goto out_stop_timers; 6143 goto out_destroy_queue;
5946 } 6144 }
5947 6145
5948 /* Arm the CQs and then EQs on device */ 6146 /* Arm the CQs and then EQs on device */
@@ -6015,15 +6213,20 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6015 spin_lock_irq(&phba->hbalock); 6213 spin_lock_irq(&phba->hbalock);
6016 phba->link_state = LPFC_LINK_DOWN; 6214 phba->link_state = LPFC_LINK_DOWN;
6017 spin_unlock_irq(&phba->hbalock); 6215 spin_unlock_irq(&phba->hbalock);
6018 if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) 6216 if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
6019 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); 6217 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
6218 if (rc)
6219 goto out_unset_queue;
6220 }
6221 mempool_free(mboxq, phba->mbox_mem_pool);
6222 return rc;
6020out_unset_queue: 6223out_unset_queue:
6021 /* Unset all the queues set up in this routine when error out */ 6224 /* Unset all the queues set up in this routine when error out */
6022 if (rc) 6225 lpfc_sli4_queue_unset(phba);
6023 lpfc_sli4_queue_unset(phba); 6226out_destroy_queue:
6227 lpfc_sli4_queue_destroy(phba);
6024out_stop_timers: 6228out_stop_timers:
6025 if (rc) 6229 lpfc_stop_hba_timers(phba);
6026 lpfc_stop_hba_timers(phba);
6027out_free_mbox: 6230out_free_mbox:
6028 mempool_free(mboxq, phba->mbox_mem_pool); 6231 mempool_free(mboxq, phba->mbox_mem_pool);
6029 return rc; 6232 return rc;
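With queue allocation pulled into lpfc_sli4_hba_setup(), the error ladder gains a matching out_destroy_queue rung between queue unset and timer stop, and the "if (rc)" guards on the labels disappear because the labels are now only reachable on error. The idiom, reduced to a compilable sketch with invented function names:

    extern int queue_create(void);
    extern int queue_setup(void);
    extern void queue_destroy(void);
    extern void stop_timers(void);

    static int hba_setup(void)
    {
        int rc;

        rc = queue_create();
        if (rc)
            goto out_stop_timers;
        rc = queue_setup();
        if (rc)
            goto out_destroy_queue;
        return 0;

    out_destroy_queue:
        queue_destroy();      /* undo only what succeeded */
    out_stop_timers:
        stop_timers();
        return rc;
    }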
@@ -6318,7 +6521,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
6318 } 6521 }
6319 /* timeout active mbox command */ 6522 /* timeout active mbox command */
6320 mod_timer(&psli->mbox_tmo, (jiffies + 6523 mod_timer(&psli->mbox_tmo, (jiffies +
6321 (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand)))); 6524 (HZ * lpfc_mbox_tmo_val(phba, pmbox))));
6322 } 6525 }
6323 6526
6324 /* Mailbox cmd <cmd> issue */ 6527 /* Mailbox cmd <cmd> issue */
@@ -6442,9 +6645,8 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
6442 drvr_flag); 6645 drvr_flag);
6443 goto out_not_finished; 6646 goto out_not_finished;
6444 } 6647 }
6445 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, 6648 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
6446 mb->mbxCommand) * 6649 1000) + jiffies;
6447 1000) + jiffies;
6448 i = 0; 6650 i = 0;
6449 /* Wait for command to complete */ 6651 /* Wait for command to complete */
6450 while (((word0 & OWN_CHIP) == OWN_CHIP) || 6652 while (((word0 & OWN_CHIP) == OWN_CHIP) ||
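Both timeout sites in this function now derive the limit from the mailbox object itself: lpfc_mbox_tmo_val() yields seconds, which are scaled to milliseconds, converted with msecs_to_jiffies(), and anchored to the current jiffies. A short sketch of that absolute-deadline idiom, with tmo_seconds() and command_done() as hypothetical stand-ins:

/* Build an absolute jiffies deadline from a per-command timeout in
 * seconds, then poll against it. */
unsigned long deadline = msecs_to_jiffies(tmo_seconds(pmbox) * 1000) + jiffies;

while (!command_done(pmbox)) {
        if (time_after(jiffies, deadline))
                return MBX_TIMEOUT;     /* firmware never answered */
        udelay(1);                      /* brief pause between polls */
}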
@@ -6555,21 +6757,21 @@ static int
6555lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) 6757lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
6556{ 6758{
6557 struct lpfc_sli *psli = &phba->sli; 6759 struct lpfc_sli *psli = &phba->sli;
6558 uint8_t actcmd = MBX_HEARTBEAT;
6559 int rc = 0; 6760 int rc = 0;
6560 unsigned long timeout; 6761 unsigned long timeout = 0;
6561 6762
6562 /* Mark the asynchronous mailbox command posting as blocked */ 6763 /* Mark the asynchronous mailbox command posting as blocked */
6563 spin_lock_irq(&phba->hbalock); 6764 spin_lock_irq(&phba->hbalock);
6564 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 6765 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
6565 if (phba->sli.mbox_active)
6566 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
6567 spin_unlock_irq(&phba->hbalock);
6568 /* Determine how long we might wait for the active mailbox 6766 /* Determine how long we might wait for the active mailbox
6569 * command to be gracefully completed by firmware. 6767 * command to be gracefully completed by firmware.
6570 */ 6768 */
6571 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) + 6769 if (phba->sli.mbox_active)
6572 jiffies; 6770 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
6771 phba->sli.mbox_active) *
6772 1000) + jiffies;
6773 spin_unlock_irq(&phba->hbalock);
6774
6573 /* Wait for the outstanding mailbox command to complete */ 6775 /* Wait for the outstanding mailbox command to complete */
6574 while (phba->sli.mbox_active) { 6776 while (phba->sli.mbox_active) {
6575 /* Check active mailbox complete status every 2ms */ 6777 /* Check active mailbox complete status every 2ms */
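The reworked lpfc_sli4_async_mbox_block() computes the deadline while still holding hbalock: mbox_active is read and fed to lpfc_mbox_tmo_val() before the lock is dropped, so the active command cannot complete and be freed between the two steps. The locking shape, in sketch form:

/* Compute a deadline from a lock-protected object while the lock is
 * held, so the object cannot vanish mid-read. */
unsigned long timeout = 0;

spin_lock_irq(&phba->hbalock);
psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;       /* block new async posts */
if (phba->sli.mbox_active)                      /* command still in flight */
        timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
                                        phba->sli.mbox_active) * 1000)
                  + jiffies;
spin_unlock_irq(&phba->hbalock);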
@@ -6664,11 +6866,12 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
6664 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) { 6866 if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
6665 spin_unlock_irqrestore(&phba->hbalock, iflag); 6867 spin_unlock_irqrestore(&phba->hbalock, iflag);
6666 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 6868 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6667 "(%d):2532 Mailbox command x%x (x%x) " 6869 "(%d):2532 Mailbox command x%x (x%x/x%x) "
6668 "cannot issue Data: x%x x%x\n", 6870 "cannot issue Data: x%x x%x\n",
6669 mboxq->vport ? mboxq->vport->vpi : 0, 6871 mboxq->vport ? mboxq->vport->vpi : 0,
6670 mboxq->u.mb.mbxCommand, 6872 mboxq->u.mb.mbxCommand,
6671 lpfc_sli4_mbox_opcode_get(phba, mboxq), 6873 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
6874 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
6672 psli->sli_flag, MBX_POLL); 6875 psli->sli_flag, MBX_POLL);
6673 return MBXERR_ERROR; 6876 return MBXERR_ERROR;
6674 } 6877 }
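From here on, every mailbox log site trades the single lpfc_sli4_mbox_opcode_get() for a subsystem/opcode pair, printed as x%x/x%x, which pins down exactly which SLI_CONFIG request failed. The pattern, shown once (it mirrors the 2532 message above):

/* Log an SLI_CONFIG mailbox with its subsystem and opcode, as
 * introduced by this series. */
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
                "(%d):2532 Mailbox command x%x (x%x/x%x) "
                "cannot issue Data: x%x x%x\n",
                mboxq->vport ? mboxq->vport->vpi : 0,
                mboxq->u.mb.mbxCommand,
                lpfc_sli_config_mbox_subsys_get(phba, mboxq),
                lpfc_sli_config_mbox_opcode_get(phba, mboxq),
                psli->sli_flag, MBX_POLL);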
@@ -6691,7 +6894,7 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
6691 dma_address = &phba->sli4_hba.bmbx.dma_address; 6894 dma_address = &phba->sli4_hba.bmbx.dma_address;
6692 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr); 6895 writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
6693 6896
6694 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd) 6897 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
6695 * 1000) + jiffies; 6898 * 1000) + jiffies;
6696 do { 6899 do {
6697 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 6900 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
@@ -6707,7 +6910,7 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
6707 6910
6708 /* Post the low mailbox dma address to the port. */ 6911 /* Post the low mailbox dma address to the port. */
6709 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr); 6912 writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
6710 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd) 6913 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
6711 * 1000) + jiffies; 6914 * 1000) + jiffies;
6712 do { 6915 do {
6713 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr); 6916 bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
@@ -6746,11 +6949,12 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
6746 lpfc_sli4_swap_str(phba, mboxq); 6949 lpfc_sli4_swap_str(phba, mboxq);
6747 6950
6748 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 6951 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6749 "(%d):0356 Mailbox cmd x%x (x%x) Status x%x " 6952 "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
6750 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x" 6953 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
6751 " x%x x%x CQ: x%x x%x x%x x%x\n", 6954 " x%x x%x CQ: x%x x%x x%x x%x\n",
6752 mboxq->vport ? mboxq->vport->vpi : 0, 6955 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
6753 mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq), 6956 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
6957 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
6754 bf_get(lpfc_mqe_status, mb), 6958 bf_get(lpfc_mqe_status, mb),
6755 mb->un.mb_words[0], mb->un.mb_words[1], 6959 mb->un.mb_words[0], mb->un.mb_words[1],
6756 mb->un.mb_words[2], mb->un.mb_words[3], 6960 mb->un.mb_words[2], mb->un.mb_words[3],
@@ -6796,11 +7000,12 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
6796 rc = lpfc_mbox_dev_check(phba); 7000 rc = lpfc_mbox_dev_check(phba);
6797 if (unlikely(rc)) { 7001 if (unlikely(rc)) {
6798 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7002 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6799 "(%d):2544 Mailbox command x%x (x%x) " 7003 "(%d):2544 Mailbox command x%x (x%x/x%x) "
6800 "cannot issue Data: x%x x%x\n", 7004 "cannot issue Data: x%x x%x\n",
6801 mboxq->vport ? mboxq->vport->vpi : 0, 7005 mboxq->vport ? mboxq->vport->vpi : 0,
6802 mboxq->u.mb.mbxCommand, 7006 mboxq->u.mb.mbxCommand,
6803 lpfc_sli4_mbox_opcode_get(phba, mboxq), 7007 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7008 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
6804 psli->sli_flag, flag); 7009 psli->sli_flag, flag);
6805 goto out_not_finished; 7010 goto out_not_finished;
6806 } 7011 }
@@ -6814,20 +7019,25 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
6814 if (rc != MBX_SUCCESS) 7019 if (rc != MBX_SUCCESS)
6815 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7020 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6816 "(%d):2541 Mailbox command x%x " 7021 "(%d):2541 Mailbox command x%x "
6817 "(x%x) cannot issue Data: x%x x%x\n", 7022 "(x%x/x%x) cannot issue Data: "
7023 "x%x x%x\n",
6818 mboxq->vport ? mboxq->vport->vpi : 0, 7024 mboxq->vport ? mboxq->vport->vpi : 0,
6819 mboxq->u.mb.mbxCommand, 7025 mboxq->u.mb.mbxCommand,
6820 lpfc_sli4_mbox_opcode_get(phba, mboxq), 7026 lpfc_sli_config_mbox_subsys_get(phba,
7027 mboxq),
7028 lpfc_sli_config_mbox_opcode_get(phba,
7029 mboxq),
6821 psli->sli_flag, flag); 7030 psli->sli_flag, flag);
6822 return rc; 7031 return rc;
6823 } else if (flag == MBX_POLL) { 7032 } else if (flag == MBX_POLL) {
6824 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI, 7033 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6825 "(%d):2542 Try to issue mailbox command " 7034 "(%d):2542 Try to issue mailbox command "
6826 "x%x (x%x) synchronously ahead of async" 7035 "x%x (x%x/x%x) synchronously ahead of async"
6827 "mailbox command queue: x%x x%x\n", 7036 "mailbox command queue: x%x x%x\n",
6828 mboxq->vport ? mboxq->vport->vpi : 0, 7037 mboxq->vport ? mboxq->vport->vpi : 0,
6829 mboxq->u.mb.mbxCommand, 7038 mboxq->u.mb.mbxCommand,
6830 lpfc_sli4_mbox_opcode_get(phba, mboxq), 7039 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7040 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
6831 psli->sli_flag, flag); 7041 psli->sli_flag, flag);
6832 /* Try to block the asynchronous mailbox posting */ 7042 /* Try to block the asynchronous mailbox posting */
6833 rc = lpfc_sli4_async_mbox_block(phba); 7043 rc = lpfc_sli4_async_mbox_block(phba);
@@ -6836,16 +7046,18 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
6836 rc = lpfc_sli4_post_sync_mbox(phba, mboxq); 7046 rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
6837 if (rc != MBX_SUCCESS) 7047 if (rc != MBX_SUCCESS)
6838 lpfc_printf_log(phba, KERN_ERR, 7048 lpfc_printf_log(phba, KERN_ERR,
6839 LOG_MBOX | LOG_SLI, 7049 LOG_MBOX | LOG_SLI,
6840 "(%d):2597 Mailbox command " 7050 "(%d):2597 Mailbox command "
6841 "x%x (x%x) cannot issue " 7051 "x%x (x%x/x%x) cannot issue "
6842 "Data: x%x x%x\n", 7052 "Data: x%x x%x\n",
6843 mboxq->vport ? 7053 mboxq->vport ?
6844 mboxq->vport->vpi : 0, 7054 mboxq->vport->vpi : 0,
6845 mboxq->u.mb.mbxCommand, 7055 mboxq->u.mb.mbxCommand,
6846 lpfc_sli4_mbox_opcode_get(phba, 7056 lpfc_sli_config_mbox_subsys_get(phba,
6847 mboxq), 7057 mboxq),
6848 psli->sli_flag, flag); 7058 lpfc_sli_config_mbox_opcode_get(phba,
7059 mboxq),
7060 psli->sli_flag, flag);
6849 /* Unblock the async mailbox posting afterward */ 7061 /* Unblock the async mailbox posting afterward */
6850 lpfc_sli4_async_mbox_unblock(phba); 7062 lpfc_sli4_async_mbox_unblock(phba);
6851 } 7063 }
@@ -6856,11 +7068,12 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
6856 rc = lpfc_mbox_cmd_check(phba, mboxq); 7068 rc = lpfc_mbox_cmd_check(phba, mboxq);
6857 if (rc) { 7069 if (rc) {
6858 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7070 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6859 "(%d):2543 Mailbox command x%x (x%x) " 7071 "(%d):2543 Mailbox command x%x (x%x/x%x) "
6860 "cannot issue Data: x%x x%x\n", 7072 "cannot issue Data: x%x x%x\n",
6861 mboxq->vport ? mboxq->vport->vpi : 0, 7073 mboxq->vport ? mboxq->vport->vpi : 0,
6862 mboxq->u.mb.mbxCommand, 7074 mboxq->u.mb.mbxCommand,
6863 lpfc_sli4_mbox_opcode_get(phba, mboxq), 7075 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7076 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
6864 psli->sli_flag, flag); 7077 psli->sli_flag, flag);
6865 goto out_not_finished; 7078 goto out_not_finished;
6866 } 7079 }
@@ -6872,10 +7085,11 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
6872 spin_unlock_irqrestore(&phba->hbalock, iflags); 7085 spin_unlock_irqrestore(&phba->hbalock, iflags);
6873 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7086 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6874 "(%d):0354 Mbox cmd issue - Enqueue Data: " 7087 "(%d):0354 Mbox cmd issue - Enqueue Data: "
6875 "x%x (x%x) x%x x%x x%x\n", 7088 "x%x (x%x/x%x) x%x x%x x%x\n",
6876 mboxq->vport ? mboxq->vport->vpi : 0xffffff, 7089 mboxq->vport ? mboxq->vport->vpi : 0xffffff,
6877 bf_get(lpfc_mqe_command, &mboxq->u.mqe), 7090 bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6878 lpfc_sli4_mbox_opcode_get(phba, mboxq), 7091 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7092 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
6879 phba->pport->port_state, 7093 phba->pport->port_state,
6880 psli->sli_flag, MBX_NOWAIT); 7094 psli->sli_flag, MBX_NOWAIT);
6881 /* Wake up worker thread to transport mailbox command from head */ 7095 /* Wake up worker thread to transport mailbox command from head */
@@ -6952,13 +7166,14 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
6952 7166
6953 /* Start timer for the mbox_tmo and log some mailbox post messages */ 7167 /* Start timer for the mbox_tmo and log some mailbox post messages */
6954 mod_timer(&psli->mbox_tmo, (jiffies + 7168 mod_timer(&psli->mbox_tmo, (jiffies +
6955 (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd)))); 7169 (HZ * lpfc_mbox_tmo_val(phba, mboxq))));
6956 7170
6957 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, 7171 lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6958 "(%d):0355 Mailbox cmd x%x (x%x) issue Data: " 7172 "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
6959 "x%x x%x\n", 7173 "x%x x%x\n",
6960 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd, 7174 mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
6961 lpfc_sli4_mbox_opcode_get(phba, mboxq), 7175 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7176 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
6962 phba->pport->port_state, psli->sli_flag); 7177 phba->pport->port_state, psli->sli_flag);
6963 7178
6964 if (mbx_cmnd != MBX_HEARTBEAT) { 7179 if (mbx_cmnd != MBX_HEARTBEAT) {
@@ -6982,11 +7197,12 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
6982 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); 7197 rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
6983 if (rc != MBX_SUCCESS) { 7198 if (rc != MBX_SUCCESS) {
6984 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, 7199 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6985 "(%d):2533 Mailbox command x%x (x%x) " 7200 "(%d):2533 Mailbox command x%x (x%x/x%x) "
6986 "cannot issue Data: x%x x%x\n", 7201 "cannot issue Data: x%x x%x\n",
6987 mboxq->vport ? mboxq->vport->vpi : 0, 7202 mboxq->vport ? mboxq->vport->vpi : 0,
6988 mboxq->u.mb.mbxCommand, 7203 mboxq->u.mb.mbxCommand,
6989 lpfc_sli4_mbox_opcode_get(phba, mboxq), 7204 lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7205 lpfc_sli_config_mbox_opcode_get(phba, mboxq),
6990 psli->sli_flag, MBX_NOWAIT); 7206 psli->sli_flag, MBX_NOWAIT);
6991 goto out_not_finished; 7207 goto out_not_finished;
6992 } 7208 }
@@ -7322,6 +7538,8 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
7322 if (inbound == 1) 7538 if (inbound == 1)
7323 offset = 0; 7539 offset = 0;
7324 bf_set(lpfc_sli4_sge_offset, sgl, offset); 7540 bf_set(lpfc_sli4_sge_offset, sgl, offset);
7541 bf_set(lpfc_sli4_sge_type, sgl,
7542 LPFC_SGE_TYPE_DATA);
7325 offset += bde.tus.f.bdeSize; 7543 offset += bde.tus.f.bdeSize;
7326 } 7544 }
7327 sgl->word2 = cpu_to_le32(sgl->word2); 7545 sgl->word2 = cpu_to_le32(sgl->word2);
@@ -9359,7 +9577,6 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
9359 9577
9360 /* now issue the command */ 9578 /* now issue the command */
9361 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); 9579 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
9362
9363 if (retval == MBX_BUSY || retval == MBX_SUCCESS) { 9580 if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
9364 wait_event_interruptible_timeout(done_q, 9581 wait_event_interruptible_timeout(done_q,
9365 pmboxq->mbox_flag & LPFC_MBX_WAKE, 9582 pmboxq->mbox_flag & LPFC_MBX_WAKE,
@@ -9403,23 +9620,24 @@ void
9403lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba) 9620lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
9404{ 9621{
9405 struct lpfc_sli *psli = &phba->sli; 9622 struct lpfc_sli *psli = &phba->sli;
9406 uint8_t actcmd = MBX_HEARTBEAT;
9407 unsigned long timeout; 9623 unsigned long timeout;
9408 9624
9625 timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
9409 spin_lock_irq(&phba->hbalock); 9626 spin_lock_irq(&phba->hbalock);
9410 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; 9627 psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
9411 spin_unlock_irq(&phba->hbalock); 9628 spin_unlock_irq(&phba->hbalock);
9412 9629
9413 if (psli->sli_flag & LPFC_SLI_ACTIVE) { 9630 if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9414 spin_lock_irq(&phba->hbalock); 9631 spin_lock_irq(&phba->hbalock);
9415 if (phba->sli.mbox_active)
9416 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
9417 spin_unlock_irq(&phba->hbalock);
9418 /* Determine how long we might wait for the active mailbox 9632 /* Determine how long we might wait for the active mailbox
9419 * command to be gracefully completed by firmware. 9633 * command to be gracefully completed by firmware.
9420 */ 9634 */
9421 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 9635 if (phba->sli.mbox_active)
9422 1000) + jiffies; 9636 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
9637 phba->sli.mbox_active) *
9638 1000) + jiffies;
9639 spin_unlock_irq(&phba->hbalock);
9640
9423 while (phba->sli.mbox_active) { 9641 while (phba->sli.mbox_active) {
9424 /* Check active mailbox complete status every 2ms */ 9642 /* Check active mailbox complete status every 2ms */
9425 msleep(2); 9643 msleep(2);
@@ -10415,12 +10633,17 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
10415 /* Move mbox data to caller's mailbox region, do endian swapping */ 10633 /* Move mbox data to caller's mailbox region, do endian swapping */
10416 if (pmb->mbox_cmpl && mbox) 10634 if (pmb->mbox_cmpl && mbox)
10417 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe)); 10635 lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
10418 /* Set the mailbox status with SLI4 range 0x4000 */
10419 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
10420 if (mcqe_status != MB_CQE_STATUS_SUCCESS)
10421 bf_set(lpfc_mqe_status, mqe,
10422 (LPFC_MBX_ERROR_RANGE | mcqe_status));
10423 10636
10637 /*
10638 * For mcqe errors, conditionally move a modified error code to
10639 * the mbox so that the error will not be missed.
10640 */
10641 mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
10642 if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
10643 if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
10644 bf_set(lpfc_mqe_status, mqe,
10645 (LPFC_MBX_ERROR_RANGE | mcqe_status));
10646 }
10424 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { 10647 if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
10425 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; 10648 pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
10426 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT, 10649 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
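The completion path now overwrites the MQE status only when the mailbox itself still reads MBX_SUCCESS, so a specific error already recorded by firmware survives while a silent MCQE failure still surfaces in the SLI4 0x4000 error range. Condensed:

/* Merge the completion-queue status into the mailbox status without
 * clobbering an error the firmware already set. */
mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
if (mcqe_status != MB_CQE_STATUS_SUCCESS &&
    bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
        bf_set(lpfc_mqe_status, mqe,
               (LPFC_MBX_ERROR_RANGE | mcqe_status));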
@@ -10796,7 +11019,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
10796 case LPFC_MCQ: 11019 case LPFC_MCQ:
10797 while ((cqe = lpfc_sli4_cq_get(cq))) { 11020 while ((cqe = lpfc_sli4_cq_get(cq))) {
10798 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); 11021 workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
10799 if (!(++ecount % LPFC_GET_QE_REL_INT)) 11022 if (!(++ecount % cq->entry_repost))
10800 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 11023 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
10801 } 11024 }
10802 break; 11025 break;
@@ -10808,7 +11031,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
10808 else 11031 else
10809 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, 11032 workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
10810 cqe); 11033 cqe);
10811 if (!(++ecount % LPFC_GET_QE_REL_INT)) 11034 if (!(++ecount % cq->entry_repost))
10812 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 11035 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
10813 } 11036 }
10814 break; 11037 break;
@@ -11040,7 +11263,7 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
11040 /* Process all the entries to the CQ */ 11263 /* Process all the entries to the CQ */
11041 while ((cqe = lpfc_sli4_cq_get(cq))) { 11264 while ((cqe = lpfc_sli4_cq_get(cq))) {
11042 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe); 11265 workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
11043 if (!(++ecount % LPFC_GET_QE_REL_INT)) 11266 if (!(++ecount % cq->entry_repost))
11044 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); 11267 lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
11045 } 11268 }
11046 11269
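The fixed LPFC_GET_QE_REL_INT interval of 32 gives way to the per-queue entry_repost in every consumer loop, so small queues return entries to the hardware proportionally sooner. The drain loop reduces to this shape (handle_cqe() is a hypothetical stand-in for the per-type handlers above):

/* Drain a CQ, releasing consumed entries every entry_repost entries
 * without rearming; rearm once the loop is done. */
struct lpfc_cqe *cqe;
bool workposted = false;
uint32_t ecount = 0;

while ((cqe = lpfc_sli4_cq_get(cq))) {
        workposted |= handle_cqe(phba, cq, cqe);
        if (!(++ecount % cq->entry_repost))
                lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
}
lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);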
@@ -11110,6 +11333,8 @@ lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
11110 11333
11111 /* Get to the EQ struct associated with this vector */ 11334 /* Get to the EQ struct associated with this vector */
11112 speq = phba->sli4_hba.sp_eq; 11335 speq = phba->sli4_hba.sp_eq;
11336 if (unlikely(!speq))
11337 return IRQ_NONE;
11113 11338
11114 /* Check device state for handling interrupt */ 11339 /* Check device state for handling interrupt */
11115 if (unlikely(lpfc_intr_state_check(phba))) { 11340 if (unlikely(lpfc_intr_state_check(phba))) {
@@ -11127,7 +11352,7 @@ lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
11127 */ 11352 */
11128 while ((eqe = lpfc_sli4_eq_get(speq))) { 11353 while ((eqe = lpfc_sli4_eq_get(speq))) {
11129 lpfc_sli4_sp_handle_eqe(phba, eqe); 11354 lpfc_sli4_sp_handle_eqe(phba, eqe);
11130 if (!(++ecount % LPFC_GET_QE_REL_INT)) 11355 if (!(++ecount % speq->entry_repost))
11131 lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM); 11356 lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
11132 } 11357 }
11133 11358
@@ -11187,6 +11412,8 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
11187 11412
11188 if (unlikely(!phba)) 11413 if (unlikely(!phba))
11189 return IRQ_NONE; 11414 return IRQ_NONE;
11415 if (unlikely(!phba->sli4_hba.fp_eq))
11416 return IRQ_NONE;
11190 11417
11191 /* Get to the EQ struct associated with this vector */ 11418 /* Get to the EQ struct associated with this vector */
11192 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx]; 11419 fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
@@ -11207,7 +11434,7 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
11207 */ 11434 */
11208 while ((eqe = lpfc_sli4_eq_get(fpeq))) { 11435 while ((eqe = lpfc_sli4_eq_get(fpeq))) {
11209 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx); 11436 lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
11210 if (!(++ecount % LPFC_GET_QE_REL_INT)) 11437 if (!(++ecount % fpeq->entry_repost))
11211 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); 11438 lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
11212 } 11439 }
11213 11440
@@ -11359,6 +11586,15 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
11359 } 11586 }
11360 queue->entry_size = entry_size; 11587 queue->entry_size = entry_size;
11361 queue->entry_count = entry_count; 11588 queue->entry_count = entry_count;
11589
11590 /*
11591 * entry_repost is calculated based on the number of entries in the
11592 * queue. This works out except for RQs. If buffers are NOT initially
11593 * posted for every RQE, entry_repost should be adjusted accordingly.
11594 */
11595 queue->entry_repost = (entry_count >> 3);
11596 if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
11597 queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
11362 queue->phba = phba; 11598 queue->phba = phba;
11363 11599
11364 return queue; 11600 return queue;
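entry_repost defaults to one-eighth of the queue depth, floored at LPFC_QUEUE_MIN_REPOST (8): a 256-entry CQ keeps the old cadence of 32, while a 32-entry queue now releases every 8 entries. The calculation, as a standalone helper:

/* Repost interval introduced above: entry_count/8, floor of 8.
 * 256 -> 32, 64 -> 8, 32 -> 8. */
static uint32_t repost_interval(uint32_t entry_count)
{
        uint32_t repost = entry_count >> 3;

        if (repost < LPFC_QUEUE_MIN_REPOST)
                repost = LPFC_QUEUE_MIN_REPOST;
        return repost;
}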
@@ -11924,6 +12160,31 @@ out:
11924} 12160}
11925 12161
11926/** 12162/**
12163 * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
12164 * @phba: HBA structure that indicates port to create a queue on.
12165 * @rq: The queue structure to use for the receive queue.
12166 * @qno: The associated HBQ number
12167 *
12168 *
12169 * For SLI4 we need to adjust the RQ repost value based on
12170 * the number of buffers that are initially posted to the RQ.
12171 */
12172void
12173lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
12174{
12175 uint32_t cnt;
12176
12177 cnt = lpfc_hbq_defs[qno]->entry_count;
12178
12179 /* Recalc repost for RQs based on buffers initially posted */
12180 cnt = (cnt >> 3);
12181 if (cnt < LPFC_QUEUE_MIN_REPOST)
12182 cnt = LPFC_QUEUE_MIN_REPOST;
12183
12184 rq->entry_repost = cnt;
12185}
12186
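Receive queues may start with fewer posted buffers than RQEs, so lpfc_rq_adjust_repost() recomputes the interval from the HBQ's initial buffer count rather than the queue depth. A plausible call site right after the RQ pair is created (hedged: the actual caller sits elsewhere in the driver):

/* Hypothetical usage: size the repost interval to the buffers that
 * were actually posted, not to the raw RQ depth. */
rc = lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
if (rc == 0) {
        lpfc_rq_adjust_repost(phba, hrq, LPFC_ELS_HBQ);
        lpfc_rq_adjust_repost(phba, drq, LPFC_ELS_HBQ);
}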
12187/**
11927 * lpfc_rq_create - Create a Receive Queue on the HBA 12188 * lpfc_rq_create - Create a Receive Queue on the HBA
11928 * @phba: HBA structure that indicates port to create a queue on. 12189 * @phba: HBA structure that indicates port to create a queue on.
11929 * @hrq: The queue structure to use to create the header receive queue. 12190 * @hrq: The queue structure to use to create the header receive queue.
@@ -12489,7 +12750,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba,
12489 if (!phba->sli4_hba.intr_enable) 12750 if (!phba->sli4_hba.intr_enable)
12490 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12751 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12491 else { 12752 else {
12492 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 12753 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
12493 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 12754 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
12494 } 12755 }
12495 /* The IOCTL status is embedded in the mailbox subheader. */ 12756 /* The IOCTL status is embedded in the mailbox subheader. */
@@ -12704,7 +12965,7 @@ lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba)
12704 if (!phba->sli4_hba.intr_enable) 12965 if (!phba->sli4_hba.intr_enable)
12705 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 12966 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12706 else { 12967 else {
12707 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 12968 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
12708 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 12969 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
12709 } 12970 }
12710 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 12971 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
@@ -12867,7 +13128,7 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba)
12867 if (!phba->sli4_hba.intr_enable) 13128 if (!phba->sli4_hba.intr_enable)
12868 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13129 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12869 else { 13130 else {
12870 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 13131 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
12871 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 13132 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
12872 } 13133 }
12873 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 13134 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
@@ -12991,7 +13252,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
12991 if (!phba->sli4_hba.intr_enable) 13252 if (!phba->sli4_hba.intr_enable)
12992 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13253 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
12993 else { 13254 else {
12994 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 13255 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
12995 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 13256 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
12996 } 13257 }
12997 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 13258 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
@@ -13147,7 +13408,7 @@ lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *phba, struct list_head *sblist,
13147 if (!phba->sli4_hba.intr_enable) 13408 if (!phba->sli4_hba.intr_enable)
13148 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 13409 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
13149 else { 13410 else {
13150 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 13411 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
13151 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 13412 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
13152 } 13413 }
13153 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; 13414 shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
@@ -13296,7 +13557,8 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
13296 uint32_t did = (fc_hdr->fh_d_id[0] << 16 | 13557 uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
13297 fc_hdr->fh_d_id[1] << 8 | 13558 fc_hdr->fh_d_id[1] << 8 |
13298 fc_hdr->fh_d_id[2]); 13559 fc_hdr->fh_d_id[2]);
13299 13560 if (did == Fabric_DID)
13561 return phba->pport;
13300 vports = lpfc_create_vport_work_array(phba); 13562 vports = lpfc_create_vport_work_array(phba);
13301 if (vports != NULL) 13563 if (vports != NULL)
13302 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { 13564 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
@@ -14312,7 +14574,7 @@ lpfc_sli4_init_vpi(struct lpfc_vport *vport)
14312 if (!mboxq) 14574 if (!mboxq)
14313 return -ENOMEM; 14575 return -ENOMEM;
14314 lpfc_init_vpi(phba, mboxq, vport->vpi); 14576 lpfc_init_vpi(phba, mboxq, vport->vpi);
14315 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI); 14577 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
14316 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); 14578 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
14317 if (rc != MBX_SUCCESS) { 14579 if (rc != MBX_SUCCESS) {
14318 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, 14580 lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
@@ -15188,7 +15450,7 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
15188 if (!phba->sli4_hba.intr_enable) 15450 if (!phba->sli4_hba.intr_enable)
15189 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); 15451 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15190 else { 15452 else {
15191 mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); 15453 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
15192 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); 15454 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
15193 } 15455 }
15194 /* The IOCTL status is embedded in the mailbox subheader. */ 15456 /* The IOCTL status is embedded in the mailbox subheader. */
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index a0075b0af14..29c13b63e32 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -293,13 +293,11 @@ struct lpfc_sli {
293 struct lpfc_lnk_stat lnk_stat_offsets; 293 struct lpfc_lnk_stat lnk_stat_offsets;
294}; 294};
295 295
296#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox 296/* Timeout for normal outstanding mbox command (Seconds) */
297 command */ 297#define LPFC_MBOX_TMO 30
298#define LPFC_MBOX_SLI4_CONFIG_TMO 60 /* Sec tmo for outstanding mbox 298/* Timeout for non-flash-based outstanding sli_config mbox command (Seconds) */
299 command */ 299#define LPFC_MBOX_SLI4_CONFIG_TMO 60
300#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write 300/* Timeout for flash-based outstanding sli_config mbox command (Seconds) */
301 * or erase cmds. This is especially 301#define LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO 300
302 * long because of the potential of 302/* Timeout for other flash-based outstanding mbox command (Seconds) */
303 * multiple flash erases that can be 303#define LPFC_MBOX_TMO_FLASH_CMD 300
304 * spawned.
305 */
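These constants back the reworked lpfc_mbox_tmo_val(), which now receives the mailbox itself and can distinguish a plain command (30s), a non-flash SLI_CONFIG (60s), and flash-touching work (300s). A hedged sketch of the selection — the real helper lives in lpfc_mbox.c, and is_sli4_config()/touches_flash() are invented predicates:

/* Illustrative timeout classing only; not the driver's code. */
static int example_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
        if (touches_flash(mboxq))
                return is_sli4_config(mboxq) ?
                        LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO :    /* 300s */
                        LPFC_MBOX_TMO_FLASH_CMD;                /* 300s */
        if (is_sli4_config(mboxq))
                return LPFC_MBOX_SLI4_CONFIG_TMO;               /* 60s */
        return LPFC_MBOX_TMO;                                   /* 30s */
}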
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 19bb87ae859..d5cffd8af34 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -23,7 +23,6 @@
23#define LPFC_XRI_EXCH_BUSY_WAIT_T1 10 23#define LPFC_XRI_EXCH_BUSY_WAIT_T1 10
24#define LPFC_XRI_EXCH_BUSY_WAIT_T2 30000 24#define LPFC_XRI_EXCH_BUSY_WAIT_T2 30000
25#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 25#define LPFC_RELEASE_NOTIFICATION_INTERVAL 32
26#define LPFC_GET_QE_REL_INT 32
27#define LPFC_RPI_LOW_WATER_MARK 10 26#define LPFC_RPI_LOW_WATER_MARK 10
28 27
29#define LPFC_UNREG_FCF 1 28#define LPFC_UNREG_FCF 1
@@ -126,6 +125,8 @@ struct lpfc_queue {
126 struct list_head child_list; 125 struct list_head child_list;
127 uint32_t entry_count; /* Number of entries to support on the queue */ 126 uint32_t entry_count; /* Number of entries to support on the queue */
128 uint32_t entry_size; /* Size of each queue entry. */ 127 uint32_t entry_size; /* Size of each queue entry. */
128 uint32_t entry_repost; /* Count of entries before doorbell is rung */
129#define LPFC_QUEUE_MIN_REPOST 8
129 uint32_t queue_id; /* Queue ID assigned by the hardware */ 130 uint32_t queue_id; /* Queue ID assigned by the hardware */
130 uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */ 131 uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */
131 struct list_head page_list; 132 struct list_head page_list;
@@ -388,6 +389,16 @@ struct lpfc_iov {
388 uint32_t vf_number; 389 uint32_t vf_number;
389}; 390};
390 391
392struct lpfc_sli4_lnk_info {
393 uint8_t lnk_dv;
394#define LPFC_LNK_DAT_INVAL 0
395#define LPFC_LNK_DAT_VAL 1
396 uint8_t lnk_tp;
397#define LPFC_LNK_GE 0x0 /* FCoE */
398#define LPFC_LNK_FC 0x1 /* FC */
399 uint8_t lnk_no;
400};
401
391/* SLI4 HBA data structure entries */ 402/* SLI4 HBA data structure entries */
392struct lpfc_sli4_hba { 403struct lpfc_sli4_hba {
393 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for 404 void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
@@ -503,6 +514,10 @@ struct lpfc_sli4_hba {
503 struct list_head sp_els_xri_aborted_work_queue; 514 struct list_head sp_els_xri_aborted_work_queue;
504 struct list_head sp_unsol_work_queue; 515 struct list_head sp_unsol_work_queue;
505 struct lpfc_sli4_link link_state; 516 struct lpfc_sli4_link link_state;
517 struct lpfc_sli4_lnk_info lnk_info;
518 uint32_t pport_name_sta;
519#define LPFC_SLI4_PPNAME_NON 0
520#define LPFC_SLI4_PPNAME_GET 1
506 struct lpfc_iov iov; 521 struct lpfc_iov iov;
507 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ 522 spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
508 spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */ 523 spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
@@ -553,6 +568,7 @@ struct lpfc_rsrc_blks {
553 * SLI4 specific function prototypes 568 * SLI4 specific function prototypes
554 */ 569 */
555int lpfc_pci_function_reset(struct lpfc_hba *); 570int lpfc_pci_function_reset(struct lpfc_hba *);
571int lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *);
556int lpfc_sli4_hba_setup(struct lpfc_hba *); 572int lpfc_sli4_hba_setup(struct lpfc_hba *);
557int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t, 573int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t,
558 uint8_t, uint32_t, bool); 574 uint8_t, uint32_t, bool);
@@ -576,6 +592,7 @@ uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
576 struct lpfc_queue *, uint32_t); 592 struct lpfc_queue *, uint32_t);
577uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *, 593uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
578 struct lpfc_queue *, struct lpfc_queue *, uint32_t); 594 struct lpfc_queue *, struct lpfc_queue *, uint32_t);
595void lpfc_rq_adjust_repost(struct lpfc_hba *, struct lpfc_queue *, int);
579uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *); 596uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
580uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *); 597uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
581uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *); 598uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
@@ -632,5 +649,5 @@ void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
632void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *); 649void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
633int lpfc_sli4_unregister_fcf(struct lpfc_hba *); 650int lpfc_sli4_unregister_fcf(struct lpfc_hba *);
634int lpfc_sli4_post_status_check(struct lpfc_hba *); 651int lpfc_sli4_post_status_check(struct lpfc_hba *);
635uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *); 652uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
636 653uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c1e0ae94d9f..b0630e37f1e 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
18 * included with this package. * 18 * included with this package. *
19 *******************************************************************/ 19 *******************************************************************/
20 20
21#define LPFC_DRIVER_VERSION "8.3.25" 21#define LPFC_DRIVER_VERSION "8.3.27"
22#define LPFC_DRIVER_NAME "lpfc" 22#define LPFC_DRIVER_NAME "lpfc"
23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" 23#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" 24#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 1feb551a57b..cff6ca67415 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -692,13 +692,14 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
692 /* Indicate free memory when release */ 692 /* Indicate free memory when release */
693 NLP_SET_FREE_REQ(ndlp); 693 NLP_SET_FREE_REQ(ndlp);
694 } else { 694 } else {
695 if (!NLP_CHK_NODE_ACT(ndlp)) 695 if (!NLP_CHK_NODE_ACT(ndlp)) {
696 ndlp = lpfc_enable_node(vport, ndlp, 696 ndlp = lpfc_enable_node(vport, ndlp,
697 NLP_STE_UNUSED_NODE); 697 NLP_STE_UNUSED_NODE);
698 if (!ndlp) 698 if (!ndlp)
699 goto skip_logo; 699 goto skip_logo;
700 }
700 701
701 /* Remove ndlp from vport npld list */ 702 /* Remove ndlp from vport list */
702 lpfc_dequeue_node(vport, ndlp); 703 lpfc_dequeue_node(vport, ndlp);
703 spin_lock_irq(&phba->ndlp_lock); 704 spin_lock_irq(&phba->ndlp_lock);
704 if (!NLP_CHK_FREE_REQ(ndlp)) 705 if (!NLP_CHK_FREE_REQ(ndlp))
@@ -711,8 +712,17 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
711 } 712 }
712 spin_unlock_irq(&phba->ndlp_lock); 713 spin_unlock_irq(&phba->ndlp_lock);
713 } 714 }
714 if (!(vport->vpi_state & LPFC_VPI_REGISTERED)) 715
716 /*
717 * If the vpi is not registered, then a valid FDISC doesn't
718 * exist and there is no need for an ELS LOGO. Just clean up
719 * the ndlp.
720 */
721 if (!(vport->vpi_state & LPFC_VPI_REGISTERED)) {
722 lpfc_nlp_put(ndlp);
715 goto skip_logo; 723 goto skip_logo;
724 }
725
716 vport->unreg_vpi_cmpl = VPORT_INVAL; 726 vport->unreg_vpi_cmpl = VPORT_INVAL;
717 timeout = msecs_to_jiffies(phba->fc_ratov * 2000); 727 timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
718 if (!lpfc_issue_els_npiv_logo(vport, ndlp)) 728 if (!lpfc_issue_els_npiv_logo(vport, ndlp))
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 3893337e3dd..590ce1ef201 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -230,9 +230,6 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
230 u32 dma_count, int write, u8 cmd) 230 u32 dma_count, int write, u8 cmd)
231{ 231{
232 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp); 232 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
233 unsigned long flags;
234
235 local_irq_save(flags);
236 233
237 mep->error = 0; 234 mep->error = 0;
238 235
@@ -270,8 +267,6 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
270 esp_count = n; 267 esp_count = n;
271 } 268 }
272 } while (esp_count); 269 } while (esp_count);
273
274 local_irq_restore(flags);
275} 270}
276 271
277/* 272/*
@@ -353,8 +348,6 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
353 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp); 348 struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
354 u8 *fifo = esp->regs + ESP_FDATA * 16; 349 u8 *fifo = esp->regs + ESP_FDATA * 16;
355 350
356 disable_irq(esp->host->irq);
357
358 cmd &= ~ESP_CMD_DMA; 351 cmd &= ~ESP_CMD_DMA;
359 mep->error = 0; 352 mep->error = 0;
360 353
@@ -431,8 +424,6 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
431 scsi_esp_cmd(esp, ESP_CMD_TI); 424 scsi_esp_cmd(esp, ESP_CMD_TI);
432 } 425 }
433 } 426 }
434
435 enable_irq(esp->host->irq);
436} 427}
437 428
438static int mac_esp_irq_pending(struct esp *esp) 429static int mac_esp_irq_pending(struct esp *esp)
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 3948a00d81f..dd94c7d574f 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
33/* 33/*
34 * MegaRAID SAS Driver meta data 34 * MegaRAID SAS Driver meta data
35 */ 35 */
36#define MEGASAS_VERSION "00.00.05.40-rc1" 36#define MEGASAS_VERSION "00.00.06.12-rc1"
37#define MEGASAS_RELDATE "Jul. 26, 2011" 37#define MEGASAS_RELDATE "Oct. 5, 2011"
38#define MEGASAS_EXT_VERSION "Tue. Jul. 26 17:00:00 PDT 2011" 38#define MEGASAS_EXT_VERSION "Wed. Oct. 5 17:00:00 PDT 2011"
39 39
40/* 40/*
41 * Device IDs 41 * Device IDs
@@ -48,6 +48,7 @@
48#define PCI_DEVICE_ID_LSI_SAS0073SKINNY 0x0073 48#define PCI_DEVICE_ID_LSI_SAS0073SKINNY 0x0073
49#define PCI_DEVICE_ID_LSI_SAS0071SKINNY 0x0071 49#define PCI_DEVICE_ID_LSI_SAS0071SKINNY 0x0071
50#define PCI_DEVICE_ID_LSI_FUSION 0x005b 50#define PCI_DEVICE_ID_LSI_FUSION 0x005b
51#define PCI_DEVICE_ID_LSI_INVADER 0x005d
51 52
52/* 53/*
53 * ===================================== 54 * =====================================
@@ -138,6 +139,7 @@
138#define MFI_CMD_ABORT 0x06 139#define MFI_CMD_ABORT 0x06
139#define MFI_CMD_SMP 0x07 140#define MFI_CMD_SMP 0x07
140#define MFI_CMD_STP 0x08 141#define MFI_CMD_STP 0x08
142#define MFI_CMD_INVALID 0xff
141 143
142#define MR_DCMD_CTRL_GET_INFO 0x01010000 144#define MR_DCMD_CTRL_GET_INFO 0x01010000
143#define MR_DCMD_LD_GET_LIST 0x03010000 145#define MR_DCMD_LD_GET_LIST 0x03010000
@@ -221,6 +223,7 @@ enum MFI_STAT {
221 MFI_STAT_RESERVATION_IN_PROGRESS = 0x36, 223 MFI_STAT_RESERVATION_IN_PROGRESS = 0x36,
222 MFI_STAT_I2C_ERRORS_DETECTED = 0x37, 224 MFI_STAT_I2C_ERRORS_DETECTED = 0x37,
223 MFI_STAT_PCI_ERRORS_DETECTED = 0x38, 225 MFI_STAT_PCI_ERRORS_DETECTED = 0x38,
226 MFI_STAT_CONFIG_SEQ_MISMATCH = 0x67,
224 227
225 MFI_STAT_INVALID_STATUS = 0xFF 228 MFI_STAT_INVALID_STATUS = 0xFF
226}; 229};
@@ -716,7 +719,7 @@ struct megasas_ctrl_info {
716#define MEGASAS_DEFAULT_INIT_ID -1 719#define MEGASAS_DEFAULT_INIT_ID -1
717#define MEGASAS_MAX_LUN 8 720#define MEGASAS_MAX_LUN 8
718#define MEGASAS_MAX_LD 64 721#define MEGASAS_MAX_LD 64
719#define MEGASAS_DEFAULT_CMD_PER_LUN 128 722#define MEGASAS_DEFAULT_CMD_PER_LUN 256
720#define MEGASAS_MAX_PD (MEGASAS_MAX_PD_CHANNELS * \ 723#define MEGASAS_MAX_PD (MEGASAS_MAX_PD_CHANNELS * \
721 MEGASAS_MAX_DEV_PER_CHANNEL) 724 MEGASAS_MAX_DEV_PER_CHANNEL)
722#define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \ 725#define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \
@@ -755,6 +758,7 @@ struct megasas_ctrl_info {
755#define MEGASAS_INT_CMDS 32 758#define MEGASAS_INT_CMDS 32
756#define MEGASAS_SKINNY_INT_CMDS 5 759#define MEGASAS_SKINNY_INT_CMDS 5
757 760
761#define MEGASAS_MAX_MSIX_QUEUES 16
758/* 762/*
759 * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit 763 * FW can accept both 32 and 64 bit SGLs. We want to allocate 32/64 bit
760 * SGLs based on the size of dma_addr_t 764 * SGLs based on the size of dma_addr_t
@@ -1276,6 +1280,11 @@ struct megasas_aen_event {
1276 struct megasas_instance *instance; 1280 struct megasas_instance *instance;
1277}; 1281};
1278 1282
1283struct megasas_irq_context {
1284 struct megasas_instance *instance;
1285 u32 MSIxIndex;
1286};
1287
1279struct megasas_instance { 1288struct megasas_instance {
1280 1289
1281 u32 *producer; 1290 u32 *producer;
@@ -1349,8 +1358,9 @@ struct megasas_instance {
1349 1358
1350 /* Ptr to hba specific information */ 1359 /* Ptr to hba specific information */
1351 void *ctrl_context; 1360 void *ctrl_context;
1352 u8 msi_flag; 1361 unsigned int msix_vectors;
1353 struct msix_entry msixentry; 1362 struct msix_entry msixentry[MEGASAS_MAX_MSIX_QUEUES];
1363 struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES];
1354 u64 map_id; 1364 u64 map_id;
1355 struct megasas_cmd *map_update_cmd; 1365 struct megasas_cmd *map_update_cmd;
1356 unsigned long bar; 1366 unsigned long bar;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 776d0198866..29a994f9c4f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 * 19 *
20 * FILE: megaraid_sas_base.c 20 * FILE: megaraid_sas_base.c
21 * Version : v00.00.05.40-rc1 21 * Version : v00.00.06.12-rc1
22 * 22 *
23 * Authors: LSI Corporation 23 * Authors: LSI Corporation
24 * Sreenivas Bagalkote 24 * Sreenivas Bagalkote
@@ -84,7 +84,7 @@ MODULE_VERSION(MEGASAS_VERSION);
84MODULE_AUTHOR("megaraidlinux@lsi.com"); 84MODULE_AUTHOR("megaraidlinux@lsi.com");
85MODULE_DESCRIPTION("LSI MegaRAID SAS Driver"); 85MODULE_DESCRIPTION("LSI MegaRAID SAS Driver");
86 86
87int megasas_transition_to_ready(struct megasas_instance *instance); 87int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
88static int megasas_get_pd_list(struct megasas_instance *instance); 88static int megasas_get_pd_list(struct megasas_instance *instance);
89static int megasas_issue_init_mfi(struct megasas_instance *instance); 89static int megasas_issue_init_mfi(struct megasas_instance *instance);
90static int megasas_register_aen(struct megasas_instance *instance, 90static int megasas_register_aen(struct megasas_instance *instance,
@@ -114,6 +114,8 @@ static struct pci_device_id megasas_pci_table[] = {
114 /* xscale IOP */ 114 /* xscale IOP */
115 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)}, 115 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
116 /* Fusion */ 116 /* Fusion */
117 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
118 /* Invader */
117 {} 119 {}
118}; 120};
119 121
@@ -213,6 +215,10 @@ megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
213 215
214 cmd->scmd = NULL; 216 cmd->scmd = NULL;
215 cmd->frame_count = 0; 217 cmd->frame_count = 0;
218 if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
219 (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
220 (reset_devices))
221 cmd->frame->hdr.cmd = MFI_CMD_INVALID;
216 list_add_tail(&cmd->list, &instance->cmd_pool); 222 list_add_tail(&cmd->list, &instance->cmd_pool);
217 223
218 spin_unlock_irqrestore(&instance->cmd_pool_lock, flags); 224 spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
@@ -1583,7 +1589,8 @@ void megaraid_sas_kill_hba(struct megasas_instance *instance)
1583{ 1589{
1584 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || 1590 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
1585 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 1591 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
1586 (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION)) { 1592 (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
1593 (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)) {
1587 writel(MFI_STOP_ADP, &instance->reg_set->doorbell); 1594 writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
1588 } else { 1595 } else {
1589 writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell); 1596 writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell);
@@ -1907,7 +1914,6 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd)
1907static enum 1914static enum
1908blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) 1915blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
1909{ 1916{
1910 struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr;
1911 struct megasas_instance *instance; 1917 struct megasas_instance *instance;
1912 unsigned long flags; 1918 unsigned long flags;
1913 1919
@@ -1916,7 +1922,7 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
1916 return BLK_EH_NOT_HANDLED; 1922 return BLK_EH_NOT_HANDLED;
1917 } 1923 }
1918 1924
1919 instance = cmd->instance; 1925 instance = (struct megasas_instance *)scmd->device->host->hostdata;
1920 if (!(instance->flag & MEGASAS_FW_BUSY)) { 1926 if (!(instance->flag & MEGASAS_FW_BUSY)) {
1921 /* FW is busy, throttle IO */ 1927 /* FW is busy, throttle IO */
1922 spin_lock_irqsave(instance->host->host_lock, flags); 1928 spin_lock_irqsave(instance->host->host_lock, flags);
@@ -1957,7 +1963,8 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
1957 /* 1963 /*
1958 * First wait for all commands to complete 1964 * First wait for all commands to complete
1959 */ 1965 */
1960 if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) 1966 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
1967 (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER))
1961 ret = megasas_reset_fusion(scmd->device->host); 1968 ret = megasas_reset_fusion(scmd->device->host);
1962 else 1969 else
1963 ret = megasas_generic_reset(scmd); 1970 ret = megasas_generic_reset(scmd);
@@ -2161,7 +2168,16 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
2161 cmd->scmd->SCp.ptr = NULL; 2168 cmd->scmd->SCp.ptr = NULL;
2162 2169
2163 switch (hdr->cmd) { 2170 switch (hdr->cmd) {
2164 2171 case MFI_CMD_INVALID:
2172 /* Some older 1068 controller FW may keep a pending
2173 MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
2174 when booting the kdump kernel. Ignore this command to
2175 prevent a kernel panic on shutdown of the kdump kernel. */
2176 printk(KERN_WARNING "megaraid_sas: MFI_CMD_INVALID command "
2177 "completed.\n");
2178 printk(KERN_WARNING "megaraid_sas: If you have a controller "
2179 "other than PERC5, please upgrade your firmware.\n");
2180 break;
2165 case MFI_CMD_PD_SCSI_IO: 2181 case MFI_CMD_PD_SCSI_IO:
2166 case MFI_CMD_LD_SCSI_IO: 2182 case MFI_CMD_LD_SCSI_IO:
2167 2183
@@ -2477,7 +2493,7 @@ process_fw_state_change_wq(struct work_struct *work)
2477 msleep(1000); 2493 msleep(1000);
2478 } 2494 }
2479 2495
2480 if (megasas_transition_to_ready(instance)) { 2496 if (megasas_transition_to_ready(instance, 1)) {
2481 printk(KERN_NOTICE "megaraid_sas:adapter not ready\n"); 2497 printk(KERN_NOTICE "megaraid_sas:adapter not ready\n");
2482 2498
2483 megaraid_sas_kill_hba(instance); 2499 megaraid_sas_kill_hba(instance);
@@ -2532,7 +2548,7 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
2532 instance->reg_set) 2548 instance->reg_set)
2533 ) == 0) { 2549 ) == 0) {
2534 /* Hardware may not set outbound_intr_status in MSI-X mode */ 2550 /* Hardware may not set outbound_intr_status in MSI-X mode */
2535 if (!instance->msi_flag) 2551 if (!instance->msix_vectors)
2536 return IRQ_NONE; 2552 return IRQ_NONE;
2537 } 2553 }
2538 2554
@@ -2590,16 +2606,14 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
2590 */ 2606 */
2591static irqreturn_t megasas_isr(int irq, void *devp) 2607static irqreturn_t megasas_isr(int irq, void *devp)
2592{ 2608{
2593 struct megasas_instance *instance; 2609 struct megasas_irq_context *irq_context = devp;
2610 struct megasas_instance *instance = irq_context->instance;
2594 unsigned long flags; 2611 unsigned long flags;
2595 irqreturn_t rc; 2612 irqreturn_t rc;
2596 2613
2597 if (atomic_read( 2614 if (atomic_read(&instance->fw_reset_no_pci_access))
2598 &(((struct megasas_instance *)devp)->fw_reset_no_pci_access)))
2599 return IRQ_HANDLED; 2615 return IRQ_HANDLED;
2600 2616
2601 instance = (struct megasas_instance *)devp;
2602
2603 spin_lock_irqsave(&instance->hba_lock, flags); 2617 spin_lock_irqsave(&instance->hba_lock, flags);
2604 rc = megasas_deplete_reply_queue(instance, DID_OK); 2618 rc = megasas_deplete_reply_queue(instance, DID_OK);
2605 spin_unlock_irqrestore(&instance->hba_lock, flags); 2619 spin_unlock_irqrestore(&instance->hba_lock, flags);
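With one instance potentially owning several MSI-X vectors, the ISR now receives a struct megasas_irq_context carrying both the instance and the vector's MSIxIndex, instead of the bare instance pointer. Registration hands each vector its own context, roughly as the probe hunk further down does:

/* Per-vector ISR context, mirroring the registration loop in
 * megasas_probe_one(); fail_irq is a hypothetical unwind label. */
for (i = 0; i < instance->msix_vectors; i++) {
        instance->irq_context[i].instance = instance;
        instance->irq_context[i].MSIxIndex = i;
        if (request_irq(instance->msixentry[i].vector,
                        instance->instancet->service_isr, 0,
                        "megasas", &instance->irq_context[i]))
                goto fail_irq;
}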
@@ -2617,7 +2631,7 @@ static irqreturn_t megasas_isr(int irq, void *devp)
2617 * has to wait for the ready state. 2631 * has to wait for the ready state.
2618 */ 2632 */
2619int 2633int
2620megasas_transition_to_ready(struct megasas_instance* instance) 2634megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
2621{ 2635{
2622 int i; 2636 int i;
2623 u8 max_wait; 2637 u8 max_wait;
@@ -2639,11 +2653,13 @@ megasas_transition_to_ready(struct megasas_instance* instance)
2639 switch (fw_state) { 2653 switch (fw_state) {
2640 2654
2641 case MFI_STATE_FAULT: 2655 case MFI_STATE_FAULT:
2642
2643 printk(KERN_DEBUG "megasas: FW in FAULT state!!\n"); 2656 printk(KERN_DEBUG "megasas: FW in FAULT state!!\n");
2644 max_wait = MEGASAS_RESET_WAIT_TIME; 2657 if (ocr) {
2645 cur_state = MFI_STATE_FAULT; 2658 max_wait = MEGASAS_RESET_WAIT_TIME;
2646 break; 2659 cur_state = MFI_STATE_FAULT;
2660 break;
2661 } else
2662 return -ENODEV;
2647 2663
2648 case MFI_STATE_WAIT_HANDSHAKE: 2664 case MFI_STATE_WAIT_HANDSHAKE:
2649 /* 2665 /*
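megasas_transition_to_ready() gains an ocr flag so that a controller stuck in FAULT is fatal during probe but tolerated during online controller reset, where the driver waits for the firmware to recover. The branch, factored into a sketch:

/* Sketch of the ocr-dependent FAULT handling above. */
static int handle_fault_state(int ocr, u8 *max_wait, u32 *cur_state)
{
        if (!ocr)
                return -ENODEV;                 /* probe path: fail fast */
        *max_wait = MEGASAS_RESET_WAIT_TIME;    /* OCR path: wait it out */
        *cur_state = MFI_STATE_FAULT;
        return 0;
}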
@@ -2654,7 +2670,9 @@ megasas_transition_to_ready(struct megasas_instance* instance)
2654 (instance->pdev->device == 2670 (instance->pdev->device ==
2655 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 2671 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2656 (instance->pdev->device == 2672 (instance->pdev->device ==
2657 PCI_DEVICE_ID_LSI_FUSION)) { 2673 PCI_DEVICE_ID_LSI_FUSION) ||
2674 (instance->pdev->device ==
2675 PCI_DEVICE_ID_LSI_INVADER)) {
2658 writel( 2676 writel(
2659 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, 2677 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
2660 &instance->reg_set->doorbell); 2678 &instance->reg_set->doorbell);
@@ -2674,7 +2692,9 @@ megasas_transition_to_ready(struct megasas_instance* instance)
2674 (instance->pdev->device == 2692 (instance->pdev->device ==
2675 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 2693 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2676 (instance->pdev->device == 2694 (instance->pdev->device ==
2677 PCI_DEVICE_ID_LSI_FUSION)) { 2695 PCI_DEVICE_ID_LSI_FUSION) ||
2696 (instance->pdev->device ==
2697 PCI_DEVICE_ID_LSI_INVADER)) {
2678 writel(MFI_INIT_HOTPLUG, 2698 writel(MFI_INIT_HOTPLUG,
2679 &instance->reg_set->doorbell); 2699 &instance->reg_set->doorbell);
2680 } else 2700 } else
@@ -2695,11 +2715,15 @@ megasas_transition_to_ready(struct megasas_instance* instance)
2695 (instance->pdev->device == 2715 (instance->pdev->device ==
2696 PCI_DEVICE_ID_LSI_SAS0071SKINNY) || 2716 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2697 (instance->pdev->device 2717 (instance->pdev->device
2698 == PCI_DEVICE_ID_LSI_FUSION)) { 2718 == PCI_DEVICE_ID_LSI_FUSION) ||
2719 (instance->pdev->device
2720 == PCI_DEVICE_ID_LSI_INVADER)) {
2699 writel(MFI_RESET_FLAGS, 2721 writel(MFI_RESET_FLAGS,
2700 &instance->reg_set->doorbell); 2722 &instance->reg_set->doorbell);
2701 if (instance->pdev->device == 2723 if ((instance->pdev->device ==
2702 PCI_DEVICE_ID_LSI_FUSION) { 2724 PCI_DEVICE_ID_LSI_FUSION) ||
2725 (instance->pdev->device ==
2726 PCI_DEVICE_ID_LSI_INVADER)) {
2703 for (i = 0; i < (10 * 1000); i += 20) { 2727 for (i = 0; i < (10 * 1000); i += 20) {
2704 if (readl( 2728 if (readl(
2705 &instance-> 2729 &instance->
@@ -2922,6 +2946,10 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
2922 memset(cmd->frame, 0, total_sz); 2946 memset(cmd->frame, 0, total_sz);
2923 cmd->frame->io.context = cmd->index; 2947 cmd->frame->io.context = cmd->index;
2924 cmd->frame->io.pad_0 = 0; 2948 cmd->frame->io.pad_0 = 0;
2949 if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
2950 (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
2951 (reset_devices))
2952 cmd->frame->hdr.cmd = MFI_CMD_INVALID;
2925 } 2953 }
2926 2954
2927 return 0; 2955 return 0;
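When reset_devices indicates a kdump kernel, every pre-allocated frame on a non-Fusion controller is tagged MFI_CMD_INVALID; if stale PERC5/1068 firmware later completes one of those frames, the completion handler recognizes the sentinel and ignores it rather than touching state owned by the crashed kernel. The guard, isolated as a sketch:

/* Should this frame carry the kdump MFI_CMD_INVALID sentinel? */
static bool needs_kdump_sentinel(struct megasas_instance *instance)
{
        return (instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
               (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
               reset_devices;   /* set only in the kdump kernel */
}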
@@ -3474,6 +3502,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
3474 struct megasas_register_set __iomem *reg_set; 3502 struct megasas_register_set __iomem *reg_set;
3475 struct megasas_ctrl_info *ctrl_info; 3503 struct megasas_ctrl_info *ctrl_info;
3476 unsigned long bar_list; 3504 unsigned long bar_list;
3505 int i;
3477 3506
3478 /* Find first memory bar */ 3507 /* Find first memory bar */
3479 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); 3508 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
@@ -3496,6 +3525,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
3496 3525
3497 switch (instance->pdev->device) { 3526 switch (instance->pdev->device) {
3498 case PCI_DEVICE_ID_LSI_FUSION: 3527 case PCI_DEVICE_ID_LSI_FUSION:
3528 case PCI_DEVICE_ID_LSI_INVADER:
3499 instance->instancet = &megasas_instance_template_fusion; 3529 instance->instancet = &megasas_instance_template_fusion;
3500 break; 3530 break;
3501 case PCI_DEVICE_ID_LSI_SAS1078R: 3531 case PCI_DEVICE_ID_LSI_SAS1078R:
@@ -3520,15 +3550,39 @@ static int megasas_init_fw(struct megasas_instance *instance)
3520 /* 3550 /*
3521 * We expect the FW state to be READY 3551 * We expect the FW state to be READY
3522 */ 3552 */
3523 if (megasas_transition_to_ready(instance)) 3553 if (megasas_transition_to_ready(instance, 0))
3524 goto fail_ready_state; 3554 goto fail_ready_state;
3525 3555
3526 /* Check if MSI-X is supported while in ready state */ 3556 /* Check if MSI-X is supported while in ready state */
3527 msix_enable = (instance->instancet->read_fw_status_reg(reg_set) & 3557 msix_enable = (instance->instancet->read_fw_status_reg(reg_set) &
3528 0x4000000) >> 0x1a; 3558 0x4000000) >> 0x1a;
3529 if (msix_enable && !msix_disable && 3559 if (msix_enable && !msix_disable) {
3530 !pci_enable_msix(instance->pdev, &instance->msixentry, 1)) 3560 /* Check max MSI-X vectors */
3531 instance->msi_flag = 1; 3561 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
3562 (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)) {
3563 instance->msix_vectors = (readl(&instance->reg_set->
3564 outbound_scratch_pad_2
3565 ) & 0x1F) + 1;
3566 } else
3567 instance->msix_vectors = 1;
3568 /* Don't bother allocating more MSI-X vectors than cpus */
3569 instance->msix_vectors = min(instance->msix_vectors,
3570 (unsigned int)num_online_cpus());
3571 for (i = 0; i < instance->msix_vectors; i++)
3572 instance->msixentry[i].entry = i;
3573 i = pci_enable_msix(instance->pdev, instance->msixentry,
3574 instance->msix_vectors);
3575 if (i >= 0) {
3576 if (i) {
3577 if (!pci_enable_msix(instance->pdev,
3578 instance->msixentry, i))
3579 instance->msix_vectors = i;
3580 else
3581 instance->msix_vectors = 0;
3582 }
3583 } else
3584 instance->msix_vectors = 0;
3585 }
3532 3586
3533 /* Get operational params, sge flags, send init cmd to controller */ 3587 /* Get operational params, sge flags, send init cmd to controller */
3534 if (instance->instancet->init_adapter(instance)) 3588 if (instance->instancet->init_adapter(instance))
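Note on the hunk above: it replaces the old single-vector msi_flag logic. The driver now reads the firmware's supported vector count from outbound scratch pad 2 (low 5 bits, plus one), caps it at the number of online CPUs, and negotiates with the kernel using the pre-3.14 pci_enable_msix() convention, where a positive return value means "only this many vectors are available". A minimal sketch of that retry idiom, with a hypothetical helper name:

    static int enable_msix_best_effort(struct pci_dev *pdev,
                                       struct msix_entry *entries, int want)
    {
            int i, rc;

            for (i = 0; i < want; i++)
                    entries[i].entry = i;  /* vector table slots 0..want-1 */

            rc = pci_enable_msix(pdev, entries, want);
            if (rc == 0)
                    return want;           /* granted everything we asked */
            if (rc > 0 && pci_enable_msix(pdev, entries, rc) == 0)
                    return rc;             /* retry with what is on offer */
            return 0;                      /* errno: caller falls back to INTx */
    }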
@@ -3892,7 +3946,8 @@ static int megasas_io_attach(struct megasas_instance *instance)
3892 host->max_cmd_len = 16; 3946 host->max_cmd_len = 16;
3893 3947
3894 /* Fusion only supports host reset */ 3948 /* Fusion only supports host reset */
3895 if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) { 3949 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
3950 (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)) {
3896 host->hostt->eh_device_reset_handler = NULL; 3951 host->hostt->eh_device_reset_handler = NULL;
3897 host->hostt->eh_bus_reset_handler = NULL; 3952 host->hostt->eh_bus_reset_handler = NULL;
3898 } 3953 }
@@ -3942,7 +3997,7 @@ fail_set_dma_mask:
3942static int __devinit 3997static int __devinit
3943megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) 3998megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3944{ 3999{
3945 int rval, pos; 4000 int rval, pos, i, j;
3946 struct Scsi_Host *host; 4001 struct Scsi_Host *host;
3947 struct megasas_instance *instance; 4002 struct megasas_instance *instance;
3948 u16 control = 0; 4003 u16 control = 0;
@@ -4002,6 +4057,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4002 4057
4003 switch (instance->pdev->device) { 4058 switch (instance->pdev->device) {
4004 case PCI_DEVICE_ID_LSI_FUSION: 4059 case PCI_DEVICE_ID_LSI_FUSION:
4060 case PCI_DEVICE_ID_LSI_INVADER:
4005 { 4061 {
4006 struct fusion_context *fusion; 4062 struct fusion_context *fusion;
4007 4063
@@ -4094,7 +4150,8 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4094 instance->last_time = 0; 4150 instance->last_time = 0;
4095 instance->disableOnlineCtrlReset = 1; 4151 instance->disableOnlineCtrlReset = 1;
4096 4152
4097 if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) 4153 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
4154 (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER))
4098 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); 4155 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
4099 else 4156 else
4100 INIT_WORK(&instance->work_init, process_fw_state_change_wq); 4157 INIT_WORK(&instance->work_init, process_fw_state_change_wq);
@@ -4108,11 +4165,32 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4108 /* 4165 /*
4109 * Register IRQ 4166 * Register IRQ
4110 */ 4167 */
4111 if (request_irq(instance->msi_flag ? instance->msixentry.vector : 4168 if (instance->msix_vectors) {
4112 pdev->irq, instance->instancet->service_isr, 4169 for (i = 0 ; i < instance->msix_vectors; i++) {
4113 IRQF_SHARED, "megasas", instance)) { 4170 instance->irq_context[i].instance = instance;
4114 printk(KERN_DEBUG "megasas: Failed to register IRQ\n"); 4171 instance->irq_context[i].MSIxIndex = i;
4115 goto fail_irq; 4172 if (request_irq(instance->msixentry[i].vector,
4173 instance->instancet->service_isr, 0,
4174 "megasas",
4175 &instance->irq_context[i])) {
4176 printk(KERN_DEBUG "megasas: Failed to "
4177 "register IRQ for vector %d.\n", i);
4178 for (j = 0 ; j < i ; j++)
4179 free_irq(
4180 instance->msixentry[j].vector,
4181 &instance->irq_context[j]);
4182 goto fail_irq;
4183 }
4184 }
4185 } else {
4186 instance->irq_context[0].instance = instance;
4187 instance->irq_context[0].MSIxIndex = 0;
4188 if (request_irq(pdev->irq, instance->instancet->service_isr,
4189 IRQF_SHARED, "megasas",
4190 &instance->irq_context[0])) {
4191 printk(KERN_DEBUG "megasas: Failed to register IRQ\n");
4192 goto fail_irq;
4193 }
4116 } 4194 }
4117 4195
4118 instance->instancet->enable_intr(instance->reg_set); 4196 instance->instancet->enable_intr(instance->reg_set);
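Since each vector needs its own dev_id cookie, the bare instance pointer formerly handed to request_irq() gives way to a small per-vector context, so the ISR can service exactly its own reply queue. The fields mirror what the diff assigns; the handler body is an illustrative reduction (the real ISR also checks firmware state):

    struct megasas_irq_context {
            struct megasas_instance *instance;
            u32 MSIxIndex;
    };

    static irqreturn_t service_isr_sketch(int irq, void *devp)
    {
            struct megasas_irq_context *ctx = devp;

            /* Drain only the reply queue that this vector owns. */
            return complete_cmd_fusion(ctx->instance, ctx->MSIxIndex);
    }

Note the per-vector path also drops IRQF_SHARED: MSI-X vectors are never shared between devices, while the legacy pin-based path keeps the flag.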
@@ -4156,15 +4234,20 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4156 4234
4157 pci_set_drvdata(pdev, NULL); 4235 pci_set_drvdata(pdev, NULL);
4158 instance->instancet->disable_intr(instance->reg_set); 4236 instance->instancet->disable_intr(instance->reg_set);
4159 free_irq(instance->msi_flag ? instance->msixentry.vector : 4237 if (instance->msix_vectors)
4160 instance->pdev->irq, instance); 4238 for (i = 0 ; i < instance->msix_vectors; i++)
4239 free_irq(instance->msixentry[i].vector,
4240 &instance->irq_context[i]);
4241 else
4242 free_irq(instance->pdev->irq, &instance->irq_context[0]);
4161fail_irq: 4243fail_irq:
4162 if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) 4244 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
4245 (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER))
4163 megasas_release_fusion(instance); 4246 megasas_release_fusion(instance);
4164 else 4247 else
4165 megasas_release_mfi(instance); 4248 megasas_release_mfi(instance);
4166 fail_init_mfi: 4249 fail_init_mfi:
4167 if (instance->msi_flag) 4250 if (instance->msix_vectors)
4168 pci_disable_msix(instance->pdev); 4251 pci_disable_msix(instance->pdev);
4169 fail_alloc_dma_buf: 4252 fail_alloc_dma_buf:
4170 if (instance->evt_detail) 4253 if (instance->evt_detail)
@@ -4280,6 +4363,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
4280{ 4363{
4281 struct Scsi_Host *host; 4364 struct Scsi_Host *host;
4282 struct megasas_instance *instance; 4365 struct megasas_instance *instance;
4366 int i;
4283 4367
4284 instance = pci_get_drvdata(pdev); 4368 instance = pci_get_drvdata(pdev);
4285 host = instance->host; 4369 host = instance->host;
@@ -4303,9 +4387,14 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
4303 4387
4304 pci_set_drvdata(instance->pdev, instance); 4388 pci_set_drvdata(instance->pdev, instance);
4305 instance->instancet->disable_intr(instance->reg_set); 4389 instance->instancet->disable_intr(instance->reg_set);
4306 free_irq(instance->msi_flag ? instance->msixentry.vector : 4390
4307 instance->pdev->irq, instance); 4391 if (instance->msix_vectors)
4308 if (instance->msi_flag) 4392 for (i = 0 ; i < instance->msix_vectors; i++)
4393 free_irq(instance->msixentry[i].vector,
4394 &instance->irq_context[i]);
4395 else
4396 free_irq(instance->pdev->irq, &instance->irq_context[0]);
4397 if (instance->msix_vectors)
4309 pci_disable_msix(instance->pdev); 4398 pci_disable_msix(instance->pdev);
4310 4399
4311 pci_save_state(pdev); 4400 pci_save_state(pdev);
@@ -4323,7 +4412,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
4323static int 4412static int
4324megasas_resume(struct pci_dev *pdev) 4413megasas_resume(struct pci_dev *pdev)
4325{ 4414{
4326 int rval; 4415 int rval, i, j;
4327 struct Scsi_Host *host; 4416 struct Scsi_Host *host;
4328 struct megasas_instance *instance; 4417 struct megasas_instance *instance;
4329 4418
@@ -4357,15 +4446,17 @@ megasas_resume(struct pci_dev *pdev)
4357 /* 4446 /*
4358 * We expect the FW state to be READY 4447 * We expect the FW state to be READY
4359 */ 4448 */
4360 if (megasas_transition_to_ready(instance)) 4449 if (megasas_transition_to_ready(instance, 0))
4361 goto fail_ready_state; 4450 goto fail_ready_state;
4362 4451
4363 /* Now re-enable MSI-X */ 4452 /* Now re-enable MSI-X */
4364 if (instance->msi_flag) 4453 if (instance->msix_vectors)
4365 pci_enable_msix(instance->pdev, &instance->msixentry, 1); 4454 pci_enable_msix(instance->pdev, instance->msixentry,
4455 instance->msix_vectors);
4366 4456
4367 switch (instance->pdev->device) { 4457 switch (instance->pdev->device) {
4368 case PCI_DEVICE_ID_LSI_FUSION: 4458 case PCI_DEVICE_ID_LSI_FUSION:
4459 case PCI_DEVICE_ID_LSI_INVADER:
4369 { 4460 {
4370 megasas_reset_reply_desc(instance); 4461 megasas_reset_reply_desc(instance);
4371 if (megasas_ioc_init_fusion(instance)) { 4462 if (megasas_ioc_init_fusion(instance)) {
@@ -4391,11 +4482,32 @@ megasas_resume(struct pci_dev *pdev)
4391 /* 4482 /*
4392 * Register IRQ 4483 * Register IRQ
4393 */ 4484 */
4394 if (request_irq(instance->msi_flag ? instance->msixentry.vector : 4485 if (instance->msix_vectors) {
4395 pdev->irq, instance->instancet->service_isr, 4486 for (i = 0 ; i < instance->msix_vectors; i++) {
4396 IRQF_SHARED, "megasas", instance)) { 4487 instance->irq_context[i].instance = instance;
4397 printk(KERN_ERR "megasas: Failed to register IRQ\n"); 4488 instance->irq_context[i].MSIxIndex = i;
4398 goto fail_irq; 4489 if (request_irq(instance->msixentry[i].vector,
4490 instance->instancet->service_isr, 0,
4491 "megasas",
4492 &instance->irq_context[i])) {
4493 printk(KERN_DEBUG "megasas: Failed to "
4494 "register IRQ for vector %d.\n", i);
4495 for (j = 0 ; j < i ; j++)
4496 free_irq(
4497 instance->msixentry[j].vector,
4498 &instance->irq_context[j]);
4499 goto fail_irq;
4500 }
4501 }
4502 } else {
4503 instance->irq_context[0].instance = instance;
4504 instance->irq_context[0].MSIxIndex = 0;
4505 if (request_irq(pdev->irq, instance->instancet->service_isr,
4506 IRQF_SHARED, "megasas",
4507 &instance->irq_context[0])) {
4508 printk(KERN_DEBUG "megasas: Failed to register IRQ\n");
4509 goto fail_irq;
4510 }
4399 } 4511 }
4400 4512
4401 instance->instancet->enable_intr(instance->reg_set); 4513 instance->instancet->enable_intr(instance->reg_set);
@@ -4492,13 +4604,18 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
4492 4604
4493 instance->instancet->disable_intr(instance->reg_set); 4605 instance->instancet->disable_intr(instance->reg_set);
4494 4606
4495 free_irq(instance->msi_flag ? instance->msixentry.vector : 4607 if (instance->msix_vectors)
4496 instance->pdev->irq, instance); 4608 for (i = 0 ; i < instance->msix_vectors; i++)
4497 if (instance->msi_flag) 4609 free_irq(instance->msixentry[i].vector,
4610 &instance->irq_context[i]);
4611 else
4612 free_irq(instance->pdev->irq, &instance->irq_context[0]);
4613 if (instance->msix_vectors)
4498 pci_disable_msix(instance->pdev); 4614 pci_disable_msix(instance->pdev);
4499 4615
4500 switch (instance->pdev->device) { 4616 switch (instance->pdev->device) {
4501 case PCI_DEVICE_ID_LSI_FUSION: 4617 case PCI_DEVICE_ID_LSI_FUSION:
4618 case PCI_DEVICE_ID_LSI_INVADER:
4502 megasas_release_fusion(instance); 4619 megasas_release_fusion(instance);
4503 for (i = 0; i < 2 ; i++) 4620 for (i = 0; i < 2 ; i++)
4504 if (fusion->ld_map[i]) 4621 if (fusion->ld_map[i])
@@ -4539,14 +4656,20 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
4539 */ 4656 */
4540static void megasas_shutdown(struct pci_dev *pdev) 4657static void megasas_shutdown(struct pci_dev *pdev)
4541{ 4658{
4659 int i;
4542 struct megasas_instance *instance = pci_get_drvdata(pdev); 4660 struct megasas_instance *instance = pci_get_drvdata(pdev);
4661
4543 instance->unload = 1; 4662 instance->unload = 1;
4544 megasas_flush_cache(instance); 4663 megasas_flush_cache(instance);
4545 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); 4664 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
4546 instance->instancet->disable_intr(instance->reg_set); 4665 instance->instancet->disable_intr(instance->reg_set);
4547 free_irq(instance->msi_flag ? instance->msixentry.vector : 4666 if (instance->msix_vectors)
4548 instance->pdev->irq, instance); 4667 for (i = 0 ; i < instance->msix_vectors; i++)
4549 if (instance->msi_flag) 4668 free_irq(instance->msixentry[i].vector,
4669 &instance->irq_context[i]);
4670 else
4671 free_irq(instance->pdev->irq, &instance->irq_context[0]);
4672 if (instance->msix_vectors)
4550 pci_disable_msix(instance->pdev); 4673 pci_disable_msix(instance->pdev);
4551} 4674}
4552 4675
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 5a5af1fe758..5255dd688ac 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -52,6 +52,7 @@
52#include <scsi/scsi_host.h> 52#include <scsi/scsi_host.h>
53 53
54#include "megaraid_sas_fusion.h" 54#include "megaraid_sas_fusion.h"
55#include "megaraid_sas.h"
55#include <asm/div64.h> 56#include <asm/div64.h>
56 57
57#define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a))) 58#define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
@@ -226,8 +227,9 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
226* span - Span number 227* span - Span number
227* block - Absolute Block number in the physical disk 228* block - Absolute Block number in the physical disk
228*/ 229*/
229u8 MR_GetPhyParams(u32 ld, u64 stripRow, u16 stripRef, u64 *pdBlock, 230u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
230 u16 *pDevHandle, struct RAID_CONTEXT *pRAID_Context, 231 u16 stripRef, u64 *pdBlock, u16 *pDevHandle,
232 struct RAID_CONTEXT *pRAID_Context,
231 struct MR_FW_RAID_MAP_ALL *map) 233 struct MR_FW_RAID_MAP_ALL *map)
232{ 234{
233 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); 235 struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map);
@@ -279,7 +281,8 @@ u8 MR_GetPhyParams(u32 ld, u64 stripRow, u16 stripRef, u64 *pdBlock,
279 *pDevHandle = MR_PdDevHandleGet(pd, map); 281 *pDevHandle = MR_PdDevHandleGet(pd, map);
280 else { 282 else {
281 *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */ 283 *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
282 if (raid->level >= 5) 284 if ((raid->level >= 5) &&
285 (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER))
283 pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE; 286 pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
284 else if (raid->level == 1) { 287 else if (raid->level == 1) {
285 /* Get alternate Pd. */ 288 /* Get alternate Pd. */
@@ -306,7 +309,8 @@ u8 MR_GetPhyParams(u32 ld, u64 stripRow, u16 stripRef, u64 *pdBlock,
306* This function will return 0 if region lock was acquired OR return num strips 309* This function will return 0 if region lock was acquired OR return num strips
307*/ 310*/
308u8 311u8
309MR_BuildRaidContext(struct IO_REQUEST_INFO *io_info, 312MR_BuildRaidContext(struct megasas_instance *instance,
313 struct IO_REQUEST_INFO *io_info,
310 struct RAID_CONTEXT *pRAID_Context, 314 struct RAID_CONTEXT *pRAID_Context,
311 struct MR_FW_RAID_MAP_ALL *map) 315 struct MR_FW_RAID_MAP_ALL *map)
312{ 316{
@@ -394,8 +398,12 @@ MR_BuildRaidContext(struct IO_REQUEST_INFO *io_info,
394 } 398 }
395 399
396 pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec; 400 pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec;
397 pRAID_Context->regLockFlags = (isRead) ? REGION_TYPE_SHARED_READ : 401 if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)
398 raid->regTypeReqOnWrite; 402 pRAID_Context->regLockFlags = (isRead) ?
403 raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
404 else
405 pRAID_Context->regLockFlags = (isRead) ?
406 REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
399 pRAID_Context->VirtualDiskTgtId = raid->targetId; 407 pRAID_Context->VirtualDiskTgtId = raid->targetId;
400 pRAID_Context->regLockRowLBA = regStart; 408 pRAID_Context->regLockRowLBA = regStart;
401 pRAID_Context->regLockLength = regSize; 409 pRAID_Context->regLockLength = regSize;
@@ -404,7 +412,8 @@ MR_BuildRaidContext(struct IO_REQUEST_INFO *io_info,
404 /*Get Phy Params only if FP capable, or else leave it to MR firmware 412 /*Get Phy Params only if FP capable, or else leave it to MR firmware
405 to do the calculation.*/ 413 to do the calculation.*/
406 if (io_info->fpOkForIo) { 414 if (io_info->fpOkForIo) {
407 retval = MR_GetPhyParams(ld, start_strip, ref_in_start_stripe, 415 retval = MR_GetPhyParams(instance, ld, start_strip,
416 ref_in_start_stripe,
408 &io_info->pdBlock, 417 &io_info->pdBlock,
409 &io_info->devHandle, pRAID_Context, 418 &io_info->devHandle, pRAID_Context,
410 map); 419 map);
@@ -415,7 +424,8 @@ MR_BuildRaidContext(struct IO_REQUEST_INFO *io_info,
415 } else if (isRead) { 424 } else if (isRead) {
416 uint stripIdx; 425 uint stripIdx;
417 for (stripIdx = 0; stripIdx < num_strips; stripIdx++) { 426 for (stripIdx = 0; stripIdx < num_strips; stripIdx++) {
418 if (!MR_GetPhyParams(ld, start_strip + stripIdx, 427 if (!MR_GetPhyParams(instance, ld,
428 start_strip + stripIdx,
419 ref_in_start_stripe, 429 ref_in_start_stripe,
420 &io_info->pdBlock, 430 &io_info->pdBlock,
421 &io_info->devHandle, 431 &io_info->devHandle,
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index f13e7abd345..bfd87fab39a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -74,7 +74,8 @@ megasas_issue_polled(struct megasas_instance *instance,
74 struct megasas_cmd *cmd); 74 struct megasas_cmd *cmd);
75 75
76u8 76u8
77MR_BuildRaidContext(struct IO_REQUEST_INFO *io_info, 77MR_BuildRaidContext(struct megasas_instance *instance,
78 struct IO_REQUEST_INFO *io_info,
78 struct RAID_CONTEXT *pRAID_Context, 79 struct RAID_CONTEXT *pRAID_Context,
79 struct MR_FW_RAID_MAP_ALL *map); 80 struct MR_FW_RAID_MAP_ALL *map);
80u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map); 81u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
@@ -89,7 +90,7 @@ u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map,
89 struct LD_LOAD_BALANCE_INFO *lbInfo); 90 struct LD_LOAD_BALANCE_INFO *lbInfo);
90u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo, 91u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo,
91 struct IO_REQUEST_INFO *in_info); 92 struct IO_REQUEST_INFO *in_info);
92int megasas_transition_to_ready(struct megasas_instance *instance); 93int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
93void megaraid_sas_kill_hba(struct megasas_instance *instance); 94void megaraid_sas_kill_hba(struct megasas_instance *instance);
94 95
95extern u32 megasas_dbg_lvl; 96extern u32 megasas_dbg_lvl;
@@ -101,6 +102,10 @@ extern u32 megasas_dbg_lvl;
101void 102void
102megasas_enable_intr_fusion(struct megasas_register_set __iomem *regs) 103megasas_enable_intr_fusion(struct megasas_register_set __iomem *regs)
103{ 104{
105 /* For Thunderbolt/Invader also clear intr on enable */
106 writel(~0, &regs->outbound_intr_status);
107 readl(&regs->outbound_intr_status);
108
104 writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); 109 writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
105 110
106 /* Dummy readl to force pci flush */ 111 /* Dummy readl to force pci flush */
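On the enable_intr change above: acknowledging any latched status and reading it back (to flush the posted write) before unmasking keeps a stale, already-serviced interrupt from firing the instant interrupts are re-enabled after reset. The resulting sequence, sketched outside the diff (regs assumed to be the megasas_register_set from the driver):

    writel(~0, &regs->outbound_intr_status);        /* ack latched bits   */
    readl(&regs->outbound_intr_status);             /* flush posted write */
    writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK,
           &regs->outbound_intr_mask);              /* unmask             */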
@@ -139,11 +144,6 @@ megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs)
139 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) 144 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
140 return 0; 145 return 0;
141 146
142 /*
143 * dummy read to flush PCI
144 */
145 readl(&regs->outbound_intr_status);
146
147 return 1; 147 return 1;
148} 148}
149 149
@@ -385,7 +385,7 @@ static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
385int 385int
386megasas_alloc_cmds_fusion(struct megasas_instance *instance) 386megasas_alloc_cmds_fusion(struct megasas_instance *instance)
387{ 387{
388 int i, j; 388 int i, j, count;
389 u32 max_cmd, io_frames_sz; 389 u32 max_cmd, io_frames_sz;
390 struct fusion_context *fusion; 390 struct fusion_context *fusion;
391 struct megasas_cmd_fusion *cmd; 391 struct megasas_cmd_fusion *cmd;
@@ -409,9 +409,10 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
409 goto fail_req_desc; 409 goto fail_req_desc;
410 } 410 }
411 411
412 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
412 fusion->reply_frames_desc_pool = 413 fusion->reply_frames_desc_pool =
413 pci_pool_create("reply_frames pool", instance->pdev, 414 pci_pool_create("reply_frames pool", instance->pdev,
414 fusion->reply_alloc_sz, 16, 0); 415 fusion->reply_alloc_sz * count, 16, 0);
415 416
416 if (!fusion->reply_frames_desc_pool) { 417 if (!fusion->reply_frames_desc_pool) {
417 printk(KERN_ERR "megasas; Could not allocate memory for " 418 printk(KERN_ERR "megasas; Could not allocate memory for "
@@ -430,7 +431,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
430 } 431 }
431 432
432 reply_desc = fusion->reply_frames_desc; 433 reply_desc = fusion->reply_frames_desc;
433 for (i = 0; i < fusion->reply_q_depth; i++, reply_desc++) 434 for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
434 reply_desc->Words = ULLONG_MAX; 435 reply_desc->Words = ULLONG_MAX;
435 436
436 io_frames_sz = fusion->io_frames_alloc_sz; 437 io_frames_sz = fusion->io_frames_alloc_sz;
@@ -590,7 +591,6 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
590 struct megasas_init_frame *init_frame; 591 struct megasas_init_frame *init_frame;
591 struct MPI2_IOC_INIT_REQUEST *IOCInitMessage; 592 struct MPI2_IOC_INIT_REQUEST *IOCInitMessage;
592 dma_addr_t ioc_init_handle; 593 dma_addr_t ioc_init_handle;
593 u32 context;
594 struct megasas_cmd *cmd; 594 struct megasas_cmd *cmd;
595 u8 ret; 595 u8 ret;
596 struct fusion_context *fusion; 596 struct fusion_context *fusion;
@@ -634,14 +634,13 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
634 fusion->reply_frames_desc_phys; 634 fusion->reply_frames_desc_phys;
635 IOCInitMessage->SystemRequestFrameBaseAddress = 635 IOCInitMessage->SystemRequestFrameBaseAddress =
636 fusion->io_request_frames_phys; 636 fusion->io_request_frames_phys;
637 637 /* Set to 0 for none or 1 MSI-X vectors */
638 IOCInitMessage->HostMSIxVectors = (instance->msix_vectors > 0 ?
639 instance->msix_vectors : 0);
638 init_frame = (struct megasas_init_frame *)cmd->frame; 640 init_frame = (struct megasas_init_frame *)cmd->frame;
639 memset(init_frame, 0, MEGAMFI_FRAME_SIZE); 641 memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
640 642
641 frame_hdr = &cmd->frame->hdr; 643 frame_hdr = &cmd->frame->hdr;
642 context = init_frame->context;
643 init_frame->context = context;
644
645 frame_hdr->cmd_status = 0xFF; 644 frame_hdr->cmd_status = 0xFF;
646 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; 645 frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
647 646
@@ -881,7 +880,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
881 struct megasas_register_set __iomem *reg_set; 880 struct megasas_register_set __iomem *reg_set;
882 struct fusion_context *fusion; 881 struct fusion_context *fusion;
883 u32 max_cmd; 882 u32 max_cmd;
884 int i = 0; 883 int i = 0, count;
885 884
886 fusion = instance->ctrl_context; 885 fusion = instance->ctrl_context;
887 886
@@ -933,7 +932,9 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
933 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - 932 (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
934 sizeof(union MPI2_SGE_IO_UNION))/16; 933 sizeof(union MPI2_SGE_IO_UNION))/16;
935 934
936 fusion->last_reply_idx = 0; 935 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
936 for (i = 0 ; i < count; i++)
937 fusion->last_reply_idx[i] = 0;
937 938
938 /* 939 /*
939 * Allocate memory for descriptors 940 * Allocate memory for descriptors
@@ -1043,7 +1044,9 @@ map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status)
1043 case MFI_STAT_DEVICE_NOT_FOUND: 1044 case MFI_STAT_DEVICE_NOT_FOUND:
1044 cmd->scmd->result = DID_BAD_TARGET << 16; 1045 cmd->scmd->result = DID_BAD_TARGET << 16;
1045 break; 1046 break;
1046 1047 case MFI_STAT_CONFIG_SEQ_MISMATCH:
1048 cmd->scmd->result = DID_IMM_RETRY << 16;
1049 break;
1047 default: 1050 default:
1048 printk(KERN_DEBUG "megasas: FW status %#x\n", status); 1051 printk(KERN_DEBUG "megasas: FW status %#x\n", status);
1049 cmd->scmd->result = DID_ERROR << 16; 1052 cmd->scmd->result = DID_ERROR << 16;
@@ -1066,14 +1069,17 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
1066 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr, 1069 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
1067 struct megasas_cmd_fusion *cmd) 1070 struct megasas_cmd_fusion *cmd)
1068{ 1071{
1069 int i, sg_processed; 1072 int i, sg_processed, sge_count;
1070 int sge_count, sge_idx;
1071 struct scatterlist *os_sgl; 1073 struct scatterlist *os_sgl;
1072 struct fusion_context *fusion; 1074 struct fusion_context *fusion;
1073 1075
1074 fusion = instance->ctrl_context; 1076 fusion = instance->ctrl_context;
1075 1077
1076 cmd->io_request->ChainOffset = 0; 1078 if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) {
1079 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
1080 sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
1081 sgl_ptr_end->Flags = 0;
1082 }
1077 1083
1078 sge_count = scsi_dma_map(scp); 1084 sge_count = scsi_dma_map(scp);
1079 1085
@@ -1082,16 +1088,14 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
1082 if (sge_count > instance->max_num_sge || !sge_count) 1088 if (sge_count > instance->max_num_sge || !sge_count)
1083 return sge_count; 1089 return sge_count;
1084 1090
1085 if (sge_count > fusion->max_sge_in_main_msg) {
1086 /* One element to store the chain info */
1087 sge_idx = fusion->max_sge_in_main_msg - 1;
1088 } else
1089 sge_idx = sge_count;
1090
1091 scsi_for_each_sg(scp, os_sgl, sge_count, i) { 1091 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1092 sgl_ptr->Length = sg_dma_len(os_sgl); 1092 sgl_ptr->Length = sg_dma_len(os_sgl);
1093 sgl_ptr->Address = sg_dma_address(os_sgl); 1093 sgl_ptr->Address = sg_dma_address(os_sgl);
1094 sgl_ptr->Flags = 0; 1094 sgl_ptr->Flags = 0;
1095 if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) {
1096 if (i == sge_count - 1)
1097 sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
1098 }
1095 sgl_ptr++; 1099 sgl_ptr++;
1096 1100
1097 sg_processed = i + 1; 1101 sg_processed = i + 1;
@@ -1100,13 +1104,30 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
1100 (sge_count > fusion->max_sge_in_main_msg)) { 1104 (sge_count > fusion->max_sge_in_main_msg)) {
1101 1105
1102 struct MPI25_IEEE_SGE_CHAIN64 *sg_chain; 1106 struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
1103 cmd->io_request->ChainOffset = 1107 if (instance->pdev->device ==
1104 fusion->chain_offset_io_request; 1108 PCI_DEVICE_ID_LSI_INVADER) {
1109 if ((cmd->io_request->IoFlags &
1110 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1111 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
1112 cmd->io_request->ChainOffset =
1113 fusion->
1114 chain_offset_io_request;
1115 else
1116 cmd->io_request->ChainOffset = 0;
1117 } else
1118 cmd->io_request->ChainOffset =
1119 fusion->chain_offset_io_request;
1120
1105 sg_chain = sgl_ptr; 1121 sg_chain = sgl_ptr;
1106 /* Prepare chain element */ 1122 /* Prepare chain element */
1107 sg_chain->NextChainOffset = 0; 1123 sg_chain->NextChainOffset = 0;
1108 sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | 1124 if (instance->pdev->device ==
1109 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR); 1125 PCI_DEVICE_ID_LSI_INVADER)
1126 sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
1127 else
1128 sg_chain->Flags =
1129 (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1130 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
1110 sg_chain->Length = (sizeof(union MPI2_SGE_IO_UNION) 1131 sg_chain->Length = (sizeof(union MPI2_SGE_IO_UNION)
1111 *(sge_count - sg_processed)); 1132 *(sge_count - sg_processed));
1112 sg_chain->Address = cmd->sg_frame_phys_addr; 1133 sg_chain->Address = cmd->sg_frame_phys_addr;
@@ -1399,11 +1420,18 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1399 io_request->RaidContext.regLockFlags = 0; 1420 io_request->RaidContext.regLockFlags = 0;
1400 fp_possible = 0; 1421 fp_possible = 0;
1401 } else { 1422 } else {
1402 if (MR_BuildRaidContext(&io_info, &io_request->RaidContext, 1423 if (MR_BuildRaidContext(instance, &io_info,
1424 &io_request->RaidContext,
1403 local_map_ptr)) 1425 local_map_ptr))
1404 fp_possible = io_info.fpOkForIo; 1426 fp_possible = io_info.fpOkForIo;
1405 } 1427 }
1406 1428
 1429			/* Use smp_processor_id() for now, until cmd->request->cpu holds
 1430			   the CPU id by default rather than the CPU group id; otherwise
 1431			   not all MSI-X queues would be utilized */
1432 cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
1433 smp_processor_id() % instance->msix_vectors : 0;
1434
1407 if (fp_possible) { 1435 if (fp_possible) {
1408 megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp, 1436 megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
1409 local_map_ptr, start_lba_lo); 1437 local_map_ptr, start_lba_lo);
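Queue selection on the submit side is a plain modulo over the enabled vectors, so completions return on a queue tied to (roughly) the submitting CPU: with 4 vectors, a command issued on CPU 6 posts to reply queue 6 % 4 = 2. As a one-off illustrative helper, not part of the driver:

    static inline u32 pick_reply_queue(struct megasas_instance *instance)
    {
            return instance->msix_vectors ?
                   smp_processor_id() % instance->msix_vectors : 0;
    }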
@@ -1412,6 +1440,20 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1412 cmd->request_desc->SCSIIO.RequestFlags = 1440 cmd->request_desc->SCSIIO.RequestFlags =
1413 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY 1441 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY
1414 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1442 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1443 if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) {
1444 if (io_request->RaidContext.regLockFlags ==
1445 REGION_TYPE_UNUSED)
1446 cmd->request_desc->SCSIIO.RequestFlags =
1447 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1448 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1449 io_request->RaidContext.Type = MPI2_TYPE_CUDA;
1450 io_request->RaidContext.nseg = 0x1;
1451 io_request->IoFlags |=
1452 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1453 io_request->RaidContext.regLockFlags |=
1454 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1455 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1456 }
1415 if ((fusion->load_balance_info[device_id].loadBalanceFlag) && 1457 if ((fusion->load_balance_info[device_id].loadBalanceFlag) &&
1416 (io_info.isRead)) { 1458 (io_info.isRead)) {
1417 io_info.devHandle = 1459 io_info.devHandle =
@@ -1426,11 +1468,23 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
1426 } else { 1468 } else {
1427 io_request->RaidContext.timeoutValue = 1469 io_request->RaidContext.timeoutValue =
1428 local_map_ptr->raidMap.fpPdIoTimeoutSec; 1470 local_map_ptr->raidMap.fpPdIoTimeoutSec;
1429 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
1430 io_request->DevHandle = device_id;
1431 cmd->request_desc->SCSIIO.RequestFlags = 1471 cmd->request_desc->SCSIIO.RequestFlags =
1432 (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 1472 (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
1433 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1473 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1474 if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) {
1475 if (io_request->RaidContext.regLockFlags ==
1476 REGION_TYPE_UNUSED)
1477 cmd->request_desc->SCSIIO.RequestFlags =
1478 (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1479 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1480 io_request->RaidContext.Type = MPI2_TYPE_CUDA;
1481 io_request->RaidContext.regLockFlags |=
1482 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1483 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1484 io_request->RaidContext.nseg = 0x1;
1485 }
1486 io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
1487 io_request->DevHandle = device_id;
1434 } /* Not FP */ 1488 } /* Not FP */
1435} 1489}
1436 1490
@@ -1513,8 +1567,10 @@ megasas_build_io_fusion(struct megasas_instance *instance,
1513 io_request->EEDPFlags = 0; 1567 io_request->EEDPFlags = 0;
1514 io_request->Control = 0; 1568 io_request->Control = 0;
1515 io_request->EEDPBlockSize = 0; 1569 io_request->EEDPBlockSize = 0;
1516 io_request->IoFlags = 0; 1570 io_request->ChainOffset = 0;
1517 io_request->RaidContext.RAIDFlags = 0; 1571 io_request->RaidContext.RAIDFlags = 0;
1572 io_request->RaidContext.Type = 0;
1573 io_request->RaidContext.nseg = 0;
1518 1574
1519 memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len); 1575 memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len);
1520 /* 1576 /*
@@ -1612,7 +1668,6 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
1612 1668
1613 req_desc->Words = 0; 1669 req_desc->Words = 0;
1614 cmd->request_desc = req_desc; 1670 cmd->request_desc = req_desc;
1615 cmd->request_desc->Words = 0;
1616 1671
1617 if (megasas_build_io_fusion(instance, scmd, cmd)) { 1672 if (megasas_build_io_fusion(instance, scmd, cmd)) {
1618 megasas_return_cmd_fusion(instance, cmd); 1673 megasas_return_cmd_fusion(instance, cmd);
@@ -1647,7 +1702,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
1647 * Completes all commands that is in reply descriptor queue 1702 * Completes all commands that is in reply descriptor queue
1648 */ 1703 */
1649int 1704int
1650complete_cmd_fusion(struct megasas_instance *instance) 1705complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
1651{ 1706{
1652 union MPI2_REPLY_DESCRIPTORS_UNION *desc; 1707 union MPI2_REPLY_DESCRIPTORS_UNION *desc;
1653 struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc; 1708 struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
@@ -1667,7 +1722,9 @@ complete_cmd_fusion(struct megasas_instance *instance)
1667 return IRQ_HANDLED; 1722 return IRQ_HANDLED;
1668 1723
1669 desc = fusion->reply_frames_desc; 1724 desc = fusion->reply_frames_desc;
1670 desc += fusion->last_reply_idx; 1725 desc += ((MSIxIndex * fusion->reply_alloc_sz)/
1726 sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)) +
1727 fusion->last_reply_idx[MSIxIndex];
1671 1728
1672 reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc; 1729 reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
1673 1730
@@ -1740,16 +1797,19 @@ complete_cmd_fusion(struct megasas_instance *instance)
1740 break; 1797 break;
1741 } 1798 }
1742 1799
1743 fusion->last_reply_idx++; 1800 fusion->last_reply_idx[MSIxIndex]++;
1744 if (fusion->last_reply_idx >= fusion->reply_q_depth) 1801 if (fusion->last_reply_idx[MSIxIndex] >=
1745 fusion->last_reply_idx = 0; 1802 fusion->reply_q_depth)
1803 fusion->last_reply_idx[MSIxIndex] = 0;
1746 1804
1747 desc->Words = ULLONG_MAX; 1805 desc->Words = ULLONG_MAX;
1748 num_completed++; 1806 num_completed++;
1749 1807
1750 /* Get the next reply descriptor */ 1808 /* Get the next reply descriptor */
1751 if (!fusion->last_reply_idx) 1809 if (!fusion->last_reply_idx[MSIxIndex])
1752 desc = fusion->reply_frames_desc; 1810 desc = fusion->reply_frames_desc +
1811 ((MSIxIndex * fusion->reply_alloc_sz)/
1812 sizeof(union MPI2_REPLY_DESCRIPTORS_UNION));
1753 else 1813 else
1754 desc++; 1814 desc++;
1755 1815
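This indexing works because megasas_alloc_cmds_fusion() (earlier in the patch) sizes the pool as reply_alloc_sz * count: one contiguous reply_alloc_sz-byte slice per MSI-X queue, with last_reply_idx[] now tracking a consumer index per queue. The address arithmetic, lifted into a standalone sketch using the fields from the diff:

    static union MPI2_REPLY_DESCRIPTORS_UNION *
    next_reply_desc(struct fusion_context *fusion, u32 k)
    {
            u32 slice = (k * fusion->reply_alloc_sz) /
                        sizeof(union MPI2_REPLY_DESCRIPTORS_UNION);

            return fusion->reply_frames_desc + slice +
                   fusion->last_reply_idx[k];
    }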
@@ -1769,7 +1829,7 @@ complete_cmd_fusion(struct megasas_instance *instance)
1769 return IRQ_NONE; 1829 return IRQ_NONE;
1770 1830
1771 wmb(); 1831 wmb();
1772 writel(fusion->last_reply_idx, 1832 writel((MSIxIndex << 24) | fusion->last_reply_idx[MSIxIndex],
1773 &instance->reg_set->reply_post_host_index); 1833 &instance->reg_set->reply_post_host_index);
1774 megasas_check_and_restore_queue_depth(instance); 1834 megasas_check_and_restore_queue_depth(instance);
1775 return IRQ_HANDLED; 1835 return IRQ_HANDLED;
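The doorbell write just above encodes the queue being acknowledged in the top byte of reply_post_host_index (the mpt2sas hunks below name the same layout MPI2_RPHI_MSIX_INDEX_SHIFT, i.e. 24) and the new consumer index in the low bits; acknowledging index 5 on queue 2 therefore writes 0x02000005. As a hypothetical helper:

    static void ack_reply_queue(struct megasas_register_set __iomem *regs,
                                u32 k, u32 idx)
    {
            writel((k << 24) | idx, &regs->reply_post_host_index);
    }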
@@ -1787,6 +1847,9 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
1787 struct megasas_instance *instance = 1847 struct megasas_instance *instance =
1788 (struct megasas_instance *)instance_addr; 1848 (struct megasas_instance *)instance_addr;
1789 unsigned long flags; 1849 unsigned long flags;
1850 u32 count, MSIxIndex;
1851
1852 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
1790 1853
 1791 /* If we have already declared adapter dead, do not complete cmds */ 1854
1792 spin_lock_irqsave(&instance->hba_lock, flags); 1855 spin_lock_irqsave(&instance->hba_lock, flags);
@@ -1797,7 +1860,8 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
1797 spin_unlock_irqrestore(&instance->hba_lock, flags); 1860 spin_unlock_irqrestore(&instance->hba_lock, flags);
1798 1861
1799 spin_lock_irqsave(&instance->completion_lock, flags); 1862 spin_lock_irqsave(&instance->completion_lock, flags);
1800 complete_cmd_fusion(instance); 1863 for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
1864 complete_cmd_fusion(instance, MSIxIndex);
1801 spin_unlock_irqrestore(&instance->completion_lock, flags); 1865 spin_unlock_irqrestore(&instance->completion_lock, flags);
1802} 1866}
1803 1867
@@ -1806,20 +1870,24 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
1806 */ 1870 */
1807irqreturn_t megasas_isr_fusion(int irq, void *devp) 1871irqreturn_t megasas_isr_fusion(int irq, void *devp)
1808{ 1872{
1809 struct megasas_instance *instance = (struct megasas_instance *)devp; 1873 struct megasas_irq_context *irq_context = devp;
1874 struct megasas_instance *instance = irq_context->instance;
1810 u32 mfiStatus, fw_state; 1875 u32 mfiStatus, fw_state;
1811 1876
1812 if (!instance->msi_flag) { 1877 if (!instance->msix_vectors) {
1813 mfiStatus = instance->instancet->clear_intr(instance->reg_set); 1878 mfiStatus = instance->instancet->clear_intr(instance->reg_set);
1814 if (!mfiStatus) 1879 if (!mfiStatus)
1815 return IRQ_NONE; 1880 return IRQ_NONE;
1816 } 1881 }
1817 1882
1818 /* If we are resetting, bail */ 1883 /* If we are resetting, bail */
1819 if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) 1884 if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) {
1885 instance->instancet->clear_intr(instance->reg_set);
1820 return IRQ_HANDLED; 1886 return IRQ_HANDLED;
1887 }
1821 1888
1822 if (!complete_cmd_fusion(instance)) { 1889 if (!complete_cmd_fusion(instance, irq_context->MSIxIndex)) {
1890 instance->instancet->clear_intr(instance->reg_set);
1823 /* If we didn't complete any commands, check for FW fault */ 1891 /* If we didn't complete any commands, check for FW fault */
1824 fw_state = instance->instancet->read_fw_status_reg( 1892 fw_state = instance->instancet->read_fw_status_reg(
1825 instance->reg_set) & MFI_STATE_MASK; 1893 instance->reg_set) & MFI_STATE_MASK;
@@ -1866,6 +1934,14 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
1866 1934
1867 fusion = instance->ctrl_context; 1935 fusion = instance->ctrl_context;
1868 io_req = cmd->io_request; 1936 io_req = cmd->io_request;
1937
1938 if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) {
1939 struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
1940 (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
1941 sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
1942 sgl_ptr_end->Flags = 0;
1943 }
1944
1869 mpi25_ieee_chain = 1945 mpi25_ieee_chain =
1870 (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain; 1946 (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
1871 1947
@@ -1928,15 +2004,12 @@ megasas_issue_dcmd_fusion(struct megasas_instance *instance,
1928 struct megasas_cmd *cmd) 2004 struct megasas_cmd *cmd)
1929{ 2005{
1930 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; 2006 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
1931 union desc_value d_val;
1932 2007
1933 req_desc = build_mpt_cmd(instance, cmd); 2008 req_desc = build_mpt_cmd(instance, cmd);
1934 if (!req_desc) { 2009 if (!req_desc) {
1935 printk(KERN_ERR "Couldn't issue MFI pass thru cmd\n"); 2010 printk(KERN_ERR "Couldn't issue MFI pass thru cmd\n");
1936 return; 2011 return;
1937 } 2012 }
1938 d_val.word = req_desc->Words;
1939
1940 instance->instancet->fire_cmd(instance, req_desc->u.low, 2013 instance->instancet->fire_cmd(instance, req_desc->u.low,
1941 req_desc->u.high, instance->reg_set); 2014 req_desc->u.high, instance->reg_set);
1942} 2015}
@@ -2029,14 +2102,16 @@ out:
2029 2102
2030void megasas_reset_reply_desc(struct megasas_instance *instance) 2103void megasas_reset_reply_desc(struct megasas_instance *instance)
2031{ 2104{
2032 int i; 2105 int i, count;
2033 struct fusion_context *fusion; 2106 struct fusion_context *fusion;
2034 union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc; 2107 union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2035 2108
2036 fusion = instance->ctrl_context; 2109 fusion = instance->ctrl_context;
2037 fusion->last_reply_idx = 0; 2110 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
2111 for (i = 0 ; i < count ; i++)
2112 fusion->last_reply_idx[i] = 0;
2038 reply_desc = fusion->reply_frames_desc; 2113 reply_desc = fusion->reply_frames_desc;
2039 for (i = 0 ; i < fusion->reply_q_depth; i++, reply_desc++) 2114 for (i = 0 ; i < fusion->reply_q_depth * count; i++, reply_desc++)
2040 reply_desc->Words = ULLONG_MAX; 2115 reply_desc->Words = ULLONG_MAX;
2041} 2116}
2042 2117
@@ -2057,8 +2132,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
2057 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { 2132 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
2058 printk(KERN_WARNING "megaraid_sas: Hardware critical error, " 2133 printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
2059 "returning FAILED.\n"); 2134 "returning FAILED.\n");
2060 retval = FAILED; 2135 return FAILED;
2061 goto out;
2062 } 2136 }
2063 2137
2064 mutex_lock(&instance->reset_mutex); 2138 mutex_lock(&instance->reset_mutex);
@@ -2173,7 +2247,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
2173 } 2247 }
2174 2248
2175 /* Wait for FW to become ready */ 2249 /* Wait for FW to become ready */
2176 if (megasas_transition_to_ready(instance)) { 2250 if (megasas_transition_to_ready(instance, 1)) {
2177 printk(KERN_WARNING "megaraid_sas: Failed to " 2251 printk(KERN_WARNING "megaraid_sas: Failed to "
2178 "transition controller to ready.\n"); 2252 "transition controller to ready.\n");
2179 continue; 2253 continue;
@@ -2186,6 +2260,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
2186 continue; 2260 continue;
2187 } 2261 }
2188 2262
2263 clear_bit(MEGASAS_FUSION_IN_RESET,
2264 &instance->reset_flags);
2189 instance->instancet->enable_intr(instance->reg_set); 2265 instance->instancet->enable_intr(instance->reg_set);
2190 instance->adprecovery = MEGASAS_HBA_OPERATIONAL; 2266 instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
2191 2267
@@ -2247,6 +2323,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost)
2247 megaraid_sas_kill_hba(instance); 2323 megaraid_sas_kill_hba(instance);
2248 retval = FAILED; 2324 retval = FAILED;
2249 } else { 2325 } else {
2326 clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
2250 instance->instancet->enable_intr(instance->reg_set); 2327 instance->instancet->enable_intr(instance->reg_set);
2251 instance->adprecovery = MEGASAS_HBA_OPERATIONAL; 2328 instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
2252 } 2329 }
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 82b577a72c8..088c9f91da9 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -43,6 +43,15 @@
43#define HOST_DIAG_WRITE_ENABLE 0x80 43#define HOST_DIAG_WRITE_ENABLE 0x80
44#define HOST_DIAG_RESET_ADAPTER 0x4 44#define HOST_DIAG_RESET_ADAPTER 0x4
45#define MEGASAS_FUSION_MAX_RESET_TRIES 3 45#define MEGASAS_FUSION_MAX_RESET_TRIES 3
46#define MAX_MSIX_QUEUES_FUSION 16
47
48/* Invader defines */
49#define MPI2_TYPE_CUDA 0x2
50#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH 0x4000
51#define MR_RL_FLAGS_GRANT_DESTINATION_CPU0 0x00
52#define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10
53#define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80
54#define MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8
46 55
47/* T10 PI defines */ 56/* T10 PI defines */
48#define MR_PROT_INFO_TYPE_CONTROLLER 0x8 57#define MR_PROT_INFO_TYPE_CONTROLLER 0x8
@@ -70,7 +79,7 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
70 */ 79 */
71#define MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 0x7 80#define MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 0x7
72#define MEGASAS_REQ_DESCRIPT_FLAGS_MFA 0x1 81#define MEGASAS_REQ_DESCRIPT_FLAGS_MFA 0x1
73 82#define MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK 0x2
74#define MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT 1 83#define MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT 1
75 84
76#define MEGASAS_FP_CMD_LEN 16 85#define MEGASAS_FP_CMD_LEN 16
@@ -82,7 +91,9 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
82 */ 91 */
83 92
84struct RAID_CONTEXT { 93struct RAID_CONTEXT {
85 u16 resvd0; 94 u8 Type:4;
95 u8 nseg:4;
96 u8 resvd0;
86 u16 timeoutValue; 97 u16 timeoutValue;
87 u8 regLockFlags; 98 u8 regLockFlags;
88 u8 resvd1; 99 u8 resvd1;
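The two new 4-bit fields are carved out of the previously reserved first u16, so the offsets of everything after them (and the overall RAID_CONTEXT size) are unchanged. A before/after layout sketch of the first four bytes, assuming the little-endian bitfield ordering the driver targets:

    struct raid_ctx_head_old {
            u16 resvd0;              /* two reserved bytes             */
            u16 timeoutValue;        /* offset 2                       */
    };

    struct raid_ctx_head_new {
            u8  Type:4;              /* e.g. MPI2_TYPE_CUDA on Invader */
            u8  nseg:4;              /* segment count, set to 1        */
            u8  resvd0;              /* byte 1 still reserved          */
            u16 timeoutValue;        /* offset 2, unchanged            */
    };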
@@ -527,7 +538,7 @@ struct MR_LD_RAID {
527 u8 ldState; 538 u8 ldState;
528 u8 regTypeReqOnWrite; 539 u8 regTypeReqOnWrite;
529 u8 modFactor; 540 u8 modFactor;
530 u8 reserved2[1]; 541 u8 regTypeReqOnRead;
531 u16 seqNum; 542 u16 seqNum;
532 543
533 struct { 544 struct {
@@ -663,7 +674,7 @@ struct fusion_context {
663 union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc; 674 union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc;
664 struct dma_pool *reply_frames_desc_pool; 675 struct dma_pool *reply_frames_desc_pool;
665 676
666 u16 last_reply_idx; 677 u16 last_reply_idx[MAX_MSIX_QUEUES_FUSION];
667 678
668 u32 reply_q_depth; 679 u32 reply_q_depth;
669 u32 request_alloc_sz; 680 u32 request_alloc_sz;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 6825772cfd6..81209ca8727 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -833,25 +833,31 @@ union reply_descriptor {
833static irqreturn_t 833static irqreturn_t
834_base_interrupt(int irq, void *bus_id) 834_base_interrupt(int irq, void *bus_id)
835{ 835{
836 struct adapter_reply_queue *reply_q = bus_id;
836 union reply_descriptor rd; 837 union reply_descriptor rd;
837 u32 completed_cmds; 838 u32 completed_cmds;
838 u8 request_desript_type; 839 u8 request_desript_type;
839 u16 smid; 840 u16 smid;
840 u8 cb_idx; 841 u8 cb_idx;
841 u32 reply; 842 u32 reply;
842 u8 msix_index; 843 u8 msix_index = reply_q->msix_index;
843 struct MPT2SAS_ADAPTER *ioc = bus_id; 844 struct MPT2SAS_ADAPTER *ioc = reply_q->ioc;
844 Mpi2ReplyDescriptorsUnion_t *rpf; 845 Mpi2ReplyDescriptorsUnion_t *rpf;
845 u8 rc; 846 u8 rc;
846 847
847 if (ioc->mask_interrupts) 848 if (ioc->mask_interrupts)
848 return IRQ_NONE; 849 return IRQ_NONE;
849 850
850 rpf = &ioc->reply_post_free[ioc->reply_post_host_index]; 851 if (!atomic_add_unless(&reply_q->busy, 1, 1))
852 return IRQ_NONE;
853
854 rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
851 request_desript_type = rpf->Default.ReplyFlags 855 request_desript_type = rpf->Default.ReplyFlags
852 & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 856 & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
853 if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) 857 if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
858 atomic_dec(&reply_q->busy);
854 return IRQ_NONE; 859 return IRQ_NONE;
860 }
855 861
856 completed_cmds = 0; 862 completed_cmds = 0;
857 cb_idx = 0xFF; 863 cb_idx = 0xFF;
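The new busy counter is a lock-free reentrancy latch: atomic_add_unless(&v, 1, 1) increments only when the value is not already 1 and returns non-zero iff it did, so exactly one context drains a given reply queue at a time. That matters because mpt2sas_base_flush_reply_queues() (added below) calls _base_interrupt() from outside the hard-IRQ path. The shape of the guard, with the decrement mirrored on every exit:

    if (!atomic_add_unless(&reply_q->busy, 1, 1))
            return IRQ_NONE;         /* queue already being serviced */

    /* ... walk this queue's reply descriptors ... */

    atomic_dec(&reply_q->busy);      /* release the latch */
    return IRQ_HANDLED;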
@@ -860,9 +866,7 @@ _base_interrupt(int irq, void *bus_id)
860 if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX) 866 if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
861 goto out; 867 goto out;
862 reply = 0; 868 reply = 0;
863 cb_idx = 0xFF;
864 smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1); 869 smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
865 msix_index = rpf->Default.MSIxIndex;
866 if (request_desript_type == 870 if (request_desript_type ==
867 MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) { 871 MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
868 reply = le32_to_cpu 872 reply = le32_to_cpu
@@ -906,32 +910,86 @@ _base_interrupt(int irq, void *bus_id)
906 next: 910 next:
907 911
908 rpf->Words = cpu_to_le64(ULLONG_MAX); 912 rpf->Words = cpu_to_le64(ULLONG_MAX);
909 ioc->reply_post_host_index = (ioc->reply_post_host_index == 913 reply_q->reply_post_host_index =
914 (reply_q->reply_post_host_index ==
910 (ioc->reply_post_queue_depth - 1)) ? 0 : 915 (ioc->reply_post_queue_depth - 1)) ? 0 :
911 ioc->reply_post_host_index + 1; 916 reply_q->reply_post_host_index + 1;
912 request_desript_type = 917 request_desript_type =
913 ioc->reply_post_free[ioc->reply_post_host_index].Default. 918 reply_q->reply_post_free[reply_q->reply_post_host_index].
914 ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 919 Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
915 completed_cmds++; 920 completed_cmds++;
916 if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) 921 if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
917 goto out; 922 goto out;
918 if (!ioc->reply_post_host_index) 923 if (!reply_q->reply_post_host_index)
919 rpf = ioc->reply_post_free; 924 rpf = reply_q->reply_post_free;
920 else 925 else
921 rpf++; 926 rpf++;
922 } while (1); 927 } while (1);
923 928
924 out: 929 out:
925 930
926 if (!completed_cmds) 931 if (!completed_cmds) {
932 atomic_dec(&reply_q->busy);
927 return IRQ_NONE; 933 return IRQ_NONE;
928 934 }
929 wmb(); 935 wmb();
930 writel(ioc->reply_post_host_index, &ioc->chip->ReplyPostHostIndex); 936 if (ioc->is_warpdrive) {
937 writel(reply_q->reply_post_host_index,
938 ioc->reply_post_host_index[msix_index]);
939 atomic_dec(&reply_q->busy);
940 return IRQ_HANDLED;
941 }
942 writel(reply_q->reply_post_host_index | (msix_index <<
943 MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex);
944 atomic_dec(&reply_q->busy);
931 return IRQ_HANDLED; 945 return IRQ_HANDLED;
932} 946}
933 947
934/** 948/**
 949 * _base_is_controller_msix_enabled - does the controller support multi-reply queues
950 * @ioc: per adapter object
951 *
952 */
953static inline int
954_base_is_controller_msix_enabled(struct MPT2SAS_ADAPTER *ioc)
955{
956 return (ioc->facts.IOCCapabilities &
957 MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
958}
959
960/**
961 * mpt2sas_base_flush_reply_queues - flushing the MSIX reply queues
962 * @ioc: per adapter object
 963 * Context: ISR context
964 *
965 * Called when a Task Management request has completed. We want
966 * to flush the other reply queues so all the outstanding IO has been
 967 * completed back to the OS before we process the TM completion.
968 *
969 * Return nothing.
970 */
971void
972mpt2sas_base_flush_reply_queues(struct MPT2SAS_ADAPTER *ioc)
973{
974 struct adapter_reply_queue *reply_q;
975
976 /* If MSIX capability is turned off
977 * then multi-queues are not enabled
978 */
979 if (!_base_is_controller_msix_enabled(ioc))
980 return;
981
982 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
983 if (ioc->shost_recovery)
984 return;
985 /* TMs are on msix_index == 0 */
986 if (reply_q->msix_index == 0)
987 continue;
988 _base_interrupt(reply_q->vector, (void *)reply_q);
989 }
990}
991
992/**
935 * mpt2sas_base_release_callback_handler - clear interrupt callback handler 993 * mpt2sas_base_release_callback_handler - clear interrupt callback handler
936 * @cb_idx: callback index 994 * @cb_idx: callback index
937 * 995 *
@@ -1081,74 +1139,171 @@ _base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
1081} 1139}
1082 1140
1083/** 1141/**
 1084 * _base_save_msix_table - backup msix vector table 1142 * _base_check_enable_msix - checks MSIX capability.
1085 * @ioc: per adapter object 1143 * @ioc: per adapter object
1086 * 1144 *
1087 * This address an errata where diag reset clears out the table 1145 * Check to see if card is capable of MSIX, and set number
1146 * of available msix vectors
1088 */ 1147 */
1089static void 1148static int
1090_base_save_msix_table(struct MPT2SAS_ADAPTER *ioc) 1149_base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
1091{ 1150{
1092 int i; 1151 int base;
1152 u16 message_control;
1093 1153
1094 if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
1095 return;
1096 1154
1097 for (i = 0; i < ioc->msix_vector_count; i++) 1155 base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
1098 ioc->msix_table_backup[i] = ioc->msix_table[i]; 1156 if (!base) {
1157 dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not "
1158 "supported\n", ioc->name));
1159 return -EINVAL;
1160 }
1161
1162 /* get msix vector count */
1163 /* NUMA_IO not supported for older controllers */
1164 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
1165 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
1166 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
1167 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
1168 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
1169 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
1170 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
1171 ioc->msix_vector_count = 1;
1172 else {
1173 pci_read_config_word(ioc->pdev, base + 2, &message_control);
1174 ioc->msix_vector_count = (message_control & 0x3FF) + 1;
1175 }
1176 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
1177 "vector_count(%d)\n", ioc->name, ioc->msix_vector_count));
1178
1179 return 0;
1099} 1180}
1100 1181
1101/** 1182/**
1102 * _base_restore_msix_table - this restores the msix vector table 1183 * _base_free_irq - free irq
1103 * @ioc: per adapter object 1184 * @ioc: per adapter object
1104 * 1185 *
1186 * Freeing respective reply_queue from the list.
1105 */ 1187 */
1106static void 1188static void
1107_base_restore_msix_table(struct MPT2SAS_ADAPTER *ioc) 1189_base_free_irq(struct MPT2SAS_ADAPTER *ioc)
1108{ 1190{
1109 int i; 1191 struct adapter_reply_queue *reply_q, *next;
1110 1192
1111 if (!ioc->msix_enable || ioc->msix_table_backup == NULL) 1193 if (list_empty(&ioc->reply_queue_list))
1112 return; 1194 return;
1113 1195
1114 for (i = 0; i < ioc->msix_vector_count; i++) 1196 list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1115 ioc->msix_table[i] = ioc->msix_table_backup[i]; 1197 list_del(&reply_q->list);
1198 synchronize_irq(reply_q->vector);
1199 free_irq(reply_q->vector, reply_q);
1200 kfree(reply_q);
1201 }
1116} 1202}
1117 1203
1118/** 1204/**
1119 * _base_check_enable_msix - checks MSIX capabable. 1205 * _base_request_irq - request irq
1120 * @ioc: per adapter object 1206 * @ioc: per adapter object
1207 * @index: msix index into vector table
1208 * @vector: irq vector
1121 * 1209 *
1122 * Check to see if card is capable of MSIX, and set number 1210 * Inserting respective reply_queue into the list.
1123 * of available msix vectors
1124 */ 1211 */
1125static int 1212static int
1126_base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc) 1213_base_request_irq(struct MPT2SAS_ADAPTER *ioc, u8 index, u32 vector)
1127{ 1214{
1128 int base; 1215 struct adapter_reply_queue *reply_q;
1129 u16 message_control; 1216 int r;
1130 u32 msix_table_offset;
1131 1217
1132 base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX); 1218 reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
1133 if (!base) { 1219 if (!reply_q) {
1134 dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not " 1220 printk(MPT2SAS_ERR_FMT "unable to allocate memory %d!\n",
1135 "supported\n", ioc->name)); 1221 ioc->name, (int)sizeof(struct adapter_reply_queue));
1136 return -EINVAL; 1222 return -ENOMEM;
1223 }
1224 reply_q->ioc = ioc;
1225 reply_q->msix_index = index;
1226 reply_q->vector = vector;
1227 atomic_set(&reply_q->busy, 0);
1228 if (ioc->msix_enable)
1229 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
1230 MPT2SAS_DRIVER_NAME, ioc->id, index);
1231 else
1232 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
1233 MPT2SAS_DRIVER_NAME, ioc->id);
1234 r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
1235 reply_q);
1236 if (r) {
1237 printk(MPT2SAS_ERR_FMT "unable to allocate interrupt %d!\n",
1238 reply_q->name, vector);
1239 kfree(reply_q);
1240 return -EBUSY;
1137 } 1241 }
1138 1242
1139 /* get msix vector count */ 1243 INIT_LIST_HEAD(&reply_q->list);
1140 pci_read_config_word(ioc->pdev, base + 2, &message_control); 1244 list_add_tail(&reply_q->list, &ioc->reply_queue_list);
1141 ioc->msix_vector_count = (message_control & 0x3FF) + 1; 1245 return 0;
1246}
1142 1247
1143 /* get msix table */ 1248/**
1144 pci_read_config_dword(ioc->pdev, base + 4, &msix_table_offset); 1249 * _base_assign_reply_queues - assigning msix index for each cpu
1145 msix_table_offset &= 0xFFFFFFF8; 1250 * @ioc: per adapter object
1146 ioc->msix_table = (u32 *)((void *)ioc->chip + msix_table_offset); 1251 *
 1252 * The end user would need to set the affinity via /proc/irq/#/smp_affinity
 1253 *
 1254 * It would be nice if we could call irq_set_affinity, however it is not
1255 * an exported symbol
1256 */
1257static void
1258_base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc)
1259{
1260 struct adapter_reply_queue *reply_q;
1261 int cpu_id;
1262 int cpu_grouping, loop, grouping, grouping_mod;
1147 1263
1148 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, " 1264 if (!_base_is_controller_msix_enabled(ioc))
1149 "vector_count(%d), table_offset(0x%08x), table(%p)\n", ioc->name, 1265 return;
1150 ioc->msix_vector_count, msix_table_offset, ioc->msix_table)); 1266
1151 return 0; 1267 memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
1268 /* when there are more cpus than available msix vectors,
 1269 * then group cpus together on the same irq
1270 */
1271 if (ioc->cpu_count > ioc->msix_vector_count) {
1272 grouping = ioc->cpu_count / ioc->msix_vector_count;
1273 grouping_mod = ioc->cpu_count % ioc->msix_vector_count;
1274 if (grouping < 2 || (grouping == 2 && !grouping_mod))
1275 cpu_grouping = 2;
1276 else if (grouping < 4 || (grouping == 4 && !grouping_mod))
1277 cpu_grouping = 4;
1278 else if (grouping < 8 || (grouping == 8 && !grouping_mod))
1279 cpu_grouping = 8;
1280 else
1281 cpu_grouping = 16;
1282 } else
1283 cpu_grouping = 0;
1284
1285 loop = 0;
1286 reply_q = list_entry(ioc->reply_queue_list.next,
1287 struct adapter_reply_queue, list);
1288 for_each_online_cpu(cpu_id) {
1289 if (!cpu_grouping) {
1290 ioc->cpu_msix_table[cpu_id] = reply_q->msix_index;
1291 reply_q = list_entry(reply_q->list.next,
1292 struct adapter_reply_queue, list);
1293 } else {
1294 if (loop < cpu_grouping) {
1295 ioc->cpu_msix_table[cpu_id] =
1296 reply_q->msix_index;
1297 loop++;
1298 } else {
1299 reply_q = list_entry(reply_q->list.next,
1300 struct adapter_reply_queue, list);
1301 ioc->cpu_msix_table[cpu_id] =
1302 reply_q->msix_index;
1303 loop = 1;
1304 }
1305 }
1306 }
1152} 1307}
1153 1308
1154/** 1309/**
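
The grouping heuristic above rounds the cpus-per-vector ratio up to the next power of two (capped at 16) unless the division is exact. A minimal standalone sketch of the same computation; cpu_count and msix_vector_count are illustrative stand-ins for the ioc fields, not driver API:

	/* Sketch of the cpu-grouping heuristic from _base_assign_reply_queues. */
	#include <stdio.h>

	static int pick_cpu_grouping(int cpu_count, int msix_vector_count)
	{
		int grouping, grouping_mod;

		if (cpu_count <= msix_vector_count)
			return 0;	/* enough vectors: one queue per cpu */
		grouping = cpu_count / msix_vector_count;
		grouping_mod = cpu_count % msix_vector_count;
		if (grouping < 2 || (grouping == 2 && !grouping_mod))
			return 2;
		if (grouping < 4 || (grouping == 4 && !grouping_mod))
			return 4;
		if (grouping < 8 || (grouping == 8 && !grouping_mod))
			return 8;
		return 16;
	}

	int main(void)
	{
		printf("%d\n", pick_cpu_grouping(16, 4));	/* prints 4 */
		return 0;
	}
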
@@ -1161,8 +1316,6 @@ _base_disable_msix(struct MPT2SAS_ADAPTER *ioc)
1161{ 1316{
1162 if (ioc->msix_enable) { 1317 if (ioc->msix_enable) {
1163 pci_disable_msix(ioc->pdev); 1318 pci_disable_msix(ioc->pdev);
1164 kfree(ioc->msix_table_backup);
1165 ioc->msix_table_backup = NULL;
1166 ioc->msix_enable = 0; 1319 ioc->msix_enable = 0;
1167 } 1320 }
1168} 1321}
@@ -1175,10 +1328,13 @@ _base_disable_msix(struct MPT2SAS_ADAPTER *ioc)
1175static int 1328static int
1176_base_enable_msix(struct MPT2SAS_ADAPTER *ioc) 1329_base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
1177{ 1330{
1178 struct msix_entry entries; 1331 struct msix_entry *entries, *a;
1179 int r; 1332 int r;
1333 int i;
1180 u8 try_msix = 0; 1334 u8 try_msix = 0;
1181 1335
1336 INIT_LIST_HEAD(&ioc->reply_queue_list);
1337
1182 if (msix_disable == -1 || msix_disable == 0) 1338 if (msix_disable == -1 || msix_disable == 0)
1183 try_msix = 1; 1339 try_msix = 1;
1184 1340
@@ -1188,51 +1344,48 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
1188 if (_base_check_enable_msix(ioc) != 0) 1344 if (_base_check_enable_msix(ioc) != 0)
1189 goto try_ioapic; 1345 goto try_ioapic;
1190 1346
1191 ioc->msix_table_backup = kcalloc(ioc->msix_vector_count, 1347 ioc->reply_queue_count = min_t(u8, ioc->cpu_count,
1192 sizeof(u32), GFP_KERNEL); 1348 ioc->msix_vector_count);
1193 if (!ioc->msix_table_backup) { 1349
1194 dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for " 1350 entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
1195 "msix_table_backup failed!!!\n", ioc->name)); 1351 GFP_KERNEL);
1352 if (!entries) {
1353 dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "kcalloc "
1354 "failed @ at %s:%d/%s() !!!\n", ioc->name, __FILE__,
1355 __LINE__, __func__));
1196 goto try_ioapic; 1356 goto try_ioapic;
1197 } 1357 }
1198 1358
1199 memset(&entries, 0, sizeof(struct msix_entry)); 1359 for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
1200 r = pci_enable_msix(ioc->pdev, &entries, 1); 1360 a->entry = i;
1361
1362 r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count);
1201 if (r) { 1363 if (r) {
1202 dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "pci_enable_msix " 1364 dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "pci_enable_msix "
1203 "failed (r=%d) !!!\n", ioc->name, r)); 1365 "failed (r=%d) !!!\n", ioc->name, r));
1366 kfree(entries);
1204 goto try_ioapic; 1367 goto try_ioapic;
1205 } 1368 }
1206 1369
1207 r = request_irq(entries.vector, _base_interrupt, IRQF_SHARED, 1370 ioc->msix_enable = 1;
1208 ioc->name, ioc); 1371 for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
1209 if (r) { 1372 r = _base_request_irq(ioc, i, a->vector);
1210 dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "unable to allocate " 1373 if (r) {
1211 "interrupt %d !!!\n", ioc->name, entries.vector)); 1374 _base_free_irq(ioc);
1212 pci_disable_msix(ioc->pdev); 1375 _base_disable_msix(ioc);
1213 goto try_ioapic; 1376 kfree(entries);
1377 goto try_ioapic;
1378 }
1214 } 1379 }
1215 1380
1216 ioc->pci_irq = entries.vector; 1381 kfree(entries);
1217 ioc->msix_enable = 1;
1218 return 0; 1382 return 0;
1219 1383
1220/* fall back to io_apic interrupt routing */ 1384/* fall back to io_apic interrupt routing */
1221 try_ioapic: 1385 try_ioapic:
1222 1386
1223 r = request_irq(ioc->pdev->irq, _base_interrupt, IRQF_SHARED, 1387 r = _base_request_irq(ioc, 0, ioc->pdev->irq);
1224 ioc->name, ioc);
1225 if (r) {
1226 printk(MPT2SAS_ERR_FMT "unable to allocate interrupt %d!\n",
1227 ioc->name, ioc->pdev->irq);
1228 r = -EBUSY;
1229 goto out_fail;
1230 }
1231 1388
1232 ioc->pci_irq = ioc->pdev->irq;
1233 return 0;
1234
1235 out_fail:
1236 return r; 1389 return r;
1237} 1390}
1238 1391
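
The rewritten _base_enable_msix follows the standard multi-vector pattern of the day: build an msix_entry array, request slots 0..n-1, and read back the assigned vectors. A reduced sketch of that pattern under the 2.6-era API (names are placeholders, error handling trimmed):

	#include <linux/pci.h>
	#include <linux/slab.h>

	/* Sketch only: enable nq MSI-X vectors on pdev and return them in
	 * vectors[]; pci_enable_msix() returns 0 on success and a positive
	 * count of available vectors when the request cannot be met. */
	static int enable_msix_vectors(struct pci_dev *pdev, int nq, u32 *vectors)
	{
		struct msix_entry *entries;
		int i, r;

		entries = kcalloc(nq, sizeof(*entries), GFP_KERNEL);
		if (!entries)
			return -ENOMEM;
		for (i = 0; i < nq; i++)
			entries[i].entry = i;	/* vector table slots 0..nq-1 */
		r = pci_enable_msix(pdev, entries, nq);
		if (!r)
			for (i = 0; i < nq; i++)
				vectors[i] = entries[i].vector;
		kfree(entries);
		return r;
	}
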
@@ -1251,6 +1404,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1251 int i, r = 0; 1404 int i, r = 0;
1252 u64 pio_chip = 0; 1405 u64 pio_chip = 0;
1253 u64 chip_phys = 0; 1406 u64 chip_phys = 0;
1407 struct adapter_reply_queue *reply_q;
1254 1408
1255 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", 1409 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n",
1256 ioc->name, __func__)); 1410 ioc->name, __func__));
@@ -1313,9 +1467,11 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1313 if (r) 1467 if (r)
1314 goto out_fail; 1468 goto out_fail;
1315 1469
1316 printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n", 1470 list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
1317 ioc->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" : 1471 printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n",
1318 "IO-APIC enabled"), ioc->pci_irq); 1472 reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
1473 "IO-APIC enabled"), reply_q->vector);
1474
1319 printk(MPT2SAS_INFO_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n", 1475 printk(MPT2SAS_INFO_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
1320 ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz); 1476 ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
1321 printk(MPT2SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n", 1477 printk(MPT2SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n",
@@ -1330,7 +1486,6 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1330 if (ioc->chip_phys) 1486 if (ioc->chip_phys)
1331 iounmap(ioc->chip); 1487 iounmap(ioc->chip);
1332 ioc->chip_phys = 0; 1488 ioc->chip_phys = 0;
1333 ioc->pci_irq = -1;
1334 pci_release_selected_regions(ioc->pdev, ioc->bars); 1489 pci_release_selected_regions(ioc->pdev, ioc->bars);
1335 pci_disable_pcie_error_reporting(pdev); 1490 pci_disable_pcie_error_reporting(pdev);
1336 pci_disable_device(pdev); 1491 pci_disable_device(pdev);
@@ -1577,6 +1732,12 @@ static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
1577} 1732}
1578#endif 1733#endif
1579 1734
1735static inline u8
1736_base_get_msix_index(struct MPT2SAS_ADAPTER *ioc)
1737{
1738 return ioc->cpu_msix_table[smp_processor_id()];
1739}
1740
1580/** 1741/**
1581 * mpt2sas_base_put_smid_scsi_io - send SCSI_IO request to firmware 1742 * mpt2sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
1582 * @ioc: per adapter object 1743 * @ioc: per adapter object
@@ -1593,7 +1754,7 @@ mpt2sas_base_put_smid_scsi_io(struct MPT2SAS_ADAPTER *ioc, u16 smid, u16 handle)
1593 1754
1594 1755
1595 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; 1756 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1596 descriptor.SCSIIO.MSIxIndex = 0; /* TODO */ 1757 descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
1597 descriptor.SCSIIO.SMID = cpu_to_le16(smid); 1758 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
1598 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); 1759 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
1599 descriptor.SCSIIO.LMID = 0; 1760 descriptor.SCSIIO.LMID = 0;
@@ -1617,7 +1778,7 @@ mpt2sas_base_put_smid_hi_priority(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1617 1778
1618 descriptor.HighPriority.RequestFlags = 1779 descriptor.HighPriority.RequestFlags =
1619 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 1780 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1620 descriptor.HighPriority.MSIxIndex = 0; /* TODO */ 1781 descriptor.HighPriority.MSIxIndex = 0;
1621 descriptor.HighPriority.SMID = cpu_to_le16(smid); 1782 descriptor.HighPriority.SMID = cpu_to_le16(smid);
1622 descriptor.HighPriority.LMID = 0; 1783 descriptor.HighPriority.LMID = 0;
1623 descriptor.HighPriority.Reserved1 = 0; 1784 descriptor.HighPriority.Reserved1 = 0;
@@ -1639,7 +1800,7 @@ mpt2sas_base_put_smid_default(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1639 u64 *request = (u64 *)&descriptor; 1800 u64 *request = (u64 *)&descriptor;
1640 1801
1641 descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 1802 descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
1642 descriptor.Default.MSIxIndex = 0; /* TODO */ 1803 descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
1643 descriptor.Default.SMID = cpu_to_le16(smid); 1804 descriptor.Default.SMID = cpu_to_le16(smid);
1644 descriptor.Default.LMID = 0; 1805 descriptor.Default.LMID = 0;
1645 descriptor.Default.DescriptorTypeDependent = 0; 1806 descriptor.Default.DescriptorTypeDependent = 0;
@@ -1664,7 +1825,7 @@ mpt2sas_base_put_smid_target_assist(struct MPT2SAS_ADAPTER *ioc, u16 smid,
1664 1825
1665 descriptor.SCSITarget.RequestFlags = 1826 descriptor.SCSITarget.RequestFlags =
1666 MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET; 1827 MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET;
1667 descriptor.SCSITarget.MSIxIndex = 0; /* TODO */ 1828 descriptor.SCSITarget.MSIxIndex = _base_get_msix_index(ioc);
1668 descriptor.SCSITarget.SMID = cpu_to_le16(smid); 1829 descriptor.SCSITarget.SMID = cpu_to_le16(smid);
1669 descriptor.SCSITarget.LMID = 0; 1830 descriptor.SCSITarget.LMID = 0;
1670 descriptor.SCSITarget.IoIndex = cpu_to_le16(io_index); 1831 descriptor.SCSITarget.IoIndex = cpu_to_le16(io_index);
@@ -2171,7 +2332,7 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2171 u16 max_sge_elements; 2332 u16 max_sge_elements;
2172 u16 num_of_reply_frames; 2333 u16 num_of_reply_frames;
2173 u16 chains_needed_per_io; 2334 u16 chains_needed_per_io;
2174 u32 sz, total_sz; 2335 u32 sz, total_sz, reply_post_free_sz;
2175 u32 retry_sz; 2336 u32 retry_sz;
2176 u16 max_request_credit; 2337 u16 max_request_credit;
2177 int i; 2338 int i;
@@ -2498,7 +2659,12 @@ chain_done:
2498 total_sz += sz; 2659 total_sz += sz;
2499 2660
2500 /* reply post queue, 16 byte align */ 2661 /* reply post queue, 16 byte align */
2501 sz = ioc->reply_post_queue_depth * sizeof(Mpi2DefaultReplyDescriptor_t); 2662 reply_post_free_sz = ioc->reply_post_queue_depth *
2663 sizeof(Mpi2DefaultReplyDescriptor_t);
2664 if (_base_is_controller_msix_enabled(ioc))
2665 sz = reply_post_free_sz * ioc->reply_queue_count;
2666 else
2667 sz = reply_post_free_sz;
2502 ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool", 2668 ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
2503 ioc->pdev, sz, 16, 0); 2669 ioc->pdev, sz, 16, 0);
2504 if (!ioc->reply_post_free_dma_pool) { 2670 if (!ioc->reply_post_free_dma_pool) {
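
With MSI-X enabled the single DMA pool is sized for one reply post ring per queue; _base_make_ioc_operational later carves it into per-queue segments. The carving in sketch form, using the field names introduced by this patch:

	/* Sketch only: hand each reply queue its segment of the pool. */
	u32 seg_sz = ioc->reply_post_queue_depth *
		     sizeof(Mpi2DefaultReplyDescriptor_t);
	long base = (long)ioc->reply_post_free;

	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)base;
		base += seg_sz;		/* next queue, next segment */
	}
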
@@ -3186,6 +3352,7 @@ _base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3186 facts->MaxChainDepth = mpi_reply.MaxChainDepth; 3352 facts->MaxChainDepth = mpi_reply.MaxChainDepth;
3187 facts->WhoInit = mpi_reply.WhoInit; 3353 facts->WhoInit = mpi_reply.WhoInit;
3188 facts->NumberOfPorts = mpi_reply.NumberOfPorts; 3354 facts->NumberOfPorts = mpi_reply.NumberOfPorts;
3355 facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
3189 facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit); 3356 facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
3190 facts->MaxReplyDescriptorPostQueueDepth = 3357 facts->MaxReplyDescriptorPostQueueDepth =
3191 le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth); 3358 le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
@@ -3243,7 +3410,8 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3243 mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION); 3410 mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
3244 mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); 3411 mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
3245 3412
3246 3413 if (_base_is_controller_msix_enabled(ioc))
3414 mpi_request.HostMSIxVectors = ioc->reply_queue_count;
3247 mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4); 3415 mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
3248 mpi_request.ReplyDescriptorPostQueueDepth = 3416 mpi_request.ReplyDescriptorPostQueueDepth =
3249 cpu_to_le16(ioc->reply_post_queue_depth); 3417 cpu_to_le16(ioc->reply_post_queue_depth);
@@ -3512,9 +3680,6 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3512 u32 hcb_size; 3680 u32 hcb_size;
3513 3681
3514 printk(MPT2SAS_INFO_FMT "sending diag reset !!\n", ioc->name); 3682 printk(MPT2SAS_INFO_FMT "sending diag reset !!\n", ioc->name);
3515
3516 _base_save_msix_table(ioc);
3517
3518 drsprintk(ioc, printk(MPT2SAS_INFO_FMT "clear interrupts\n", 3683 drsprintk(ioc, printk(MPT2SAS_INFO_FMT "clear interrupts\n",
3519 ioc->name)); 3684 ioc->name));
3520 3685
@@ -3610,7 +3775,6 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3610 goto out; 3775 goto out;
3611 } 3776 }
3612 3777
3613 _base_restore_msix_table(ioc);
3614 printk(MPT2SAS_INFO_FMT "diag reset: SUCCESS\n", ioc->name); 3778 printk(MPT2SAS_INFO_FMT "diag reset: SUCCESS\n", ioc->name);
3615 return 0; 3779 return 0;
3616 3780
@@ -3691,6 +3855,9 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3691 u16 smid; 3855 u16 smid;
3692 struct _tr_list *delayed_tr, *delayed_tr_next; 3856 struct _tr_list *delayed_tr, *delayed_tr_next;
3693 u8 hide_flag; 3857 u8 hide_flag;
3858 struct adapter_reply_queue *reply_q;
3859 long reply_post_free;
3860 u32 reply_post_free_sz;
3694 3861
3695 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 3862 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3696 __func__)); 3863 __func__));
@@ -3756,19 +3923,43 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3756 ioc->reply_sz) 3923 ioc->reply_sz)
3757 ioc->reply_free[i] = cpu_to_le32(reply_address); 3924 ioc->reply_free[i] = cpu_to_le32(reply_address);
3758 3925
3926 /* initialize reply queues */
3927 _base_assign_reply_queues(ioc);
3928
3759 /* initialize Reply Post Free Queue */ 3929 /* initialize Reply Post Free Queue */
3760 for (i = 0; i < ioc->reply_post_queue_depth; i++) 3930 reply_post_free = (long)ioc->reply_post_free;
3761 ioc->reply_post_free[i].Words = cpu_to_le64(ULLONG_MAX); 3931 reply_post_free_sz = ioc->reply_post_queue_depth *
3932 sizeof(Mpi2DefaultReplyDescriptor_t);
3933 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3934 reply_q->reply_post_host_index = 0;
3935 reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
3936 reply_post_free;
3937 for (i = 0; i < ioc->reply_post_queue_depth; i++)
3938 reply_q->reply_post_free[i].Words =
3939 cpu_to_le64(ULLONG_MAX);
3940 if (!_base_is_controller_msix_enabled(ioc))
3941 goto skip_init_reply_post_free_queue;
3942 reply_post_free += reply_post_free_sz;
3943 }
3944 skip_init_reply_post_free_queue:
3762 3945
3763 r = _base_send_ioc_init(ioc, sleep_flag); 3946 r = _base_send_ioc_init(ioc, sleep_flag);
3764 if (r) 3947 if (r)
3765 return r; 3948 return r;
3766 3949
3767 /* initialize the index's */ 3950 /* initialize reply free host index */
3768 ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1; 3951 ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
3769 ioc->reply_post_host_index = 0;
3770 writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex); 3952 writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
3771 writel(0, &ioc->chip->ReplyPostHostIndex); 3953
3954 /* initialize reply post host index */
3955 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3956 writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT,
3957 &ioc->chip->ReplyPostHostIndex);
3958 if (!_base_is_controller_msix_enabled(ioc))
3959 goto skip_init_reply_post_host_index;
3960 }
3961
3962 skip_init_reply_post_host_index:
3772 3963
3773 _base_unmask_interrupts(ioc); 3964 _base_unmask_interrupts(ioc);
3774 r = _base_event_notification(ioc, sleep_flag); 3965 r = _base_event_notification(ioc, sleep_flag);
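
Every queue kicks the same ReplyPostHostIndex register, qualified by its msix index in the upper bits so the controller knows which ring the new index belongs to. A sketch of the update an interrupt handler would issue after consuming descriptors (MPI2_RPHI_MSIX_INDEX_SHIFT comes from the MPI2 headers):

	/* Sketch: fold the queue's msix index into the host index doorbell. */
	writel((reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT) |
	       reply_q->reply_post_host_index,
	       &ioc->chip->ReplyPostHostIndex);
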
@@ -3819,14 +4010,10 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
3819 ioc->shost_recovery = 1; 4010 ioc->shost_recovery = 1;
3820 _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); 4011 _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
3821 ioc->shost_recovery = 0; 4012 ioc->shost_recovery = 0;
3822 if (ioc->pci_irq) { 4013 _base_free_irq(ioc);
3823 synchronize_irq(pdev->irq);
3824 free_irq(ioc->pci_irq, ioc);
3825 }
3826 _base_disable_msix(ioc); 4014 _base_disable_msix(ioc);
3827 if (ioc->chip_phys) 4015 if (ioc->chip_phys)
3828 iounmap(ioc->chip); 4016 iounmap(ioc->chip);
3829 ioc->pci_irq = -1;
3830 ioc->chip_phys = 0; 4017 ioc->chip_phys = 0;
3831 pci_release_selected_regions(ioc->pdev, ioc->bars); 4018 pci_release_selected_regions(ioc->pdev, ioc->bars);
3832 pci_disable_pcie_error_reporting(pdev); 4019 pci_disable_pcie_error_reporting(pdev);
@@ -3844,14 +4031,50 @@ int
3844mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) 4031mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3845{ 4032{
3846 int r, i; 4033 int r, i;
4034 int cpu_id, last_cpu_id = 0;
3847 4035
3848 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, 4036 dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
3849 __func__)); 4037 __func__));
3850 4038
4039 /* setup cpu_msix_table */
4040 ioc->cpu_count = num_online_cpus();
4041 for_each_online_cpu(cpu_id)
4042 last_cpu_id = cpu_id;
4043 ioc->cpu_msix_table_sz = last_cpu_id + 1;
4044 ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
4045 ioc->reply_queue_count = 1;
4046 if (!ioc->cpu_msix_table) {
4047 dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for "
4048 "cpu_msix_table failed!!!\n", ioc->name));
4049 r = -ENOMEM;
4050 goto out_free_resources;
4051 }
4052
4053 if (ioc->is_warpdrive) {
4054 ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
4055 sizeof(resource_size_t *), GFP_KERNEL);
4056 if (!ioc->reply_post_host_index) {
4057 dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation "
4058 "for cpu_msix_table failed!!!\n", ioc->name));
4059 r = -ENOMEM;
4060 goto out_free_resources;
4061 }
4062 }
4063
3851 r = mpt2sas_base_map_resources(ioc); 4064 r = mpt2sas_base_map_resources(ioc);
3852 if (r) 4065 if (r)
3853 return r; 4066 return r;
3854 4067
4068 if (ioc->is_warpdrive) {
4069 ioc->reply_post_host_index[0] =
4070 (resource_size_t *)&ioc->chip->ReplyPostHostIndex;
4071
4072 for (i = 1; i < ioc->cpu_msix_table_sz; i++)
4073 ioc->reply_post_host_index[i] = (resource_size_t *)
4074 ((u8 *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
4075 * 4)));
4076 }
4077
3855 pci_set_drvdata(ioc->pdev, ioc->shost); 4078 pci_set_drvdata(ioc->pdev, ioc->shost);
3856 r = _base_get_ioc_facts(ioc, CAN_SLEEP); 4079 r = _base_get_ioc_facts(ioc, CAN_SLEEP);
3857 if (r) 4080 if (r)
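
For warpdrive controllers the per-queue host index registers sit outside the regular system interface layout: queue 0 keeps the standard ReplyPostHostIndex, and queues 1..n are assumed at 4-byte strides past a 0x4000 offset from the Doorbell register, as the table built above encodes. In isolation:

	/* Sketch of the address table above; n stands for
	 * ioc->cpu_msix_table_sz, addr for ioc->reply_post_host_index. */
	addr[0] = (resource_size_t *)&ioc->chip->ReplyPostHostIndex;
	for (i = 1; i < n; i++)
		addr[i] = (resource_size_t *)
			((u8 *)&ioc->chip->Doorbell + 0x4000 + (i - 1) * 4);
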
@@ -3972,6 +4195,9 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3972 mpt2sas_base_free_resources(ioc); 4195 mpt2sas_base_free_resources(ioc);
3973 _base_release_memory_pools(ioc); 4196 _base_release_memory_pools(ioc);
3974 pci_set_drvdata(ioc->pdev, NULL); 4197 pci_set_drvdata(ioc->pdev, NULL);
4198 kfree(ioc->cpu_msix_table);
4199 if (ioc->is_warpdrive)
4200 kfree(ioc->reply_post_host_index);
3975 kfree(ioc->pd_handles); 4201 kfree(ioc->pd_handles);
3976 kfree(ioc->tm_cmds.reply); 4202 kfree(ioc->tm_cmds.reply);
3977 kfree(ioc->transport_cmds.reply); 4203 kfree(ioc->transport_cmds.reply);
@@ -4009,6 +4235,9 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
4009 mpt2sas_base_free_resources(ioc); 4235 mpt2sas_base_free_resources(ioc);
4010 _base_release_memory_pools(ioc); 4236 _base_release_memory_pools(ioc);
4011 pci_set_drvdata(ioc->pdev, NULL); 4237 pci_set_drvdata(ioc->pdev, NULL);
4238 kfree(ioc->cpu_msix_table);
4239 if (ioc->is_warpdrive)
4240 kfree(ioc->reply_post_host_index);
4012 kfree(ioc->pd_handles); 4241 kfree(ioc->pd_handles);
4013 kfree(ioc->pfacts); 4242 kfree(ioc->pfacts);
4014 kfree(ioc->ctl_cmds.reply); 4243 kfree(ioc->ctl_cmds.reply);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 8d5be2120c6..59354dba68c 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,11 +69,11 @@
69#define MPT2SAS_DRIVER_NAME "mpt2sas" 69#define MPT2SAS_DRIVER_NAME "mpt2sas"
70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" 70#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" 71#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
72#define MPT2SAS_DRIVER_VERSION "09.100.00.00" 72#define MPT2SAS_DRIVER_VERSION "09.100.00.01"
73#define MPT2SAS_MAJOR_VERSION 09 73#define MPT2SAS_MAJOR_VERSION 09
74#define MPT2SAS_MINOR_VERSION 100 74#define MPT2SAS_MINOR_VERSION 100
75#define MPT2SAS_BUILD_VERSION 00 75#define MPT2SAS_BUILD_VERSION 00
76#define MPT2SAS_RELEASE_VERSION 00 76#define MPT2SAS_RELEASE_VERSION 01
77 77
78/* 78/*
79 * Set MPT2SAS_SG_DEPTH value based on user input. 79 * Set MPT2SAS_SG_DEPTH value based on user input.
@@ -544,6 +544,28 @@ struct _tr_list {
544 544
545typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr); 545typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
546 546
547/**
548 * struct adapter_reply_queue - the reply queue struct
549 * @ioc: per adapter object
550 * @msix_index: msix index into vector table
551 * @vector: irq vector
552 * @reply_post_host_index: head index in the pool where FW completes IO
553 * @reply_post_free: reply post base virt address
554 * @name: the name registered to request_irq()
555 * @busy: isr is actively processing replies on another cpu
556 * @list: this list
557*/
558struct adapter_reply_queue {
559 struct MPT2SAS_ADAPTER *ioc;
560 u8 msix_index;
561 unsigned int vector;
562 u32 reply_post_host_index;
563 Mpi2ReplyDescriptorsUnion_t *reply_post_free;
564 char name[MPT_NAME_LENGTH];
565 atomic_t busy;
566 struct list_head list;
567};
568
547/* IOC Facts and Port Facts converted from little endian to cpu */ 569/* IOC Facts and Port Facts converted from little endian to cpu */
548union mpi2_version_union { 570union mpi2_version_union {
549 MPI2_VERSION_STRUCT Struct; 571 MPI2_VERSION_STRUCT Struct;
@@ -606,7 +628,7 @@ enum mutex_type {
606 * @list: ioc_list 628 * @list: ioc_list
607 * @shost: shost object 629 * @shost: shost object
608 * @id: unique adapter id 630 * @id: unique adapter id
609 * @pci_irq: irq number 631 * @cpu_count: number of online cpus
610 * @name: generic ioc string 632 * @name: generic ioc string
611 * @tmp_string: tmp string used for logging 633 * @tmp_string: tmp string used for logging
612 * @pdev: pci pdev object 634 * @pdev: pci pdev object
@@ -636,8 +658,8 @@ enum mutex_type {
636 * @wait_for_port_enable_to_complete: 658 * @wait_for_port_enable_to_complete:
637 * @msix_enable: flag indicating msix is enabled 659 * @msix_enable: flag indicating msix is enabled
638 * @msix_vector_count: number msix vectors 660 * @msix_vector_count: number msix vectors
639 * @msix_table: virt address to the msix table 661 * @cpu_msix_table: table for mapping cpus to msix index
640 * @msix_table_backup: backup msix table 662 * @cpu_msix_table_sz: table size
641 * @scsi_io_cb_idx: shost generated commands 663 * @scsi_io_cb_idx: shost generated commands
642 * @tm_cb_idx: task management commands 664 * @tm_cb_idx: task management commands
643 * @scsih_cb_idx: scsih internal commands 665 * @scsih_cb_idx: scsih internal commands
@@ -728,7 +750,8 @@ enum mutex_type {
728 * @reply_post_queue_depth: reply post queue depth 750 * @reply_post_queue_depth: reply post queue depth
729 * @reply_post_free: pool for reply post (64bit descriptor) 751 * @reply_post_free: pool for reply post (64bit descriptor)
730 * @reply_post_free_dma: 752 * @reply_post_free_dma:
731 * @reply_post_free_dma_pool: 753 * @reply_queue_count: number of reply queues
754 * @reply_queue_list: linked list containing the reply queue info
732 * @reply_post_host_index: head index in the pool where FW completes IO 755 * @reply_post_host_index: head index in the pool where FW completes IO
733 * @delayed_tr_list: target reset link list 756 * @delayed_tr_list: target reset link list
734 * @delayed_tr_volume_list: volume target reset link list 757 * @delayed_tr_volume_list: volume target reset link list
@@ -737,7 +760,7 @@ struct MPT2SAS_ADAPTER {
737 struct list_head list; 760 struct list_head list;
738 struct Scsi_Host *shost; 761 struct Scsi_Host *shost;
739 u8 id; 762 u8 id;
740 u32 pci_irq; 763 int cpu_count;
741 char name[MPT_NAME_LENGTH]; 764 char name[MPT_NAME_LENGTH];
742 char tmp_string[MPT_STRING_LENGTH]; 765 char tmp_string[MPT_STRING_LENGTH];
743 struct pci_dev *pdev; 766 struct pci_dev *pdev;
@@ -779,8 +802,9 @@ struct MPT2SAS_ADAPTER {
779 802
780 u8 msix_enable; 803 u8 msix_enable;
781 u16 msix_vector_count; 804 u16 msix_vector_count;
782 u32 *msix_table; 805 u8 *cpu_msix_table;
783 u32 *msix_table_backup; 806 resource_size_t **reply_post_host_index;
807 u16 cpu_msix_table_sz;
784 u32 ioc_reset_count; 808 u32 ioc_reset_count;
785 809
786 /* internal commands, callback index */ 810 /* internal commands, callback index */
@@ -911,7 +935,8 @@ struct MPT2SAS_ADAPTER {
911 Mpi2ReplyDescriptorsUnion_t *reply_post_free; 935 Mpi2ReplyDescriptorsUnion_t *reply_post_free;
912 dma_addr_t reply_post_free_dma; 936 dma_addr_t reply_post_free_dma;
913 struct dma_pool *reply_post_free_dma_pool; 937 struct dma_pool *reply_post_free_dma_pool;
914 u32 reply_post_host_index; 938 u8 reply_queue_count;
939 struct list_head reply_queue_list;
915 940
916 struct list_head delayed_tr_list; 941 struct list_head delayed_tr_list;
917 struct list_head delayed_tr_volume_list; 942 struct list_head delayed_tr_volume_list;
@@ -955,6 +980,7 @@ void *mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid);
955void mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr); 980void mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr);
956__le32 mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, 981__le32 mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc,
957 u16 smid); 982 u16 smid);
983void mpt2sas_base_flush_reply_queues(struct MPT2SAS_ADAPTER *ioc);
958 984
959/* hi-priority queue */ 985/* hi-priority queue */
960u16 mpt2sas_base_get_smid_hpr(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx); 986u16 mpt2sas_base_get_smid_hpr(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 246d5fbc6e5..9adb0133d6f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -2704,6 +2704,33 @@ _ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
2704static DEVICE_ATTR(ioc_reset_count, S_IRUGO, 2704static DEVICE_ATTR(ioc_reset_count, S_IRUGO,
2705 _ctl_ioc_reset_count_show, NULL); 2705 _ctl_ioc_reset_count_show, NULL);
2706 2706
2707/**
2708 * _ctl_ioc_reply_queue_count_show - number of reply queues
2709 * @cdev - pointer to embedded class device
2710 * @buf - the buffer returned
2711 *
2712 * This is the number of reply queues
2713 *
2714 * A sysfs 'read-only' shost attribute.
2715 */
2716static ssize_t
2717_ctl_ioc_reply_queue_count_show(struct device *cdev,
2718 struct device_attribute *attr, char *buf)
2719{
2720 u8 reply_queue_count;
2721 struct Scsi_Host *shost = class_to_shost(cdev);
2722 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
2723
2724 if ((ioc->facts.IOCCapabilities &
2725 MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable)
2726 reply_queue_count = ioc->reply_queue_count;
2727 else
2728 reply_queue_count = 1;
2729 return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
2730}
2731static DEVICE_ATTR(reply_queue_count, S_IRUGO,
2732 _ctl_ioc_reply_queue_count_show, NULL);
2733
2707struct DIAG_BUFFER_START { 2734struct DIAG_BUFFER_START {
2708 __le32 Size; 2735 __le32 Size;
2709 __le32 DiagVersion; 2736 __le32 DiagVersion;
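
The new attribute is registered with the other shost attributes, so it appears under /sys/class/scsi_host/hostN. A userspace sketch of reading it; host0 is illustrative and depends on the system:

	#include <stdio.h>

	int main(void)
	{
		char buf[16];
		FILE *f = fopen("/sys/class/scsi_host/host0/reply_queue_count", "r");

		if (f) {
			if (fgets(buf, sizeof(buf), f))
				printf("reply queues: %s", buf);
			fclose(f);
		}
		return 0;
	}
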
@@ -2914,6 +2941,7 @@ struct device_attribute *mpt2sas_host_attrs[] = {
2914 &dev_attr_host_trace_buffer_size, 2941 &dev_attr_host_trace_buffer_size,
2915 &dev_attr_host_trace_buffer, 2942 &dev_attr_host_trace_buffer,
2916 &dev_attr_host_trace_buffer_enable, 2943 &dev_attr_host_trace_buffer_enable,
2944 &dev_attr_reply_queue_count,
2917 NULL, 2945 NULL,
2918}; 2946};
2919 2947
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 5202de3f3d3..1da1aa1a11e 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2161,6 +2161,7 @@ _scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2161 return 1; 2161 return 1;
2162 if (ioc->tm_cmds.smid != smid) 2162 if (ioc->tm_cmds.smid != smid)
2163 return 1; 2163 return 1;
2164 mpt2sas_base_flush_reply_queues(ioc);
2164 ioc->tm_cmds.status |= MPT2_CMD_COMPLETE; 2165 ioc->tm_cmds.status |= MPT2_CMD_COMPLETE;
2165 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); 2166 mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
2166 if (mpi_reply) { 2167 if (mpi_reply) {
@@ -7353,6 +7354,7 @@ _scsih_remove(struct pci_dev *pdev)
7353 } 7354 }
7354 7355
7355 sas_remove_host(shost); 7356 sas_remove_host(shost);
7357 mpt2sas_base_detach(ioc);
7356 list_del(&ioc->list); 7358 list_del(&ioc->list);
7357 scsi_remove_host(shost); 7359 scsi_remove_host(shost);
7358 scsi_host_put(shost); 7360 scsi_host_put(shost);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 15c79802621..230732241aa 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -163,7 +163,7 @@ _transport_set_identify(struct MPT2SAS_ADAPTER *ioc, u16 handle,
163 return -EIO; 163 return -EIO;
164 } 164 }
165 165
166 memset(identify, 0, sizeof(identify)); 166 memset(identify, 0, sizeof(*identify));
167 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); 167 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
168 168
169 /* sas_address */ 169 /* sas_address */
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index 3501291618f..7e423e5ad5e 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -398,6 +398,16 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
398 /* init phys */ 398 /* init phys */
399 mvs_phy_hacks(mvi); 399 mvs_phy_hacks(mvi);
400 400
401 /* disable non data frame retry */
402 tmp = mvs_cr32(mvi, CMD_SAS_CTL1);
403 if ((revision == VANIR_A0_REV) ||
404 (revision == VANIR_B0_REV) ||
405 (revision == VANIR_C0_REV)) {
406 tmp &= ~0xffff;
407 tmp |= 0x007f;
408 mvs_cw32(mvi, CMD_SAS_CTL1, tmp);
409 }
410
401 /* set LED blink when IO*/ 411 /* set LED blink when IO*/
402 mw32(MVS_PA_VSR_ADDR, VSR_PHY_ACT_LED); 412 mw32(MVS_PA_VSR_ADDR, VSR_PHY_ACT_LED);
403 tmp = mr32(MVS_PA_VSR_PORT); 413 tmp = mr32(MVS_PA_VSR_PORT);
@@ -500,6 +510,27 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi)
500 tmp |= CINT_PHY_MASK; 510 tmp |= CINT_PHY_MASK;
501 mw32(MVS_INT_MASK, tmp); 511 mw32(MVS_INT_MASK, tmp);
502 512
513 tmp = mvs_cr32(mvi, CMD_LINK_TIMER);
514 tmp |= 0xFFFF0000;
515 mvs_cw32(mvi, CMD_LINK_TIMER, tmp);
516
517 /* tune STP performance */
518 tmp = 0x003F003F;
519 mvs_cw32(mvi, CMD_PL_TIMER, tmp);
520
521 /* This can improve expander large block size seq write performance */
522 tmp = mvs_cr32(mvi, CMD_PORT_LAYER_TIMER1);
523 tmp |= 0xFFFF007F;
524 mvs_cw32(mvi, CMD_PORT_LAYER_TIMER1, tmp);
525
526 /* change the connection open-close behavior (bit 9)
527 * set bit8 to 1 for performance tuning */
528 tmp = mvs_cr32(mvi, CMD_SL_MODE0);
529 tmp |= 0x00000300;
530 /* set bit0 to 0 to enable retry for no_dest reject case */
531 tmp &= 0xFFFFFFFE;
532 mvs_cw32(mvi, CMD_SL_MODE0, tmp);
533
503 /* Enable SRS interrupt */ 534 /* Enable SRS interrupt */
504 mw32(MVS_INT_MASK_SRS_0, 0xFFFF); 535 mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
505 536
@@ -823,6 +854,10 @@ static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
823 phy->att_dev_info = PORT_DEV_STP_TRGT | 1; 854 phy->att_dev_info = PORT_DEV_STP_TRGT | 1;
824 } 855 }
825 856
857 /* enable spin up bit */
858 mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
859 mvs_write_port_cfg_data(mvi, i, 0x04);
860
826} 861}
827 862
828void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id, 863void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
index dec7cadb748..f5451940d28 100644
--- a/drivers/scsi/mvsas/mv_defs.h
+++ b/drivers/scsi/mvsas/mv_defs.h
@@ -387,6 +387,8 @@ enum sas_cmd_port_registers {
387 CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */ 387 CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */
388 CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */ 388 CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */
389 CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */ 389 CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */
390 CMD_PORT_LAYER_TIMER1 = 0x1E0, /* Port Layer Timer 1 */
391 CMD_LINK_TIMER = 0x1E4, /* Link Timer */
390}; 392};
391 393
392enum mvs_info_flags { 394enum mvs_info_flags {
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 4e9af66fd1d..621b5e07275 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -59,7 +59,7 @@ static struct scsi_host_template mvs_sht = {
59 .name = DRV_NAME, 59 .name = DRV_NAME,
60 .queuecommand = sas_queuecommand, 60 .queuecommand = sas_queuecommand,
61 .target_alloc = sas_target_alloc, 61 .target_alloc = sas_target_alloc,
62 .slave_configure = mvs_slave_configure, 62 .slave_configure = sas_slave_configure,
63 .slave_destroy = sas_slave_destroy, 63 .slave_destroy = sas_slave_destroy,
64 .scan_finished = mvs_scan_finished, 64 .scan_finished = mvs_scan_finished,
65 .scan_start = mvs_scan_start, 65 .scan_start = mvs_scan_start,
@@ -74,7 +74,7 @@ static struct scsi_host_template mvs_sht = {
74 .use_clustering = ENABLE_CLUSTERING, 74 .use_clustering = ENABLE_CLUSTERING,
75 .eh_device_reset_handler = sas_eh_device_reset_handler, 75 .eh_device_reset_handler = sas_eh_device_reset_handler,
76 .eh_bus_reset_handler = sas_eh_bus_reset_handler, 76 .eh_bus_reset_handler = sas_eh_bus_reset_handler,
77 .slave_alloc = mvs_slave_alloc, 77 .slave_alloc = sas_slave_alloc,
78 .target_destroy = sas_target_destroy, 78 .target_destroy = sas_target_destroy,
79 .ioctl = sas_ioctl, 79 .ioctl = sas_ioctl,
80 .shost_attrs = mvst_host_attrs, 80 .shost_attrs = mvst_host_attrs,
@@ -707,6 +707,15 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = {
707 { PCI_VDEVICE(TTI, 0x2760), chip_9480 }, 707 { PCI_VDEVICE(TTI, 0x2760), chip_9480 },
708 { 708 {
709 .vendor = 0x1b4b, 709 .vendor = 0x1b4b,
710 .device = 0x9480,
711 .subvendor = PCI_ANY_ID,
712 .subdevice = 0x9480,
713 .class = 0,
714 .class_mask = 0,
715 .driver_data = chip_9480,
716 },
717 {
718 .vendor = 0x1b4b,
710 .device = 0x9445, 719 .device = 0x9445,
711 .subvendor = PCI_ANY_ID, 720 .subvendor = PCI_ANY_ID,
712 .subdevice = 0x9480, 721 .subdevice = 0x9480,
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 4958fefff36..a4884a57cf7 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -214,7 +214,7 @@ int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
214 break; 214 break;
215 case PHY_FUNC_RELEASE_SPINUP_HOLD: 215 case PHY_FUNC_RELEASE_SPINUP_HOLD:
216 default: 216 default:
217 rc = -EOPNOTSUPP; 217 rc = -ENOSYS;
218 } 218 }
219 msleep(200); 219 msleep(200);
220 return rc; 220 return rc;
@@ -265,6 +265,12 @@ static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
265 id->dev_type = phy->identify.device_type; 265 id->dev_type = phy->identify.device_type;
266 id->initiator_bits = SAS_PROTOCOL_ALL; 266 id->initiator_bits = SAS_PROTOCOL_ALL;
267 id->target_bits = phy->identify.target_port_protocols; 267 id->target_bits = phy->identify.target_port_protocols;
268
269 /* direct attached SAS device */
270 if (phy->att_dev_info & PORT_SSP_TRGT_MASK) {
271 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
272 MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x00);
273 }
268 } else if (phy->phy_type & PORT_TYPE_SATA) { 274 } else if (phy->phy_type & PORT_TYPE_SATA) {
269 /*Nothing*/ 275 /*Nothing*/
270 } 276 }
@@ -276,36 +282,6 @@ static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
276 PORTE_BYTES_DMAED); 282 PORTE_BYTES_DMAED);
277} 283}
278 284
279int mvs_slave_alloc(struct scsi_device *scsi_dev)
280{
281 struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
282 if (dev_is_sata(dev)) {
283 /* We don't need to rescan targets
284 * if REPORT_LUNS request is failed
285 */
286 if (scsi_dev->lun > 0)
287 return -ENXIO;
288 scsi_dev->tagged_supported = 1;
289 }
290
291 return sas_slave_alloc(scsi_dev);
292}
293
294int mvs_slave_configure(struct scsi_device *sdev)
295{
296 struct domain_device *dev = sdev_to_domain_dev(sdev);
297 int ret = sas_slave_configure(sdev);
298
299 if (ret)
300 return ret;
301 if (!dev_is_sata(dev)) {
302 sas_change_queue_depth(sdev,
303 MVS_QUEUE_SIZE,
304 SCSI_QDEPTH_DEFAULT);
305 }
306 return 0;
307}
308
309void mvs_scan_start(struct Scsi_Host *shost) 285void mvs_scan_start(struct Scsi_Host *shost)
310{ 286{
311 int i, j; 287 int i, j;
@@ -426,7 +402,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
426 /* generate open address frame hdr (first 12 bytes) */ 402 /* generate open address frame hdr (first 12 bytes) */
427 /* initiator, SMP, ftype 1h */ 403 /* initiator, SMP, ftype 1h */
428 buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01; 404 buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
429 buf_oaf[1] = dev->linkrate & 0xf; 405 buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
430 *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */ 406 *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */
431 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); 407 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
432 408
@@ -571,7 +547,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
571 /* generate open address frame hdr (first 12 bytes) */ 547 /* generate open address frame hdr (first 12 bytes) */
572 /* initiator, STP, ftype 1h */ 548 /* initiator, STP, ftype 1h */
573 buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1; 549 buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
574 buf_oaf[1] = dev->linkrate & 0xf; 550 buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
575 *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1); 551 *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
576 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); 552 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
577 553
@@ -679,7 +655,7 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi,
679 /* generate open address frame hdr (first 12 bytes) */ 655 /* generate open address frame hdr (first 12 bytes) */
680 /* initiator, SSP, ftype 1h */ 656 /* initiator, SSP, ftype 1h */
681 buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1; 657 buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
682 buf_oaf[1] = dev->linkrate & 0xf; 658 buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
683 *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1); 659 *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
684 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); 660 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
685 661
@@ -1241,6 +1217,12 @@ static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
1241 port->wide_port_phymap = sas_port->phy_mask; 1217 port->wide_port_phymap = sas_port->phy_mask;
1242 mv_printk("set wide port phy map %x\n", sas_port->phy_mask); 1218 mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
1243 mvs_update_wideport(mvi, sas_phy->id); 1219 mvs_update_wideport(mvi, sas_phy->id);
1220
1221 /* direct attached SAS device */
1222 if (phy->att_dev_info & PORT_SSP_TRGT_MASK) {
1223 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
1224 MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x04);
1225 }
1244 } 1226 }
1245 if (lock) 1227 if (lock)
1246 spin_unlock_irqrestore(&mvi->lock, flags); 1228 spin_unlock_irqrestore(&mvi->lock, flags);
@@ -1387,28 +1369,6 @@ void mvs_dev_gone(struct domain_device *dev)
1387 mvs_dev_gone_notify(dev); 1369 mvs_dev_gone_notify(dev);
1388} 1370}
1389 1371
1390static struct sas_task *mvs_alloc_task(void)
1391{
1392 struct sas_task *task = kzalloc(sizeof(struct sas_task), GFP_KERNEL);
1393
1394 if (task) {
1395 INIT_LIST_HEAD(&task->list);
1396 spin_lock_init(&task->task_state_lock);
1397 task->task_state_flags = SAS_TASK_STATE_PENDING;
1398 init_timer(&task->timer);
1399 init_completion(&task->completion);
1400 }
1401 return task;
1402}
1403
1404static void mvs_free_task(struct sas_task *task)
1405{
1406 if (task) {
1407 BUG_ON(!list_empty(&task->list));
1408 kfree(task);
1409 }
1410}
1411
1412static void mvs_task_done(struct sas_task *task) 1372static void mvs_task_done(struct sas_task *task)
1413{ 1373{
1414 if (!del_timer(&task->timer)) 1374 if (!del_timer(&task->timer))
@@ -1432,7 +1392,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1432 struct sas_task *task = NULL; 1392 struct sas_task *task = NULL;
1433 1393
1434 for (retry = 0; retry < 3; retry++) { 1394 for (retry = 0; retry < 3; retry++) {
1435 task = mvs_alloc_task(); 1395 task = sas_alloc_task(GFP_KERNEL);
1436 if (!task) 1396 if (!task)
1437 return -ENOMEM; 1397 return -ENOMEM;
1438 1398
@@ -1490,15 +1450,14 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1490 SAS_ADDR(dev->sas_addr), 1450 SAS_ADDR(dev->sas_addr),
1491 task->task_status.resp, 1451 task->task_status.resp,
1492 task->task_status.stat); 1452 task->task_status.stat);
1493 mvs_free_task(task); 1453 sas_free_task(task);
1494 task = NULL; 1454 task = NULL;
1495 1455
1496 } 1456 }
1497 } 1457 }
1498ex_err: 1458ex_err:
1499 BUG_ON(retry == 3 && task != NULL); 1459 BUG_ON(retry == 3 && task != NULL);
1500 if (task != NULL) 1460 sas_free_task(task);
1501 mvs_free_task(task);
1502 return res; 1461 return res;
1503} 1462}
1504 1463
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 44b47451322..c04a4f5b597 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -46,7 +46,7 @@
46#include "mv_defs.h" 46#include "mv_defs.h"
47 47
48#define DRV_NAME "mvsas" 48#define DRV_NAME "mvsas"
49#define DRV_VERSION "0.8.2" 49#define DRV_VERSION "0.8.16"
50#define MVS_ID_NOT_MAPPED 0x7f 50#define MVS_ID_NOT_MAPPED 0x7f
51#define WIDE_PORT_MAX_PHY 4 51#define WIDE_PORT_MAX_PHY 4
52#define mv_printk(fmt, arg ...) \ 52#define mv_printk(fmt, arg ...) \
@@ -458,8 +458,6 @@ int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
458 void *funcdata); 458 void *funcdata);
459void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id, 459void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
460 u32 off_lo, u32 off_hi, u64 sas_addr); 460 u32 off_lo, u32 off_hi, u64 sas_addr);
461int mvs_slave_alloc(struct scsi_device *scsi_dev);
462int mvs_slave_configure(struct scsi_device *sdev);
463void mvs_scan_start(struct Scsi_Host *shost); 461void mvs_scan_start(struct Scsi_Host *shost);
464int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time); 462int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time);
465int mvs_queue_command(struct sas_task *task, const int num, 463int mvs_queue_command(struct sas_task *task, const int num,
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
new file mode 100644
index 00000000000..88cf1db21a7
--- /dev/null
+++ b/drivers/scsi/mvumi.c
@@ -0,0 +1,2018 @@
1/*
2 * Marvell UMI driver
3 *
4 * Copyright 2011 Marvell. <jyli@marvell.com>
5 *
6 * This file is licensed under GPLv2.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; version 2 of the
11 * License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
21 * USA
22*/
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/moduleparam.h>
27#include <linux/init.h>
28#include <linux/device.h>
29#include <linux/pci.h>
30#include <linux/list.h>
31#include <linux/spinlock.h>
32#include <linux/interrupt.h>
33#include <linux/delay.h>
34#include <linux/blkdev.h>
35#include <linux/io.h>
36#include <scsi/scsi.h>
37#include <scsi/scsi_cmnd.h>
38#include <scsi/scsi_host.h>
39#include <scsi/scsi_transport.h>
40#include <scsi/scsi_eh.h>
41#include <linux/uaccess.h>
42
43#include "mvumi.h"
44
45MODULE_LICENSE("GPL");
46MODULE_AUTHOR("jyli@marvell.com");
47MODULE_DESCRIPTION("Marvell UMI Driver");
48
49static DEFINE_PCI_DEVICE_TABLE(mvumi_pci_table) = {
50 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_2, PCI_DEVICE_ID_MARVELL_MV9143) },
51 { 0 }
52};
53
54MODULE_DEVICE_TABLE(pci, mvumi_pci_table);
55
56static void tag_init(struct mvumi_tag *st, unsigned short size)
57{
58 unsigned short i;
59 BUG_ON(size != st->size);
60 st->top = size;
61 for (i = 0; i < size; i++)
62 st->stack[i] = size - 1 - i;
63}
64
65static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
66{
67 BUG_ON(st->top <= 0);
68 return st->stack[--st->top];
69}
70
71static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
72 unsigned short tag)
73{
74 BUG_ON(st->top >= st->size);
75 st->stack[st->top++] = tag;
76}
77
78static bool tag_is_empty(struct mvumi_tag *st)
79{
80 if (st->top == 0)
81 return 1;
82 else
83 return 0;
84}
85
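
The tag pool is a plain LIFO stack: tag_init pushes size-1 down to 0, so the first tag_get_one pops tag 0. A sketch of its use, assuming the stack/top/size fields seen above are backed by a caller-provided array (the mhba argument is unused by get/release):

	unsigned short backing[32], tag;
	struct mvumi_tag pool = { .stack = backing, .size = 32 };

	tag_init(&pool, 32);			/* stack holds 31..0, top = 32 */
	tag = tag_get_one(NULL, &pool);		/* pops tag 0 */
	tag_release_one(NULL, &pool, tag);	/* pushes it back */
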
86static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
87{
88 int i;
89
90 for (i = 0; i < MAX_BASE_ADDRESS; i++)
91 if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
92 addr_array[i])
93 pci_iounmap(dev, addr_array[i]);
94}
95
96static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
97{
98 int i;
99
100 for (i = 0; i < MAX_BASE_ADDRESS; i++) {
101 if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
102 addr_array[i] = pci_iomap(dev, i, 0);
103 if (!addr_array[i]) {
104 dev_err(&dev->dev, "failed to map Bar[%d]\n",
105 i);
106 mvumi_unmap_pci_addr(dev, addr_array);
107 return -ENOMEM;
108 }
109 } else
110 addr_array[i] = NULL;
111
112 dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);
113 }
114
115 return 0;
116}
117
118static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
119 enum resource_type type, unsigned int size)
120{
121 struct mvumi_res *res = kzalloc(sizeof(*res), GFP_KERNEL);
122
123 if (!res) {
124 dev_err(&mhba->pdev->dev,
125 "Failed to allocate memory for resouce manager.\n");
126 return NULL;
127 }
128
129 switch (type) {
130 case RESOURCE_CACHED_MEMORY:
131 res->virt_addr = kzalloc(size, GFP_KERNEL);
132 if (!res->virt_addr) {
133 dev_err(&mhba->pdev->dev,
134 "unable to allocate memory,size = %d.\n", size);
135 kfree(res);
136 return NULL;
137 }
138 break;
139
140 case RESOURCE_UNCACHED_MEMORY:
141 size = round_up(size, 8);
142 res->virt_addr = pci_alloc_consistent(mhba->pdev, size,
143 &res->bus_addr);
144 if (!res->virt_addr) {
145 dev_err(&mhba->pdev->dev,
146 "unable to allocate consistent mem,"
147 "size = %d.\n", size);
148 kfree(res);
149 return NULL;
150 }
151 memset(res->virt_addr, 0, size);
152 break;
153
154 default:
155 dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
156 kfree(res);
157 return NULL;
158 }
159
160 res->type = type;
161 res->size = size;
162 INIT_LIST_HEAD(&res->entry);
163 list_add_tail(&res->entry, &mhba->res_list);
164
165 return res;
166}
167
168static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
169{
170 struct mvumi_res *res, *tmp;
171
172 list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
173 switch (res->type) {
174 case RESOURCE_UNCACHED_MEMORY:
175 pci_free_consistent(mhba->pdev, res->size,
176 res->virt_addr, res->bus_addr);
177 break;
178 case RESOURCE_CACHED_MEMORY:
179 kfree(res->virt_addr);
180 break;
181 default:
182 dev_err(&mhba->pdev->dev,
183 "unknown resource type %d\n", res->type);
184 break;
185 }
186 list_del(&res->entry);
187 kfree(res);
188 }
189 mhba->fw_flag &= ~MVUMI_FW_ALLOC;
190}
191
192/**
193 * mvumi_make_sgl - Prepares SGL
194 * @mhba: Adapter soft state
195 * @scmd: SCSI command from the mid-layer
196 * @sgl_p: SGL to be filled in
197 * @sg_count: returns the number of SG elements
198 *
199 * If successful, this function returns 0; otherwise, it returns -1.
200 */
201static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
202 void *sgl_p, unsigned char *sg_count)
203{
204 struct scatterlist *sg;
205 struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
206 unsigned int i;
207 unsigned int sgnum = scsi_sg_count(scmd);
208 dma_addr_t busaddr;
209
210 if (sgnum) {
211 sg = scsi_sglist(scmd);
212 *sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
213 (int) scmd->sc_data_direction);
214 if (*sg_count > mhba->max_sge) {
215 dev_err(&mhba->pdev->dev, "sg count[0x%x] is bigger "
216 "than max sg[0x%x].\n",
217 *sg_count, mhba->max_sge);
218 return -1;
219 }
220 for (i = 0; i < *sg_count; i++) {
221 busaddr = sg_dma_address(&sg[i]);
222 m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
223 m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
224 m_sg->flags = 0;
225 m_sg->size = cpu_to_le32(sg_dma_len(&sg[i]));
226 if ((i + 1) == *sg_count)
227 m_sg->flags |= SGD_EOT;
228
229 m_sg++;
230 }
231 } else {
232 scmd->SCp.dma_handle = scsi_bufflen(scmd) ?
233 pci_map_single(mhba->pdev, scsi_sglist(scmd),
234 scsi_bufflen(scmd),
235 (int) scmd->sc_data_direction)
236 : 0;
237 busaddr = scmd->SCp.dma_handle;
238 m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
239 m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
240 m_sg->flags = SGD_EOT;
241 m_sg->size = cpu_to_le32(scsi_bufflen(scmd));
242 *sg_count = 1;
243 }
244
245 return 0;
246}
247
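
mvumi_make_sgl flags only the final element with SGD_EOT, in both the scatterlist and single-buffer paths, so the firmware can find the end of the table. The fill pattern in isolation (busaddr/len are illustrative; fields as in struct mvumi_sgl):

	/* Sketch: fill n elements, marking the last as end-of-table. */
	for (i = 0; i < n; i++) {
		m_sg[i].baseaddr_l = cpu_to_le32(lower_32_bits(busaddr[i]));
		m_sg[i].baseaddr_h = cpu_to_le32(upper_32_bits(busaddr[i]));
		m_sg[i].size = cpu_to_le32(len[i]);
		m_sg[i].flags = (i == n - 1) ? SGD_EOT : 0;
	}
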
248static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
249 unsigned int size)
250{
251 struct mvumi_sgl *m_sg;
252 void *virt_addr;
253 dma_addr_t phy_addr;
254
255 if (size == 0)
256 return 0;
257
258 virt_addr = pci_alloc_consistent(mhba->pdev, size, &phy_addr);
259 if (!virt_addr)
260 return -1;
261
262 memset(virt_addr, 0, size);
263
264 m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
265 cmd->frame->sg_counts = 1;
266 cmd->data_buf = virt_addr;
267
268 m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
269 m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
270 m_sg->flags = SGD_EOT;
271 m_sg->size = cpu_to_le32(size);
272
273 return 0;
274}
275
276static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
277 unsigned int buf_size)
278{
279 struct mvumi_cmd *cmd;
280
281 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
282 if (!cmd) {
283 dev_err(&mhba->pdev->dev, "failed to create a internal cmd\n");
284 return NULL;
285 }
286 INIT_LIST_HEAD(&cmd->queue_pointer);
287
288 cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
289 if (!cmd->frame) {
290 dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
291 " frame,size = %d.\n", mhba->ib_max_size);
292 kfree(cmd);
293 return NULL;
294 }
295
296 if (buf_size) {
297 if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
298 dev_err(&mhba->pdev->dev, "failed to allocate memory"
299 " for internal frame\n");
300 kfree(cmd->frame);
301 kfree(cmd);
302 return NULL;
303 }
304 } else
305 cmd->frame->sg_counts = 0;
306
307 return cmd;
308}
309
310static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
311 struct mvumi_cmd *cmd)
312{
313 struct mvumi_sgl *m_sg;
314 unsigned int size;
315 dma_addr_t phy_addr;
316
317 if (cmd && cmd->frame) {
318 if (cmd->frame->sg_counts) {
319 m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
320 size = m_sg->size;
321
322 phy_addr = (dma_addr_t) m_sg->baseaddr_l |
323 (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
324
325 pci_free_consistent(mhba->pdev, size, cmd->data_buf,
326 phy_addr);
327 }
328 kfree(cmd->frame);
329 kfree(cmd);
330 }
331}
332
333/**
334 * mvumi_get_cmd - Get a command from the free pool
335 * @mhba: Adapter soft state
336 *
337 * Returns a free command from the pool
338 */
339static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
340{
341 struct mvumi_cmd *cmd = NULL;
342
343 if (likely(!list_empty(&mhba->cmd_pool))) {
344 cmd = list_entry((&mhba->cmd_pool)->next,
345 struct mvumi_cmd, queue_pointer);
346 list_del_init(&cmd->queue_pointer);
347 } else
348 dev_warn(&mhba->pdev->dev, "command pool is empty!\n");
349
350 return cmd;
351}
352
353/**
354 * mvumi_return_cmd - Return a cmd to free command pool
355 * @mhba: Adapter soft state
356 * @cmd: Command packet to be returned to free command pool
357 */
358static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
359 struct mvumi_cmd *cmd)
360{
361 cmd->scmd = NULL;
362 list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
363}
364
365/**
366 * mvumi_free_cmds - Free all the cmds in the free cmd pool
367 * @mhba: Adapter soft state
368 */
369static void mvumi_free_cmds(struct mvumi_hba *mhba)
370{
371 struct mvumi_cmd *cmd;
372
373 while (!list_empty(&mhba->cmd_pool)) {
374 cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
375 queue_pointer);
376 list_del(&cmd->queue_pointer);
377 kfree(cmd->frame);
378 kfree(cmd);
379 }
380}
381
382/**
383 * mvumi_alloc_cmds - Allocates the command packets
384 * @mhba: Adapter soft state
385 *
386 */
387static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
388{
389 int i;
390 struct mvumi_cmd *cmd;
391
392 for (i = 0; i < mhba->max_io; i++) {
393 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
394 if (!cmd)
395 goto err_exit;
396
397 INIT_LIST_HEAD(&cmd->queue_pointer);
398 list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
399 cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
400 if (!cmd->frame)
401 goto err_exit;
402 }
403 return 0;
404
405err_exit:
406 dev_err(&mhba->pdev->dev,
407 "failed to allocate memory for cmd[0x%x].\n", i);
408 while (!list_empty(&mhba->cmd_pool)) {
409 cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
410 queue_pointer);
411 list_del(&cmd->queue_pointer);
412 kfree(cmd->frame);
413 kfree(cmd);
414 }
415 return -ENOMEM;
416}
417
418static int mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
419{
420 unsigned int ib_rp_reg, cur_ib_entry;
421
422 if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
423 dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
424 return -1;
425 }
426 ib_rp_reg = ioread32(mhba->mmio + CLA_INB_READ_POINTER);
427
428 if (unlikely(((ib_rp_reg & CL_SLOT_NUM_MASK) ==
429 (mhba->ib_cur_slot & CL_SLOT_NUM_MASK)) &&
430 ((ib_rp_reg & CL_POINTER_TOGGLE) !=
431 (mhba->ib_cur_slot & CL_POINTER_TOGGLE)))) {
432 dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
433 return -1;
434 }
435
436 cur_ib_entry = mhba->ib_cur_slot & CL_SLOT_NUM_MASK;
437 cur_ib_entry++;
438 if (cur_ib_entry >= mhba->list_num_io) {
439 cur_ib_entry -= mhba->list_num_io;
440 mhba->ib_cur_slot ^= CL_POINTER_TOGGLE;
441 }
442 mhba->ib_cur_slot &= ~CL_SLOT_NUM_MASK;
443 mhba->ib_cur_slot |= (cur_ib_entry & CL_SLOT_NUM_MASK);
444 *ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
445 atomic_inc(&mhba->fw_outstanding);
446
447 return 0;
448}
449
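
Both rings track their position as a slot number plus a toggle bit that flips on every wrap, which is how a full ring is told apart from an empty one when the read and write pointers coincide. The advance step in isolation (masks as defined in mvumi.h):

	/* Sketch of the slot advance used by mvumi_get_ib_list_entry above. */
	static unsigned int advance_slot(unsigned int cur, unsigned int num_io)
	{
		unsigned int entry = (cur & CL_SLOT_NUM_MASK) + 1;

		if (entry >= num_io) {		/* wrapped: flip the toggle */
			entry -= num_io;
			cur ^= CL_POINTER_TOGGLE;
		}
		return (cur & ~CL_SLOT_NUM_MASK) | (entry & CL_SLOT_NUM_MASK);
	}
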
450static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
451{
452 iowrite32(0xfff, mhba->ib_shadow);
453 iowrite32(mhba->ib_cur_slot, mhba->mmio + CLA_INB_WRITE_POINTER);
454}
455
456static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
457 unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
458{
459 unsigned short tag, request_id;
460
461 udelay(1);
462 p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
463 request_id = p_outb_frame->request_id;
464 tag = p_outb_frame->tag;
465 if (tag > mhba->tag_pool.size) {
466 dev_err(&mhba->pdev->dev, "ob frame data error\n");
467 return -1;
468 }
469 if (mhba->tag_cmd[tag] == NULL) {
470 dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
471 return -1;
472 } else if (mhba->tag_cmd[tag]->request_id != request_id &&
473 mhba->request_id_enabled) {
474 dev_err(&mhba->pdev->dev, "request ID from FW:0x%x,"
475 "cmd request ID:0x%x\n", request_id,
476 mhba->tag_cmd[tag]->request_id);
477 return -1;
478 }
479
480 return 0;
481}
482
483static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
484{
485 unsigned int ob_write_reg, ob_write_shadow_reg;
486 unsigned int cur_obf, assign_obf_end, i;
487 struct mvumi_ob_data *ob_data;
488 struct mvumi_rsp_frame *p_outb_frame;
489
490 do {
491 ob_write_reg = ioread32(mhba->mmio + CLA_OUTB_COPY_POINTER);
492 ob_write_shadow_reg = ioread32(mhba->ob_shadow);
493 } while ((ob_write_reg & CL_SLOT_NUM_MASK) != ob_write_shadow_reg);
494
495 cur_obf = mhba->ob_cur_slot & CL_SLOT_NUM_MASK;
496 assign_obf_end = ob_write_reg & CL_SLOT_NUM_MASK;
497
498 if ((ob_write_reg & CL_POINTER_TOGGLE) !=
499 (mhba->ob_cur_slot & CL_POINTER_TOGGLE)) {
500 assign_obf_end += mhba->list_num_io;
501 }
502
503 for (i = (assign_obf_end - cur_obf); i != 0; i--) {
504 cur_obf++;
505 if (cur_obf >= mhba->list_num_io) {
506 cur_obf -= mhba->list_num_io;
507 mhba->ob_cur_slot ^= CL_POINTER_TOGGLE;
508 }
509
510 p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
511
512 /* Copy pointer may point to entry in outbound list
513 * before entry has valid data
514 */
515 if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
516 mhba->tag_cmd[p_outb_frame->tag] == NULL ||
517 p_outb_frame->request_id !=
518 mhba->tag_cmd[p_outb_frame->tag]->request_id))
519 if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
520 continue;
521
522 if (!list_empty(&mhba->ob_data_list)) {
523 ob_data = (struct mvumi_ob_data *)
524 list_first_entry(&mhba->ob_data_list,
525 struct mvumi_ob_data, list);
526 list_del_init(&ob_data->list);
527 } else {
528 ob_data = NULL;
529 if (cur_obf == 0) {
530 cur_obf = mhba->list_num_io - 1;
531 mhba->ob_cur_slot ^= CL_POINTER_TOGGLE;
532 } else
533 cur_obf -= 1;
534 break;
535 }
536
537 memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
538 p_outb_frame->tag = 0xff;
539
540 list_add_tail(&ob_data->list, &mhba->free_ob_list);
541 }
542 mhba->ob_cur_slot &= ~CL_SLOT_NUM_MASK;
543 mhba->ob_cur_slot |= (cur_obf & CL_SLOT_NUM_MASK);
544 iowrite32(mhba->ob_cur_slot, mhba->mmio + CLA_OUTB_READ_POINTER);
545}
546
547static void mvumi_reset(void *regs)
548{
549 iowrite32(0, regs + CPU_ENPOINTA_MASK_REG);
550 if (ioread32(regs + CPU_ARM_TO_PCIEA_MSG1) != HANDSHAKE_DONESTATE)
551 return;
552
553 iowrite32(DRBL_SOFT_RESET, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
554}
555
556static unsigned char mvumi_start(struct mvumi_hba *mhba);
557
558static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
559{
560 mhba->fw_state = FW_STATE_ABORT;
561 mvumi_reset(mhba->mmio);
562
563 if (mvumi_start(mhba))
564 return FAILED;
565 else
566 return SUCCESS;
567}
568
569static int mvumi_host_reset(struct scsi_cmnd *scmd)
570{
571 struct mvumi_hba *mhba;
572
573 mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
574
575 scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
576 scmd->serial_number, scmd->cmnd[0], scmd->retries);
577
578 return mvumi_wait_for_outstanding(mhba);
579}
580
581static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
582 struct mvumi_cmd *cmd)
583{
584 unsigned long flags;
585
586 cmd->cmd_status = REQ_STATUS_PENDING;
587
588 if (atomic_read(&cmd->sync_cmd)) {
589 dev_err(&mhba->pdev->dev,
590 "last blocked cmd not finished, sync_cmd = %d\n",
591 atomic_read(&cmd->sync_cmd));
592 BUG_ON(1);
593 return -1;
594 }
595 atomic_inc(&cmd->sync_cmd);
596 spin_lock_irqsave(mhba->shost->host_lock, flags);
597 mhba->instancet->fire_cmd(mhba, cmd);
598 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
599
600 wait_event_timeout(mhba->int_cmd_wait_q,
601 (cmd->cmd_status != REQ_STATUS_PENDING),
602 MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);
603
604 /* command timeout */
605 if (atomic_read(&cmd->sync_cmd)) {
606 spin_lock_irqsave(mhba->shost->host_lock, flags);
607 atomic_dec(&cmd->sync_cmd);
608 if (mhba->tag_cmd[cmd->frame->tag]) {
609 mhba->tag_cmd[cmd->frame->tag] = 0;
610 dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
611 cmd->frame->tag);
612 tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
613 }
614 if (!list_empty(&cmd->queue_pointer)) {
615 dev_warn(&mhba->pdev->dev,
616				"TIMEOUT: internal command was not sent!\n");
617 list_del_init(&cmd->queue_pointer);
618 } else
619 atomic_dec(&mhba->fw_outstanding);
620
621 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
622 }
623 return 0;
624}
625
626static void mvumi_release_fw(struct mvumi_hba *mhba)
627{
628 mvumi_free_cmds(mhba);
629 mvumi_release_mem_resource(mhba);
630 mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
631 kfree(mhba->handshake_page);
632 pci_release_regions(mhba->pdev);
633}
634
635static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
636{
637 struct mvumi_cmd *cmd;
638 struct mvumi_msg_frame *frame;
639 unsigned char device_id, retry = 0;
640 unsigned char bitcount = sizeof(unsigned char) * 8;
641
642 for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
643 if (!(mhba->target_map[device_id / bitcount] &
644 (1 << (device_id % bitcount))))
645 continue;
646get_cmd: cmd = mvumi_create_internal_cmd(mhba, 0);
647 if (!cmd) {
648 if (retry++ >= 5) {
649 dev_err(&mhba->pdev->dev, "failed to get memory"
650 " for internal flush cache cmd for "
651 "device %d", device_id);
652 retry = 0;
653 continue;
654 } else
655 goto get_cmd;
656 }
657 cmd->scmd = NULL;
658 cmd->cmd_status = REQ_STATUS_PENDING;
659 atomic_set(&cmd->sync_cmd, 0);
660 frame = cmd->frame;
661 frame->req_function = CL_FUN_SCSI_CMD;
662 frame->device_id = device_id;
663 frame->cmd_flag = CMD_FLAG_NON_DATA;
664 frame->data_transfer_length = 0;
665 frame->cdb_length = MAX_COMMAND_SIZE;
666 memset(frame->cdb, 0, MAX_COMMAND_SIZE);
667 frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
668 frame->cdb[2] = CDB_CORE_SHUTDOWN;
669
670 mvumi_issue_blocked_cmd(mhba, cmd);
671 if (cmd->cmd_status != SAM_STAT_GOOD) {
672 dev_err(&mhba->pdev->dev,
673 "device %d flush cache failed, status=0x%x.\n",
674 device_id, cmd->cmd_status);
675 }
676
677 mvumi_delete_internal_cmd(mhba, cmd);
678 }
679 return 0;
680}
681
682static unsigned char
683mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
684 unsigned short len)
685{
686 unsigned char *ptr;
687 unsigned char ret = 0, i;
688
689 ptr = (unsigned char *) p_header->frame_content;
690 for (i = 0; i < len; i++) {
691 ret ^= *ptr;
692 ptr++;
693 }
694
695 return ret;
696}
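
As a quick sanity check of the XOR checksum used throughout the handshake (hypothetical buffer, not driver data):

	unsigned char buf[4] = { 0x12, 0x34, 0x56, 0x78 };
	unsigned char sum = 0;
	int i;

	for (i = 0; i < 4; i++)
		sum ^= buf[i];
	/* sum == 0x12 ^ 0x34 ^ 0x56 ^ 0x78 == 0x08 */

mvumi_hs_process_page() below verifies a received page by recomputing this value and comparing it against hs_header->checksum.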
697
698void mvumi_hs_build_page(struct mvumi_hba *mhba,
699 struct mvumi_hs_header *hs_header)
700{
701 struct mvumi_hs_page2 *hs_page2;
702 struct mvumi_hs_page4 *hs_page4;
703 struct mvumi_hs_page3 *hs_page3;
704 struct timeval time;
705 unsigned int local_time;
706
707 switch (hs_header->page_code) {
708 case HS_PAGE_HOST_INFO:
709 hs_page2 = (struct mvumi_hs_page2 *) hs_header;
710 hs_header->frame_length = sizeof(*hs_page2) - 4;
711 memset(hs_header->frame_content, 0, hs_header->frame_length);
712		hs_page2->host_type = 3; /* 3 means Linux */
713 hs_page2->host_ver.ver_major = VER_MAJOR;
714 hs_page2->host_ver.ver_minor = VER_MINOR;
715 hs_page2->host_ver.ver_oem = VER_OEM;
716 hs_page2->host_ver.ver_build = VER_BUILD;
717 hs_page2->system_io_bus = 0;
718 hs_page2->slot_number = 0;
719 hs_page2->intr_level = 0;
720 hs_page2->intr_vector = 0;
721 do_gettimeofday(&time);
722 local_time = (unsigned int) (time.tv_sec -
723 (sys_tz.tz_minuteswest * 60));
724 hs_page2->seconds_since1970 = local_time;
725 hs_header->checksum = mvumi_calculate_checksum(hs_header,
726 hs_header->frame_length);
727 break;
728
729 case HS_PAGE_FIRM_CTL:
730 hs_page3 = (struct mvumi_hs_page3 *) hs_header;
731 hs_header->frame_length = sizeof(*hs_page3) - 4;
732 memset(hs_header->frame_content, 0, hs_header->frame_length);
733 hs_header->checksum = mvumi_calculate_checksum(hs_header,
734 hs_header->frame_length);
735 break;
736
737 case HS_PAGE_CL_INFO:
738 hs_page4 = (struct mvumi_hs_page4 *) hs_header;
739 hs_header->frame_length = sizeof(*hs_page4) - 4;
740 memset(hs_header->frame_content, 0, hs_header->frame_length);
741 hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
742 hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);
743
744 hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
745 hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
746 hs_page4->ib_entry_size = mhba->ib_max_size_setting;
747 hs_page4->ob_entry_size = mhba->ob_max_size_setting;
748 hs_page4->ob_depth = mhba->list_num_io;
749 hs_page4->ib_depth = mhba->list_num_io;
750 hs_header->checksum = mvumi_calculate_checksum(hs_header,
751 hs_header->frame_length);
752 break;
753
754 default:
755 dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
756 hs_header->page_code);
757 break;
758 }
759}
760
761/**
762 * mvumi_init_data - Initialize requested data for FW
763 * @mhba: Adapter soft state
764 */
765static int mvumi_init_data(struct mvumi_hba *mhba)
766{
767 struct mvumi_ob_data *ob_pool;
768 struct mvumi_res *res_mgnt;
769 unsigned int tmp_size, offset, i;
770 void *virmem, *v;
771 dma_addr_t p;
772
773 if (mhba->fw_flag & MVUMI_FW_ALLOC)
774 return 0;
775
776 tmp_size = mhba->ib_max_size * mhba->max_io;
777 tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
778 tmp_size += 8 + sizeof(u32) + 16;
779
780 res_mgnt = mvumi_alloc_mem_resource(mhba,
781 RESOURCE_UNCACHED_MEMORY, tmp_size);
782 if (!res_mgnt) {
783 dev_err(&mhba->pdev->dev,
784 "failed to allocate memory for inbound list\n");
785 goto fail_alloc_dma_buf;
786 }
787
788 p = res_mgnt->bus_addr;
789 v = res_mgnt->virt_addr;
790 /* ib_list */
791 offset = round_up(p, 128) - p;
792 p += offset;
793 v += offset;
794 mhba->ib_list = v;
795 mhba->ib_list_phys = p;
796 v += mhba->ib_max_size * mhba->max_io;
797 p += mhba->ib_max_size * mhba->max_io;
798 /* ib shadow */
799 offset = round_up(p, 8) - p;
800 p += offset;
801 v += offset;
802 mhba->ib_shadow = v;
803 mhba->ib_shadow_phys = p;
804 p += sizeof(u32);
805 v += sizeof(u32);
806 /* ob shadow */
807 offset = round_up(p, 8) - p;
808 p += offset;
809 v += offset;
810 mhba->ob_shadow = v;
811 mhba->ob_shadow_phys = p;
812 p += 8;
813 v += 8;
814
815 /* ob list */
816 offset = round_up(p, 128) - p;
817 p += offset;
818 v += offset;
819
820 mhba->ob_list = v;
821 mhba->ob_list_phys = p;
822
823 /* ob data pool */
824 tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
825 tmp_size = round_up(tmp_size, 8);
826
827 res_mgnt = mvumi_alloc_mem_resource(mhba,
828 RESOURCE_CACHED_MEMORY, tmp_size);
829 if (!res_mgnt) {
830 dev_err(&mhba->pdev->dev,
831 "failed to allocate memory for outbound data buffer\n");
832 goto fail_alloc_dma_buf;
833 }
834 virmem = res_mgnt->virt_addr;
835
836 for (i = mhba->max_io; i != 0; i--) {
837 ob_pool = (struct mvumi_ob_data *) virmem;
838 list_add_tail(&ob_pool->list, &mhba->ob_data_list);
839 virmem += mhba->ob_max_size + sizeof(*ob_pool);
840 }
841
842 tmp_size = sizeof(unsigned short) * mhba->max_io +
843 sizeof(struct mvumi_cmd *) * mhba->max_io;
844 tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
845 (sizeof(unsigned char) * 8);
846
847 res_mgnt = mvumi_alloc_mem_resource(mhba,
848 RESOURCE_CACHED_MEMORY, tmp_size);
849 if (!res_mgnt) {
850 dev_err(&mhba->pdev->dev,
851 "failed to allocate memory for tag and target map\n");
852 goto fail_alloc_dma_buf;
853 }
854
855 virmem = res_mgnt->virt_addr;
856 mhba->tag_pool.stack = virmem;
857 mhba->tag_pool.size = mhba->max_io;
858 tag_init(&mhba->tag_pool, mhba->max_io);
859 virmem += sizeof(unsigned short) * mhba->max_io;
860
861 mhba->tag_cmd = virmem;
862 virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;
863
864 mhba->target_map = virmem;
865
866 mhba->fw_flag |= MVUMI_FW_ALLOC;
867 return 0;
868
869fail_alloc_dma_buf:
870 mvumi_release_mem_resource(mhba);
871 return -1;
872}
873
874static int mvumi_hs_process_page(struct mvumi_hba *mhba,
875 struct mvumi_hs_header *hs_header)
876{
877 struct mvumi_hs_page1 *hs_page1;
878 unsigned char page_checksum;
879
880 page_checksum = mvumi_calculate_checksum(hs_header,
881 hs_header->frame_length);
882 if (page_checksum != hs_header->checksum) {
883 dev_err(&mhba->pdev->dev, "checksum error\n");
884 return -1;
885 }
886
887 switch (hs_header->page_code) {
888 case HS_PAGE_FIRM_CAP:
889 hs_page1 = (struct mvumi_hs_page1 *) hs_header;
890
891 mhba->max_io = hs_page1->max_io_support;
892 mhba->list_num_io = hs_page1->cl_inout_list_depth;
893 mhba->max_transfer_size = hs_page1->max_transfer_size;
894 mhba->max_target_id = hs_page1->max_devices_support;
895 mhba->hba_capability = hs_page1->capability;
896 mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
897 mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;
898
899 mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
900 mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;
901
902 dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
903 hs_page1->fw_ver.ver_build);
904
905 break;
906 default:
907 dev_err(&mhba->pdev->dev, "handshake: page code error\n");
908 return -1;
909 }
910 return 0;
911}
912
913/**
914 * mvumi_handshake - Move the FW to READY state
915 * @mhba: Adapter soft state
916 *
917 * During initialization, the FW can be in any one of several possible
918 * states. If the FW is in an operational or waiting-for-handshake state,
919 * the driver must take steps to bring it to the ready state. Otherwise,
920 * it has to wait for the ready state.
921 */
922static int mvumi_handshake(struct mvumi_hba *mhba)
923{
924 unsigned int hs_state, tmp, hs_fun;
925 struct mvumi_hs_header *hs_header;
926 void *regs = mhba->mmio;
927
928 if (mhba->fw_state == FW_STATE_STARTING)
929 hs_state = HS_S_START;
930 else {
931 tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG0);
932 hs_state = HS_GET_STATE(tmp);
933 dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
934 if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
935 mhba->fw_state = FW_STATE_STARTING;
936 return -1;
937 }
938 }
939
940 hs_fun = 0;
941 switch (hs_state) {
942 case HS_S_START:
943 mhba->fw_state = FW_STATE_HANDSHAKING;
944 HS_SET_STATUS(hs_fun, HS_STATUS_OK);
945 HS_SET_STATE(hs_fun, HS_S_RESET);
946 iowrite32(HANDSHAKE_SIGNATURE, regs + CPU_PCIEA_TO_ARM_MSG1);
947 iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0);
948 iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
949 break;
950
951 case HS_S_RESET:
952 iowrite32(lower_32_bits(mhba->handshake_page_phys),
953 regs + CPU_PCIEA_TO_ARM_MSG1);
954 iowrite32(upper_32_bits(mhba->handshake_page_phys),
955 regs + CPU_ARM_TO_PCIEA_MSG1);
956 HS_SET_STATUS(hs_fun, HS_STATUS_OK);
957 HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
958 iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0);
959 iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
960
961 break;
962
963 case HS_S_PAGE_ADDR:
964 case HS_S_QUERY_PAGE:
965 case HS_S_SEND_PAGE:
966 hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
967 if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
968 mhba->hba_total_pages =
969 ((struct mvumi_hs_page1 *) hs_header)->total_pages;
970
971 if (mhba->hba_total_pages == 0)
972 mhba->hba_total_pages = HS_PAGE_TOTAL-1;
973 }
974
975 if (hs_state == HS_S_QUERY_PAGE) {
976 if (mvumi_hs_process_page(mhba, hs_header)) {
977 HS_SET_STATE(hs_fun, HS_S_ABORT);
978 return -1;
979 }
980 if (mvumi_init_data(mhba)) {
981 HS_SET_STATE(hs_fun, HS_S_ABORT);
982 return -1;
983 }
984 } else if (hs_state == HS_S_PAGE_ADDR) {
985 hs_header->page_code = 0;
986 mhba->hba_total_pages = HS_PAGE_TOTAL-1;
987 }
988
989 if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
990 hs_header->page_code++;
991 if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
992 mvumi_hs_build_page(mhba, hs_header);
993 HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
994 } else
995 HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
996 } else
997 HS_SET_STATE(hs_fun, HS_S_END);
998
999 HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1000 iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0);
1001 iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
1002 break;
1003
1004 case HS_S_END:
1005 /* Set communication list ISR */
1006 tmp = ioread32(regs + CPU_ENPOINTA_MASK_REG);
1007 tmp |= INT_MAP_COMAOUT | INT_MAP_COMAERR;
1008 iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG);
1009 iowrite32(mhba->list_num_io, mhba->ib_shadow);
1010		/* Set InBound List Available count shadow */
1011 iowrite32(lower_32_bits(mhba->ib_shadow_phys),
1012 regs + CLA_INB_AVAL_COUNT_BASEL);
1013 iowrite32(upper_32_bits(mhba->ib_shadow_phys),
1014 regs + CLA_INB_AVAL_COUNT_BASEH);
1015
1016		/* Set OutBound List Available count shadow */
1017 iowrite32((mhba->list_num_io-1) | CL_POINTER_TOGGLE,
1018 mhba->ob_shadow);
1019 iowrite32(lower_32_bits(mhba->ob_shadow_phys), regs + 0x5B0);
1020 iowrite32(upper_32_bits(mhba->ob_shadow_phys), regs + 0x5B4);
1021
1022 mhba->ib_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE;
1023 mhba->ob_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE;
1024 mhba->fw_state = FW_STATE_STARTED;
1025
1026 break;
1027 default:
1028 dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
1029 hs_state);
1030 return -1;
1031 }
1032 return 0;
1033}
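
The normal progression driven by this function, one doorbell interrupt per step, can be summarized as follows (host's view of a successful handshake, condensed from the switch above):

	/*
	 * HS_S_START      -> write signature, request HS_S_RESET
	 * HS_S_RESET      -> send handshake page address, request HS_S_PAGE_ADDR
	 * HS_S_PAGE_ADDR  -> learn total page count, start the page exchange
	 * HS_S_QUERY_PAGE -> parse a FW page (e.g. HS_PAGE_FIRM_CAP)
	 * HS_S_SEND_PAGE  -> build and send the host pages
	 * HS_S_END        -> program list base/shadow registers, FW_STATE_STARTED
	 */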
1034
1035static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
1036{
1037 unsigned int isr_status;
1038 unsigned long before;
1039
1040 before = jiffies;
1041 mvumi_handshake(mhba);
1042 do {
1043 isr_status = mhba->instancet->read_fw_status_reg(mhba->mmio);
1044
1045 if (mhba->fw_state == FW_STATE_STARTED)
1046 return 0;
1047 if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1048 dev_err(&mhba->pdev->dev,
1049 "no handshake response at state 0x%x.\n",
1050 mhba->fw_state);
1051 dev_err(&mhba->pdev->dev,
1052 "isr : global=0x%x,status=0x%x.\n",
1053 mhba->global_isr, isr_status);
1054 return -1;
1055 }
1056 rmb();
1057 usleep_range(1000, 2000);
1058 } while (!(isr_status & DRBL_HANDSHAKE_ISR));
1059
1060 return 0;
1061}
1062
1063static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
1064{
1065 void *regs = mhba->mmio;
1066 unsigned int tmp;
1067 unsigned long before;
1068
1069 before = jiffies;
1070 tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG1);
1071 while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
1072 if (tmp != HANDSHAKE_READYSTATE)
1073 iowrite32(DRBL_MU_RESET,
1074 regs + CPU_PCIEA_TO_ARM_DRBL_REG);
1075 if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1076 dev_err(&mhba->pdev->dev,
1077 "invalid signature [0x%x].\n", tmp);
1078 return -1;
1079 }
1080 usleep_range(1000, 2000);
1081 rmb();
1082 tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG1);
1083 }
1084
1085 mhba->fw_state = FW_STATE_STARTING;
1086 dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
1087 do {
1088 if (mvumi_handshake_event(mhba)) {
1089 dev_err(&mhba->pdev->dev,
1090 "handshake failed at state 0x%x.\n",
1091 mhba->fw_state);
1092 return -1;
1093 }
1094 } while (mhba->fw_state != FW_STATE_STARTED);
1095
1096 dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");
1097
1098 return 0;
1099}
1100
1101static unsigned char mvumi_start(struct mvumi_hba *mhba)
1102{
1103 void *regs = mhba->mmio;
1104 unsigned int tmp;
1105 /* clear Door bell */
1106 tmp = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG);
1107 iowrite32(tmp, regs + CPU_ARM_TO_PCIEA_DRBL_REG);
1108
1109 iowrite32(0x3FFFFFFF, regs + CPU_ARM_TO_PCIEA_MASK_REG);
1110 tmp = ioread32(regs + CPU_ENPOINTA_MASK_REG) | INT_MAP_DL_CPU2PCIEA;
1111 iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG);
1112 if (mvumi_check_handshake(mhba))
1113 return -1;
1114
1115 return 0;
1116}
1117
1118/**
1119 * mvumi_complete_cmd - Completes a command
1120 * @mhba: Adapter soft state
1121 * @cmd: Command to be completed
1122 */
1123static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
1124 struct mvumi_rsp_frame *ob_frame)
1125{
1126 struct scsi_cmnd *scmd = cmd->scmd;
1127
1128 cmd->scmd->SCp.ptr = NULL;
1129 scmd->result = ob_frame->req_status;
1130
1131 switch (ob_frame->req_status) {
1132 case SAM_STAT_GOOD:
1133 scmd->result |= DID_OK << 16;
1134 break;
1135 case SAM_STAT_BUSY:
1136 scmd->result |= DID_BUS_BUSY << 16;
1137 break;
1138 case SAM_STAT_CHECK_CONDITION:
1139 scmd->result |= (DID_OK << 16);
1140 if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
1141 memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
1142 sizeof(struct mvumi_sense_data));
1143 scmd->result |= (DRIVER_SENSE << 24);
1144 }
1145 break;
1146 default:
1147 scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);
1148 break;
1149 }
1150
1151 if (scsi_bufflen(scmd)) {
1152 if (scsi_sg_count(scmd)) {
1153 pci_unmap_sg(mhba->pdev,
1154 scsi_sglist(scmd),
1155 scsi_sg_count(scmd),
1156 (int) scmd->sc_data_direction);
1157 } else {
1158 pci_unmap_single(mhba->pdev,
1159 scmd->SCp.dma_handle,
1160 scsi_bufflen(scmd),
1161 (int) scmd->sc_data_direction);
1162
1163 scmd->SCp.dma_handle = 0;
1164 }
1165 }
1166 cmd->scmd->scsi_done(scmd);
1167 mvumi_return_cmd(mhba, cmd);
1168}
1169static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
1170 struct mvumi_cmd *cmd,
1171 struct mvumi_rsp_frame *ob_frame)
1172{
1173 if (atomic_read(&cmd->sync_cmd)) {
1174 cmd->cmd_status = ob_frame->req_status;
1175
1176 if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) &&
1177 (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) &&
1178 cmd->data_buf) {
1179 memcpy(cmd->data_buf, ob_frame->payload,
1180 sizeof(struct mvumi_sense_data));
1181 }
1182 atomic_dec(&cmd->sync_cmd);
1183 wake_up(&mhba->int_cmd_wait_q);
1184 }
1185}
1186
1187static void mvumi_show_event(struct mvumi_hba *mhba,
1188 struct mvumi_driver_event *ptr)
1189{
1190 unsigned int i;
1191
1192 dev_warn(&mhba->pdev->dev,
1193 "Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n",
1194 ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id);
1195 if (ptr->param_count) {
1196 printk(KERN_WARNING "Event param(len 0x%x): ",
1197 ptr->param_count);
1198 for (i = 0; i < ptr->param_count; i++)
1199 printk(KERN_WARNING "0x%x ", ptr->params[i]);
1200
1201 printk(KERN_WARNING "\n");
1202 }
1203
1204 if (ptr->sense_data_length) {
1205 printk(KERN_WARNING "Event sense data(len 0x%x): ",
1206 ptr->sense_data_length);
1207 for (i = 0; i < ptr->sense_data_length; i++)
1208 printk(KERN_WARNING "0x%x ", ptr->sense_data[i]);
1209 printk(KERN_WARNING "\n");
1210 }
1211}
1212
1213static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
1214{
1215 if (msg == APICDB1_EVENT_GETEVENT) {
1216 int i, count;
1217 struct mvumi_driver_event *param = NULL;
1218 struct mvumi_event_req *er = buffer;
1219 count = er->count;
1220 if (count > MAX_EVENTS_RETURNED) {
1221 dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger"
1222 " than max event count[0x%x].\n",
1223 count, MAX_EVENTS_RETURNED);
1224 return;
1225 }
1226 for (i = 0; i < count; i++) {
1227 param = &er->events[i];
1228 mvumi_show_event(mhba, param);
1229 }
1230 }
1231}
1232
1233static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg)
1234{
1235 struct mvumi_cmd *cmd;
1236 struct mvumi_msg_frame *frame;
1237
1238 cmd = mvumi_create_internal_cmd(mhba, 512);
1239 if (!cmd)
1240 return -1;
1241 cmd->scmd = NULL;
1242 cmd->cmd_status = REQ_STATUS_PENDING;
1243 atomic_set(&cmd->sync_cmd, 0);
1244 frame = cmd->frame;
1245 frame->device_id = 0;
1246 frame->cmd_flag = CMD_FLAG_DATA_IN;
1247 frame->req_function = CL_FUN_SCSI_CMD;
1248 frame->cdb_length = MAX_COMMAND_SIZE;
1249 frame->data_transfer_length = sizeof(struct mvumi_event_req);
1250 memset(frame->cdb, 0, MAX_COMMAND_SIZE);
1251 frame->cdb[0] = APICDB0_EVENT;
1252 frame->cdb[1] = msg;
1253 mvumi_issue_blocked_cmd(mhba, cmd);
1254
1255 if (cmd->cmd_status != SAM_STAT_GOOD)
1256 dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n",
1257 cmd->cmd_status);
1258 else
1259 mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf);
1260
1261 mvumi_delete_internal_cmd(mhba, cmd);
1262 return 0;
1263}
1264
1265static void mvumi_scan_events(struct work_struct *work)
1266{
1267 struct mvumi_events_wq *mu_ev =
1268 container_of(work, struct mvumi_events_wq, work_q);
1269
1270 mvumi_get_event(mu_ev->mhba, mu_ev->event);
1271 kfree(mu_ev);
1272}
1273
1274static void mvumi_launch_events(struct mvumi_hba *mhba, u8 msg)
1275{
1276 struct mvumi_events_wq *mu_ev;
1277
1278 mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
1279 if (mu_ev) {
1280 INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
1281 mu_ev->mhba = mhba;
1282 mu_ev->event = msg;
1283 mu_ev->param = NULL;
1284 schedule_work(&mu_ev->work_q);
1285 }
1286}
1287
1288static void mvumi_handle_clob(struct mvumi_hba *mhba)
1289{
1290 struct mvumi_rsp_frame *ob_frame;
1291 struct mvumi_cmd *cmd;
1292 struct mvumi_ob_data *pool;
1293
1294 while (!list_empty(&mhba->free_ob_list)) {
1295 pool = list_first_entry(&mhba->free_ob_list,
1296 struct mvumi_ob_data, list);
1297 list_del_init(&pool->list);
1298 list_add_tail(&pool->list, &mhba->ob_data_list);
1299
1300 ob_frame = (struct mvumi_rsp_frame *) &pool->data[0];
1301 cmd = mhba->tag_cmd[ob_frame->tag];
1302
1303 atomic_dec(&mhba->fw_outstanding);
1304 mhba->tag_cmd[ob_frame->tag] = 0;
1305 tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
1306 if (cmd->scmd)
1307 mvumi_complete_cmd(mhba, cmd, ob_frame);
1308 else
1309 mvumi_complete_internal_cmd(mhba, cmd, ob_frame);
1310 }
1311 mhba->instancet->fire_cmd(mhba, NULL);
1312}
1313
1314static irqreturn_t mvumi_isr_handler(int irq, void *devp)
1315{
1316 struct mvumi_hba *mhba = (struct mvumi_hba *) devp;
1317 unsigned long flags;
1318
1319 spin_lock_irqsave(mhba->shost->host_lock, flags);
1320 if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) {
1321 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1322 return IRQ_NONE;
1323 }
1324
1325 if (mhba->global_isr & INT_MAP_DL_CPU2PCIEA) {
1326 if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
1327 dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
1328 mvumi_handshake(mhba);
1329 }
1330 if (mhba->isr_status & DRBL_EVENT_NOTIFY)
1331 mvumi_launch_events(mhba, APICDB1_EVENT_GETEVENT);
1332 }
1333
1334 if (mhba->global_isr & INT_MAP_COMAOUT)
1335 mvumi_receive_ob_list_entry(mhba);
1336
1337 mhba->global_isr = 0;
1338 mhba->isr_status = 0;
1339 if (mhba->fw_state == FW_STATE_STARTED)
1340 mvumi_handle_clob(mhba);
1341 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1342 return IRQ_HANDLED;
1343}
1344
1345static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
1346 struct mvumi_cmd *cmd)
1347{
1348 void *ib_entry;
1349 struct mvumi_msg_frame *ib_frame;
1350 unsigned int frame_len;
1351
1352 ib_frame = cmd->frame;
1353 if (unlikely(mhba->fw_state != FW_STATE_STARTED)) {
1354 dev_dbg(&mhba->pdev->dev, "firmware not ready.\n");
1355 return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1356 }
1357 if (tag_is_empty(&mhba->tag_pool)) {
1358 dev_dbg(&mhba->pdev->dev, "no free tag.\n");
1359 return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1360 }
1361 if (mvumi_get_ib_list_entry(mhba, &ib_entry))
1362 return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1363
1364 cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
1365 cmd->frame->request_id = mhba->io_seq++;
1366 cmd->request_id = cmd->frame->request_id;
1367 mhba->tag_cmd[cmd->frame->tag] = cmd;
1368 frame_len = sizeof(*ib_frame) - 4 +
1369 ib_frame->sg_counts * sizeof(struct mvumi_sgl);
1370 memcpy(ib_entry, ib_frame, frame_len);
1371 return MV_QUEUE_COMMAND_RESULT_SENT;
1372}
1373
1374static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
1375{
1376 unsigned short num_of_cl_sent = 0;
1377 enum mvumi_qc_result result;
1378
1379 if (cmd)
1380 list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);
1381
1382 while (!list_empty(&mhba->waiting_req_list)) {
1383 cmd = list_first_entry(&mhba->waiting_req_list,
1384 struct mvumi_cmd, queue_pointer);
1385 list_del_init(&cmd->queue_pointer);
1386 result = mvumi_send_command(mhba, cmd);
1387 switch (result) {
1388 case MV_QUEUE_COMMAND_RESULT_SENT:
1389 num_of_cl_sent++;
1390 break;
1391 case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE:
1392 list_add(&cmd->queue_pointer, &mhba->waiting_req_list);
1393 if (num_of_cl_sent > 0)
1394 mvumi_send_ib_list_entry(mhba);
1395
1396 return;
1397 }
1398 }
1399 if (num_of_cl_sent > 0)
1400 mvumi_send_ib_list_entry(mhba);
1401}
1402
1403/**
1404 * mvumi_enable_intr - Enables interrupts
1405 * @regs: FW register set
1406 */
1407static void mvumi_enable_intr(void *regs)
1408{
1409 unsigned int mask;
1410
1411 iowrite32(0x3FFFFFFF, regs + CPU_ARM_TO_PCIEA_MASK_REG);
1412 mask = ioread32(regs + CPU_ENPOINTA_MASK_REG);
1413 mask |= INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAOUT | INT_MAP_COMAERR;
1414 iowrite32(mask, regs + CPU_ENPOINTA_MASK_REG);
1415}
1416
1417/**
1418 * mvumi_disable_intr - Disables interrupts
1419 * @regs: FW register set
1420 */
1421static void mvumi_disable_intr(void *regs)
1422{
1423 unsigned int mask;
1424
1425 iowrite32(0, regs + CPU_ARM_TO_PCIEA_MASK_REG);
1426 mask = ioread32(regs + CPU_ENPOINTA_MASK_REG);
1427 mask &= ~(INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAOUT | INT_MAP_COMAERR);
1428 iowrite32(mask, regs + CPU_ENPOINTA_MASK_REG);
1429}
1430
1431static int mvumi_clear_intr(void *extend)
1432{
1433 struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
1434 unsigned int status, isr_status = 0, tmp = 0;
1435 void *regs = mhba->mmio;
1436
1437 status = ioread32(regs + CPU_MAIN_INT_CAUSE_REG);
1438 if (!(status & INT_MAP_MU) || status == 0xFFFFFFFF)
1439 return 1;
1440 if (unlikely(status & INT_MAP_COMAERR)) {
1441 tmp = ioread32(regs + CLA_ISR_CAUSE);
1442 if (tmp & (CLIC_IN_ERR_IRQ | CLIC_OUT_ERR_IRQ))
1443 iowrite32(tmp & (CLIC_IN_ERR_IRQ | CLIC_OUT_ERR_IRQ),
1444 regs + CLA_ISR_CAUSE);
1445 status ^= INT_MAP_COMAERR;
1446 /* inbound or outbound parity error, command will timeout */
1447 }
1448 if (status & INT_MAP_COMAOUT) {
1449 tmp = ioread32(regs + CLA_ISR_CAUSE);
1450 if (tmp & CLIC_OUT_IRQ)
1451 iowrite32(tmp & CLIC_OUT_IRQ, regs + CLA_ISR_CAUSE);
1452 }
1453 if (status & INT_MAP_DL_CPU2PCIEA) {
1454 isr_status = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG);
1455 if (isr_status)
1456 iowrite32(isr_status, regs + CPU_ARM_TO_PCIEA_DRBL_REG);
1457 }
1458
1459 mhba->global_isr = status;
1460 mhba->isr_status = isr_status;
1461
1462 return 0;
1463}
1464
1465/**
1466 * mvumi_read_fw_status_reg - returns the current FW status value
1467 * @regs: FW register set
1468 */
1469static unsigned int mvumi_read_fw_status_reg(void *regs)
1470{
1471 unsigned int status;
1472
1473 status = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG);
1474 if (status)
1475 iowrite32(status, regs + CPU_ARM_TO_PCIEA_DRBL_REG);
1476 return status;
1477}
1478
1479static struct mvumi_instance_template mvumi_instance_template = {
1480 .fire_cmd = mvumi_fire_cmd,
1481 .enable_intr = mvumi_enable_intr,
1482 .disable_intr = mvumi_disable_intr,
1483 .clear_intr = mvumi_clear_intr,
1484 .read_fw_status_reg = mvumi_read_fw_status_reg,
1485};
1486
1487static int mvumi_slave_configure(struct scsi_device *sdev)
1488{
1489 struct mvumi_hba *mhba;
1490 unsigned char bitcount = sizeof(unsigned char) * 8;
1491
1492 mhba = (struct mvumi_hba *) sdev->host->hostdata;
1493 if (sdev->id >= mhba->max_target_id)
1494 return -EINVAL;
1495
1496 mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount));
1497 return 0;
1498}
1499
1500/**
1501 * mvumi_build_frame - Prepares a direct cdb (DCDB) command
1502 * @mhba: Adapter soft state
1503 * @scmd: SCSI command
1504 * @cmd: Command to be prepared in
1505 *
1506 * This function prepares CDB commands. These are typically pass-through
1507 * commands to the devices.
1508 */
1509static unsigned char mvumi_build_frame(struct mvumi_hba *mhba,
1510 struct scsi_cmnd *scmd, struct mvumi_cmd *cmd)
1511{
1512 struct mvumi_msg_frame *pframe;
1513
1514 cmd->scmd = scmd;
1515 cmd->cmd_status = REQ_STATUS_PENDING;
1516 pframe = cmd->frame;
1517 pframe->device_id = ((unsigned short) scmd->device->id) |
1518 (((unsigned short) scmd->device->lun) << 8);
1519 pframe->cmd_flag = 0;
1520
1521 switch (scmd->sc_data_direction) {
1522 case DMA_NONE:
1523 pframe->cmd_flag |= CMD_FLAG_NON_DATA;
1524 break;
1525 case DMA_FROM_DEVICE:
1526 pframe->cmd_flag |= CMD_FLAG_DATA_IN;
1527 break;
1528 case DMA_TO_DEVICE:
1529 pframe->cmd_flag |= CMD_FLAG_DATA_OUT;
1530 break;
1531 case DMA_BIDIRECTIONAL:
1532 default:
1533 dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] "
1534 "cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]);
1535 goto error;
1536 }
1537
1538 pframe->cdb_length = scmd->cmd_len;
1539 memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length);
1540 pframe->req_function = CL_FUN_SCSI_CMD;
1541 if (scsi_bufflen(scmd)) {
1542 if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0],
1543 &pframe->sg_counts))
1544 goto error;
1545
1546 pframe->data_transfer_length = scsi_bufflen(scmd);
1547 } else {
1548 pframe->sg_counts = 0;
1549 pframe->data_transfer_length = 0;
1550 }
1551 return 0;
1552
1553error:
1554 scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) |
1555 SAM_STAT_CHECK_CONDITION;
1556 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
1557 0);
1558 return -1;
1559}
1560
1561/**
1562 * mvumi_queue_command - Queue entry point
1563 * @shost: Scsi_Host to which the command is queued
1564 * @scmd: SCSI command to be queued
1565 */
1566static int mvumi_queue_command(struct Scsi_Host *shost,
1567 struct scsi_cmnd *scmd)
1568{
1569 struct mvumi_cmd *cmd;
1570 struct mvumi_hba *mhba;
1571 unsigned long irq_flags;
1572
1573 spin_lock_irqsave(shost->host_lock, irq_flags);
1574 scsi_cmd_get_serial(shost, scmd);
1575
1576 mhba = (struct mvumi_hba *) shost->hostdata;
1577 scmd->result = 0;
1578 cmd = mvumi_get_cmd(mhba);
1579 if (unlikely(!cmd)) {
1580 spin_unlock_irqrestore(shost->host_lock, irq_flags);
1581 return SCSI_MLQUEUE_HOST_BUSY;
1582 }
1583
1584 if (unlikely(mvumi_build_frame(mhba, scmd, cmd)))
1585 goto out_return_cmd;
1586
1587 cmd->scmd = scmd;
1588 scmd->SCp.ptr = (char *) cmd;
1589 mhba->instancet->fire_cmd(mhba, cmd);
1590 spin_unlock_irqrestore(shost->host_lock, irq_flags);
1591 return 0;
1592
1593out_return_cmd:
1594 mvumi_return_cmd(mhba, cmd);
1595 scmd->scsi_done(scmd);
1596 spin_unlock_irqrestore(shost->host_lock, irq_flags);
1597 return 0;
1598}
1599
1600static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
1601{
1602 struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr;
1603 struct Scsi_Host *host = scmd->device->host;
1604 struct mvumi_hba *mhba = shost_priv(host);
1605 unsigned long flags;
1606
1607 spin_lock_irqsave(mhba->shost->host_lock, flags);
1608
1609 if (mhba->tag_cmd[cmd->frame->tag]) {
1610 mhba->tag_cmd[cmd->frame->tag] = 0;
1611 tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
1612 }
1613 if (!list_empty(&cmd->queue_pointer))
1614 list_del_init(&cmd->queue_pointer);
1615 else
1616 atomic_dec(&mhba->fw_outstanding);
1617
1618 scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
1619 scmd->SCp.ptr = NULL;
1620 if (scsi_bufflen(scmd)) {
1621 if (scsi_sg_count(scmd)) {
1622 pci_unmap_sg(mhba->pdev,
1623 scsi_sglist(scmd),
1624 scsi_sg_count(scmd),
1625 (int)scmd->sc_data_direction);
1626 } else {
1627 pci_unmap_single(mhba->pdev,
1628 scmd->SCp.dma_handle,
1629 scsi_bufflen(scmd),
1630 (int)scmd->sc_data_direction);
1631
1632 scmd->SCp.dma_handle = 0;
1633 }
1634 }
1635 mvumi_return_cmd(mhba, cmd);
1636 spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1637
1638 return BLK_EH_NOT_HANDLED;
1639}
1640
1641static int
1642mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev,
1643 sector_t capacity, int geom[])
1644{
1645 int heads, sectors;
1646 sector_t cylinders;
1647 unsigned long tmp;
1648
1649 heads = 64;
1650 sectors = 32;
1651 tmp = heads * sectors;
1652 cylinders = capacity;
1653 sector_div(cylinders, tmp);
1654
1655 if (capacity >= 0x200000) {
1656 heads = 255;
1657 sectors = 63;
1658 tmp = heads * sectors;
1659 cylinders = capacity;
1660 sector_div(cylinders, tmp);
1661 }
1662 geom[0] = heads;
1663 geom[1] = sectors;
1664 geom[2] = cylinders;
1665
1666 return 0;
1667}
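
As a worked example of the branch for capacity >= 0x200000 (illustrative values only): a disk of 2,097,152 sectors reports 255 heads and 63 sectors, i.e. 16,065 sectors per cylinder, giving 130 cylinders:

	sector_t capacity = 2097152;		/* 512-byte sectors */
	int heads = 255, sectors = 63;
	unsigned long tmp = heads * sectors;	/* 16065 */
	sector_t cylinders = capacity;

	sector_div(cylinders, tmp);		/* cylinders == 130 */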
1668
1669static struct scsi_host_template mvumi_template = {
1670
1671 .module = THIS_MODULE,
1672 .name = "Marvell Storage Controller",
1673 .slave_configure = mvumi_slave_configure,
1674 .queuecommand = mvumi_queue_command,
1675 .eh_host_reset_handler = mvumi_host_reset,
1676 .bios_param = mvumi_bios_param,
1677 .this_id = -1,
1678};
1679
1680static struct scsi_transport_template mvumi_transport_template = {
1681 .eh_timed_out = mvumi_timed_out,
1682};
1683
1684/**
1685 * mvumi_init_fw - Initializes the FW
1686 * @mhba: Adapter soft state
1687 *
1688 * This is the main function for initializing firmware.
1689 */
1690static int mvumi_init_fw(struct mvumi_hba *mhba)
1691{
1692 int ret = 0;
1693
1694 if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) {
1695 dev_err(&mhba->pdev->dev, "IO memory region busy!\n");
1696 return -EBUSY;
1697 }
1698 ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
1699 if (ret)
1700 goto fail_ioremap;
1701
1702 mhba->mmio = mhba->base_addr[0];
1703
1704 switch (mhba->pdev->device) {
1705 case PCI_DEVICE_ID_MARVELL_MV9143:
1706 mhba->instancet = &mvumi_instance_template;
1707 mhba->io_seq = 0;
1708 mhba->max_sge = MVUMI_MAX_SG_ENTRY;
1709 mhba->request_id_enabled = 1;
1710 break;
1711 default:
1712 dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
1713 mhba->pdev->device);
1714 mhba->instancet = NULL;
1715 ret = -EINVAL;
1716 goto fail_alloc_mem;
1717 }
1718	dev_dbg(&mhba->pdev->dev, "device id 0x%04X found.\n",
1719 mhba->pdev->device);
1720
1721 mhba->handshake_page = kzalloc(HSP_MAX_SIZE, GFP_KERNEL);
1722 if (!mhba->handshake_page) {
1723 dev_err(&mhba->pdev->dev,
1724 "failed to allocate memory for handshake\n");
1725 ret = -ENOMEM;
1726 goto fail_alloc_mem;
1727 }
1728 mhba->handshake_page_phys = virt_to_phys(mhba->handshake_page);
1729
1730 if (mvumi_start(mhba)) {
1731 ret = -EINVAL;
1732 goto fail_ready_state;
1733 }
1734 ret = mvumi_alloc_cmds(mhba);
1735 if (ret)
1736 goto fail_ready_state;
1737
1738 return 0;
1739
1740fail_ready_state:
1741 mvumi_release_mem_resource(mhba);
1742 kfree(mhba->handshake_page);
1743fail_alloc_mem:
1744 mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
1745fail_ioremap:
1746 pci_release_regions(mhba->pdev);
1747
1748 return ret;
1749}
1750
1751/**
1752 * mvumi_io_attach - Attaches this driver to SCSI mid-layer
1753 * @mhba: Adapter soft state
1754 */
1755static int mvumi_io_attach(struct mvumi_hba *mhba)
1756{
1757 struct Scsi_Host *host = mhba->shost;
1758 int ret;
1759 unsigned int max_sg = (mhba->ib_max_size + 4 -
1760 sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);
1761
1762 host->irq = mhba->pdev->irq;
1763 host->unique_id = mhba->unique_id;
1764 host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
1765 host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
1766 host->max_sectors = mhba->max_transfer_size / 512;
1767 host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
1768 host->max_id = mhba->max_target_id;
1769 host->max_cmd_len = MAX_COMMAND_SIZE;
1770 host->transportt = &mvumi_transport_template;
1771
1772 ret = scsi_add_host(host, &mhba->pdev->dev);
1773 if (ret) {
1774 dev_err(&mhba->pdev->dev, "scsi_add_host failed\n");
1775 return ret;
1776 }
1777 mhba->fw_flag |= MVUMI_FW_ATTACH;
1778 scsi_scan_host(host);
1779
1780 return 0;
1781}
1782
1783/**
1784 * mvumi_probe_one - PCI hotplug entry point
1785 * @pdev: PCI device structure
1786 * @id: PCI ids of supported hotplugged adapter
1787 */
1788static int __devinit mvumi_probe_one(struct pci_dev *pdev,
1789 const struct pci_device_id *id)
1790{
1791 struct Scsi_Host *host;
1792 struct mvumi_hba *mhba;
1793 int ret;
1794
1795 dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
1796 pdev->vendor, pdev->device, pdev->subsystem_vendor,
1797 pdev->subsystem_device);
1798
1799 ret = pci_enable_device(pdev);
1800 if (ret)
1801 return ret;
1802
1803 pci_set_master(pdev);
1804
1805 if (IS_DMA64) {
1806 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1807 if (ret) {
1808 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1809 if (ret)
1810 goto fail_set_dma_mask;
1811 }
1812 } else {
1813 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1814 if (ret)
1815 goto fail_set_dma_mask;
1816 }
1817
1818 host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
1819 if (!host) {
1820 dev_err(&pdev->dev, "scsi_host_alloc failed\n");
1821 ret = -ENOMEM;
1822 goto fail_alloc_instance;
1823 }
1824 mhba = shost_priv(host);
1825
1826 INIT_LIST_HEAD(&mhba->cmd_pool);
1827 INIT_LIST_HEAD(&mhba->ob_data_list);
1828 INIT_LIST_HEAD(&mhba->free_ob_list);
1829 INIT_LIST_HEAD(&mhba->res_list);
1830 INIT_LIST_HEAD(&mhba->waiting_req_list);
1831 atomic_set(&mhba->fw_outstanding, 0);
1832 init_waitqueue_head(&mhba->int_cmd_wait_q);
1833
1834 mhba->pdev = pdev;
1835 mhba->shost = host;
1836 mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;
1837
1838 ret = mvumi_init_fw(mhba);
1839 if (ret)
1840 goto fail_init_fw;
1841
1842 ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
1843 "mvumi", mhba);
1844 if (ret) {
1845 dev_err(&pdev->dev, "failed to register IRQ\n");
1846 goto fail_init_irq;
1847 }
1848 mhba->instancet->enable_intr(mhba->mmio);
1849 pci_set_drvdata(pdev, mhba);
1850
1851 ret = mvumi_io_attach(mhba);
1852 if (ret)
1853 goto fail_io_attach;
1854	dev_dbg(&pdev->dev, "mvumi driver probed successfully.\n");
1855
1856 return 0;
1857
1858fail_io_attach:
1859 pci_set_drvdata(pdev, NULL);
1860 mhba->instancet->disable_intr(mhba->mmio);
1861 free_irq(mhba->pdev->irq, mhba);
1862fail_init_irq:
1863 mvumi_release_fw(mhba);
1864fail_init_fw:
1865 scsi_host_put(host);
1866
1867fail_alloc_instance:
1868fail_set_dma_mask:
1869 pci_disable_device(pdev);
1870
1871 return ret;
1872}
1873
1874static void mvumi_detach_one(struct pci_dev *pdev)
1875{
1876 struct Scsi_Host *host;
1877 struct mvumi_hba *mhba;
1878
1879 mhba = pci_get_drvdata(pdev);
1880 host = mhba->shost;
1881 scsi_remove_host(mhba->shost);
1882 mvumi_flush_cache(mhba);
1883
1884 mhba->instancet->disable_intr(mhba->mmio);
1885 free_irq(mhba->pdev->irq, mhba);
1886 mvumi_release_fw(mhba);
1887 scsi_host_put(host);
1888 pci_set_drvdata(pdev, NULL);
1889 pci_disable_device(pdev);
1890 dev_dbg(&pdev->dev, "driver is removed!\n");
1891}
1892
1893/**
1894 * mvumi_shutdown - Shutdown entry point
1895 * @pdev: PCI device structure
1896 */
1897static void mvumi_shutdown(struct pci_dev *pdev)
1898{
1899 struct mvumi_hba *mhba = pci_get_drvdata(pdev);
1900
1901 mvumi_flush_cache(mhba);
1902}
1903
1904static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
1905{
1906 struct mvumi_hba *mhba = NULL;
1907
1908 mhba = pci_get_drvdata(pdev);
1909 mvumi_flush_cache(mhba);
1910
1911 pci_set_drvdata(pdev, mhba);
1912 mhba->instancet->disable_intr(mhba->mmio);
1913 free_irq(mhba->pdev->irq, mhba);
1914 mvumi_unmap_pci_addr(pdev, mhba->base_addr);
1915 pci_release_regions(pdev);
1916 pci_save_state(pdev);
1917 pci_disable_device(pdev);
1918 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1919
1920 return 0;
1921}
1922
1923static int mvumi_resume(struct pci_dev *pdev)
1924{
1925 int ret;
1926 struct mvumi_hba *mhba = NULL;
1927
1928 mhba = pci_get_drvdata(pdev);
1929
1930 pci_set_power_state(pdev, PCI_D0);
1931 pci_enable_wake(pdev, PCI_D0, 0);
1932 pci_restore_state(pdev);
1933
1934 ret = pci_enable_device(pdev);
1935 if (ret) {
1936 dev_err(&pdev->dev, "enable device failed\n");
1937 return ret;
1938 }
1939 pci_set_master(pdev);
1940 if (IS_DMA64) {
1941 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1942 if (ret) {
1943 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1944 if (ret)
1945 goto fail;
1946 }
1947 } else {
1948 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1949 if (ret)
1950 goto fail;
1951 }
1952 ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
1953 if (ret)
1954 goto fail;
1955 ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
1956 if (ret)
1957 goto release_regions;
1958
1959 mhba->mmio = mhba->base_addr[0];
1960 mvumi_reset(mhba->mmio);
1961
1962 if (mvumi_start(mhba)) {
1963 ret = -EINVAL;
1964 goto unmap_pci_addr;
1965 }
1966
1967 ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
1968 "mvumi", mhba);
1969 if (ret) {
1970 dev_err(&pdev->dev, "failed to register IRQ\n");
1971 goto unmap_pci_addr;
1972 }
1973 mhba->instancet->enable_intr(mhba->mmio);
1974
1975 return 0;
1976
1977unmap_pci_addr:
1978 mvumi_unmap_pci_addr(pdev, mhba->base_addr);
1979release_regions:
1980 pci_release_regions(pdev);
1981fail:
1982 pci_disable_device(pdev);
1983
1984 return ret;
1985}
1986
1987static struct pci_driver mvumi_pci_driver = {
1988
1989 .name = MV_DRIVER_NAME,
1990 .id_table = mvumi_pci_table,
1991 .probe = mvumi_probe_one,
1992 .remove = __devexit_p(mvumi_detach_one),
1993 .shutdown = mvumi_shutdown,
1994#ifdef CONFIG_PM
1995 .suspend = mvumi_suspend,
1996 .resume = mvumi_resume,
1997#endif
1998};
1999
2000/**
2001 * mvumi_init - Driver load entry point
2002 */
2003static int __init mvumi_init(void)
2004{
2005 return pci_register_driver(&mvumi_pci_driver);
2006}
2007
2008/**
2009 * mvumi_exit - Driver unload entry point
2010 */
2011static void __exit mvumi_exit(void)
2012{
2013
2014 pci_unregister_driver(&mvumi_pci_driver);
2015}
2016
2017module_init(mvumi_init);
2018module_exit(mvumi_exit);
diff --git a/drivers/scsi/mvumi.h b/drivers/scsi/mvumi.h
new file mode 100644
index 00000000000..10b9237566f
--- /dev/null
+++ b/drivers/scsi/mvumi.h
@@ -0,0 +1,505 @@
1/*
2 * Marvell UMI header file
3 *
4 * Copyright 2011 Marvell. <jyli@marvell.com>
5 *
6 * This file is licensed under GPLv2.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; version 2 of the
11 * License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
21 * USA
22 */
23
24#ifndef MVUMI_H
25#define MVUMI_H
26
27#define MAX_BASE_ADDRESS 6
28
29#define VER_MAJOR 1
30#define VER_MINOR 1
31#define VER_OEM 0
32#define VER_BUILD 1500
33
34#define MV_DRIVER_NAME "mvumi"
35#define PCI_VENDOR_ID_MARVELL_2 0x1b4b
36#define PCI_DEVICE_ID_MARVELL_MV9143 0x9143
37
38#define MVUMI_INTERNAL_CMD_WAIT_TIME 45
39
40#define IS_DMA64 (sizeof(dma_addr_t) == 8)
41
42enum mvumi_qc_result {
43 MV_QUEUE_COMMAND_RESULT_SENT = 0,
44 MV_QUEUE_COMMAND_RESULT_NO_RESOURCE,
45};
46
47enum {
48 /*******************************************/
49
50 /* ARM Mbus Registers Map */
51
52 /*******************************************/
53 CPU_MAIN_INT_CAUSE_REG = 0x20200,
54 CPU_MAIN_IRQ_MASK_REG = 0x20204,
55 CPU_MAIN_FIQ_MASK_REG = 0x20208,
56 CPU_ENPOINTA_MASK_REG = 0x2020C,
57 CPU_ENPOINTB_MASK_REG = 0x20210,
58
59 INT_MAP_COMAERR = 1 << 6,
60 INT_MAP_COMAIN = 1 << 7,
61 INT_MAP_COMAOUT = 1 << 8,
62 INT_MAP_COMBERR = 1 << 9,
63 INT_MAP_COMBIN = 1 << 10,
64 INT_MAP_COMBOUT = 1 << 11,
65
66 INT_MAP_COMAINT = (INT_MAP_COMAOUT | INT_MAP_COMAERR),
67 INT_MAP_COMBINT = (INT_MAP_COMBOUT | INT_MAP_COMBIN | INT_MAP_COMBERR),
68
69 INT_MAP_DL_PCIEA2CPU = 1 << 0,
70 INT_MAP_DL_CPU2PCIEA = 1 << 1,
71
72 /***************************************/
73
74 /* ARM Doorbell Registers Map */
75
76 /***************************************/
77 CPU_PCIEA_TO_ARM_DRBL_REG = 0x20400,
78 CPU_PCIEA_TO_ARM_MASK_REG = 0x20404,
79 CPU_ARM_TO_PCIEA_DRBL_REG = 0x20408,
80 CPU_ARM_TO_PCIEA_MASK_REG = 0x2040C,
81
82 DRBL_HANDSHAKE = 1 << 0,
83 DRBL_SOFT_RESET = 1 << 1,
84 DRBL_BUS_CHANGE = 1 << 2,
85 DRBL_EVENT_NOTIFY = 1 << 3,
86 DRBL_MU_RESET = 1 << 4,
87 DRBL_HANDSHAKE_ISR = DRBL_HANDSHAKE,
88
89 CPU_PCIEA_TO_ARM_MSG0 = 0x20430,
90 CPU_PCIEA_TO_ARM_MSG1 = 0x20434,
91 CPU_ARM_TO_PCIEA_MSG0 = 0x20438,
92 CPU_ARM_TO_PCIEA_MSG1 = 0x2043C,
93
94 /*******************************************/
95
96 /* ARM Communication List Registers Map */
97
98 /*******************************************/
99 CLA_INB_LIST_BASEL = 0x500,
100 CLA_INB_LIST_BASEH = 0x504,
101 CLA_INB_AVAL_COUNT_BASEL = 0x508,
102 CLA_INB_AVAL_COUNT_BASEH = 0x50C,
103 CLA_INB_DESTI_LIST_BASEL = 0x510,
104 CLA_INB_DESTI_LIST_BASEH = 0x514,
105 CLA_INB_WRITE_POINTER = 0x518,
106 CLA_INB_READ_POINTER = 0x51C,
107
108 CLA_OUTB_LIST_BASEL = 0x530,
109 CLA_OUTB_LIST_BASEH = 0x534,
110 CLA_OUTB_SOURCE_LIST_BASEL = 0x538,
111 CLA_OUTB_SOURCE_LIST_BASEH = 0x53C,
112 CLA_OUTB_COPY_POINTER = 0x544,
113 CLA_OUTB_READ_POINTER = 0x548,
114
115 CLA_ISR_CAUSE = 0x560,
116 CLA_ISR_MASK = 0x564,
117
118 INT_MAP_MU = (INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAINT),
119
120 CL_POINTER_TOGGLE = 1 << 12,
121
122 CLIC_IN_IRQ = 1 << 0,
123 CLIC_OUT_IRQ = 1 << 1,
124 CLIC_IN_ERR_IRQ = 1 << 8,
125 CLIC_OUT_ERR_IRQ = 1 << 12,
126
127 CL_SLOT_NUM_MASK = 0xFFF,
128
129 /*
130 * Command flag is the flag for the CDB command itself
131 */
132 /* 1-non data; 0-data command */
133 CMD_FLAG_NON_DATA = 1 << 0,
134 CMD_FLAG_DMA = 1 << 1,
135 CMD_FLAG_PIO = 1 << 2,
136 /* 1-host read data */
137 CMD_FLAG_DATA_IN = 1 << 3,
138 /* 1-host write data */
139 CMD_FLAG_DATA_OUT = 1 << 4,
140
141 SCSI_CMD_MARVELL_SPECIFIC = 0xE1,
142 CDB_CORE_SHUTDOWN = 0xB,
143};
144
145#define APICDB0_EVENT 0xF4
146#define APICDB1_EVENT_GETEVENT 0
147#define MAX_EVENTS_RETURNED 6
148
149struct mvumi_driver_event {
150 u32 time_stamp;
151 u32 sequence_no;
152 u32 event_id;
153 u8 severity;
154 u8 param_count;
155 u16 device_id;
156 u32 params[4];
157 u8 sense_data_length;
158 u8 Reserved1;
159 u8 sense_data[30];
160};
161
162struct mvumi_event_req {
163 unsigned char count;
164 unsigned char reserved[3];
165 struct mvumi_driver_event events[MAX_EVENTS_RETURNED];
166};
167
168struct mvumi_events_wq {
169 struct work_struct work_q;
170 struct mvumi_hba *mhba;
171 unsigned int event;
172 void *param;
173};
174
175#define MVUMI_MAX_SG_ENTRY 32
176#define SGD_EOT (1L << 27)
177
178struct mvumi_sgl {
179 u32 baseaddr_l;
180 u32 baseaddr_h;
181 u32 flags;
182 u32 size;
183};
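
mvumi_make_sgl(), defined earlier in mvumi.c, fills these descriptors from a scatterlist. A minimal sketch of filling one entry, assuming only the SGD_EOT flag bit shown above (the helper name is hypothetical):

	static void fill_sgd(struct mvumi_sgl *sgd, dma_addr_t busaddr,
			     unsigned int len, int is_last)
	{
		sgd->baseaddr_l = lower_32_bits(busaddr);
		sgd->baseaddr_h = upper_32_bits(busaddr);
		sgd->size = len;
		sgd->flags = is_last ? SGD_EOT : 0;	/* mark end of table */
	}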
184
185struct mvumi_res {
186 struct list_head entry;
187 dma_addr_t bus_addr;
188 void *virt_addr;
189 unsigned int size;
190	unsigned short type;	/* enum resource_type */
191};
192
193/* Resource type */
194enum resource_type {
195 RESOURCE_CACHED_MEMORY = 0,
196 RESOURCE_UNCACHED_MEMORY
197};
198
199struct mvumi_sense_data {
200 u8 error_eode:7;
201 u8 valid:1;
202 u8 segment_number;
203 u8 sense_key:4;
204 u8 reserved:1;
205 u8 incorrect_length:1;
206 u8 end_of_media:1;
207 u8 file_mark:1;
208 u8 information[4];
209 u8 additional_sense_length;
210 u8 command_specific_information[4];
211 u8 additional_sense_code;
212 u8 additional_sense_code_qualifier;
213 u8 field_replaceable_unit_code;
214 u8 sense_key_specific[3];
215};
216
217/* Request initiator must set the status to REQ_STATUS_PENDING. */
218#define REQ_STATUS_PENDING 0x80
219
220struct mvumi_cmd {
221 struct list_head queue_pointer;
222 struct mvumi_msg_frame *frame;
223 struct scsi_cmnd *scmd;
224 atomic_t sync_cmd;
225 void *data_buf;
226 unsigned short request_id;
227 unsigned char cmd_status;
228};
229
230/*
231 * the function type of the in bound frame
232 */
233#define CL_FUN_SCSI_CMD 0x1
234
235struct mvumi_msg_frame {
236 u16 device_id;
237 u16 tag;
238 u8 cmd_flag;
239 u8 req_function;
240 u8 cdb_length;
241 u8 sg_counts;
242 u32 data_transfer_length;
243 u16 request_id;
244 u16 reserved1;
245 u8 cdb[MAX_COMMAND_SIZE];
246 u32 payload[1];
247};
248
249/*
250 * the respond flag for data_payload of the out bound frame
251 */
252#define CL_RSP_FLAG_NODATA 0x0
253#define CL_RSP_FLAG_SENSEDATA 0x1
254
255struct mvumi_rsp_frame {
256 u16 device_id;
257 u16 tag;
258 u8 req_status;
259 u8 rsp_flag; /* Indicates the type of Data_Payload.*/
260 u16 request_id;
261 u32 payload[1];
262};
263
264struct mvumi_ob_data {
265 struct list_head list;
266 unsigned char data[0];
267};
268
269struct version_info {
270 u32 ver_major;
271 u32 ver_minor;
272 u32 ver_oem;
273 u32 ver_build;
274};
275
276#define FW_MAX_DELAY 30
277#define MVUMI_FW_BUSY (1U << 0)
278#define MVUMI_FW_ATTACH (1U << 1)
279#define MVUMI_FW_ALLOC (1U << 2)
280
281/*
282 * State is the state of the MU
283 */
284#define FW_STATE_IDLE 0
285#define FW_STATE_STARTING 1
286#define FW_STATE_HANDSHAKING 2
287#define FW_STATE_STARTED 3
288#define FW_STATE_ABORT 4
289
290#define HANDSHAKE_SIGNATURE 0x5A5A5A5AL
291#define HANDSHAKE_READYSTATE 0x55AA5AA5L
292#define HANDSHAKE_DONESTATE 0x55AAA55AL
293
294/* HandShake Status definition */
295#define HS_STATUS_OK 1
296#define HS_STATUS_ERR 2
297#define HS_STATUS_INVALID 3
298
299/* HandShake State/Cmd definition */
300#define HS_S_START 1
301#define HS_S_RESET 2
302#define HS_S_PAGE_ADDR 3
303#define HS_S_QUERY_PAGE 4
304#define HS_S_SEND_PAGE 5
305#define HS_S_END 6
306#define HS_S_ABORT 7
307#define HS_PAGE_VERIFY_SIZE 128
308
309#define HS_GET_STATE(a) (a & 0xFFFF)
310#define HS_GET_STATUS(a) ((a & 0xFFFF0000) >> 16)
311#define HS_SET_STATE(a, b) (a |= (b & 0xFFFF))
312#define HS_SET_STATUS(a, b) (a |= ((b & 0xFFFF) << 16))
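
A worked example of the packing: the status lives in the high 16 bits, the state in the low 16:

	unsigned int hs_fun = 0;

	HS_SET_STATUS(hs_fun, HS_STATUS_OK);	/* hs_fun == 0x00010000 */
	HS_SET_STATE(hs_fun, HS_S_RESET);	/* hs_fun == 0x00010002 */
	/* HS_GET_STATUS(hs_fun) == 1, HS_GET_STATE(hs_fun) == 2 */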
313
314/* handshake frame */
315struct mvumi_hs_frame {
316 u16 size;
317 /* host information */
318 u8 host_type;
319 u8 reserved_1[1];
320 struct version_info host_ver; /* bios or driver version */
321
322 /* controller information */
323 u32 system_io_bus;
324 u32 slot_number;
325 u32 intr_level;
326 u32 intr_vector;
327
328 /* communication list configuration */
329 u32 ib_baseaddr_l;
330 u32 ib_baseaddr_h;
331 u32 ob_baseaddr_l;
332 u32 ob_baseaddr_h;
333
334 u8 ib_entry_size;
335 u8 ob_entry_size;
336 u8 ob_depth;
337 u8 ib_depth;
338
339 /* system time */
340 u64 seconds_since1970;
341};
342
343struct mvumi_hs_header {
344 u8 page_code;
345 u8 checksum;
346 u16 frame_length;
347 u32 frame_content[1];
348};
349
350/*
351 * the page code type of the handshake header
352 */
353#define HS_PAGE_FIRM_CAP 0x1
354#define HS_PAGE_HOST_INFO 0x2
355#define HS_PAGE_FIRM_CTL 0x3
356#define HS_PAGE_CL_INFO 0x4
357#define HS_PAGE_TOTAL 0x5
358
359#define HSP_SIZE(i) sizeof(struct mvumi_hs_page##i)
360
361#define HSP_MAX_SIZE ({ \
362 int size, m1, m2; \
363 m1 = max(HSP_SIZE(1), HSP_SIZE(3)); \
364 m2 = max(HSP_SIZE(2), HSP_SIZE(4)); \
365 size = max(m1, m2); \
366 size; \
367})
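
HSP_MAX_SIZE is a GCC statement expression that evaluates to the largest of the four handshake page sizes, so the single buffer allocated for mhba->handshake_page can hold any page:

	/* Equivalent to:
	 *   max(max(HSP_SIZE(1), HSP_SIZE(3)), max(HSP_SIZE(2), HSP_SIZE(4)))
	 */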
368
369/* The format of the page code for Firmware capability */
370struct mvumi_hs_page1 {
371 u8 pagecode;
372 u8 checksum;
373 u16 frame_length;
374
375 u16 number_of_ports;
376 u16 max_devices_support;
377 u16 max_io_support;
378 u16 umi_ver;
379 u32 max_transfer_size;
380 struct version_info fw_ver;
381 u8 cl_in_max_entry_size;
382 u8 cl_out_max_entry_size;
383 u8 cl_inout_list_depth;
384 u8 total_pages;
385 u16 capability;
386 u16 reserved1;
387};
388
389/* The format of the page code for Host information */
390struct mvumi_hs_page2 {
391 u8 pagecode;
392 u8 checksum;
393 u16 frame_length;
394
395 u8 host_type;
396 u8 reserved[3];
397 struct version_info host_ver;
398 u32 system_io_bus;
399 u32 slot_number;
400 u32 intr_level;
401 u32 intr_vector;
402 u64 seconds_since1970;
403};
404
405/* The format of the page code for firmware control */
406struct mvumi_hs_page3 {
407 u8 pagecode;
408 u8 checksum;
409 u16 frame_length;
410 u16 control;
411 u8 reserved[2];
412 u32 host_bufferaddr_l;
413 u32 host_bufferaddr_h;
414 u32 host_eventaddr_l;
415 u32 host_eventaddr_h;
416};
417
418struct mvumi_hs_page4 {
419 u8 pagecode;
420 u8 checksum;
421 u16 frame_length;
422 u32 ib_baseaddr_l;
423 u32 ib_baseaddr_h;
424 u32 ob_baseaddr_l;
425 u32 ob_baseaddr_h;
426 u8 ib_entry_size;
427 u8 ob_entry_size;
428 u8 ob_depth;
429 u8 ib_depth;
430};
431
432struct mvumi_tag {
433 unsigned short *stack;
434 unsigned short top;
435 unsigned short size;
436};
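
The tag helpers (tag_init(), tag_get_one(), tag_release_one()) are defined earlier in mvumi.c; judging from these fields and from how they are called in this file, the pool behaves as a LIFO stack of free tags. A minimal sketch under that assumption (names are hypothetical):

	static unsigned short tag_pop(struct mvumi_tag *st)
	{
		BUG_ON(!st->top);
		return st->stack[--st->top];	/* most recently freed tag */
	}

	static void tag_push(struct mvumi_tag *st, unsigned short tag)
	{
		BUG_ON(st->top >= st->size);
		st->stack[st->top++] = tag;
	}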
437
438struct mvumi_hba {
439 void *base_addr[MAX_BASE_ADDRESS];
440 void *mmio;
441 struct list_head cmd_pool;
442 struct Scsi_Host *shost;
443 wait_queue_head_t int_cmd_wait_q;
444 struct pci_dev *pdev;
445 unsigned int unique_id;
446 atomic_t fw_outstanding;
447 struct mvumi_instance_template *instancet;
448
449 void *ib_list;
450 dma_addr_t ib_list_phys;
451
452 void *ob_list;
453 dma_addr_t ob_list_phys;
454
455 void *ib_shadow;
456 dma_addr_t ib_shadow_phys;
457
458 void *ob_shadow;
459 dma_addr_t ob_shadow_phys;
460
461 void *handshake_page;
462 dma_addr_t handshake_page_phys;
463
464 unsigned int global_isr;
465 unsigned int isr_status;
466
467 unsigned short max_sge;
468 unsigned short max_target_id;
469 unsigned char *target_map;
470 unsigned int max_io;
471 unsigned int list_num_io;
472 unsigned int ib_max_size;
473 unsigned int ob_max_size;
474 unsigned int ib_max_size_setting;
475 unsigned int ob_max_size_setting;
476 unsigned int max_transfer_size;
477 unsigned char hba_total_pages;
478 unsigned char fw_flag;
479 unsigned char request_id_enabled;
480 unsigned short hba_capability;
481 unsigned short io_seq;
482
483 unsigned int ib_cur_slot;
484 unsigned int ob_cur_slot;
485 unsigned int fw_state;
486
487 struct list_head ob_data_list;
488 struct list_head free_ob_list;
489 struct list_head res_list;
490 struct list_head waiting_req_list;
491
492 struct mvumi_tag tag_pool;
493 struct mvumi_cmd **tag_cmd;
494};
495
496struct mvumi_instance_template {
497 void (*fire_cmd)(struct mvumi_hba *, struct mvumi_cmd *);
498 void (*enable_intr)(void *) ;
499 void (*disable_intr)(void *);
500 int (*clear_intr)(void *);
501 unsigned int (*read_fw_status_reg)(void *);
502};
503
504extern struct timezone sys_tz;
505#endif
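A note on the header above: struct mvumi_instance_template is a per-chip ops table, so the core of the driver can fire commands and manage interrupts through mhba->instancet without knowing which controller generation it is driving. A self-contained illustration of that dispatch pattern (demo types, not the driver's):

#include <stdio.h>

struct demo_hba;

struct demo_template {
	void (*enable_intr)(struct demo_hba *);
	unsigned int (*read_fw_status_reg)(struct demo_hba *);
};

struct demo_hba {
	const struct demo_template *instancet;
};

static void chip_a_enable_intr(struct demo_hba *hba)
{
	puts("chip A: interrupts on");
}

static unsigned int chip_a_fw_status(struct demo_hba *hba)
{
	return 0xf0;	/* pretend register read */
}

static const struct demo_template chip_a_ops = {
	.enable_intr		= chip_a_enable_intr,
	.read_fw_status_reg	= chip_a_fw_status,
};

int main(void)
{
	struct demo_hba hba = { .instancet = &chip_a_ops };

	/* generic code only ever sees the ops table */
	hba.instancet->enable_intr(&hba);
	printf("fw status: 0x%x\n", hba.instancet->read_fw_status_reg(&hba));
	return 0;
}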
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 8b7db1e53c1..b7b92f7be2a 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -567,11 +567,11 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
567 value = pm8001_cr32(pm8001_ha, 0, 0x44); 567 value = pm8001_cr32(pm8001_ha, 0, 0x44);
568 offset = value & 0x03FFFFFF; 568 offset = value & 0x03FFFFFF;
569 PM8001_INIT_DBG(pm8001_ha, 569 PM8001_INIT_DBG(pm8001_ha,
570 pm8001_printk("Scratchpad 0 Offset: %x \n", offset)); 570 pm8001_printk("Scratchpad 0 Offset: %x\n", offset));
571 pcilogic = (value & 0xFC000000) >> 26; 571 pcilogic = (value & 0xFC000000) >> 26;
572 pcibar = get_pci_bar_index(pcilogic); 572 pcibar = get_pci_bar_index(pcilogic);
573 PM8001_INIT_DBG(pm8001_ha, 573 PM8001_INIT_DBG(pm8001_ha,
574 pm8001_printk("Scratchpad 0 PCI BAR: %d \n", pcibar)); 574 pm8001_printk("Scratchpad 0 PCI BAR: %d\n", pcibar));
575 pm8001_ha->main_cfg_tbl_addr = base_addr = 575 pm8001_ha->main_cfg_tbl_addr = base_addr =
576 pm8001_ha->io_mem[pcibar].memvirtaddr + offset; 576 pm8001_ha->io_mem[pcibar].memvirtaddr + offset;
577 pm8001_ha->general_stat_tbl_addr = 577 pm8001_ha->general_stat_tbl_addr =
@@ -1245,7 +1245,7 @@ static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
1245 1245
1246 if (mpi_msg_free_get(circularQ, 64, &pMessage) < 0) { 1246 if (mpi_msg_free_get(circularQ, 64, &pMessage) < 0) {
1247 PM8001_IO_DBG(pm8001_ha, 1247 PM8001_IO_DBG(pm8001_ha,
1248 pm8001_printk("No free mpi buffer \n")); 1248 pm8001_printk("No free mpi buffer\n"));
1249 return -1; 1249 return -1;
1250 } 1250 }
1251 BUG_ON(!payload); 1251 BUG_ON(!payload);
@@ -1262,7 +1262,7 @@ static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
1262 pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar, 1262 pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar,
1263 circularQ->pi_offset, circularQ->producer_idx); 1263 circularQ->pi_offset, circularQ->producer_idx);
1264 PM8001_IO_DBG(pm8001_ha, 1264 PM8001_IO_DBG(pm8001_ha,
1265 pm8001_printk("after PI= %d CI= %d \n", circularQ->producer_idx, 1265 pm8001_printk("after PI= %d CI= %d\n", circularQ->producer_idx,
1266 circularQ->consumer_index)); 1266 circularQ->consumer_index));
1267 return 0; 1267 return 0;
1268} 1268}
@@ -1474,7 +1474,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
1474 switch (status) { 1474 switch (status) {
1475 case IO_SUCCESS: 1475 case IO_SUCCESS:
1476 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS" 1476 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS"
1477 ",param = %d \n", param)); 1477 ",param = %d\n", param));
1478 if (param == 0) { 1478 if (param == 0) {
1479 ts->resp = SAS_TASK_COMPLETE; 1479 ts->resp = SAS_TASK_COMPLETE;
1480 ts->stat = SAM_STAT_GOOD; 1480 ts->stat = SAM_STAT_GOOD;
@@ -1490,14 +1490,14 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
1490 break; 1490 break;
1491 case IO_ABORTED: 1491 case IO_ABORTED:
1492 PM8001_IO_DBG(pm8001_ha, 1492 PM8001_IO_DBG(pm8001_ha,
1493 pm8001_printk("IO_ABORTED IOMB Tag \n")); 1493 pm8001_printk("IO_ABORTED IOMB Tag\n"));
1494 ts->resp = SAS_TASK_COMPLETE; 1494 ts->resp = SAS_TASK_COMPLETE;
1495 ts->stat = SAS_ABORTED_TASK; 1495 ts->stat = SAS_ABORTED_TASK;
1496 break; 1496 break;
1497 case IO_UNDERFLOW: 1497 case IO_UNDERFLOW:
1498 /* SSP Completion with error */ 1498 /* SSP Completion with error */
1499 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW" 1499 PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW"
1500 ",param = %d \n", param)); 1500 ",param = %d\n", param));
1501 ts->resp = SAS_TASK_COMPLETE; 1501 ts->resp = SAS_TASK_COMPLETE;
1502 ts->stat = SAS_DATA_UNDERRUN; 1502 ts->stat = SAS_DATA_UNDERRUN;
1503 ts->residual = param; 1503 ts->residual = param;
@@ -1649,6 +1649,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
1649 ts->resp = SAS_TASK_COMPLETE; 1649 ts->resp = SAS_TASK_COMPLETE;
1650 ts->stat = SAS_OPEN_REJECT; 1650 ts->stat = SAS_OPEN_REJECT;
1651 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; 1651 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
1652 break;
1652 default: 1653 default:
1653 PM8001_IO_DBG(pm8001_ha, 1654 PM8001_IO_DBG(pm8001_ha,
1654 pm8001_printk("Unknown status 0x%x\n", status)); 1655 pm8001_printk("Unknown status 0x%x\n", status));
@@ -1937,14 +1938,14 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
1937 ts->buf_valid_size = sizeof(*resp); 1938 ts->buf_valid_size = sizeof(*resp);
1938 } else 1939 } else
1939 PM8001_IO_DBG(pm8001_ha, 1940 PM8001_IO_DBG(pm8001_ha,
1940 pm8001_printk("response to large \n")); 1941 pm8001_printk("response to large\n"));
1941 } 1942 }
1942 if (pm8001_dev) 1943 if (pm8001_dev)
1943 pm8001_dev->running_req--; 1944 pm8001_dev->running_req--;
1944 break; 1945 break;
1945 case IO_ABORTED: 1946 case IO_ABORTED:
1946 PM8001_IO_DBG(pm8001_ha, 1947 PM8001_IO_DBG(pm8001_ha,
1947 pm8001_printk("IO_ABORTED IOMB Tag \n")); 1948 pm8001_printk("IO_ABORTED IOMB Tag\n"));
1948 ts->resp = SAS_TASK_COMPLETE; 1949 ts->resp = SAS_TASK_COMPLETE;
1949 ts->stat = SAS_ABORTED_TASK; 1950 ts->stat = SAS_ABORTED_TASK;
1950 if (pm8001_dev) 1951 if (pm8001_dev)
@@ -2728,11 +2729,11 @@ static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
2728 u32 phy_op = le32_to_cpu(pPayload->phyop_phyid) & OP_BITS; 2729 u32 phy_op = le32_to_cpu(pPayload->phyop_phyid) & OP_BITS;
2729 if (status != 0) { 2730 if (status != 0) {
2730 PM8001_MSG_DBG(pm8001_ha, 2731 PM8001_MSG_DBG(pm8001_ha,
2731 pm8001_printk("%x phy execute %x phy op failed! \n", 2732 pm8001_printk("%x phy execute %x phy op failed!\n",
2732 phy_id, phy_op)); 2733 phy_id, phy_op));
2733 } else 2734 } else
2734 PM8001_MSG_DBG(pm8001_ha, 2735 PM8001_MSG_DBG(pm8001_ha,
2735 pm8001_printk("%x phy execute %x phy op success! \n", 2736 pm8001_printk("%x phy execute %x phy op success!\n",
2736 phy_id, phy_op)); 2737 phy_id, phy_op));
2737 return 0; 2738 return 0;
2738} 2739}
@@ -3018,7 +3019,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
3018 break; 3019 break;
3019 case PORT_INVALID: 3020 case PORT_INVALID:
3020 PM8001_MSG_DBG(pm8001_ha, 3021 PM8001_MSG_DBG(pm8001_ha,
3021 pm8001_printk(" PortInvalid portID %d \n", port_id)); 3022 pm8001_printk(" PortInvalid portID %d\n", port_id));
3022 PM8001_MSG_DBG(pm8001_ha, 3023 PM8001_MSG_DBG(pm8001_ha,
3023 pm8001_printk(" Last phy Down and port invalid\n")); 3024 pm8001_printk(" Last phy Down and port invalid\n"));
3024 port->port_attached = 0; 3025 port->port_attached = 0;
@@ -3027,7 +3028,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
3027 break; 3028 break;
3028 case PORT_IN_RESET: 3029 case PORT_IN_RESET:
3029 PM8001_MSG_DBG(pm8001_ha, 3030 PM8001_MSG_DBG(pm8001_ha,
3030 pm8001_printk(" Port In Reset portID %d \n", port_id)); 3031 pm8001_printk(" Port In Reset portID %d\n", port_id));
3031 break; 3032 break;
3032 case PORT_NOT_ESTABLISHED: 3033 case PORT_NOT_ESTABLISHED:
3033 PM8001_MSG_DBG(pm8001_ha, 3034 PM8001_MSG_DBG(pm8001_ha,
@@ -3220,7 +3221,7 @@ mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
3220 pm8001_printk(" status = 0x%x\n", status)); 3221 pm8001_printk(" status = 0x%x\n", status));
3221 for (i = 0; i < GENERAL_EVENT_PAYLOAD; i++) 3222 for (i = 0; i < GENERAL_EVENT_PAYLOAD; i++)
3222 PM8001_MSG_DBG(pm8001_ha, 3223 PM8001_MSG_DBG(pm8001_ha,
3223 pm8001_printk("inb_IOMB_payload[0x%x] 0x%x, \n", i, 3224 pm8001_printk("inb_IOMB_payload[0x%x] 0x%x,\n", i,
3224 pPayload->inb_IOMB_payload[i])); 3225 pPayload->inb_IOMB_payload[i]));
3225 return 0; 3226 return 0;
3226} 3227}
@@ -3312,12 +3313,12 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
3312 break; 3313 break;
3313 case HW_EVENT_SAS_PHY_UP: 3314 case HW_EVENT_SAS_PHY_UP:
3314 PM8001_MSG_DBG(pm8001_ha, 3315 PM8001_MSG_DBG(pm8001_ha,
3315 pm8001_printk("HW_EVENT_PHY_START_STATUS \n")); 3316 pm8001_printk("HW_EVENT_PHY_START_STATUS\n"));
3316 hw_event_sas_phy_up(pm8001_ha, piomb); 3317 hw_event_sas_phy_up(pm8001_ha, piomb);
3317 break; 3318 break;
3318 case HW_EVENT_SATA_PHY_UP: 3319 case HW_EVENT_SATA_PHY_UP:
3319 PM8001_MSG_DBG(pm8001_ha, 3320 PM8001_MSG_DBG(pm8001_ha,
3320 pm8001_printk("HW_EVENT_SATA_PHY_UP \n")); 3321 pm8001_printk("HW_EVENT_SATA_PHY_UP\n"));
3321 hw_event_sata_phy_up(pm8001_ha, piomb); 3322 hw_event_sata_phy_up(pm8001_ha, piomb);
3322 break; 3323 break;
3323 case HW_EVENT_PHY_STOP_STATUS: 3324 case HW_EVENT_PHY_STOP_STATUS:
@@ -3329,12 +3330,12 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
3329 break; 3330 break;
3330 case HW_EVENT_SATA_SPINUP_HOLD: 3331 case HW_EVENT_SATA_SPINUP_HOLD:
3331 PM8001_MSG_DBG(pm8001_ha, 3332 PM8001_MSG_DBG(pm8001_ha,
3332 pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD \n")); 3333 pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD\n"));
3333 sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD); 3334 sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
3334 break; 3335 break;
3335 case HW_EVENT_PHY_DOWN: 3336 case HW_EVENT_PHY_DOWN:
3336 PM8001_MSG_DBG(pm8001_ha, 3337 PM8001_MSG_DBG(pm8001_ha,
3337 pm8001_printk("HW_EVENT_PHY_DOWN \n")); 3338 pm8001_printk("HW_EVENT_PHY_DOWN\n"));
3338 sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL); 3339 sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
3339 phy->phy_attached = 0; 3340 phy->phy_attached = 0;
3340 phy->phy_state = 0; 3341 phy->phy_state = 0;
@@ -3446,7 +3447,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
3446 break; 3447 break;
3447 case HW_EVENT_LINK_ERR_PHY_RESET_FAILED: 3448 case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
3448 PM8001_MSG_DBG(pm8001_ha, 3449 PM8001_MSG_DBG(pm8001_ha,
3449 pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED \n")); 3450 pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n"));
3450 pm8001_hw_event_ack_req(pm8001_ha, 0, 3451 pm8001_hw_event_ack_req(pm8001_ha, 0,
3451 HW_EVENT_LINK_ERR_PHY_RESET_FAILED, 3452 HW_EVENT_LINK_ERR_PHY_RESET_FAILED,
3452 port_id, phy_id, 0, 0); 3453 port_id, phy_id, 0, 0);
@@ -3456,25 +3457,25 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
3456 break; 3457 break;
3457 case HW_EVENT_PORT_RESET_TIMER_TMO: 3458 case HW_EVENT_PORT_RESET_TIMER_TMO:
3458 PM8001_MSG_DBG(pm8001_ha, 3459 PM8001_MSG_DBG(pm8001_ha,
3459 pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO \n")); 3460 pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n"));
3460 sas_phy_disconnected(sas_phy); 3461 sas_phy_disconnected(sas_phy);
3461 phy->phy_attached = 0; 3462 phy->phy_attached = 0;
3462 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); 3463 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
3463 break; 3464 break;
3464 case HW_EVENT_PORT_RECOVERY_TIMER_TMO: 3465 case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
3465 PM8001_MSG_DBG(pm8001_ha, 3466 PM8001_MSG_DBG(pm8001_ha,
3466 pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO \n")); 3467 pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO\n"));
3467 sas_phy_disconnected(sas_phy); 3468 sas_phy_disconnected(sas_phy);
3468 phy->phy_attached = 0; 3469 phy->phy_attached = 0;
3469 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); 3470 sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
3470 break; 3471 break;
3471 case HW_EVENT_PORT_RECOVER: 3472 case HW_EVENT_PORT_RECOVER:
3472 PM8001_MSG_DBG(pm8001_ha, 3473 PM8001_MSG_DBG(pm8001_ha,
3473 pm8001_printk("HW_EVENT_PORT_RECOVER \n")); 3474 pm8001_printk("HW_EVENT_PORT_RECOVER\n"));
3474 break; 3475 break;
3475 case HW_EVENT_PORT_RESET_COMPLETE: 3476 case HW_EVENT_PORT_RESET_COMPLETE:
3476 PM8001_MSG_DBG(pm8001_ha, 3477 PM8001_MSG_DBG(pm8001_ha,
3477 pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE \n")); 3478 pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n"));
3478 break; 3479 break;
3479 case EVENT_BROADCAST_ASYNCH_EVENT: 3480 case EVENT_BROADCAST_ASYNCH_EVENT:
3480 PM8001_MSG_DBG(pm8001_ha, 3481 PM8001_MSG_DBG(pm8001_ha,
@@ -3502,21 +3503,21 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
3502 3503
3503 switch (opc) { 3504 switch (opc) {
3504 case OPC_OUB_ECHO: 3505 case OPC_OUB_ECHO:
3505 PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO \n")); 3506 PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO\n"));
3506 break; 3507 break;
3507 case OPC_OUB_HW_EVENT: 3508 case OPC_OUB_HW_EVENT:
3508 PM8001_MSG_DBG(pm8001_ha, 3509 PM8001_MSG_DBG(pm8001_ha,
3509 pm8001_printk("OPC_OUB_HW_EVENT \n")); 3510 pm8001_printk("OPC_OUB_HW_EVENT\n"));
3510 mpi_hw_event(pm8001_ha, piomb); 3511 mpi_hw_event(pm8001_ha, piomb);
3511 break; 3512 break;
3512 case OPC_OUB_SSP_COMP: 3513 case OPC_OUB_SSP_COMP:
3513 PM8001_MSG_DBG(pm8001_ha, 3514 PM8001_MSG_DBG(pm8001_ha,
3514 pm8001_printk("OPC_OUB_SSP_COMP \n")); 3515 pm8001_printk("OPC_OUB_SSP_COMP\n"));
3515 mpi_ssp_completion(pm8001_ha, piomb); 3516 mpi_ssp_completion(pm8001_ha, piomb);
3516 break; 3517 break;
3517 case OPC_OUB_SMP_COMP: 3518 case OPC_OUB_SMP_COMP:
3518 PM8001_MSG_DBG(pm8001_ha, 3519 PM8001_MSG_DBG(pm8001_ha,
3519 pm8001_printk("OPC_OUB_SMP_COMP \n")); 3520 pm8001_printk("OPC_OUB_SMP_COMP\n"));
3520 mpi_smp_completion(pm8001_ha, piomb); 3521 mpi_smp_completion(pm8001_ha, piomb);
3521 break; 3522 break;
3522 case OPC_OUB_LOCAL_PHY_CNTRL: 3523 case OPC_OUB_LOCAL_PHY_CNTRL:
@@ -3526,26 +3527,26 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
3526 break; 3527 break;
3527 case OPC_OUB_DEV_REGIST: 3528 case OPC_OUB_DEV_REGIST:
3528 PM8001_MSG_DBG(pm8001_ha, 3529 PM8001_MSG_DBG(pm8001_ha,
3529 pm8001_printk("OPC_OUB_DEV_REGIST \n")); 3530 pm8001_printk("OPC_OUB_DEV_REGIST\n"));
3530 mpi_reg_resp(pm8001_ha, piomb); 3531 mpi_reg_resp(pm8001_ha, piomb);
3531 break; 3532 break;
3532 case OPC_OUB_DEREG_DEV: 3533 case OPC_OUB_DEREG_DEV:
3533 PM8001_MSG_DBG(pm8001_ha, 3534 PM8001_MSG_DBG(pm8001_ha,
3534 pm8001_printk("unresgister the deviece \n")); 3535 pm8001_printk("unresgister the deviece\n"));
3535 mpi_dereg_resp(pm8001_ha, piomb); 3536 mpi_dereg_resp(pm8001_ha, piomb);
3536 break; 3537 break;
3537 case OPC_OUB_GET_DEV_HANDLE: 3538 case OPC_OUB_GET_DEV_HANDLE:
3538 PM8001_MSG_DBG(pm8001_ha, 3539 PM8001_MSG_DBG(pm8001_ha,
3539 pm8001_printk("OPC_OUB_GET_DEV_HANDLE \n")); 3540 pm8001_printk("OPC_OUB_GET_DEV_HANDLE\n"));
3540 break; 3541 break;
3541 case OPC_OUB_SATA_COMP: 3542 case OPC_OUB_SATA_COMP:
3542 PM8001_MSG_DBG(pm8001_ha, 3543 PM8001_MSG_DBG(pm8001_ha,
3543 pm8001_printk("OPC_OUB_SATA_COMP \n")); 3544 pm8001_printk("OPC_OUB_SATA_COMP\n"));
3544 mpi_sata_completion(pm8001_ha, piomb); 3545 mpi_sata_completion(pm8001_ha, piomb);
3545 break; 3546 break;
3546 case OPC_OUB_SATA_EVENT: 3547 case OPC_OUB_SATA_EVENT:
3547 PM8001_MSG_DBG(pm8001_ha, 3548 PM8001_MSG_DBG(pm8001_ha,
3548 pm8001_printk("OPC_OUB_SATA_EVENT \n")); 3549 pm8001_printk("OPC_OUB_SATA_EVENT\n"));
3549 mpi_sata_event(pm8001_ha, piomb); 3550 mpi_sata_event(pm8001_ha, piomb);
3550 break; 3551 break;
3551 case OPC_OUB_SSP_EVENT: 3552 case OPC_OUB_SSP_EVENT:
@@ -3858,19 +3859,19 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
3858 circularQ = &pm8001_ha->inbnd_q_tbl[0]; 3859 circularQ = &pm8001_ha->inbnd_q_tbl[0];
3859 if (task->data_dir == PCI_DMA_NONE) { 3860 if (task->data_dir == PCI_DMA_NONE) {
3860 ATAP = 0x04; /* no data*/ 3861 ATAP = 0x04; /* no data*/
3861 PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data \n")); 3862 PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
3862 } else if (likely(!task->ata_task.device_control_reg_update)) { 3863 } else if (likely(!task->ata_task.device_control_reg_update)) {
3863 if (task->ata_task.dma_xfer) { 3864 if (task->ata_task.dma_xfer) {
3864 ATAP = 0x06; /* DMA */ 3865 ATAP = 0x06; /* DMA */
3865 PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA \n")); 3866 PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA\n"));
3866 } else { 3867 } else {
3867 ATAP = 0x05; /* PIO*/ 3868 ATAP = 0x05; /* PIO*/
3868 PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO \n")); 3869 PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n"));
3869 } 3870 }
3870 if (task->ata_task.use_ncq && 3871 if (task->ata_task.use_ncq &&
3871 dev->sata_dev.command_set != ATAPI_COMMAND_SET) { 3872 dev->sata_dev.command_set != ATAPI_COMMAND_SET) {
3872 ATAP = 0x07; /* FPDMA */ 3873 ATAP = 0x07; /* FPDMA */
3873 PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA \n")); 3874 PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
3874 } 3875 }
3875 } 3876 }
3876 if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) 3877 if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag))
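Almost all of the pm8001_hwi.c churn above is one mechanical cleanup: dropping the stray space before \n in log strings. The lone behavioral fix is the break; added in mpi_ssp_completion (new line 1652): without it, the open-reject retry case fell through into default: and logged a spurious "Unknown status" message. A compact stand-alone illustration of that fall-through hazard:

#include <stdio.h>

static void demo_complete(int status)
{
	switch (status) {
	case 1:
		puts("open reject, will retry");
		break;	/* without this, control falls into default: */
	default:
		printf("Unknown status 0x%x\n", status);
	}
}

int main(void)
{
	demo_complete(1);	/* prints only the retry message */
	return 0;
}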
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 172cefb6deb..c21a2163f9f 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -61,7 +61,7 @@ static struct scsi_host_template pm8001_sht = {
61 .name = DRV_NAME, 61 .name = DRV_NAME,
62 .queuecommand = sas_queuecommand, 62 .queuecommand = sas_queuecommand,
63 .target_alloc = sas_target_alloc, 63 .target_alloc = sas_target_alloc,
64 .slave_configure = pm8001_slave_configure, 64 .slave_configure = sas_slave_configure,
65 .slave_destroy = sas_slave_destroy, 65 .slave_destroy = sas_slave_destroy,
66 .scan_finished = pm8001_scan_finished, 66 .scan_finished = pm8001_scan_finished,
67 .scan_start = pm8001_scan_start, 67 .scan_start = pm8001_scan_start,
@@ -76,7 +76,7 @@ static struct scsi_host_template pm8001_sht = {
76 .use_clustering = ENABLE_CLUSTERING, 76 .use_clustering = ENABLE_CLUSTERING,
77 .eh_device_reset_handler = sas_eh_device_reset_handler, 77 .eh_device_reset_handler = sas_eh_device_reset_handler,
78 .eh_bus_reset_handler = sas_eh_bus_reset_handler, 78 .eh_bus_reset_handler = sas_eh_bus_reset_handler,
79 .slave_alloc = pm8001_slave_alloc, 79 .slave_alloc = sas_slave_alloc,
80 .target_destroy = sas_target_destroy, 80 .target_destroy = sas_target_destroy,
81 .ioctl = sas_ioctl, 81 .ioctl = sas_ioctl,
82 .shost_attrs = pm8001_host_attrs, 82 .shost_attrs = pm8001_host_attrs,
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 6ae059ebb4b..fb3dc997886 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -210,26 +210,12 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
210 PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id); 210 PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
211 break; 211 break;
212 default: 212 default:
213 rc = -EOPNOTSUPP; 213 rc = -ENOSYS;
214 } 214 }
215 msleep(300); 215 msleep(300);
216 return rc; 216 return rc;
217} 217}
218 218
219int pm8001_slave_alloc(struct scsi_device *scsi_dev)
220{
221 struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
222 if (dev_is_sata(dev)) {
223 /* We don't need to rescan targets
224 * if REPORT_LUNS request is failed
225 */
226 if (scsi_dev->lun > 0)
227 return -ENXIO;
228 scsi_dev->tagged_supported = 1;
229 }
230 return sas_slave_alloc(scsi_dev);
231}
232
233/** 219/**
234 * pm8001_scan_start - we should enable all HBA phys by sending the phy_start 220 * pm8001_scan_start - we should enable all HBA phys by sending the phy_start
235 * command to HBA. 221 * command to HBA.
@@ -314,22 +300,7 @@ static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
314{ 300{
315 return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb); 301 return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
316} 302}
317int pm8001_slave_configure(struct scsi_device *sdev) 303
318{
319 struct domain_device *dev = sdev_to_domain_dev(sdev);
320 int ret = sas_slave_configure(sdev);
321 if (ret)
322 return ret;
323 if (dev_is_sata(dev)) {
324 #ifdef PM8001_DISABLE_NCQ
325 struct ata_port *ap = dev->sata_dev.ap;
326 struct ata_device *adev = ap->link.device;
327 adev->flags |= ATA_DFLAG_NCQ_OFF;
328 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
329 #endif
330 }
331 return 0;
332}
333 /* Find the local port id that's attached to this device */ 304 /* Find the local port id that's attached to this device */
334static int sas_find_local_port_id(struct domain_device *dev) 305static int sas_find_local_port_id(struct domain_device *dev)
335{ 306{
@@ -385,21 +356,8 @@ static int pm8001_task_exec(struct sas_task *task, const int num,
385 do { 356 do {
386 dev = t->dev; 357 dev = t->dev;
387 pm8001_dev = dev->lldd_dev; 358 pm8001_dev = dev->lldd_dev;
388 if (DEV_IS_GONE(pm8001_dev)) {
389 if (pm8001_dev) {
390 PM8001_IO_DBG(pm8001_ha,
391 pm8001_printk("device %d not ready.\n",
392 pm8001_dev->device_id));
393 } else {
394 PM8001_IO_DBG(pm8001_ha,
395 pm8001_printk("device %016llx not "
396 "ready.\n", SAS_ADDR(dev->sas_addr)));
397 }
398 rc = SAS_PHY_DOWN;
399 goto out_done;
400 }
401 port = &pm8001_ha->port[sas_find_local_port_id(dev)]; 359 port = &pm8001_ha->port[sas_find_local_port_id(dev)];
402 if (!port->port_attached) { 360 if (DEV_IS_GONE(pm8001_dev) || !port->port_attached) {
403 if (sas_protocol_ata(t->task_proto)) { 361 if (sas_protocol_ata(t->task_proto)) {
404 struct task_status_struct *ts = &t->task_status; 362 struct task_status_struct *ts = &t->task_status;
405 ts->resp = SAS_TASK_UNDELIVERED; 363 ts->resp = SAS_TASK_UNDELIVERED;
@@ -651,7 +609,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
651 flag = 1; /* directly sata*/ 609 flag = 1; /* directly sata*/
652 } 610 }
653 } /*register this device to HBA*/ 611 } /*register this device to HBA*/
654 PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device \n")); 612 PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n"));
655 PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag); 613 PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
656 spin_unlock_irqrestore(&pm8001_ha->lock, flags); 614 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
657 wait_for_completion(&completion); 615 wait_for_completion(&completion);
@@ -669,30 +627,6 @@ int pm8001_dev_found(struct domain_device *dev)
669 return pm8001_dev_found_notify(dev); 627 return pm8001_dev_found_notify(dev);
670} 628}
671 629
672/**
673 * pm8001_alloc_task - allocate a task structure for TMF
674 */
675static struct sas_task *pm8001_alloc_task(void)
676{
677 struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL);
678 if (task) {
679 INIT_LIST_HEAD(&task->list);
680 spin_lock_init(&task->task_state_lock);
681 task->task_state_flags = SAS_TASK_STATE_PENDING;
682 init_timer(&task->timer);
683 init_completion(&task->completion);
684 }
685 return task;
686}
687
688static void pm8001_free_task(struct sas_task *task)
689{
690 if (task) {
691 BUG_ON(!list_empty(&task->list));
692 kfree(task);
693 }
694}
695
696static void pm8001_task_done(struct sas_task *task) 630static void pm8001_task_done(struct sas_task *task)
697{ 631{
698 if (!del_timer(&task->timer)) 632 if (!del_timer(&task->timer))
@@ -728,7 +662,7 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
728 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); 662 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
729 663
730 for (retry = 0; retry < 3; retry++) { 664 for (retry = 0; retry < 3; retry++) {
731 task = pm8001_alloc_task(); 665 task = sas_alloc_task(GFP_KERNEL);
732 if (!task) 666 if (!task)
733 return -ENOMEM; 667 return -ENOMEM;
734 668
@@ -789,14 +723,13 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
789 SAS_ADDR(dev->sas_addr), 723 SAS_ADDR(dev->sas_addr),
790 task->task_status.resp, 724 task->task_status.resp,
791 task->task_status.stat)); 725 task->task_status.stat));
792 pm8001_free_task(task); 726 sas_free_task(task);
793 task = NULL; 727 task = NULL;
794 } 728 }
795 } 729 }
796ex_err: 730ex_err:
797 BUG_ON(retry == 3 && task != NULL); 731 BUG_ON(retry == 3 && task != NULL);
798 if (task != NULL) 732 sas_free_task(task);
799 pm8001_free_task(task);
800 return res; 733 return res;
801} 734}
802 735
@@ -811,7 +744,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
811 struct sas_task *task = NULL; 744 struct sas_task *task = NULL;
812 745
813 for (retry = 0; retry < 3; retry++) { 746 for (retry = 0; retry < 3; retry++) {
814 task = pm8001_alloc_task(); 747 task = sas_alloc_task(GFP_KERNEL);
815 if (!task) 748 if (!task)
816 return -ENOMEM; 749 return -ENOMEM;
817 750
@@ -864,14 +797,13 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
864 SAS_ADDR(dev->sas_addr), 797 SAS_ADDR(dev->sas_addr),
865 task->task_status.resp, 798 task->task_status.resp,
866 task->task_status.stat)); 799 task->task_status.stat));
867 pm8001_free_task(task); 800 sas_free_task(task);
868 task = NULL; 801 task = NULL;
869 } 802 }
870 } 803 }
871ex_err: 804ex_err:
872 BUG_ON(retry == 3 && task != NULL); 805 BUG_ON(retry == 3 && task != NULL);
873 if (task != NULL) 806 sas_free_task(task);
874 pm8001_free_task(task);
875 return res; 807 return res;
876} 808}
877 809
@@ -1026,13 +958,14 @@ int pm8001_query_task(struct sas_task *task)
1026 /* The task is still in Lun, release it then */ 958 /* The task is still in Lun, release it then */
1027 case TMF_RESP_FUNC_SUCC: 959 case TMF_RESP_FUNC_SUCC:
1028 PM8001_EH_DBG(pm8001_ha, 960 PM8001_EH_DBG(pm8001_ha,
1029 pm8001_printk("The task is still in Lun \n")); 961 pm8001_printk("The task is still in Lun\n"));
962 break;
1030 /* The task is not in Lun or failed, reset the phy */ 963 /* The task is not in Lun or failed, reset the phy */
1031 case TMF_RESP_FUNC_FAILED: 964 case TMF_RESP_FUNC_FAILED:
1032 case TMF_RESP_FUNC_COMPLETE: 965 case TMF_RESP_FUNC_COMPLETE:
1033 PM8001_EH_DBG(pm8001_ha, 966 PM8001_EH_DBG(pm8001_ha,
1034 pm8001_printk("The task is not in Lun or failed," 967 pm8001_printk("The task is not in Lun or failed,"
1035 " reset the phy \n")); 968 " reset the phy\n"));
1036 break; 969 break;
1037 } 970 }
1038 } 971 }
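The pm8001_sas.c hunks above retire the driver's private slave_alloc/slave_configure wrappers (the generic libsas equivalents are wired into the host template in pm8001_init.c earlier in this diff) and replace pm8001_alloc_task()/pm8001_free_task() with the libsas helpers sas_alloc_task()/sas_free_task(). The if (task != NULL) guards can disappear at the call sites because the free helper tolerates NULL in the spirit of kfree() -- the deleted local pm8001_free_task() already checked if (task) internally. A minimal sketch of that convention:

#include <stdlib.h>

struct demo_task { int state; };

/* NULL-safe free, kfree()-style: error paths may call it unconditionally */
static void demo_free_task(struct demo_task *task)
{
	if (task)
		free(task);
}

int main(void)
{
	struct demo_task *task = NULL;	/* e.g. nothing was allocated */
	demo_free_task(task);		/* still safe, no guard at the call site */
	return 0;
}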
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index b97c8ab0c20..93959febe20 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -471,8 +471,6 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
471 struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx); 471 struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx);
472int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, 472int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
473 void *funcdata); 473 void *funcdata);
474int pm8001_slave_alloc(struct scsi_device *scsi_dev);
475int pm8001_slave_configure(struct scsi_device *sdev);
476void pm8001_scan_start(struct Scsi_Host *shost); 474void pm8001_scan_start(struct Scsi_Host *shost);
477int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time); 475int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time);
478int pm8001_queue_command(struct sas_task *task, const int num, 476int pm8001_queue_command(struct sas_task *task, const int num,
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index a31e05f3bfd..ac326c41e93 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -23,11 +23,23 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
23 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, 23 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
24 struct device, kobj))); 24 struct device, kobj)));
25 struct qla_hw_data *ha = vha->hw; 25 struct qla_hw_data *ha = vha->hw;
26 int rval = 0;
26 27
27 if (ha->fw_dump_reading == 0) 28 if (ha->fw_dump_reading == 0)
28 return 0; 29 return 0;
29 30
30 return memory_read_from_buffer(buf, count, &off, ha->fw_dump, 31 if (IS_QLA82XX(ha)) {
32 if (off < ha->md_template_size) {
33 rval = memory_read_from_buffer(buf, count,
34 &off, ha->md_tmplt_hdr, ha->md_template_size);
35 return rval;
36 }
37 off -= ha->md_template_size;
38 rval = memory_read_from_buffer(buf, count,
39 &off, ha->md_dump, ha->md_dump_size);
40 return rval;
41 } else
42 return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
31 ha->fw_dump_len); 43 ha->fw_dump_len);
32} 44}
33 45
@@ -41,12 +53,6 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
41 struct qla_hw_data *ha = vha->hw; 53 struct qla_hw_data *ha = vha->hw;
42 int reading; 54 int reading;
43 55
44 if (IS_QLA82XX(ha)) {
45 ql_dbg(ql_dbg_user, vha, 0x705b,
46 "Firmware dump not supported for ISP82xx\n");
47 return count;
48 }
49
50 if (off != 0) 56 if (off != 0)
51 return (0); 57 return (0);
52 58
@@ -59,6 +65,10 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
59 ql_log(ql_log_info, vha, 0x705d, 65 ql_log(ql_log_info, vha, 0x705d,
60 "Firmware dump cleared on (%ld).\n", vha->host_no); 66 "Firmware dump cleared on (%ld).\n", vha->host_no);
61 67
68 if (IS_QLA82XX(vha->hw)) {
69 qla82xx_md_free(vha);
70 qla82xx_md_prep(vha);
71 }
62 ha->fw_dump_reading = 0; 72 ha->fw_dump_reading = 0;
63 ha->fw_dumped = 0; 73 ha->fw_dumped = 0;
64 break; 74 break;
@@ -75,10 +85,29 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
75 qla2x00_alloc_fw_dump(vha); 85 qla2x00_alloc_fw_dump(vha);
76 break; 86 break;
77 case 3: 87 case 3:
78 qla2x00_system_error(vha); 88 if (IS_QLA82XX(ha)) {
89 qla82xx_idc_lock(ha);
90 qla82xx_set_reset_owner(vha);
91 qla82xx_idc_unlock(ha);
92 } else
93 qla2x00_system_error(vha);
94 break;
95 case 4:
96 if (IS_QLA82XX(ha)) {
97 if (ha->md_tmplt_hdr)
98 ql_dbg(ql_dbg_user, vha, 0x705b,
99 "MiniDump supported with this firmware.\n");
100 else
101 ql_dbg(ql_dbg_user, vha, 0x709d,
102 "MiniDump not supported with this firmware.\n");
103 }
104 break;
105 case 5:
106 if (IS_QLA82XX(ha))
107 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
79 break; 108 break;
80 } 109 }
81 return (count); 110 return -EINVAL;
82} 111}
83 112
84static struct bin_attribute sysfs_fw_dump_attr = { 113static struct bin_attribute sysfs_fw_dump_attr = {
@@ -122,7 +151,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
122 151
123 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size || 152 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
124 !ha->isp_ops->write_nvram) 153 !ha->isp_ops->write_nvram)
125 return 0; 154 return -EINVAL;
126 155
127 /* Checksum NVRAM. */ 156 /* Checksum NVRAM. */
128 if (IS_FWI2_CAPABLE(ha)) { 157 if (IS_FWI2_CAPABLE(ha)) {
@@ -165,7 +194,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
165 qla2xxx_wake_dpc(vha); 194 qla2xxx_wake_dpc(vha);
166 qla2x00_wait_for_chip_reset(vha); 195 qla2x00_wait_for_chip_reset(vha);
167 196
168 return (count); 197 return count;
169} 198}
170 199
171static struct bin_attribute sysfs_nvram_attr = { 200static struct bin_attribute sysfs_nvram_attr = {
@@ -239,10 +268,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
239 int val, valid; 268 int val, valid;
240 269
241 if (off) 270 if (off)
242 return 0; 271 return -EINVAL;
243 272
244 if (unlikely(pci_channel_offline(ha->pdev))) 273 if (unlikely(pci_channel_offline(ha->pdev)))
245 return 0; 274 return -EAGAIN;
246 275
247 if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1) 276 if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
248 return -EINVAL; 277 return -EINVAL;
@@ -253,7 +282,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
253 case 0: 282 case 0:
254 if (ha->optrom_state != QLA_SREADING && 283 if (ha->optrom_state != QLA_SREADING &&
255 ha->optrom_state != QLA_SWRITING) 284 ha->optrom_state != QLA_SWRITING)
256 break; 285 return -EINVAL;
257 286
258 ha->optrom_state = QLA_SWAITING; 287 ha->optrom_state = QLA_SWAITING;
259 288
@@ -266,7 +295,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
266 break; 295 break;
267 case 1: 296 case 1:
268 if (ha->optrom_state != QLA_SWAITING) 297 if (ha->optrom_state != QLA_SWAITING)
269 break; 298 return -EINVAL;
270 299
271 ha->optrom_region_start = start; 300 ha->optrom_region_start = start;
272 ha->optrom_region_size = start + size > ha->optrom_size ? 301 ha->optrom_region_size = start + size > ha->optrom_size ?
@@ -280,7 +309,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
280 "(%x).\n", ha->optrom_region_size); 309 "(%x).\n", ha->optrom_region_size);
281 310
282 ha->optrom_state = QLA_SWAITING; 311 ha->optrom_state = QLA_SWAITING;
283 return count; 312 return -ENOMEM;
284 } 313 }
285 314
286 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 315 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
@@ -299,7 +328,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
299 break; 328 break;
300 case 2: 329 case 2:
301 if (ha->optrom_state != QLA_SWAITING) 330 if (ha->optrom_state != QLA_SWAITING)
302 break; 331 return -EINVAL;
303 332
304 /* 333 /*
305 * We need to be more restrictive on which FLASH regions are 334 * We need to be more restrictive on which FLASH regions are
@@ -347,7 +376,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
347 "(%x)\n", ha->optrom_region_size); 376 "(%x)\n", ha->optrom_region_size);
348 377
349 ha->optrom_state = QLA_SWAITING; 378 ha->optrom_state = QLA_SWAITING;
350 return count; 379 return -ENOMEM;
351 } 380 }
352 381
353 ql_dbg(ql_dbg_user, vha, 0x7067, 382 ql_dbg(ql_dbg_user, vha, 0x7067,
@@ -358,7 +387,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
358 break; 387 break;
359 case 3: 388 case 3:
360 if (ha->optrom_state != QLA_SWRITING) 389 if (ha->optrom_state != QLA_SWRITING)
361 break; 390 return -ENOMEM;
362 391
363 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 392 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
364 ql_log(ql_log_warn, vha, 0x7068, 393 ql_log(ql_log_warn, vha, 0x7068,
@@ -374,7 +403,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
374 ha->optrom_region_start, ha->optrom_region_size); 403 ha->optrom_region_start, ha->optrom_region_size);
375 break; 404 break;
376 default: 405 default:
377 count = -EINVAL; 406 return -EINVAL;
378 } 407 }
379 return count; 408 return count;
380} 409}
@@ -398,10 +427,10 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
398 struct qla_hw_data *ha = vha->hw; 427 struct qla_hw_data *ha = vha->hw;
399 428
400 if (unlikely(pci_channel_offline(ha->pdev))) 429 if (unlikely(pci_channel_offline(ha->pdev)))
401 return 0; 430 return -EAGAIN;
402 431
403 if (!capable(CAP_SYS_ADMIN)) 432 if (!capable(CAP_SYS_ADMIN))
404 return 0; 433 return -EINVAL;
405 434
406 if (IS_NOCACHE_VPD_TYPE(ha)) 435 if (IS_NOCACHE_VPD_TYPE(ha))
407 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2, 436 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
@@ -438,17 +467,17 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
438 467
439 /* Update flash version information for 4Gb & above. */ 468 /* Update flash version information for 4Gb & above. */
440 if (!IS_FWI2_CAPABLE(ha)) 469 if (!IS_FWI2_CAPABLE(ha))
441 goto done; 470 return -EINVAL;
442 471
443 tmp_data = vmalloc(256); 472 tmp_data = vmalloc(256);
444 if (!tmp_data) { 473 if (!tmp_data) {
445 ql_log(ql_log_warn, vha, 0x706b, 474 ql_log(ql_log_warn, vha, 0x706b,
446 "Unable to allocate memory for VPD information update.\n"); 475 "Unable to allocate memory for VPD information update.\n");
447 goto done; 476 return -ENOMEM;
448 } 477 }
449 ha->isp_ops->get_flash_version(vha, tmp_data); 478 ha->isp_ops->get_flash_version(vha, tmp_data);
450 vfree(tmp_data); 479 vfree(tmp_data);
451done: 480
452 return count; 481 return count;
453} 482}
454 483
@@ -505,8 +534,7 @@ do_read:
505 "Unable to read SFP data (%x/%x/%x).\n", rval, 534 "Unable to read SFP data (%x/%x/%x).\n", rval,
506 addr, offset); 535 addr, offset);
507 536
508 count = 0; 537 return -EIO;
509 break;
510 } 538 }
511 memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE); 539 memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
512 buf += SFP_BLOCK_SIZE; 540 buf += SFP_BLOCK_SIZE;
@@ -536,7 +564,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
536 int type; 564 int type;
537 565
538 if (off != 0) 566 if (off != 0)
539 return 0; 567 return -EINVAL;
540 568
541 type = simple_strtol(buf, NULL, 10); 569 type = simple_strtol(buf, NULL, 10);
542 switch (type) { 570 switch (type) {
@@ -546,13 +574,18 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
546 574
547 scsi_block_requests(vha->host); 575 scsi_block_requests(vha->host);
548 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 576 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
577 if (IS_QLA82XX(ha)) {
578 qla82xx_idc_lock(ha);
579 qla82xx_set_reset_owner(vha);
580 qla82xx_idc_unlock(ha);
581 }
549 qla2xxx_wake_dpc(vha); 582 qla2xxx_wake_dpc(vha);
550 qla2x00_wait_for_chip_reset(vha); 583 qla2x00_wait_for_chip_reset(vha);
551 scsi_unblock_requests(vha->host); 584 scsi_unblock_requests(vha->host);
552 break; 585 break;
553 case 0x2025d: 586 case 0x2025d:
554 if (!IS_QLA81XX(ha)) 587 if (!IS_QLA81XX(ha))
555 break; 588 return -EPERM;
556 589
557 ql_log(ql_log_info, vha, 0x706f, 590 ql_log(ql_log_info, vha, 0x706f,
558 "Issuing MPI reset.\n"); 591 "Issuing MPI reset.\n");
@@ -571,7 +604,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
571 if (!IS_QLA82XX(ha) || vha != base_vha) { 604 if (!IS_QLA82XX(ha) || vha != base_vha) {
572 ql_log(ql_log_info, vha, 0x7071, 605 ql_log(ql_log_info, vha, 0x7071,
573 "FCoE ctx reset no supported.\n"); 606 "FCoE ctx reset no supported.\n");
574 return count; 607 return -EPERM;
575 } 608 }
576 609
577 ql_log(ql_log_info, vha, 0x7072, 610 ql_log(ql_log_info, vha, 0x7072,
@@ -607,7 +640,7 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
607 ha->edc_data_len = 0; 640 ha->edc_data_len = 0;
608 641
609 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8) 642 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
610 return 0; 643 return -EINVAL;
611 644
612 if (!ha->edc_data) { 645 if (!ha->edc_data) {
613 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 646 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
@@ -615,7 +648,7 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
615 if (!ha->edc_data) { 648 if (!ha->edc_data) {
616 ql_log(ql_log_warn, vha, 0x7073, 649 ql_log(ql_log_warn, vha, 0x7073,
617 "Unable to allocate memory for EDC write.\n"); 650 "Unable to allocate memory for EDC write.\n");
618 return 0; 651 return -ENOMEM;
619 } 652 }
620 } 653 }
621 654
@@ -634,9 +667,9 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
634 dev, adr, len, opt); 667 dev, adr, len, opt);
635 if (rval != QLA_SUCCESS) { 668 if (rval != QLA_SUCCESS) {
636 ql_log(ql_log_warn, vha, 0x7074, 669 ql_log(ql_log_warn, vha, 0x7074,
637 "Unable to write EDC (%x) %02x:%04x:%02x:%02x\n", 670 "Unable to write EDC (%x) %02x:%04x:%02x:%02hhx\n",
638 rval, dev, adr, opt, len, buf[8]); 671 rval, dev, adr, opt, len, buf[8]);
639 return 0; 672 return -EIO;
640 } 673 }
641 674
642 return count; 675 return count;
@@ -665,7 +698,7 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
665 ha->edc_data_len = 0; 698 ha->edc_data_len = 0;
666 699
667 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8) 700 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
668 return 0; 701 return -EINVAL;
669 702
670 if (!ha->edc_data) { 703 if (!ha->edc_data) {
671 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, 704 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
@@ -673,7 +706,7 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
673 if (!ha->edc_data) { 706 if (!ha->edc_data) {
674 ql_log(ql_log_warn, vha, 0x708c, 707 ql_log(ql_log_warn, vha, 0x708c,
675 "Unable to allocate memory for EDC status.\n"); 708 "Unable to allocate memory for EDC status.\n");
676 return 0; 709 return -ENOMEM;
677 } 710 }
678 } 711 }
679 712
@@ -693,7 +726,7 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
693 ql_log(ql_log_info, vha, 0x7075, 726 ql_log(ql_log_info, vha, 0x7075,
694 "Unable to write EDC status (%x) %02x:%04x:%02x.\n", 727 "Unable to write EDC status (%x) %02x:%04x:%02x.\n",
695 rval, dev, adr, opt, len); 728 rval, dev, adr, opt, len);
696 return 0; 729 return -EIO;
697 } 730 }
698 731
699 ha->edc_data_len = len; 732 ha->edc_data_len = len;
@@ -805,7 +838,7 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
805 if (!ha->dcbx_tlv) { 838 if (!ha->dcbx_tlv) {
806 ql_log(ql_log_warn, vha, 0x7078, 839 ql_log(ql_log_warn, vha, 0x7078,
807 "Unable to allocate memory for DCBX TLV read-data.\n"); 840 "Unable to allocate memory for DCBX TLV read-data.\n");
808 return 0; 841 return -ENOMEM;
809 } 842 }
810 843
811do_read: 844do_read:
@@ -817,7 +850,7 @@ do_read:
817 if (rval != QLA_SUCCESS) { 850 if (rval != QLA_SUCCESS) {
818 ql_log(ql_log_warn, vha, 0x7079, 851 ql_log(ql_log_warn, vha, 0x7079,
819 "Unable to read DCBX TLV (%x).\n", rval); 852 "Unable to read DCBX TLV (%x).\n", rval);
820 count = 0; 853 return -EIO;
821 } 854 }
822 855
823 memcpy(buf, ha->dcbx_tlv, count); 856 memcpy(buf, ha->dcbx_tlv, count);
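The common thread through the qla_attr.c hunks above: the sysfs bin-attribute handlers stop signalling failure with 0 (or a stale count) and return negative errno values instead (-EINVAL, -ENOMEM, -EIO, -EAGAIN, -EPERM), so a failure reaches userspace as a failed read()/write() rather than a silent short transfer. Seen from a user program it looks roughly like this (the sysfs path here is illustrative, not exact):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/class/scsi_host/host0/device/fw_dump", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "1", 1) < 0)	/* now fails loudly, e.g. EINVAL */
		perror("fw_dump write");
	close(fd);
	return 0;
}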
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 07d1767cd26..8b641a8a0c7 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -704,6 +704,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
704 elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 704 elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
705 705
706 if ((ha->current_topology == ISP_CFG_F || 706 if ((ha->current_topology == ISP_CFG_F ||
707 (atomic_read(&vha->loop_state) == LOOP_DOWN) ||
707 (IS_QLA81XX(ha) && 708 (IS_QLA81XX(ha) &&
708 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE 709 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
709 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && 710 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
@@ -1447,6 +1448,148 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1447} 1448}
1448 1449
1449static int 1450static int
1451qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
1452{
1453 struct Scsi_Host *host = bsg_job->shost;
1454 scsi_qla_host_t *vha = shost_priv(host);
1455 struct qla_hw_data *ha = vha->hw;
1456 int rval = 0;
1457 uint8_t bsg[DMA_POOL_SIZE];
1458 struct qla_image_version_list *list = (void *)bsg;
1459 struct qla_image_version *image;
1460 uint32_t count;
1461 dma_addr_t sfp_dma;
1462 void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1463 if (!sfp) {
1464 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1465 EXT_STATUS_NO_MEMORY;
1466 goto done;
1467 }
1468
1469 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1470 bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1471
1472 image = list->version;
1473 count = list->count;
1474 while (count--) {
1475 memcpy(sfp, &image->field_info, sizeof(image->field_info));
1476 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1477 image->field_address.device, image->field_address.offset,
1478 sizeof(image->field_info), image->field_address.option);
1479 if (rval) {
1480 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1481 EXT_STATUS_MAILBOX;
1482 goto dealloc;
1483 }
1484 image++;
1485 }
1486
1487 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1488
1489dealloc:
1490 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1491
1492done:
1493 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1494 bsg_job->reply->result = DID_OK << 16;
1495 bsg_job->job_done(bsg_job);
1496
1497 return 0;
1498}
1499
1500static int
1501qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
1502{
1503 struct Scsi_Host *host = bsg_job->shost;
1504 scsi_qla_host_t *vha = shost_priv(host);
1505 struct qla_hw_data *ha = vha->hw;
1506 int rval = 0;
1507 uint8_t bsg[DMA_POOL_SIZE];
1508 struct qla_status_reg *sr = (void *)bsg;
1509 dma_addr_t sfp_dma;
1510 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1511 if (!sfp) {
1512 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1513 EXT_STATUS_NO_MEMORY;
1514 goto done;
1515 }
1516
1517 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1518 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1519
1520 rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1521 sr->field_address.device, sr->field_address.offset,
1522 sizeof(sr->status_reg), sr->field_address.option);
1523 sr->status_reg = *sfp;
1524
1525 if (rval) {
1526 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1527 EXT_STATUS_MAILBOX;
1528 goto dealloc;
1529 }
1530
1531 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1532 bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1533
1534 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1535
1536dealloc:
1537 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1538
1539done:
1540 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1541 bsg_job->reply->reply_payload_rcv_len = sizeof(*sr);
1542 bsg_job->reply->result = DID_OK << 16;
1543 bsg_job->job_done(bsg_job);
1544
1545 return 0;
1546}
1547
1548static int
1549qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
1550{
1551 struct Scsi_Host *host = bsg_job->shost;
1552 scsi_qla_host_t *vha = shost_priv(host);
1553 struct qla_hw_data *ha = vha->hw;
1554 int rval = 0;
1555 uint8_t bsg[DMA_POOL_SIZE];
1556 struct qla_status_reg *sr = (void *)bsg;
1557 dma_addr_t sfp_dma;
1558 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1559 if (!sfp) {
1560 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1561 EXT_STATUS_NO_MEMORY;
1562 goto done;
1563 }
1564
1565 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1566 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1567
1568 *sfp = sr->status_reg;
1569 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1570 sr->field_address.device, sr->field_address.offset,
1571 sizeof(sr->status_reg), sr->field_address.option);
1572
1573 if (rval) {
1574 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1575 EXT_STATUS_MAILBOX;
1576 goto dealloc;
1577 }
1578
1579 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1580
1581dealloc:
1582 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1583
1584done:
1585 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1586 bsg_job->reply->result = DID_OK << 16;
1587 bsg_job->job_done(bsg_job);
1588
1589 return 0;
1590}
1591
1592static int
1450qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) 1593qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
1451{ 1594{
1452 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) { 1595 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
@@ -1474,6 +1617,15 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
1474 case QL_VND_UPDATE_FLASH: 1617 case QL_VND_UPDATE_FLASH:
1475 return qla2x00_update_optrom(bsg_job); 1618 return qla2x00_update_optrom(bsg_job);
1476 1619
1620 case QL_VND_SET_FRU_VERSION:
1621 return qla2x00_update_fru_versions(bsg_job);
1622
1623 case QL_VND_READ_FRU_STATUS:
1624 return qla2x00_read_fru_status(bsg_job);
1625
1626 case QL_VND_WRITE_FRU_STATUS:
1627 return qla2x00_write_fru_status(bsg_job);
1628
1477 default: 1629 default:
1478 bsg_job->reply->result = (DID_ERROR << 16); 1630 bsg_job->reply->result = (DID_ERROR << 16);
1479 bsg_job->job_done(bsg_job); 1631 bsg_job->job_done(bsg_job);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index 0f0f54e35f0..70caa63a893 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -16,6 +16,16 @@
16#define QL_VND_FCP_PRIO_CFG_CMD 0x06 16#define QL_VND_FCP_PRIO_CFG_CMD 0x06
17#define QL_VND_READ_FLASH 0x07 17#define QL_VND_READ_FLASH 0x07
18#define QL_VND_UPDATE_FLASH 0x08 18#define QL_VND_UPDATE_FLASH 0x08
19#define QL_VND_SET_FRU_VERSION 0x0B
20#define QL_VND_READ_FRU_STATUS 0x0C
21#define QL_VND_WRITE_FRU_STATUS 0x0D
22
23/* BSG Vendor specific subcode returns */
24#define EXT_STATUS_OK 0
25#define EXT_STATUS_ERR 1
26#define EXT_STATUS_INVALID_PARAM 6
27#define EXT_STATUS_MAILBOX 11
28#define EXT_STATUS_NO_MEMORY 17
19 29
20/* BSG definations for interpreting CommandSent field */ 30/* BSG definations for interpreting CommandSent field */
21#define INT_DEF_LB_LOOPBACK_CMD 0 31#define INT_DEF_LB_LOOPBACK_CMD 0
@@ -141,4 +151,36 @@ struct qla_port_param {
141 uint16_t mode; 151 uint16_t mode;
142 uint16_t speed; 152 uint16_t speed;
143} __attribute__ ((packed)); 153} __attribute__ ((packed));
154
155
156/* FRU VPD */
157
158#define MAX_FRU_SIZE 36
159
160struct qla_field_address {
161 uint16_t offset;
162 uint16_t device;
163 uint16_t option;
164} __packed;
165
166struct qla_field_info {
167 uint8_t version[MAX_FRU_SIZE];
168} __packed;
169
170struct qla_image_version {
171 struct qla_field_address field_address;
172 struct qla_field_info field_info;
173} __packed;
174
175struct qla_image_version_list {
176 uint32_t count;
177 struct qla_image_version version[0];
178} __packed;
179
180struct qla_status_reg {
181 struct qla_field_address field_address;
182 uint8_t status_reg;
183 uint8_t reserved[7];
184} __packed;
185
144#endif 186#endif
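All the new FRU structures above are __packed, pinning their byte-for-byte layout so the same buffer can be exchanged with the firmware regardless of compiler padding; with the fields shown, qla_field_address comes to 6 bytes, qla_status_reg to 14, and qla_image_version to 42. A quick compile-time check of the same idea (userspace attribute spelling, stand-in names):

#include <stdint.h>

struct demo_field_address {
	uint16_t offset;
	uint16_t device;
	uint16_t option;
} __attribute__((packed));

struct demo_status_reg {
	struct demo_field_address field_address;
	uint8_t status_reg;
	uint8_t reserved[7];
} __attribute__((packed));

/* packing guarantees there is no hidden padding in either layout */
_Static_assert(sizeof(struct demo_field_address) == 6, "padding crept in");
_Static_assert(sizeof(struct demo_status_reg) == 14, "padding crept in");

int main(void)
{
	return 0;
}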
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index d79cd8a5f83..9df4787715c 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -12,7 +12,7 @@
12 * | Level | Last Value Used | Holes | 12 * | Level | Last Value Used | Holes |
13 * ---------------------------------------------------------------------- 13 * ----------------------------------------------------------------------
14 * | Module Init and Probe | 0x0116 | | 14 * | Module Init and Probe | 0x0116 | |
15 * | Mailbox commands | 0x1126 | | 15 * | Mailbox commands | 0x1129 | |
16 * | Device Discovery | 0x2083 | | 16 * | Device Discovery | 0x2083 | |
17 * | Queue Command and IO tracing | 0x302e | 0x3008 | 17 * | Queue Command and IO tracing | 0x302e | 0x3008 |
18 * | DPC Thread | 0x401c | | 18 * | DPC Thread | 0x401c | |
@@ -22,7 +22,7 @@
22 * | Task Management | 0x8041 | | 22 * | Task Management | 0x8041 | |
23 * | AER/EEH | 0x900f | | 23 * | AER/EEH | 0x900f | |
24 * | Virtual Port | 0xa007 | | 24 * | Virtual Port | 0xa007 | |
25 * | ISP82XX Specific | 0xb04f | | 25 * | ISP82XX Specific | 0xb051 | |
26 * | MultiQ | 0xc00b | | 26 * | MultiQ | 0xc00b | |
27 * | Misc | 0xd00b | | 27 * | Misc | 0xd00b | |
28 * ---------------------------------------------------------------------- 28 * ----------------------------------------------------------------------
@@ -403,7 +403,7 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
403 return ptr + sizeof(struct qla2xxx_mq_chain); 403 return ptr + sizeof(struct qla2xxx_mq_chain);
404} 404}
405 405
406static void 406void
407qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval) 407qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
408{ 408{
409 struct qla_hw_data *ha = vha->hw; 409 struct qla_hw_data *ha = vha->hw;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a03eaf40f37..fcf052c50bf 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2438,7 +2438,8 @@ struct qla_hw_data {
2438 uint32_t quiesce_owner:1; 2438 uint32_t quiesce_owner:1;
2439 uint32_t thermal_supported:1; 2439 uint32_t thermal_supported:1;
2440 uint32_t isp82xx_reset_hdlr_active:1; 2440 uint32_t isp82xx_reset_hdlr_active:1;
2441 /* 26 bits */ 2441 uint32_t isp82xx_reset_owner:1;
2442 /* 28 bits */
2442 } flags; 2443 } flags;
2443 2444
2444 /* This spinlock is used to protect "io transactions", you must 2445 /* This spinlock is used to protect "io transactions", you must
@@ -2822,6 +2823,12 @@ struct qla_hw_data {
2822 2823
2823 uint8_t fw_type; 2824 uint8_t fw_type;
2824 __le32 file_prd_off; /* File firmware product offset */ 2825 __le32 file_prd_off; /* File firmware product offset */
2826
2827 uint32_t md_template_size;
2828 void *md_tmplt_hdr;
2829 dma_addr_t md_tmplt_hdr_dma;
2830 void *md_dump;
2831 uint32_t md_dump_size;
2825}; 2832};
2826 2833
2827/* 2834/*
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 29b1a3e2823..ce32d8135c9 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -104,6 +104,8 @@ extern int ql2xenablehba_err_chk;
104extern int ql2xtargetreset; 104extern int ql2xtargetreset;
105extern int ql2xdontresethba; 105extern int ql2xdontresethba;
106extern unsigned int ql2xmaxlun; 106extern unsigned int ql2xmaxlun;
107extern int ql2xmdcapmask;
108extern int ql2xmdenable;
107 109
108extern int qla2x00_loop_reset(scsi_qla_host_t *); 110extern int qla2x00_loop_reset(scsi_qla_host_t *);
109extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); 111extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -407,6 +409,8 @@ extern void qla2x00_beacon_blink(struct scsi_qla_host *);
407extern int qla24xx_beacon_on(struct scsi_qla_host *); 409extern int qla24xx_beacon_on(struct scsi_qla_host *);
408extern int qla24xx_beacon_off(struct scsi_qla_host *); 410extern int qla24xx_beacon_off(struct scsi_qla_host *);
409extern void qla24xx_beacon_blink(struct scsi_qla_host *); 411extern void qla24xx_beacon_blink(struct scsi_qla_host *);
412extern int qla82xx_beacon_on(struct scsi_qla_host *);
413extern int qla82xx_beacon_off(struct scsi_qla_host *);
410 414
411extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *, 415extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *,
412 uint32_t, uint32_t); 416 uint32_t, uint32_t);
@@ -442,6 +446,7 @@ extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
442extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t); 446extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t);
443extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t, 447extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t,
444 uint8_t *, uint32_t); 448 uint8_t *, uint32_t);
449extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int);
445 450
446/* 451/*
447 * Global Function Prototypes in qla_gs.c source file. 452 * Global Function Prototypes in qla_gs.c source file.
@@ -569,7 +574,10 @@ extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
569extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *); 574extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
570extern void qla82xx_start_iocbs(srb_t *); 575extern void qla82xx_start_iocbs(srb_t *);
571extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *); 576extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
577extern int qla82xx_check_md_needed(scsi_qla_host_t *);
572extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *); 578extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
579extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
580extern char *qdev_state(uint32_t);
573 581
574/* BSG related functions */ 582/* BSG related functions */
575extern int qla24xx_bsg_request(struct fc_bsg_job *); 583extern int qla24xx_bsg_request(struct fc_bsg_job *);
@@ -579,4 +587,14 @@ extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *,
579 dma_addr_t, size_t, uint32_t); 587 dma_addr_t, size_t, uint32_t);
580extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t, 588extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t,
581 uint16_t *, uint16_t *); 589 uint16_t *, uint16_t *);
590
591/* Minidump related functions */
592extern int qla82xx_md_get_template_size(scsi_qla_host_t *);
593extern int qla82xx_md_get_template(scsi_qla_host_t *);
594extern int qla82xx_md_alloc(scsi_qla_host_t *);
595extern void qla82xx_md_free(scsi_qla_host_t *);
596extern int qla82xx_md_collect(scsi_qla_host_t *);
597extern void qla82xx_md_prep(scsi_qla_host_t *);
598extern void qla82xx_set_reset_owner(scsi_qla_host_t *);
599
582#endif /* _QLA_GBL_H */ 600#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 37da04d3db2..f03e915f187 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1480,13 +1480,19 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
1480 if (rval == QLA_SUCCESS) { 1480 if (rval == QLA_SUCCESS) {
1481enable_82xx_npiv: 1481enable_82xx_npiv:
1482 fw_major_version = ha->fw_major_version; 1482 fw_major_version = ha->fw_major_version;
1483 rval = qla2x00_get_fw_version(vha, 1483 if (IS_QLA82XX(ha))
1484 &ha->fw_major_version, 1484 qla82xx_check_md_needed(vha);
1485 &ha->fw_minor_version, 1485 else {
1486 &ha->fw_subminor_version, 1486 rval = qla2x00_get_fw_version(vha,
1487 &ha->fw_attributes, &ha->fw_memory_size, 1487 &ha->fw_major_version,
1488 ha->mpi_version, &ha->mpi_capabilities, 1488 &ha->fw_minor_version,
1489 ha->phy_version); 1489 &ha->fw_subminor_version,
1490 &ha->fw_attributes,
1491 &ha->fw_memory_size,
1492 ha->mpi_version,
1493 &ha->mpi_capabilities,
1494 ha->phy_version);
1495 }
1490 if (rval != QLA_SUCCESS) 1496 if (rval != QLA_SUCCESS)
1491 goto failed; 1497 goto failed;
1492 ha->flags.npiv_supported = 0; 1498 ha->flags.npiv_supported = 0;
@@ -1503,10 +1509,8 @@ enable_82xx_npiv:
1503 &ha->fw_xcb_count, NULL, NULL, 1509 &ha->fw_xcb_count, NULL, NULL,
1504 &ha->max_npiv_vports, NULL); 1510 &ha->max_npiv_vports, NULL);
1505 1511
1506 if (!fw_major_version && ql2xallocfwdump) { 1512 if (!fw_major_version && ql2xallocfwdump)
1507 if (!IS_QLA82XX(ha)) 1513 qla2x00_alloc_fw_dump(vha);
1508 qla2x00_alloc_fw_dump(vha);
1509 }
1510 } 1514 }
1511 } else { 1515 } else {
1512 ql_log(ql_log_fatal, vha, 0x00cd, 1516 ql_log(ql_log_fatal, vha, 0x00cd,
@@ -1924,7 +1928,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1924 rval = qla84xx_init_chip(vha); 1928 rval = qla84xx_init_chip(vha);
1925 if (rval != QLA_SUCCESS) { 1929 if (rval != QLA_SUCCESS) {
1926 ql_log(ql_log_warn, 1930 ql_log(ql_log_warn,
1927 vha, 0x8043, 1931 vha, 0x8026,
1928 "Init chip failed.\n"); 1932 "Init chip failed.\n");
1929 break; 1933 break;
1930 } 1934 }
@@ -1933,7 +1937,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
1933 cs84xx_time = jiffies - cs84xx_time; 1937 cs84xx_time = jiffies - cs84xx_time;
1934 wtime += cs84xx_time; 1938 wtime += cs84xx_time;
1935 mtime += cs84xx_time; 1939 mtime += cs84xx_time;
1936 ql_dbg(ql_dbg_taskm, vha, 0x8042, 1940 ql_dbg(ql_dbg_taskm, vha, 0x8025,
1937 "Increasing wait time by %ld. " 1941 "Increasing wait time by %ld. "
1938 "New time %ld.\n", cs84xx_time, 1942 "New time %ld.\n", cs84xx_time,
1939 wtime); 1943 wtime);
@@ -5443,11 +5447,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
5443 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); 5447 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
5444 5448
5445 /* Update the firmware version */ 5449 /* Update the firmware version */
5446 qla2x00_get_fw_version(vha, &ha->fw_major_version, 5450 status = qla82xx_check_md_needed(vha);
5447 &ha->fw_minor_version, &ha->fw_subminor_version,
5448 &ha->fw_attributes, &ha->fw_memory_size,
5449 ha->mpi_version, &ha->mpi_capabilities,
5450 ha->phy_version);
5451 5451
5452 if (ha->fce) { 5452 if (ha->fce) {
5453 ha->flags.fce_enabled = 1; 5453 ha->flags.fce_enabled = 1;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 8a7591f035e..3474e86e98a 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2060,6 +2060,11 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2060 case ELS_IOCB_TYPE: 2060 case ELS_IOCB_TYPE:
2061 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); 2061 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2062 break; 2062 break;
2063 case MARKER_TYPE:
2064 		/* Do nothing in this case; this check prevents marker
2065 		 * entries from falling into the default case.
2066 */
2067 break;
2063 default: 2068 default:
2064 /* Type Not Supported. */ 2069 /* Type Not Supported. */
2065 ql_dbg(ql_dbg_async, vha, 0x5042, 2070 ql_dbg(ql_dbg_async, vha, 0x5042,
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index f7604ea1af8..3b3cec9f6ac 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -4186,3 +4186,130 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4186 4186
4187 return rval; 4187 return rval;
4188} 4188}
4189
4190int
4191qla82xx_md_get_template_size(scsi_qla_host_t *vha)
4192{
4193 struct qla_hw_data *ha = vha->hw;
4194 mbx_cmd_t mc;
4195 mbx_cmd_t *mcp = &mc;
4196 int rval = QLA_FUNCTION_FAILED;
4197
4198 ql_dbg(ql_dbg_mbx, vha, 0x111f, "Entered %s.\n", __func__);
4199
4200 	memset(mcp->mb, 0, sizeof(mcp->mb));
4201 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4202 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4203 mcp->mb[2] = LSW(RQST_TMPLT_SIZE);
4204 mcp->mb[3] = MSW(RQST_TMPLT_SIZE);
4205
4206 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
4207 mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
4208 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4209
4210 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4211 mcp->tov = MBX_TOV_SECONDS;
4212 rval = qla2x00_mailbox_command(vha, mcp);
4213
4214 /* Always copy back return mailbox values. */
4215 if (rval != QLA_SUCCESS) {
4216 ql_dbg(ql_dbg_mbx, vha, 0x1120,
4217 "mailbox command FAILED=0x%x, subcode=%x.\n",
4218 (mcp->mb[1] << 16) | mcp->mb[0],
4219 (mcp->mb[3] << 16) | mcp->mb[2]);
4220 } else {
4221 ql_dbg(ql_dbg_mbx, vha, 0x1121, "Done %s.\n", __func__);
4222 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
4223 if (!ha->md_template_size) {
4224 ql_dbg(ql_dbg_mbx, vha, 0x1122,
4225 "Null template size obtained.\n");
4226 rval = QLA_FUNCTION_FAILED;
4227 }
4228 }
4229 return rval;
4230}
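
The 0x129 mailbox command above passes its opcode and subcode as 16-bit
halves (mb[0]/mb[1] and mb[2]/mb[3]) and gets the template size back the
same way in mb[2]/mb[3]. A minimal standalone sketch of that split and
reassembly, not part of the patch; LSW/MSW mirror the driver macros and
the size value is hypothetical:

	#include <stdint.h>
	#include <assert.h>

	#define LSW(x)	((uint16_t)(x))
	#define MSW(x)	((uint16_t)(((uint32_t)(x)) >> 16))

	int main(void)
	{
		uint32_t cmd = 0x129;		/* MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE */
		uint32_t size = 0x0001a840;	/* hypothetical template size */
		uint16_t mb[4] = { LSW(cmd), MSW(cmd), LSW(size), MSW(size) };

		/* Reassembled exactly as the driver does with mb[3]:mb[2]. */
		assert((((uint32_t)mb[3] << 16) | mb[2]) == size);
		assert((((uint32_t)mb[1] << 16) | mb[0]) == cmd);
		return 0;
	}
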
4231
4232int
4233qla82xx_md_get_template(scsi_qla_host_t *vha)
4234{
4235 struct qla_hw_data *ha = vha->hw;
4236 mbx_cmd_t mc;
4237 mbx_cmd_t *mcp = &mc;
4238 int rval = QLA_FUNCTION_FAILED;
4239
4240 ql_dbg(ql_dbg_mbx, vha, 0x1123, "Entered %s.\n", __func__);
4241
4242 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
4243 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
4244 if (!ha->md_tmplt_hdr) {
4245 ql_log(ql_log_warn, vha, 0x1124,
4246 "Unable to allocate memory for Minidump template.\n");
4247 return rval;
4248 }
4249
4250 	memset(mcp->mb, 0, sizeof(mcp->mb));
4251 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4252 mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
4253 mcp->mb[2] = LSW(RQST_TMPLT);
4254 mcp->mb[3] = MSW(RQST_TMPLT);
4255 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
4256 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
4257 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
4258 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
4259 mcp->mb[8] = LSW(ha->md_template_size);
4260 mcp->mb[9] = MSW(ha->md_template_size);
4261
4262 mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
4263 mcp->tov = MBX_TOV_SECONDS;
4264 mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
4265 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
4266 mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
4267 rval = qla2x00_mailbox_command(vha, mcp);
4268
4269 if (rval != QLA_SUCCESS) {
4270 ql_dbg(ql_dbg_mbx, vha, 0x1125,
4271 "mailbox command FAILED=0x%x, subcode=%x.\n",
4272 ((mcp->mb[1] << 16) | mcp->mb[0]),
4273 ((mcp->mb[3] << 16) | mcp->mb[2]));
4274 } else
4275 ql_dbg(ql_dbg_mbx, vha, 0x1126, "Done %s.\n", __func__);
4276 return rval;
4277}
4278
4279int
4280qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
4281{
4282 int rval;
4283 struct qla_hw_data *ha = vha->hw;
4284 mbx_cmd_t mc;
4285 mbx_cmd_t *mcp = &mc;
4286
4287 if (!IS_QLA82XX(ha))
4288 return QLA_FUNCTION_FAILED;
4289
4290 ql_dbg(ql_dbg_mbx, vha, 0x1127,
4291 "Entered %s.\n", __func__);
4292
4293 memset(mcp, 0, sizeof(mbx_cmd_t));
4294 mcp->mb[0] = MBC_SET_LED_CONFIG;
4295 if (enable)
4296 mcp->mb[7] = 0xE;
4297 else
4298 mcp->mb[7] = 0xD;
4299
4300 mcp->out_mb = MBX_7|MBX_0;
4301 mcp->in_mb = MBX_0;
4302 mcp->tov = 30;
4303 mcp->flags = 0;
4304
4305 rval = qla2x00_mailbox_command(vha, mcp);
4306 if (rval != QLA_SUCCESS) {
4307 ql_dbg(ql_dbg_mbx, vha, 0x1128,
4308 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4309 } else {
4310 ql_dbg(ql_dbg_mbx, vha, 0x1129,
4311 "Done %s.\n", __func__);
4312 }
4313
4314 return rval;
4315}
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 049807cda41..94bded5ddce 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -7,6 +7,8 @@
7#include "qla_def.h" 7#include "qla_def.h"
8#include <linux/delay.h> 8#include <linux/delay.h>
9#include <linux/pci.h> 9#include <linux/pci.h>
10#include <linux/ratelimit.h>
11#include <linux/vmalloc.h>
10#include <scsi/scsi_tcq.h> 12#include <scsi/scsi_tcq.h>
11 13
12#define MASK(n) ((1ULL<<(n))-1) 14#define MASK(n) ((1ULL<<(n))-1)
@@ -328,7 +330,7 @@ unsigned qla82xx_crb_hub_agt[64] = {
328}; 330};
329 331
330/* Device states */ 332/* Device states */
331char *qdev_state[] = { 333char *q_dev_state[] = {
332 "Unknown", 334 "Unknown",
333 "Cold", 335 "Cold",
334 "Initializing", 336 "Initializing",
@@ -339,6 +341,11 @@ char *qdev_state[] = {
339 "Quiescent", 341 "Quiescent",
340}; 342};
341 343
344char *qdev_state(uint32_t dev_state)
345{
346 return q_dev_state[dev_state];
347}
348
342/* 349/*
343 * In: 'off' is offset from CRB space in 128M pci map 350 * In: 'off' is offset from CRB space in 128M pci map
344 * Out: 'off' is 2M pci map addr 351 * Out: 'off' is 2M pci map addr
@@ -2355,9 +2362,13 @@ qla82xx_need_reset(struct qla_hw_data *ha)
2355 uint32_t drv_state; 2362 uint32_t drv_state;
2356 int rval; 2363 int rval;
2357 2364
2358 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2365 if (ha->flags.isp82xx_reset_owner)
2359 rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); 2366 return 1;
2360 return rval; 2367 else {
2368 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2369 rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2370 return rval;
2371 }
2361} 2372}
2362 2373
2363static inline void 2374static inline void
@@ -2374,8 +2385,8 @@ qla82xx_set_rst_ready(struct qla_hw_data *ha)
2374 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 2385 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
2375 } 2386 }
2376 drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); 2387 drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
2377 ql_log(ql_log_info, vha, 0x00bb, 2388 ql_dbg(ql_dbg_init, vha, 0x00bb,
2378 "drv_state = 0x%x.\n", drv_state); 2389 "drv_state = 0x%08x.\n", drv_state);
2379 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); 2390 qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
2380} 2391}
2381 2392
@@ -2598,7 +2609,7 @@ qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
2598 dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address; 2609 dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2599 *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); 2610 *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2600 *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); 2611 *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
2601 *dsd_seg++ = cpu_to_le32(dsd_list_len); 2612 cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
2602 } else { 2613 } else {
2603 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); 2614 *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
2604 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); 2615 *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
@@ -3529,6 +3540,7 @@ static void
3529qla82xx_need_reset_handler(scsi_qla_host_t *vha) 3540qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3530{ 3541{
3531 uint32_t dev_state, drv_state, drv_active; 3542 uint32_t dev_state, drv_state, drv_active;
3543 uint32_t active_mask = 0;
3532 unsigned long reset_timeout; 3544 unsigned long reset_timeout;
3533 struct qla_hw_data *ha = vha->hw; 3545 struct qla_hw_data *ha = vha->hw;
3534 struct req_que *req = ha->req_q_map[0]; 3546 struct req_que *req = ha->req_q_map[0];
@@ -3541,15 +3553,32 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3541 qla82xx_idc_lock(ha); 3553 qla82xx_idc_lock(ha);
3542 } 3554 }
3543 3555
3544 qla82xx_set_rst_ready(ha); 3556 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3557 if (!ha->flags.isp82xx_reset_owner) {
3558 ql_dbg(ql_dbg_p3p, vha, 0xb028,
3559 "reset_acknowledged by 0x%x\n", ha->portnum);
3560 qla82xx_set_rst_ready(ha);
3561 } else {
3562 active_mask = ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
3563 drv_active &= active_mask;
3564 ql_dbg(ql_dbg_p3p, vha, 0xb029,
3565 "active_mask: 0x%08x\n", active_mask);
3566 }
3545 3567
3546 /* wait for 10 seconds for reset ack from all functions */ 3568 /* wait for 10 seconds for reset ack from all functions */
3547 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ); 3569 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
3548 3570
3549 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 3571 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3550 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 3572 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3573 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3551 3574
3552 while (drv_state != drv_active) { 3575 ql_dbg(ql_dbg_p3p, vha, 0xb02a,
3576 "drv_state: 0x%08x, drv_active: 0x%08x, "
3577 "dev_state: 0x%08x, active_mask: 0x%08x\n",
3578 drv_state, drv_active, dev_state, active_mask);
3579
3580 while (drv_state != drv_active &&
3581 dev_state != QLA82XX_DEV_INITIALIZING) {
3553 if (time_after_eq(jiffies, reset_timeout)) { 3582 if (time_after_eq(jiffies, reset_timeout)) {
3554 ql_log(ql_log_warn, vha, 0x00b5, 3583 ql_log(ql_log_warn, vha, 0x00b5,
3555 "Reset timeout.\n"); 3584 "Reset timeout.\n");
@@ -3560,23 +3589,87 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3560 qla82xx_idc_lock(ha); 3589 qla82xx_idc_lock(ha);
3561 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 3590 drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
3562 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 3591 drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
3592 if (ha->flags.isp82xx_reset_owner)
3593 drv_active &= active_mask;
3594 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3563 } 3595 }
3564 3596
3565 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3597 ql_dbg(ql_dbg_p3p, vha, 0xb02b,
3598 "drv_state: 0x%08x, drv_active: 0x%08x, "
3599 "dev_state: 0x%08x, active_mask: 0x%08x\n",
3600 drv_state, drv_active, dev_state, active_mask);
3601
3566 ql_log(ql_log_info, vha, 0x00b6, 3602 ql_log(ql_log_info, vha, 0x00b6,
3567 "Device state is 0x%x = %s.\n", 3603 "Device state is 0x%x = %s.\n",
3568 dev_state, 3604 dev_state,
3569 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 3605 dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
3570 3606
3571 /* Force to DEV_COLD unless someone else is starting a reset */ 3607 /* Force to DEV_COLD unless someone else is starting a reset */
3572 if (dev_state != QLA82XX_DEV_INITIALIZING) { 3608 if (dev_state != QLA82XX_DEV_INITIALIZING &&
3609 dev_state != QLA82XX_DEV_COLD) {
3573 ql_log(ql_log_info, vha, 0x00b7, 3610 ql_log(ql_log_info, vha, 0x00b7,
3574 "HW State: COLD/RE-INIT.\n"); 3611 "HW State: COLD/RE-INIT.\n");
3575 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); 3612 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
3613 if (ql2xmdenable) {
3614 if (qla82xx_md_collect(vha))
3615 ql_log(ql_log_warn, vha, 0xb02c,
3616 "Not able to collect minidump.\n");
3617 } else
3618 ql_log(ql_log_warn, vha, 0xb04f,
3619 "Minidump disabled.\n");
3576 } 3620 }
3577} 3621}
3578 3622
3579int 3623int
3624qla82xx_check_md_needed(scsi_qla_host_t *vha)
3625{
3626 struct qla_hw_data *ha = vha->hw;
3627 uint16_t fw_major_version, fw_minor_version, fw_subminor_version;
3628 int rval = QLA_SUCCESS;
3629
3630 fw_major_version = ha->fw_major_version;
3631 fw_minor_version = ha->fw_minor_version;
3632 fw_subminor_version = ha->fw_subminor_version;
3633
3634 rval = qla2x00_get_fw_version(vha, &ha->fw_major_version,
3635 &ha->fw_minor_version, &ha->fw_subminor_version,
3636 &ha->fw_attributes, &ha->fw_memory_size,
3637 ha->mpi_version, &ha->mpi_capabilities,
3638 ha->phy_version);
3639
3640 if (rval != QLA_SUCCESS)
3641 return rval;
3642
3643 if (ql2xmdenable) {
3644 if (!ha->fw_dumped) {
3645 if (fw_major_version != ha->fw_major_version ||
3646 fw_minor_version != ha->fw_minor_version ||
3647 fw_subminor_version != ha->fw_subminor_version) {
3648
3649 ql_log(ql_log_info, vha, 0xb02d,
3650 				    "Firmware version differs. "
3651 				    "Previous version: %d:%d:%d - "
3652 				    "New version: %d:%d:%d\n",
3653 				    fw_major_version,
3654 				    fw_minor_version,
3655 				    fw_subminor_version,
3656 				    ha->fw_major_version, ha->fw_minor_version,
3657 				    ha->fw_subminor_version);
3658 /* Release MiniDump resources */
3659 qla82xx_md_free(vha);
3660 				/* Allocate MiniDump resources */
3661 qla82xx_md_prep(vha);
3662 } else
3663 ql_log(ql_log_info, vha, 0xb02e,
3664 				    "Firmware dump available to "
3665 				    "retrieve\n");
3666 }
3667 }
3668 return rval;
3669}
3670
3671
3672int
3580qla82xx_check_fw_alive(scsi_qla_host_t *vha) 3673qla82xx_check_fw_alive(scsi_qla_host_t *vha)
3581{ 3674{
3582 uint32_t fw_heartbeat_counter; 3675 uint32_t fw_heartbeat_counter;
@@ -3637,7 +3730,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3637 ql_log(ql_log_info, vha, 0x009b, 3730 ql_log(ql_log_info, vha, 0x009b,
3638 "Device state is 0x%x = %s.\n", 3731 "Device state is 0x%x = %s.\n",
3639 dev_state, 3732 dev_state,
3640 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 3733 dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
3641 3734
3642 /* wait for 30 seconds for device to go ready */ 3735 /* wait for 30 seconds for device to go ready */
3643 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); 3736 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
@@ -3659,26 +3752,33 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
3659 ql_log(ql_log_info, vha, 0x009d, 3752 ql_log(ql_log_info, vha, 0x009d,
3660 "Device state is 0x%x = %s.\n", 3753 "Device state is 0x%x = %s.\n",
3661 dev_state, 3754 dev_state,
3662 dev_state < MAX_STATES ? qdev_state[dev_state] : 3755 dev_state < MAX_STATES ? qdev_state(dev_state) :
3663 "Unknown"); 3756 "Unknown");
3664 } 3757 }
3665 3758
3666 switch (dev_state) { 3759 switch (dev_state) {
3667 case QLA82XX_DEV_READY: 3760 case QLA82XX_DEV_READY:
3761 qla82xx_check_md_needed(vha);
3762 ha->flags.isp82xx_reset_owner = 0;
3668 goto exit; 3763 goto exit;
3669 case QLA82XX_DEV_COLD: 3764 case QLA82XX_DEV_COLD:
3670 rval = qla82xx_device_bootstrap(vha); 3765 rval = qla82xx_device_bootstrap(vha);
3671 goto exit; 3766 break;
3672 case QLA82XX_DEV_INITIALIZING: 3767 case QLA82XX_DEV_INITIALIZING:
3673 qla82xx_idc_unlock(ha); 3768 qla82xx_idc_unlock(ha);
3674 msleep(1000); 3769 msleep(1000);
3675 qla82xx_idc_lock(ha); 3770 qla82xx_idc_lock(ha);
3676 break; 3771 break;
3677 case QLA82XX_DEV_NEED_RESET: 3772 case QLA82XX_DEV_NEED_RESET:
3678 if (!ql2xdontresethba) 3773 if (!ql2xdontresethba)
3679 qla82xx_need_reset_handler(vha); 3774 qla82xx_need_reset_handler(vha);
3775 else {
3776 qla82xx_idc_unlock(ha);
3777 msleep(1000);
3778 qla82xx_idc_lock(ha);
3779 }
3680 dev_init_timeout = jiffies + 3780 dev_init_timeout = jiffies +
3681 (ha->nx_dev_init_timeout * HZ); 3781 (ha->nx_dev_init_timeout * HZ);
3682 break; 3782 break;
3683 case QLA82XX_DEV_NEED_QUIESCENT: 3783 case QLA82XX_DEV_NEED_QUIESCENT:
3684 qla82xx_need_qsnt_handler(vha); 3784 qla82xx_need_qsnt_handler(vha);
@@ -3791,6 +3891,28 @@ int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
3791 return rval; 3891 return rval;
3792} 3892}
3793 3893
3894void
3895qla82xx_set_reset_owner(scsi_qla_host_t *vha)
3896{
3897 struct qla_hw_data *ha = vha->hw;
3898 uint32_t dev_state;
3899
3900 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3901 if (dev_state == QLA82XX_DEV_READY) {
3902 ql_log(ql_log_info, vha, 0xb02f,
3903 "HW State: NEED RESET\n");
3904 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3905 QLA82XX_DEV_NEED_RESET);
3906 ha->flags.isp82xx_reset_owner = 1;
3907 ql_dbg(ql_dbg_p3p, vha, 0xb030,
3908 "reset_owner is 0x%x\n", ha->portnum);
3909 } else
3910 ql_log(ql_log_info, vha, 0xb031,
3911 "Device state is 0x%x = %s.\n",
3912 dev_state,
3913 dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
3914}
3915
3794/* 3916/*
3795 * qla82xx_abort_isp 3917 * qla82xx_abort_isp
3796 * Resets ISP and aborts all outstanding commands. 3918 * Resets ISP and aborts all outstanding commands.
@@ -3806,7 +3928,6 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3806{ 3928{
3807 int rval; 3929 int rval;
3808 struct qla_hw_data *ha = vha->hw; 3930 struct qla_hw_data *ha = vha->hw;
3809 uint32_t dev_state;
3810 3931
3811 if (vha->device_flags & DFLG_DEV_FAILED) { 3932 if (vha->device_flags & DFLG_DEV_FAILED) {
3812 ql_log(ql_log_warn, vha, 0x8024, 3933 ql_log(ql_log_warn, vha, 0x8024,
@@ -3816,16 +3937,7 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
3816 ha->flags.isp82xx_reset_hdlr_active = 1; 3937 ha->flags.isp82xx_reset_hdlr_active = 1;
3817 3938
3818 qla82xx_idc_lock(ha); 3939 qla82xx_idc_lock(ha);
3819 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3940 qla82xx_set_reset_owner(vha);
3820 if (dev_state == QLA82XX_DEV_READY) {
3821 ql_log(ql_log_info, vha, 0x8025,
3822 "HW State: NEED RESET.\n");
3823 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
3824 QLA82XX_DEV_NEED_RESET);
3825 } else
3826 ql_log(ql_log_info, vha, 0x8026,
3827 "Hw State: %s.\n", dev_state < MAX_STATES ?
3828 qdev_state[dev_state] : "Unknown");
3829 qla82xx_idc_unlock(ha); 3941 qla82xx_idc_unlock(ha);
3830 3942
3831 rval = qla82xx_device_state_handler(vha); 3943 rval = qla82xx_device_state_handler(vha);
@@ -4016,3 +4128,803 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
4016 } 4128 }
4017 } 4129 }
4018} 4130}
4131
4132/* Minidump related functions */
4133int
4134qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
4135{
4136 uint32_t off_value, rval = 0;
4137
4138 WRT_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase),
4139 (off & 0xFFFF0000));
4140
4141 /* Read back value to make sure write has gone through */
4142 RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
4143 off_value = (off & 0x0000FFFF);
4144
4145 if (flag)
4146 WRT_REG_DWORD((void *)
4147 (off_value + CRB_INDIRECT_2M + ha->nx_pcibase),
4148 data);
4149 else
4150 rval = RD_REG_DWORD((void *)
4151 (off_value + CRB_INDIRECT_2M + ha->nx_pcibase));
4152
4153 return rval;
4154}
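
qla82xx_md_rw_32() above reaches an arbitrary CRB offset through the 2M
PCI window: the upper 16 bits of the offset are written to CRB_WINDOW_2M
(and read back to flush the posted write) while the lower 16 bits index
into CRB_INDIRECT_2M. A hedged sketch of just the address math, using an
illustrative offset:

	#include <stdint.h>
	#include <assert.h>

	int main(void)
	{
		uint32_t off = 0x42110030;		/* e.g. MD_DIRECT_ROM_WINDOW */
		uint32_t window = off & 0xFFFF0000;	/* programmed into CRB_WINDOW_2M */
		uint32_t index = off & 0x0000FFFF;	/* added to CRB_INDIRECT_2M */

		assert(window == 0x42110000 && index == 0x0030);
		assert((window | index) == off);
		return 0;
	}
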
4155
4156static int
4157qla82xx_minidump_process_control(scsi_qla_host_t *vha,
4158 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4159{
4160 struct qla_hw_data *ha = vha->hw;
4161 struct qla82xx_md_entry_crb *crb_entry;
4162 uint32_t read_value, opcode, poll_time;
4163 uint32_t addr, index, crb_addr;
4164 unsigned long wtime;
4165 struct qla82xx_md_template_hdr *tmplt_hdr;
4166 uint32_t rval = QLA_SUCCESS;
4167 int i;
4168
4169 tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
4170 crb_entry = (struct qla82xx_md_entry_crb *)entry_hdr;
4171 crb_addr = crb_entry->addr;
4172
4173 for (i = 0; i < crb_entry->op_count; i++) {
4174 opcode = crb_entry->crb_ctrl.opcode;
4175 if (opcode & QLA82XX_DBG_OPCODE_WR) {
4176 qla82xx_md_rw_32(ha, crb_addr,
4177 crb_entry->value_1, 1);
4178 opcode &= ~QLA82XX_DBG_OPCODE_WR;
4179 }
4180
4181 if (opcode & QLA82XX_DBG_OPCODE_RW) {
4182 read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
4183 qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
4184 opcode &= ~QLA82XX_DBG_OPCODE_RW;
4185 }
4186
4187 if (opcode & QLA82XX_DBG_OPCODE_AND) {
4188 read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
4189 read_value &= crb_entry->value_2;
4190 opcode &= ~QLA82XX_DBG_OPCODE_AND;
4191 if (opcode & QLA82XX_DBG_OPCODE_OR) {
4192 read_value |= crb_entry->value_3;
4193 opcode &= ~QLA82XX_DBG_OPCODE_OR;
4194 }
4195 qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
4196 }
4197
4198 if (opcode & QLA82XX_DBG_OPCODE_OR) {
4199 read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
4200 read_value |= crb_entry->value_3;
4201 qla82xx_md_rw_32(ha, crb_addr, read_value, 1);
4202 opcode &= ~QLA82XX_DBG_OPCODE_OR;
4203 }
4204
4205 if (opcode & QLA82XX_DBG_OPCODE_POLL) {
4206 poll_time = crb_entry->crb_strd.poll_timeout;
4207 wtime = jiffies + poll_time;
4208 read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0);
4209
4210 do {
4211 if ((read_value & crb_entry->value_2)
4212 == crb_entry->value_1)
4213 break;
4214 else if (time_after_eq(jiffies, wtime)) {
4215 /* capturing dump failed */
4216 rval = QLA_FUNCTION_FAILED;
4217 break;
4218 } else
4219 read_value = qla82xx_md_rw_32(ha,
4220 crb_addr, 0, 0);
4221 } while (1);
4222 opcode &= ~QLA82XX_DBG_OPCODE_POLL;
4223 }
4224
4225 if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
4226 if (crb_entry->crb_strd.state_index_a) {
4227 index = crb_entry->crb_strd.state_index_a;
4228 addr = tmplt_hdr->saved_state_array[index];
4229 } else
4230 addr = crb_addr;
4231
4232 read_value = qla82xx_md_rw_32(ha, addr, 0, 0);
4233 index = crb_entry->crb_ctrl.state_index_v;
4234 tmplt_hdr->saved_state_array[index] = read_value;
4235 opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
4236 }
4237
4238 if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
4239 if (crb_entry->crb_strd.state_index_a) {
4240 index = crb_entry->crb_strd.state_index_a;
4241 addr = tmplt_hdr->saved_state_array[index];
4242 } else
4243 addr = crb_addr;
4244
4245 if (crb_entry->crb_ctrl.state_index_v) {
4246 index = crb_entry->crb_ctrl.state_index_v;
4247 read_value =
4248 tmplt_hdr->saved_state_array[index];
4249 } else
4250 read_value = crb_entry->value_1;
4251
4252 qla82xx_md_rw_32(ha, addr, read_value, 1);
4253 opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
4254 }
4255
4256 if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
4257 index = crb_entry->crb_ctrl.state_index_v;
4258 read_value = tmplt_hdr->saved_state_array[index];
4259 read_value <<= crb_entry->crb_ctrl.shl;
4260 read_value >>= crb_entry->crb_ctrl.shr;
4261 if (crb_entry->value_2)
4262 read_value &= crb_entry->value_2;
4263 read_value |= crb_entry->value_3;
4264 read_value += crb_entry->value_1;
4265 tmplt_hdr->saved_state_array[index] = read_value;
4266 opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
4267 }
4268 crb_addr += crb_entry->crb_strd.addr_stride;
4269 }
4270 return rval;
4271}
4272
4273static void
4274qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
4275 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4276{
4277 struct qla_hw_data *ha = vha->hw;
4278 uint32_t r_addr, r_stride, loop_cnt, i, r_value;
4279 struct qla82xx_md_entry_rdocm *ocm_hdr;
4280 uint32_t *data_ptr = *d_ptr;
4281
4282 ocm_hdr = (struct qla82xx_md_entry_rdocm *)entry_hdr;
4283 r_addr = ocm_hdr->read_addr;
4284 r_stride = ocm_hdr->read_addr_stride;
4285 loop_cnt = ocm_hdr->op_count;
4286
4287 for (i = 0; i < loop_cnt; i++) {
4288 r_value = RD_REG_DWORD((void *)(r_addr + ha->nx_pcibase));
4289 *data_ptr++ = cpu_to_le32(r_value);
4290 r_addr += r_stride;
4291 }
4292 *d_ptr = data_ptr;
4293}
4294
4295static void
4296qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha,
4297 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4298{
4299 struct qla_hw_data *ha = vha->hw;
4300 uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
4301 struct qla82xx_md_entry_mux *mux_hdr;
4302 uint32_t *data_ptr = *d_ptr;
4303
4304 mux_hdr = (struct qla82xx_md_entry_mux *)entry_hdr;
4305 r_addr = mux_hdr->read_addr;
4306 s_addr = mux_hdr->select_addr;
4307 s_stride = mux_hdr->select_value_stride;
4308 s_value = mux_hdr->select_value;
4309 loop_cnt = mux_hdr->op_count;
4310
4311 for (i = 0; i < loop_cnt; i++) {
4312 qla82xx_md_rw_32(ha, s_addr, s_value, 1);
4313 r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
4314 *data_ptr++ = cpu_to_le32(s_value);
4315 *data_ptr++ = cpu_to_le32(r_value);
4316 s_value += s_stride;
4317 }
4318 *d_ptr = data_ptr;
4319}
4320
4321static void
4322qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha,
4323 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4324{
4325 struct qla_hw_data *ha = vha->hw;
4326 uint32_t r_addr, r_stride, loop_cnt, i, r_value;
4327 struct qla82xx_md_entry_crb *crb_hdr;
4328 uint32_t *data_ptr = *d_ptr;
4329
4330 crb_hdr = (struct qla82xx_md_entry_crb *)entry_hdr;
4331 r_addr = crb_hdr->addr;
4332 r_stride = crb_hdr->crb_strd.addr_stride;
4333 loop_cnt = crb_hdr->op_count;
4334
4335 for (i = 0; i < loop_cnt; i++) {
4336 r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
4337 *data_ptr++ = cpu_to_le32(r_addr);
4338 *data_ptr++ = cpu_to_le32(r_value);
4339 r_addr += r_stride;
4340 }
4341 *d_ptr = data_ptr;
4342}
4343
4344static int
4345qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha,
4346 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4347{
4348 struct qla_hw_data *ha = vha->hw;
4349 uint32_t addr, r_addr, c_addr, t_r_addr;
4350 uint32_t i, k, loop_count, t_value, r_cnt, r_value;
4351 unsigned long p_wait, w_time, p_mask;
4352 uint32_t c_value_w, c_value_r;
4353 struct qla82xx_md_entry_cache *cache_hdr;
4354 int rval = QLA_FUNCTION_FAILED;
4355 uint32_t *data_ptr = *d_ptr;
4356
4357 cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
4358 loop_count = cache_hdr->op_count;
4359 r_addr = cache_hdr->read_addr;
4360 c_addr = cache_hdr->control_addr;
4361 c_value_w = cache_hdr->cache_ctrl.write_value;
4362
4363 t_r_addr = cache_hdr->tag_reg_addr;
4364 t_value = cache_hdr->addr_ctrl.init_tag_value;
4365 r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
4366 p_wait = cache_hdr->cache_ctrl.poll_wait;
4367 p_mask = cache_hdr->cache_ctrl.poll_mask;
4368
4369 for (i = 0; i < loop_count; i++) {
4370 qla82xx_md_rw_32(ha, t_r_addr, t_value, 1);
4371 if (c_value_w)
4372 qla82xx_md_rw_32(ha, c_addr, c_value_w, 1);
4373
4374 if (p_mask) {
4375 w_time = jiffies + p_wait;
4376 do {
4377 c_value_r = qla82xx_md_rw_32(ha, c_addr, 0, 0);
4378 if ((c_value_r & p_mask) == 0)
4379 break;
4380 else if (time_after_eq(jiffies, w_time)) {
4381 /* capturing dump failed */
4382 ql_dbg(ql_dbg_p3p, vha, 0xb032,
4383 "c_value_r: 0x%x, poll_mask: 0x%lx, "
4384 "w_time: 0x%lx\n",
4385 c_value_r, p_mask, w_time);
4386 return rval;
4387 }
4388 } while (1);
4389 }
4390
4391 addr = r_addr;
4392 for (k = 0; k < r_cnt; k++) {
4393 r_value = qla82xx_md_rw_32(ha, addr, 0, 0);
4394 *data_ptr++ = cpu_to_le32(r_value);
4395 addr += cache_hdr->read_ctrl.read_addr_stride;
4396 }
4397 t_value += cache_hdr->addr_ctrl.tag_value_stride;
4398 }
4399 *d_ptr = data_ptr;
4400 return QLA_SUCCESS;
4401}
4402
4403static void
4404qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha,
4405 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4406{
4407 struct qla_hw_data *ha = vha->hw;
4408 uint32_t addr, r_addr, c_addr, t_r_addr;
4409 uint32_t i, k, loop_count, t_value, r_cnt, r_value;
4410 uint32_t c_value_w;
4411 struct qla82xx_md_entry_cache *cache_hdr;
4412 uint32_t *data_ptr = *d_ptr;
4413
4414 cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr;
4415 loop_count = cache_hdr->op_count;
4416 r_addr = cache_hdr->read_addr;
4417 c_addr = cache_hdr->control_addr;
4418 c_value_w = cache_hdr->cache_ctrl.write_value;
4419
4420 t_r_addr = cache_hdr->tag_reg_addr;
4421 t_value = cache_hdr->addr_ctrl.init_tag_value;
4422 r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
4423
4424 for (i = 0; i < loop_count; i++) {
4425 qla82xx_md_rw_32(ha, t_r_addr, t_value, 1);
4426 qla82xx_md_rw_32(ha, c_addr, c_value_w, 1);
4427 addr = r_addr;
4428 for (k = 0; k < r_cnt; k++) {
4429 r_value = qla82xx_md_rw_32(ha, addr, 0, 0);
4430 *data_ptr++ = cpu_to_le32(r_value);
4431 addr += cache_hdr->read_ctrl.read_addr_stride;
4432 }
4433 t_value += cache_hdr->addr_ctrl.tag_value_stride;
4434 }
4435 *d_ptr = data_ptr;
4436}
4437
4438static void
4439qla82xx_minidump_process_queue(scsi_qla_host_t *vha,
4440 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4441{
4442 struct qla_hw_data *ha = vha->hw;
4443 uint32_t s_addr, r_addr;
4444 uint32_t r_stride, r_value, r_cnt, qid = 0;
4445 uint32_t i, k, loop_cnt;
4446 struct qla82xx_md_entry_queue *q_hdr;
4447 uint32_t *data_ptr = *d_ptr;
4448
4449 q_hdr = (struct qla82xx_md_entry_queue *)entry_hdr;
4450 s_addr = q_hdr->select_addr;
4451 r_cnt = q_hdr->rd_strd.read_addr_cnt;
4452 r_stride = q_hdr->rd_strd.read_addr_stride;
4453 loop_cnt = q_hdr->op_count;
4454
4455 for (i = 0; i < loop_cnt; i++) {
4456 qla82xx_md_rw_32(ha, s_addr, qid, 1);
4457 r_addr = q_hdr->read_addr;
4458 for (k = 0; k < r_cnt; k++) {
4459 r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0);
4460 *data_ptr++ = cpu_to_le32(r_value);
4461 r_addr += r_stride;
4462 }
4463 qid += q_hdr->q_strd.queue_id_stride;
4464 }
4465 *d_ptr = data_ptr;
4466}
4467
4468static void
4469qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha,
4470 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4471{
4472 struct qla_hw_data *ha = vha->hw;
4473 uint32_t r_addr, r_value;
4474 uint32_t i, loop_cnt;
4475 struct qla82xx_md_entry_rdrom *rom_hdr;
4476 uint32_t *data_ptr = *d_ptr;
4477
4478 rom_hdr = (struct qla82xx_md_entry_rdrom *)entry_hdr;
4479 r_addr = rom_hdr->read_addr;
4480 loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
4481
4482 for (i = 0; i < loop_cnt; i++) {
4483 qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW,
4484 (r_addr & 0xFFFF0000), 1);
4485 r_value = qla82xx_md_rw_32(ha,
4486 MD_DIRECT_ROM_READ_BASE +
4487 (r_addr & 0x0000FFFF), 0, 0);
4488 *data_ptr++ = cpu_to_le32(r_value);
4489 r_addr += sizeof(uint32_t);
4490 }
4491 *d_ptr = data_ptr;
4492}
4493
4494static int
4495qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha,
4496 qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
4497{
4498 struct qla_hw_data *ha = vha->hw;
4499 uint32_t r_addr, r_value, r_data;
4500 uint32_t i, j, loop_cnt;
4501 struct qla82xx_md_entry_rdmem *m_hdr;
4502 unsigned long flags;
4503 int rval = QLA_FUNCTION_FAILED;
4504 uint32_t *data_ptr = *d_ptr;
4505
4506 m_hdr = (struct qla82xx_md_entry_rdmem *)entry_hdr;
4507 r_addr = m_hdr->read_addr;
4508 loop_cnt = m_hdr->read_data_size/16;
4509
4510 if (r_addr & 0xf) {
4511 ql_log(ql_log_warn, vha, 0xb033,
4512 		    "Read addr 0x%x not 16-byte aligned\n", r_addr);
4513 return rval;
4514 }
4515
4516 if (m_hdr->read_data_size % 16) {
4517 ql_log(ql_log_warn, vha, 0xb034,
4518 "Read data[0x%x] not multiple of 16 bytes\n",
4519 m_hdr->read_data_size);
4520 return rval;
4521 }
4522
4523 ql_dbg(ql_dbg_p3p, vha, 0xb035,
4524 "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
4525 __func__, r_addr, m_hdr->read_data_size, loop_cnt);
4526
4527 write_lock_irqsave(&ha->hw_lock, flags);
4528 for (i = 0; i < loop_cnt; i++) {
4529 qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1);
4530 r_value = 0;
4531 qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1);
4532 r_value = MIU_TA_CTL_ENABLE;
4533 qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
4534 r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
4535 qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
4536
4537 for (j = 0; j < MAX_CTL_CHECK; j++) {
4538 r_value = qla82xx_md_rw_32(ha,
4539 MD_MIU_TEST_AGT_CTRL, 0, 0);
4540 if ((r_value & MIU_TA_CTL_BUSY) == 0)
4541 break;
4542 }
4543
4544 if (j >= MAX_CTL_CHECK) {
4545 printk_ratelimited(KERN_ERR
4546 "failed to read through agent\n");
4547 write_unlock_irqrestore(&ha->hw_lock, flags);
4548 return rval;
4549 }
4550
4551 for (j = 0; j < 4; j++) {
4552 r_data = qla82xx_md_rw_32(ha,
4553 MD_MIU_TEST_AGT_RDDATA[j], 0, 0);
4554 *data_ptr++ = cpu_to_le32(r_data);
4555 }
4556 r_addr += 16;
4557 }
4558 write_unlock_irqrestore(&ha->hw_lock, flags);
4559 *d_ptr = data_ptr;
4560 return QLA_SUCCESS;
4561}
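
The read-memory handler above moves data through the MIU test agent in
fixed 16-byte transactions: program the address, kick START|ENABLE, poll
BUSY out, then read the four RDDATA registers. A small sketch of the
size bookkeeping only, with a hypothetical entry size:

	#include <stdint.h>
	#include <assert.h>

	int main(void)
	{
		uint32_t read_data_size = 0x100;	/* hypothetical; must be 16-byte aligned */
		uint32_t loop_cnt = read_data_size / 16;
		uint32_t dwords_out = loop_cnt * 4;	/* four RDDATA reads per transaction */

		assert((read_data_size % 16) == 0);
		assert(dwords_out * 4 == read_data_size);
		return 0;
	}
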
4562
4563static int
4564qla82xx_validate_template_chksum(scsi_qla_host_t *vha)
4565{
4566 struct qla_hw_data *ha = vha->hw;
4567 uint64_t chksum = 0;
4568 uint32_t *d_ptr = (uint32_t *)ha->md_tmplt_hdr;
4569 int count = ha->md_template_size/sizeof(uint32_t);
4570
4571 while (count-- > 0)
4572 chksum += *d_ptr++;
4573 while (chksum >> 32)
4574 chksum = (chksum & 0xFFFFFFFF) + (chksum >> 32);
4575 return ~chksum;
4576}
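
The validation above relies on the template being generated so that the
end-around-carry sum of its 32-bit words is 0xFFFFFFFF, which makes the
final ~chksum zero. A self-contained sketch of that property, with
made-up words chosen to satisfy it:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t fold_sum(const uint32_t *w, int n)
	{
		uint64_t chksum = 0;

		while (n-- > 0)
			chksum += *w++;
		while (chksum >> 32)		/* fold carries back in */
			chksum = (chksum & 0xFFFFFFFF) + (chksum >> 32);
		return (uint32_t)~chksum;	/* 0 means valid */
	}

	int main(void)
	{
		uint32_t words[4] = { 0x11111111, 0x22222222, 0x33333333, 0 };

		/* Choose the last word so the folded sum is all-ones. */
		words[3] = ~(words[0] + words[1] + words[2]);
		printf("validation result: 0x%x\n", (unsigned)fold_sum(words, 4));
		return 0;
	}
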
4577
4578static void
4579qla82xx_mark_entry_skipped(scsi_qla_host_t *vha,
4580 qla82xx_md_entry_hdr_t *entry_hdr, int index)
4581{
4582 entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
4583 ql_dbg(ql_dbg_p3p, vha, 0xb036,
4584 "Skipping entry[%d]: "
4585 "ETYPE[0x%x]-ELEVEL[0x%x]\n",
4586 index, entry_hdr->entry_type,
4587 entry_hdr->d_ctrl.entry_capture_mask);
4588}
4589
4590int
4591qla82xx_md_collect(scsi_qla_host_t *vha)
4592{
4593 struct qla_hw_data *ha = vha->hw;
4594 int no_entry_hdr = 0;
4595 qla82xx_md_entry_hdr_t *entry_hdr;
4596 struct qla82xx_md_template_hdr *tmplt_hdr;
4597 uint32_t *data_ptr;
4598 uint32_t total_data_size = 0, f_capture_mask, data_collected = 0;
4599 int i = 0, rval = QLA_FUNCTION_FAILED;
4600
4601 tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
4602 data_ptr = (uint32_t *)ha->md_dump;
4603
4604 if (ha->fw_dumped) {
4605 ql_log(ql_log_info, vha, 0xb037,
4606 		    "Firmware dump available to retrieve\n");
4607 goto md_failed;
4608 }
4609
4610 ha->fw_dumped = 0;
4611
4612 if (!ha->md_tmplt_hdr || !ha->md_dump) {
4613 ql_log(ql_log_warn, vha, 0xb038,
4614 "Memory not allocated for minidump capture\n");
4615 goto md_failed;
4616 }
4617
4618 if (qla82xx_validate_template_chksum(vha)) {
4619 ql_log(ql_log_info, vha, 0xb039,
4620 "Template checksum validation error\n");
4621 goto md_failed;
4622 }
4623
4624 no_entry_hdr = tmplt_hdr->num_of_entries;
4625 ql_dbg(ql_dbg_p3p, vha, 0xb03a,
4626 	    "Number of entry headers in template: 0x%x\n", no_entry_hdr);
4627
4628 ql_dbg(ql_dbg_p3p, vha, 0xb03b,
4629 "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level);
4630
4631 f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF;
4632
4633 /* Validate whether required debug level is set */
4634 if ((f_capture_mask & 0x3) != 0x3) {
4635 ql_log(ql_log_warn, vha, 0xb03c,
4636 "Minimum required capture mask[0x%x] level not set\n",
4637 f_capture_mask);
4638 goto md_failed;
4639 }
4640 tmplt_hdr->driver_capture_mask = ql2xmdcapmask;
4641
4642 tmplt_hdr->driver_info[0] = vha->host_no;
4643 tmplt_hdr->driver_info[1] = (QLA_DRIVER_MAJOR_VER << 24) |
4644 (QLA_DRIVER_MINOR_VER << 16) | (QLA_DRIVER_PATCH_VER << 8) |
4645 QLA_DRIVER_BETA_VER;
4646
4647 total_data_size = ha->md_dump_size;
4648
4649 	ql_dbg(ql_dbg_p3p, vha, 0xb03d,
4650 "Total minidump data_size 0x%x to be captured\n", total_data_size);
4651
4652 /* Check whether template obtained is valid */
4653 if (tmplt_hdr->entry_type != QLA82XX_TLHDR) {
4654 ql_log(ql_log_warn, vha, 0xb04e,
4655 "Bad template header entry type: 0x%x obtained\n",
4656 tmplt_hdr->entry_type);
4657 goto md_failed;
4658 }
4659
4660 	entry_hdr = (qla82xx_md_entry_hdr_t *)
4661 (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset);
4662
4663 /* Walk through the entry headers */
4664 for (i = 0; i < no_entry_hdr; i++) {
4665
4666 if (data_collected > total_data_size) {
4667 ql_log(ql_log_warn, vha, 0xb03e,
4668 			    "More MiniDump data collected than expected: [0x%x]\n",
4669 data_collected);
4670 goto md_failed;
4671 }
4672
4673 if (!(entry_hdr->d_ctrl.entry_capture_mask &
4674 ql2xmdcapmask)) {
4675 entry_hdr->d_ctrl.driver_flags |=
4676 QLA82XX_DBG_SKIPPED_FLAG;
4677 ql_dbg(ql_dbg_p3p, vha, 0xb03f,
4678 "Skipping entry[%d]: "
4679 "ETYPE[0x%x]-ELEVEL[0x%x]\n",
4680 i, entry_hdr->entry_type,
4681 entry_hdr->d_ctrl.entry_capture_mask);
4682 goto skip_nxt_entry;
4683 }
4684
4685 ql_dbg(ql_dbg_p3p, vha, 0xb040,
4686 "[%s]: data ptr[%d]: %p, entry_hdr: %p\n"
4687 		    "entry_type: 0x%x, capture_mask: 0x%x\n",
4688 __func__, i, data_ptr, entry_hdr,
4689 entry_hdr->entry_type,
4690 entry_hdr->d_ctrl.entry_capture_mask);
4691
4692 ql_dbg(ql_dbg_p3p, vha, 0xb041,
4693 "Data collected: [0x%x], Dump size left:[0x%x]\n",
4694 data_collected, (ha->md_dump_size - data_collected));
4695
4696 /* Decode the entry type and take
4697 * required action to capture debug data */
4698 switch (entry_hdr->entry_type) {
4699 case QLA82XX_RDEND:
4700 qla82xx_mark_entry_skipped(vha, entry_hdr, i);
4701 break;
4702 case QLA82XX_CNTRL:
4703 rval = qla82xx_minidump_process_control(vha,
4704 entry_hdr, &data_ptr);
4705 if (rval != QLA_SUCCESS) {
4706 qla82xx_mark_entry_skipped(vha, entry_hdr, i);
4707 goto md_failed;
4708 }
4709 break;
4710 case QLA82XX_RDCRB:
4711 qla82xx_minidump_process_rdcrb(vha,
4712 entry_hdr, &data_ptr);
4713 break;
4714 case QLA82XX_RDMEM:
4715 rval = qla82xx_minidump_process_rdmem(vha,
4716 entry_hdr, &data_ptr);
4717 if (rval != QLA_SUCCESS) {
4718 qla82xx_mark_entry_skipped(vha, entry_hdr, i);
4719 goto md_failed;
4720 }
4721 break;
4722 case QLA82XX_BOARD:
4723 case QLA82XX_RDROM:
4724 qla82xx_minidump_process_rdrom(vha,
4725 entry_hdr, &data_ptr);
4726 break;
4727 case QLA82XX_L2DTG:
4728 case QLA82XX_L2ITG:
4729 case QLA82XX_L2DAT:
4730 case QLA82XX_L2INS:
4731 rval = qla82xx_minidump_process_l2tag(vha,
4732 entry_hdr, &data_ptr);
4733 if (rval != QLA_SUCCESS) {
4734 qla82xx_mark_entry_skipped(vha, entry_hdr, i);
4735 goto md_failed;
4736 }
4737 break;
4738 case QLA82XX_L1DAT:
4739 case QLA82XX_L1INS:
4740 qla82xx_minidump_process_l1cache(vha,
4741 entry_hdr, &data_ptr);
4742 break;
4743 case QLA82XX_RDOCM:
4744 qla82xx_minidump_process_rdocm(vha,
4745 entry_hdr, &data_ptr);
4746 break;
4747 case QLA82XX_RDMUX:
4748 qla82xx_minidump_process_rdmux(vha,
4749 entry_hdr, &data_ptr);
4750 break;
4751 case QLA82XX_QUEUE:
4752 qla82xx_minidump_process_queue(vha,
4753 entry_hdr, &data_ptr);
4754 break;
4755 case QLA82XX_RDNOP:
4756 default:
4757 qla82xx_mark_entry_skipped(vha, entry_hdr, i);
4758 break;
4759 }
4760
4761 ql_dbg(ql_dbg_p3p, vha, 0xb042,
4762 "[%s]: data ptr[%d]: %p\n", __func__, i, data_ptr);
4763
4764 data_collected = (uint8_t *)data_ptr -
4765 (uint8_t *)ha->md_dump;
4766skip_nxt_entry:
4767 		entry_hdr = (qla82xx_md_entry_hdr_t *)
4768 (((uint8_t *)entry_hdr) + entry_hdr->entry_size);
4769 }
4770
4771 if (data_collected != total_data_size) {
4772 		ql_dbg(ql_dbg_p3p, vha, 0xb043,
4773 		    "MiniDump data mismatch: Data collected: [0x%x], "
4774 		    "total_data_size: [0x%x]\n",
4775 data_collected, total_data_size);
4776 goto md_failed;
4777 }
4778
4779 ql_log(ql_log_info, vha, 0xb044,
4780 "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
4781 vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
4782 ha->fw_dumped = 1;
4783 qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
4784
4785md_failed:
4786 return rval;
4787}
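
qla82xx_md_collect() walks the template as a packed list: every entry
header carries its own entry_size, and the next header lives at the
current pointer plus that size. A hedged sketch of that walk over two
fabricated 16-byte entries:

	#include <stdint.h>
	#include <stdio.h>

	struct entry_hdr {
		uint32_t entry_type;
		uint32_t entry_size;	/* bytes, including this header */
	};

	int main(void)
	{
		uint32_t buf[8] = { 0 };	/* room for two fake entries */
		struct entry_hdr *e = (struct entry_hdr *)buf;

		e->entry_type = 1;   e->entry_size = 16;
		e = (struct entry_hdr *)((uint8_t *)buf + 16);
		e->entry_type = 255; e->entry_size = 16;	/* end-style entry */

		for (e = (struct entry_hdr *)buf;
		     (uint8_t *)e < (uint8_t *)buf + sizeof(buf);
		     e = (struct entry_hdr *)((uint8_t *)e + e->entry_size))
			printf("entry type %u, size %u\n",
			       (unsigned)e->entry_type, (unsigned)e->entry_size);
		return 0;
	}
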
4788
4789int
4790qla82xx_md_alloc(scsi_qla_host_t *vha)
4791{
4792 struct qla_hw_data *ha = vha->hw;
4793 int i, k;
4794 struct qla82xx_md_template_hdr *tmplt_hdr;
4795
4796 tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr;
4797
4798 if (ql2xmdcapmask < 0x3 || ql2xmdcapmask > 0x7F) {
4799 ql2xmdcapmask = tmplt_hdr->capture_debug_level & 0xFF;
4800 ql_log(ql_log_info, vha, 0xb045,
4801 "Forcing driver capture mask to firmware default capture mask: 0x%x.\n",
4802 ql2xmdcapmask);
4803 }
4804
4805 for (i = 0x2, k = 1; (i & QLA82XX_DEFAULT_CAP_MASK); i <<= 1, k++) {
4806 if (i & ql2xmdcapmask)
4807 ha->md_dump_size += tmplt_hdr->capture_size_array[k];
4808 }
4809
4810 if (ha->md_dump) {
4811 ql_log(ql_log_warn, vha, 0xb046,
4812 "Firmware dump previously allocated.\n");
4813 return 1;
4814 }
4815
4816 ha->md_dump = vmalloc(ha->md_dump_size);
4817 if (ha->md_dump == NULL) {
4818 ql_log(ql_log_warn, vha, 0xb047,
4819 "Unable to allocate memory for Minidump size "
4820 "(0x%x).\n", ha->md_dump_size);
4821 return 1;
4822 }
4823 return 0;
4824}
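
The sizing loop in qla82xx_md_alloc() adds one capture_size_array slot
per set mask bit, scanning from bit 0x2 upward. A worked sketch with
invented per-level sizes; for the default mask 0x1F this selects
k = 1..4:

	#include <stdint.h>
	#include <stdio.h>

	#define QLA82XX_DEFAULT_CAP_MASK 0xFF

	int main(void)
	{
		uint32_t capture_size_array[8] = { 0, 0x1000, 0x2000, 0x4000,
						   0x8000, 0x10000, 0x20000, 0x40000 };
		uint32_t md_dump_size = 0, mask = 0x1F;	/* module param default */
		int i, k;

		for (i = 0x2, k = 1; (i & QLA82XX_DEFAULT_CAP_MASK); i <<= 1, k++)
			if (i & mask)
				md_dump_size += capture_size_array[k];

		/* 0x1000 + 0x2000 + 0x4000 + 0x8000 = 0xf000 */
		printf("dump size for mask 0x%x: 0x%x\n",
		       (unsigned)mask, (unsigned)md_dump_size);
		return 0;
	}
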
4825
4826void
4827qla82xx_md_free(scsi_qla_host_t *vha)
4828{
4829 struct qla_hw_data *ha = vha->hw;
4830
4831 /* Release the template header allocated */
4832 if (ha->md_tmplt_hdr) {
4833 ql_log(ql_log_info, vha, 0xb048,
4834 "Free MiniDump template: %p, size (%d KB)\n",
4835 ha->md_tmplt_hdr, ha->md_template_size / 1024);
4836 dma_free_coherent(&ha->pdev->dev, ha->md_template_size,
4837 ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma);
4838 		ha->md_tmplt_hdr = NULL;
4839 }
4840
4841 /* Release the template data buffer allocated */
4842 if (ha->md_dump) {
4843 ql_log(ql_log_info, vha, 0xb049,
4844 "Free MiniDump memory: %p, size (%d KB)\n",
4845 ha->md_dump, ha->md_dump_size / 1024);
4846 vfree(ha->md_dump);
4847 ha->md_dump_size = 0;
4848 		ha->md_dump = NULL;
4849 }
4850}
4851
4852void
4853qla82xx_md_prep(scsi_qla_host_t *vha)
4854{
4855 struct qla_hw_data *ha = vha->hw;
4856 int rval;
4857
4858 /* Get Minidump template size */
4859 rval = qla82xx_md_get_template_size(vha);
4860 if (rval == QLA_SUCCESS) {
4861 ql_log(ql_log_info, vha, 0xb04a,
4862 "MiniDump Template size obtained (%d KB)\n",
4863 ha->md_template_size / 1024);
4864
4865 /* Get Minidump template */
4866 rval = qla82xx_md_get_template(vha);
4867 if (rval == QLA_SUCCESS) {
4868 ql_dbg(ql_dbg_p3p, vha, 0xb04b,
4869 "MiniDump Template obtained\n");
4870
4871 /* Allocate memory for minidump */
4872 rval = qla82xx_md_alloc(vha);
4873 if (rval == QLA_SUCCESS)
4874 ql_log(ql_log_info, vha, 0xb04c,
4875 "MiniDump memory allocated (%d KB)\n",
4876 ha->md_dump_size / 1024);
4877 else {
4878 ql_log(ql_log_info, vha, 0xb04d,
4879 "Free MiniDump template: %p, size: (%d KB)\n",
4880 ha->md_tmplt_hdr,
4881 ha->md_template_size / 1024);
4882 dma_free_coherent(&ha->pdev->dev,
4883 ha->md_template_size,
4884 ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma);
4885 				ha->md_tmplt_hdr = NULL;
4886 }
4887
4888 }
4889 }
4890}
4891
4892int
4893qla82xx_beacon_on(struct scsi_qla_host *vha)
4894{
4895
4896 int rval;
4897 struct qla_hw_data *ha = vha->hw;
4898 qla82xx_idc_lock(ha);
4899 rval = qla82xx_mbx_beacon_ctl(vha, 1);
4900
4901 if (rval) {
4902 ql_log(ql_log_warn, vha, 0xb050,
4903 "mbx set led config failed in %s\n", __func__);
4904 goto exit;
4905 }
4906 ha->beacon_blink_led = 1;
4907exit:
4908 qla82xx_idc_unlock(ha);
4909 return rval;
4910}
4911
4912int
4913qla82xx_beacon_off(struct scsi_qla_host *vha)
4914{
4915
4916 int rval;
4917 struct qla_hw_data *ha = vha->hw;
4918 qla82xx_idc_lock(ha);
4919 rval = qla82xx_mbx_beacon_ctl(vha, 0);
4920
4921 if (rval) {
4922 ql_log(ql_log_warn, vha, 0xb051,
4923 "mbx set led config failed in %s\n", __func__);
4924 goto exit;
4925 }
4926 ha->beacon_blink_led = 0;
4927exit:
4928 qla82xx_idc_unlock(ha);
4929 return rval;
4930}
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 8a21832c669..57820c199bc 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -484,8 +484,6 @@
484#define QLA82XX_ADDR_OCM1 (0x0000000200400000ULL) 484#define QLA82XX_ADDR_OCM1 (0x0000000200400000ULL)
485#define QLA82XX_ADDR_OCM1_MAX (0x00000002004fffffULL) 485#define QLA82XX_ADDR_OCM1_MAX (0x00000002004fffffULL)
486#define QLA82XX_ADDR_QDR_NET (0x0000000300000000ULL) 486#define QLA82XX_ADDR_QDR_NET (0x0000000300000000ULL)
487
488#define QLA82XX_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL)
489#define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL) 487#define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL)
490 488
491#define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000 489#define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000
@@ -890,6 +888,7 @@ struct ct6_dsd {
890}; 888};
891 889
892#define MBC_TOGGLE_INTERRUPT 0x10 890#define MBC_TOGGLE_INTERRUPT 0x10
891#define MBC_SET_LED_CONFIG 0x125
893 892
894/* Flash offset */ 893/* Flash offset */
895#define FLT_REG_BOOTLOAD_82XX 0x72 894#define FLT_REG_BOOTLOAD_82XX 0x72
@@ -922,4 +921,256 @@ struct ct6_dsd {
922#define M25P_INSTR_DP 0xb9 921#define M25P_INSTR_DP 0xb9
923#define M25P_INSTR_RES 0xab 922#define M25P_INSTR_RES 0xab
924 923
924/* Minidump related */
925
926/*
927 * Version of the template
928 * 4 Bytes
929 * X.Major.Minor.RELEASE
930 */
931#define QLA82XX_MINIDUMP_VERSION 0x10101
932
933/*
934 * Entry Type Defines
935 */
936#define QLA82XX_RDNOP 0
937#define QLA82XX_RDCRB 1
938#define QLA82XX_RDMUX 2
939#define QLA82XX_QUEUE 3
940#define QLA82XX_BOARD 4
941#define QLA82XX_RDSRE 5
942#define QLA82XX_RDOCM 6
943#define QLA82XX_CACHE 10
944#define QLA82XX_L1DAT 11
945#define QLA82XX_L1INS 12
946#define QLA82XX_L2DTG 21
947#define QLA82XX_L2ITG 22
948#define QLA82XX_L2DAT 23
949#define QLA82XX_L2INS 24
950#define QLA82XX_RDROM 71
951#define QLA82XX_RDMEM 72
952#define QLA82XX_CNTRL 98
953#define QLA82XX_TLHDR 99
954#define QLA82XX_RDEND 255
955
956/*
957 * Opcodes for Control Entries.
958 * These Flags are bit fields.
959 */
960#define QLA82XX_DBG_OPCODE_WR 0x01
961#define QLA82XX_DBG_OPCODE_RW 0x02
962#define QLA82XX_DBG_OPCODE_AND 0x04
963#define QLA82XX_DBG_OPCODE_OR 0x08
964#define QLA82XX_DBG_OPCODE_POLL 0x10
965#define QLA82XX_DBG_OPCODE_RDSTATE 0x20
966#define QLA82XX_DBG_OPCODE_WRSTATE 0x40
967#define QLA82XX_DBG_OPCODE_MDSTATE 0x80
968
969/*
970 * Template Header and Entry Header definitions start here.
971 */
972
973/*
974 * Template Header
975 * Parts of the template header can be modified by the driver.
976 * These include the saved_state_array, capture_debug_level, driver_timestamp
977 */
978
979#define QLA82XX_DBG_STATE_ARRAY_LEN 16
980#define QLA82XX_DBG_CAP_SIZE_ARRAY_LEN 8
981#define QLA82XX_DBG_RSVD_ARRAY_LEN 8
982
983/*
984 * Driver Flags
985 */
986#define QLA82XX_DBG_SKIPPED_FLAG 0x80 /* driver skipped this entry */
987#define QLA82XX_DEFAULT_CAP_MASK 0xFF /* default capture mask */
988
989struct qla82xx_md_template_hdr {
990 uint32_t entry_type;
991 uint32_t first_entry_offset;
992 uint32_t size_of_template;
993 uint32_t capture_debug_level;
994
995 uint32_t num_of_entries;
996 uint32_t version;
997 uint32_t driver_timestamp;
998 uint32_t template_checksum;
999
1000 uint32_t driver_capture_mask;
1001 uint32_t driver_info[3];
1002
1003 uint32_t saved_state_array[QLA82XX_DBG_STATE_ARRAY_LEN];
1004 uint32_t capture_size_array[QLA82XX_DBG_CAP_SIZE_ARRAY_LEN];
1005
1006 /* markers_array used to capture some special locations on board */
1007 uint32_t markers_array[QLA82XX_DBG_RSVD_ARRAY_LEN];
1008 uint32_t num_of_free_entries; /* For internal use */
1009 uint32_t free_entry_offset; /* For internal use */
1010 uint32_t total_table_size; /* For internal use */
1011 uint32_t bkup_table_offset; /* For internal use */
1012} __packed;
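
qla82xx_md_collect() fills driver_info[1] of this header with the driver
version packed one byte per component (major, minor, patch, beta). A
tiny sketch of that encoding, with hypothetical version numbers:

	#include <stdint.h>
	#include <assert.h>

	int main(void)
	{
		uint8_t major = 8, minor = 3, patch = 7, beta = 0;
		uint32_t info = ((uint32_t)major << 24) | ((uint32_t)minor << 16) |
				((uint32_t)patch << 8) | beta;

		assert(info == 0x08030700);
		assert((info >> 24) == major);	/* unpack the top byte to verify */
		return 0;
	}
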
1013
1014/*
1015 * Entry Header: Common to All Entry Types
1016 */
1017
1018/*
1019 * Driver Code is for driver to write some info about the entry.
1020 * Currently not used.
1021 */
1022typedef struct qla82xx_md_entry_hdr {
1023 uint32_t entry_type;
1024 uint32_t entry_size;
1025 uint32_t entry_capture_size;
1026 struct {
1027 uint8_t entry_capture_mask;
1028 uint8_t entry_code;
1029 uint8_t driver_code;
1030 uint8_t driver_flags;
1031 } d_ctrl;
1032} __packed qla82xx_md_entry_hdr_t;
1033
1034/*
1035 * Read CRB entry header
1036 */
1037struct qla82xx_md_entry_crb {
1038 qla82xx_md_entry_hdr_t h;
1039 uint32_t addr;
1040 struct {
1041 uint8_t addr_stride;
1042 uint8_t state_index_a;
1043 uint16_t poll_timeout;
1044 } crb_strd;
1045
1046 uint32_t data_size;
1047 uint32_t op_count;
1048
1049 struct {
1050 uint8_t opcode;
1051 uint8_t state_index_v;
1052 uint8_t shl;
1053 uint8_t shr;
1054 } crb_ctrl;
1055
1056 uint32_t value_1;
1057 uint32_t value_2;
1058 uint32_t value_3;
1059} __packed;
1060
1061/*
1062 * Cache entry header
1063 */
1064struct qla82xx_md_entry_cache {
1065 qla82xx_md_entry_hdr_t h;
1066
1067 uint32_t tag_reg_addr;
1068 struct {
1069 uint16_t tag_value_stride;
1070 uint16_t init_tag_value;
1071 } addr_ctrl;
1072
1073 uint32_t data_size;
1074 uint32_t op_count;
1075
1076 uint32_t control_addr;
1077 struct {
1078 uint16_t write_value;
1079 uint8_t poll_mask;
1080 uint8_t poll_wait;
1081 } cache_ctrl;
1082
1083 uint32_t read_addr;
1084 struct {
1085 uint8_t read_addr_stride;
1086 uint8_t read_addr_cnt;
1087 uint16_t rsvd_1;
1088 } read_ctrl;
1089} __packed;
1090
1091/*
1092 * Read OCM
1093 */
1094struct qla82xx_md_entry_rdocm {
1095 qla82xx_md_entry_hdr_t h;
1096
1097 uint32_t rsvd_0;
1098 uint32_t rsvd_1;
1099 uint32_t data_size;
1100 uint32_t op_count;
1101
1102 uint32_t rsvd_2;
1103 uint32_t rsvd_3;
1104 uint32_t read_addr;
1105 uint32_t read_addr_stride;
1106 uint32_t read_addr_cntrl;
1107} __packed;
1108
1109/*
1110 * Read Memory
1111 */
1112struct qla82xx_md_entry_rdmem {
1113 qla82xx_md_entry_hdr_t h;
1114 uint32_t rsvd[6];
1115 uint32_t read_addr;
1116 uint32_t read_data_size;
1117} __packed;
1118
1119/*
1120 * Read ROM
1121 */
1122struct qla82xx_md_entry_rdrom {
1123 qla82xx_md_entry_hdr_t h;
1124 uint32_t rsvd[6];
1125 uint32_t read_addr;
1126 uint32_t read_data_size;
1127} __packed;
1128
1129struct qla82xx_md_entry_mux {
1130 qla82xx_md_entry_hdr_t h;
1131
1132 uint32_t select_addr;
1133 uint32_t rsvd_0;
1134 uint32_t data_size;
1135 uint32_t op_count;
1136
1137 uint32_t select_value;
1138 uint32_t select_value_stride;
1139 uint32_t read_addr;
1140 uint32_t rsvd_1;
1141} __packed;
1142
1143struct qla82xx_md_entry_queue {
1144 qla82xx_md_entry_hdr_t h;
1145
1146 uint32_t select_addr;
1147 struct {
1148 uint16_t queue_id_stride;
1149 uint16_t rsvd_0;
1150 } q_strd;
1151
1152 uint32_t data_size;
1153 uint32_t op_count;
1154 uint32_t rsvd_1;
1155 uint32_t rsvd_2;
1156
1157 uint32_t read_addr;
1158 struct {
1159 uint8_t read_addr_stride;
1160 uint8_t read_addr_cnt;
1161 uint16_t rsvd_3;
1162 } rd_strd;
1163} __packed;
1164
1165#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE 0x129
1166#define RQST_TMPLT_SIZE 0x0
1167#define RQST_TMPLT 0x1
1168#define MD_DIRECT_ROM_WINDOW 0x42110030
1169#define MD_DIRECT_ROM_READ_BASE 0x42150000
1170#define MD_MIU_TEST_AGT_CTRL 0x41000090
1171#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
1172#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
1173
1174static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
1175 0x410000B8, 0x410000BC };
925#endif 1176#endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 1e69527f1e4..fd14c7bfc62 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -143,7 +143,7 @@ MODULE_PARM_DESC(ql2xmultique_tag,
143 "Set it to 1 to turn on the cpu affinity."); 143 "Set it to 1 to turn on the cpu affinity.");
144 144
145int ql2xfwloadbin; 145int ql2xfwloadbin;
146module_param(ql2xfwloadbin, int, S_IRUGO); 146module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR);
147MODULE_PARM_DESC(ql2xfwloadbin, 147MODULE_PARM_DESC(ql2xfwloadbin,
148 "Option to specify location from which to load ISP firmware:.\n" 148 "Option to specify location from which to load ISP firmware:.\n"
149 " 2 -- load firmware via the request_firmware() (hotplug).\n" 149 " 2 -- load firmware via the request_firmware() (hotplug).\n"
@@ -158,11 +158,11 @@ MODULE_PARM_DESC(ql2xetsenable,
158 "Default is 0 - skip ETS enablement."); 158 "Default is 0 - skip ETS enablement.");
159 159
160int ql2xdbwr = 1; 160int ql2xdbwr = 1;
161module_param(ql2xdbwr, int, S_IRUGO); 161module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR);
162MODULE_PARM_DESC(ql2xdbwr, 162MODULE_PARM_DESC(ql2xdbwr,
163 "Option to specify scheme for request queue posting.\n" 163 "Option to specify scheme for request queue posting.\n"
164 " 0 -- Regular doorbell.\n" 164 " 0 -- Regular doorbell.\n"
165 " 1 -- CAMRAM doorbell (faster).\n"); 165 " 1 -- CAMRAM doorbell (faster).\n");
166 166
167int ql2xtargetreset = 1; 167int ql2xtargetreset = 1;
168module_param(ql2xtargetreset, int, S_IRUGO); 168module_param(ql2xtargetreset, int, S_IRUGO);
@@ -183,11 +183,11 @@ MODULE_PARM_DESC(ql2xasynctmfenable,
183 "Default is 0 - Issue TM IOCBs via mailbox mechanism."); 183 "Default is 0 - Issue TM IOCBs via mailbox mechanism.");
184 184
185int ql2xdontresethba; 185int ql2xdontresethba;
186module_param(ql2xdontresethba, int, S_IRUGO); 186module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
187MODULE_PARM_DESC(ql2xdontresethba, 187MODULE_PARM_DESC(ql2xdontresethba,
188 "Option to specify reset behaviour.\n" 188 "Option to specify reset behaviour.\n"
189 " 0 (Default) -- Reset on failure.\n" 189 " 0 (Default) -- Reset on failure.\n"
190 " 1 -- Do not reset on failure.\n"); 190 " 1 -- Do not reset on failure.\n");
191 191
192uint ql2xmaxlun = MAX_LUNS; 192uint ql2xmaxlun = MAX_LUNS;
193module_param(ql2xmaxlun, uint, S_IRUGO); 193module_param(ql2xmaxlun, uint, S_IRUGO);
@@ -195,6 +195,19 @@ MODULE_PARM_DESC(ql2xmaxlun,
195 "Defines the maximum LU number to register with the SCSI " 195 "Defines the maximum LU number to register with the SCSI "
196 "midlayer. Default is 65535."); 196 "midlayer. Default is 65535.");
197 197
198int ql2xmdcapmask = 0x1F;
199module_param(ql2xmdcapmask, int, S_IRUGO);
200MODULE_PARM_DESC(ql2xmdcapmask,
201 "Set the Minidump driver capture mask level. "
202 	"Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
203
204int ql2xmdenable;
205module_param(ql2xmdenable, int, S_IRUGO);
206MODULE_PARM_DESC(ql2xmdenable,
207 "Enable/disable MiniDump. "
208 "0 (Default) - MiniDump disabled. "
209 "1 - MiniDump enabled.");
210
198/* 211/*
199 * SCSI host template entry points 212 * SCSI host template entry points
200 */ 213 */
@@ -1750,9 +1763,9 @@ static struct isp_operations qla82xx_isp_ops = {
1750 .read_nvram = qla24xx_read_nvram_data, 1763 .read_nvram = qla24xx_read_nvram_data,
1751 .write_nvram = qla24xx_write_nvram_data, 1764 .write_nvram = qla24xx_write_nvram_data,
1752 .fw_dump = qla24xx_fw_dump, 1765 .fw_dump = qla24xx_fw_dump,
1753 .beacon_on = qla24xx_beacon_on, 1766 .beacon_on = qla82xx_beacon_on,
1754 .beacon_off = qla24xx_beacon_off, 1767 .beacon_off = qla82xx_beacon_off,
1755 .beacon_blink = qla24xx_beacon_blink, 1768 .beacon_blink = NULL,
1756 .read_optrom = qla82xx_read_optrom_data, 1769 .read_optrom = qla82xx_read_optrom_data,
1757 .write_optrom = qla82xx_write_optrom_data, 1770 .write_optrom = qla82xx_write_optrom_data,
1758 .get_flash_version = qla24xx_get_flash_version, 1771 .get_flash_version = qla24xx_get_flash_version,
@@ -2670,6 +2683,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
2670 2683
2671 qla2x00_mem_free(ha); 2684 qla2x00_mem_free(ha);
2672 2685
2686 qla82xx_md_free(vha);
2687
2673 qla2x00_free_queues(ha); 2688 qla2x00_free_queues(ha);
2674} 2689}
2675 2690
@@ -3903,8 +3918,11 @@ qla2x00_timer(scsi_qla_host_t *vha)
3903 3918
3904 /* Check if beacon LED needs to be blinked for physical host only */ 3919 /* Check if beacon LED needs to be blinked for physical host only */
3905 if (!vha->vp_idx && (ha->beacon_blink_led == 1)) { 3920 if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {
3906 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags); 3921 /* There is no beacon_blink function for ISP82xx */
3907 start_dpc++; 3922 if (!IS_QLA82XX(ha)) {
3923 set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
3924 start_dpc++;
3925 }
3908 } 3926 }
3909 3927
3910 /* Process any deferred work. */ 3928 /* Process any deferred work. */
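
The module_param() hunks above relax ql2xfwloadbin, ql2xdbwr and
ql2xdontresethba from S_IRUGO to S_IRUGO|S_IWUSR, so root can now flip
them through sysfs while qla2xxx is loaded instead of only at modprobe
time. A minimal userspace sketch, assuming the standard
/sys/module/<module>/parameters layout and that qla2xxx is loaded:

	/* Sketch: set ql2xdontresethba=1 at runtime, possible now that the
	 * parameter carries S_IWUSR.  Path and value are illustrative. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/module/qla2xxx/parameters/ql2xdontresethba",
			      O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "1", 1) != 1)
			perror("write");
		close(fd);
		return 0;
	}

Only the permission bits change here; whether the driver honours a new
value immediately depends on where each parameter is read.
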
diff --git a/drivers/scsi/qla4xxx/Kconfig b/drivers/scsi/qla4xxx/Kconfig
index 0f5599e0abf..f1ad02ea212 100644
--- a/drivers/scsi/qla4xxx/Kconfig
+++ b/drivers/scsi/qla4xxx/Kconfig
@@ -2,6 +2,7 @@ config SCSI_QLA_ISCSI
2 tristate "QLogic ISP4XXX and ISP82XX host adapter family support" 2 tristate "QLogic ISP4XXX and ISP82XX host adapter family support"
3 depends on PCI && SCSI && NET 3 depends on PCI && SCSI && NET
4 select SCSI_ISCSI_ATTRS 4 select SCSI_ISCSI_ATTRS
5 select ISCSI_BOOT_SYSFS
5 ---help--- 6 ---help---
6 This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX) 7 This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX)
7 iSCSI host adapter family. 8 iSCSI host adapter family.
diff --git a/drivers/scsi/qla4xxx/Makefile b/drivers/scsi/qla4xxx/Makefile
index 252523d7847..5b44139ff43 100644
--- a/drivers/scsi/qla4xxx/Makefile
+++ b/drivers/scsi/qla4xxx/Makefile
@@ -1,5 +1,5 @@
1qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \ 1qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \
2 ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o 2 ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o ql4_bsg.o
3 3
4obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o 4obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o
5 5
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
index 864d018631c..0b0a7d42137 100644
--- a/drivers/scsi/qla4xxx/ql4_attr.c
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -55,15 +55,91 @@ qla4xxx_optrom_version_show(struct device *dev, struct device_attribute *attr,
55 ha->bootload_patch, ha->bootload_build); 55 ha->bootload_patch, ha->bootload_build);
56} 56}
57 57
58static ssize_t
59qla4xxx_board_id_show(struct device *dev, struct device_attribute *attr,
60 char *buf)
61{
62 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
63 return snprintf(buf, PAGE_SIZE, "0x%08X\n", ha->board_id);
64}
65
66static ssize_t
67qla4xxx_fw_state_show(struct device *dev, struct device_attribute *attr,
68 char *buf)
69{
70 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
71
72 qla4xxx_get_firmware_state(ha);
73 return snprintf(buf, PAGE_SIZE, "0x%08X%8X\n", ha->firmware_state,
74 ha->addl_fw_state);
75}
76
77static ssize_t
78qla4xxx_phy_port_cnt_show(struct device *dev, struct device_attribute *attr,
79 char *buf)
80{
81 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
82
83 if (!is_qla8022(ha))
84 return -ENOSYS;
85
86 return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_cnt);
87}
88
89static ssize_t
90qla4xxx_phy_port_num_show(struct device *dev, struct device_attribute *attr,
91 char *buf)
92{
93 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
94
95 if (!is_qla8022(ha))
96 return -ENOSYS;
97
98 return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_num);
99}
100
101static ssize_t
102qla4xxx_iscsi_func_cnt_show(struct device *dev, struct device_attribute *attr,
103 char *buf)
104{
105 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
106
107 if (!is_qla8022(ha))
108 return -ENOSYS;
109
110 return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->iscsi_pci_func_cnt);
111}
112
113static ssize_t
114qla4xxx_hba_model_show(struct device *dev, struct device_attribute *attr,
115 char *buf)
116{
117 struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
118
119 return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_name);
120}
121
58static DEVICE_ATTR(fw_version, S_IRUGO, qla4xxx_fw_version_show, NULL); 122static DEVICE_ATTR(fw_version, S_IRUGO, qla4xxx_fw_version_show, NULL);
59static DEVICE_ATTR(serial_num, S_IRUGO, qla4xxx_serial_num_show, NULL); 123static DEVICE_ATTR(serial_num, S_IRUGO, qla4xxx_serial_num_show, NULL);
60static DEVICE_ATTR(iscsi_version, S_IRUGO, qla4xxx_iscsi_version_show, NULL); 124static DEVICE_ATTR(iscsi_version, S_IRUGO, qla4xxx_iscsi_version_show, NULL);
61static DEVICE_ATTR(optrom_version, S_IRUGO, qla4xxx_optrom_version_show, NULL); 125static DEVICE_ATTR(optrom_version, S_IRUGO, qla4xxx_optrom_version_show, NULL);
126static DEVICE_ATTR(board_id, S_IRUGO, qla4xxx_board_id_show, NULL);
127static DEVICE_ATTR(fw_state, S_IRUGO, qla4xxx_fw_state_show, NULL);
128static DEVICE_ATTR(phy_port_cnt, S_IRUGO, qla4xxx_phy_port_cnt_show, NULL);
129static DEVICE_ATTR(phy_port_num, S_IRUGO, qla4xxx_phy_port_num_show, NULL);
130static DEVICE_ATTR(iscsi_func_cnt, S_IRUGO, qla4xxx_iscsi_func_cnt_show, NULL);
131static DEVICE_ATTR(hba_model, S_IRUGO, qla4xxx_hba_model_show, NULL);
62 132
63struct device_attribute *qla4xxx_host_attrs[] = { 133struct device_attribute *qla4xxx_host_attrs[] = {
64 &dev_attr_fw_version, 134 &dev_attr_fw_version,
65 &dev_attr_serial_num, 135 &dev_attr_serial_num,
66 &dev_attr_iscsi_version, 136 &dev_attr_iscsi_version,
67 &dev_attr_optrom_version, 137 &dev_attr_optrom_version,
138 &dev_attr_board_id,
139 &dev_attr_fw_state,
140 &dev_attr_phy_port_cnt,
141 &dev_attr_phy_port_num,
142 &dev_attr_iscsi_func_cnt,
143 &dev_attr_hba_model,
68 NULL, 144 NULL,
69}; 145};
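
The new read-only attributes land next to the existing ones in the
Scsi_Host sysfs directory, so board id, firmware state, physical port
count/number, iSCSI function count and HBA model become visible without
a vendor tool. A small reader sketch, assuming host number 0 (the
phy_port_*/iscsi_func_cnt files return -ENOSYS on non-ISP8022 parts, as
the is_qla8022() checks above show):

	/* Sketch: read the new fw_state attribute.  The host number is an
	 * assumption; adjust host0 to the qla4xxx host on your system. */
	#include <stdio.h>

	int main(void)
	{
		char buf[64];
		FILE *f = fopen("/sys/class/scsi_host/host0/fw_state", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("fw_state: %s", buf);
		fclose(f);
		return 0;
	}
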
diff --git a/drivers/scsi/qla4xxx/ql4_bsg.c b/drivers/scsi/qla4xxx/ql4_bsg.c
new file mode 100644
index 00000000000..8acdc582ff6
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_bsg.c
@@ -0,0 +1,513 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2011 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7
8#include "ql4_def.h"
9#include "ql4_glbl.h"
10#include "ql4_bsg.h"
11
12static int
13qla4xxx_read_flash(struct bsg_job *bsg_job)
14{
15 struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
16 struct scsi_qla_host *ha = to_qla_host(host);
17 struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
18 struct iscsi_bsg_request *bsg_req = bsg_job->request;
19 uint32_t offset = 0;
20 uint32_t length = 0;
21 dma_addr_t flash_dma;
22 uint8_t *flash = NULL;
23 int rval = -EINVAL;
24
25 bsg_reply->reply_payload_rcv_len = 0;
26
27 if (unlikely(pci_channel_offline(ha->pdev)))
28 goto leave;
29
30 if (ql4xxx_reset_active(ha)) {
31 ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
32 rval = -EBUSY;
33 goto leave;
34 }
35
36 if (ha->flash_state != QLFLASH_WAITING) {
37 ql4_printk(KERN_ERR, ha, "%s: another flash operation "
38 "active\n", __func__);
39 rval = -EBUSY;
40 goto leave;
41 }
42
43 ha->flash_state = QLFLASH_READING;
44 offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
45 length = bsg_job->reply_payload.payload_len;
46
47 flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
48 GFP_KERNEL);
49 if (!flash) {
50 ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
51 "data\n", __func__);
52 rval = -ENOMEM;
53 goto leave;
54 }
55
56 rval = qla4xxx_get_flash(ha, flash_dma, offset, length);
57 if (rval) {
58 ql4_printk(KERN_ERR, ha, "%s: get flash failed\n", __func__);
59 bsg_reply->result = DID_ERROR << 16;
60 rval = -EIO;
61 } else {
62 bsg_reply->reply_payload_rcv_len =
63 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
64 bsg_job->reply_payload.sg_cnt,
65 flash, length);
66 bsg_reply->result = DID_OK << 16;
67 }
68
69 bsg_job_done(bsg_job, bsg_reply->result,
70 bsg_reply->reply_payload_rcv_len);
71 dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
72leave:
73 ha->flash_state = QLFLASH_WAITING;
74 return rval;
75}
76
77static int
78qla4xxx_update_flash(struct bsg_job *bsg_job)
79{
80 struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
81 struct scsi_qla_host *ha = to_qla_host(host);
82 struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
83 struct iscsi_bsg_request *bsg_req = bsg_job->request;
84 uint32_t length = 0;
85 uint32_t offset = 0;
86 uint32_t options = 0;
87 dma_addr_t flash_dma;
88 uint8_t *flash = NULL;
89 int rval = -EINVAL;
90
91 bsg_reply->reply_payload_rcv_len = 0;
92
93 if (unlikely(pci_channel_offline(ha->pdev)))
94 goto leave;
95
96 if (ql4xxx_reset_active(ha)) {
97 ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
98 rval = -EBUSY;
99 goto leave;
100 }
101
102 if (ha->flash_state != QLFLASH_WAITING) {
103 ql4_printk(KERN_ERR, ha, "%s: another flash operation "
104 "active\n", __func__);
105 rval = -EBUSY;
106 goto leave;
107 }
108
109 ha->flash_state = QLFLASH_WRITING;
110 length = bsg_job->request_payload.payload_len;
111 offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
112 options = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
113
114 flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
115 GFP_KERNEL);
116 if (!flash) {
117 ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
118 "data\n", __func__);
119 rval = -ENOMEM;
120 goto leave;
121 }
122
123 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
124 bsg_job->request_payload.sg_cnt, flash, length);
125
126 rval = qla4xxx_set_flash(ha, flash_dma, offset, length, options);
127 if (rval) {
128 ql4_printk(KERN_ERR, ha, "%s: set flash failed\n", __func__);
129 bsg_reply->result = DID_ERROR << 16;
130 rval = -EIO;
131 } else
132 bsg_reply->result = DID_OK << 16;
133
134 bsg_job_done(bsg_job, bsg_reply->result,
135 bsg_reply->reply_payload_rcv_len);
136 dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
137leave:
138 ha->flash_state = QLFLASH_WAITING;
139 return rval;
140}
141
142static int
143qla4xxx_get_acb_state(struct bsg_job *bsg_job)
144{
145 struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
146 struct scsi_qla_host *ha = to_qla_host(host);
147 struct iscsi_bsg_request *bsg_req = bsg_job->request;
148 struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
149 uint32_t status[MBOX_REG_COUNT];
150 uint32_t acb_idx;
151 uint32_t ip_idx;
152 int rval = -EINVAL;
153
154 bsg_reply->reply_payload_rcv_len = 0;
155
156 if (unlikely(pci_channel_offline(ha->pdev)))
157 goto leave;
158
159 /* Only 4022 and above adapters are supported */
160 if (is_qla4010(ha))
161 goto leave;
162
163 if (ql4xxx_reset_active(ha)) {
164 ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
165 rval = -EBUSY;
166 goto leave;
167 }
168
169 if (bsg_job->reply_payload.payload_len < sizeof(status)) {
170 ql4_printk(KERN_ERR, ha, "%s: invalid payload len %d\n",
171 __func__, bsg_job->reply_payload.payload_len);
172 rval = -EINVAL;
173 goto leave;
174 }
175
176 acb_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
177 ip_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
178
179 rval = qla4xxx_get_ip_state(ha, acb_idx, ip_idx, status);
180 if (rval) {
181 ql4_printk(KERN_ERR, ha, "%s: get ip state failed\n",
182 __func__);
183 bsg_reply->result = DID_ERROR << 16;
184 rval = -EIO;
185 } else {
186 bsg_reply->reply_payload_rcv_len =
187 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
188 bsg_job->reply_payload.sg_cnt,
189 status, sizeof(status));
190 bsg_reply->result = DID_OK << 16;
191 }
192
193 bsg_job_done(bsg_job, bsg_reply->result,
194 bsg_reply->reply_payload_rcv_len);
195leave:
196 return rval;
197}
198
199static int
200qla4xxx_read_nvram(struct bsg_job *bsg_job)
201{
202 struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
203 struct scsi_qla_host *ha = to_qla_host(host);
204 struct iscsi_bsg_request *bsg_req = bsg_job->request;
205 struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
206 uint32_t offset = 0;
207 uint32_t len = 0;
208 uint32_t total_len = 0;
209 dma_addr_t nvram_dma;
210 uint8_t *nvram = NULL;
211 int rval = -EINVAL;
212
213 bsg_reply->reply_payload_rcv_len = 0;
214
215 if (unlikely(pci_channel_offline(ha->pdev)))
216 goto leave;
217
218 /* Only 40xx adapters are supported */
219 if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
220 goto leave;
221
222 if (ql4xxx_reset_active(ha)) {
223 ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
224 rval = -EBUSY;
225 goto leave;
226 }
227
228 offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
229 len = bsg_job->reply_payload.payload_len;
230 total_len = offset + len;
231
232 /* total len should not be greater than max NVRAM size */
233 if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
234 ((is_qla4022(ha) || is_qla4032(ha)) &&
235 total_len > QL40X2_NVRAM_SIZE)) {
236 ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
237 " nvram size, offset=%d len=%d\n",
238 __func__, offset, len);
239 goto leave;
240 }
241
242 nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
243 GFP_KERNEL);
244 if (!nvram) {
245 ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram "
246 "data\n", __func__);
247 rval = -ENOMEM;
248 goto leave;
249 }
250
251 rval = qla4xxx_get_nvram(ha, nvram_dma, offset, len);
252 if (rval) {
253 ql4_printk(KERN_ERR, ha, "%s: get nvram failed\n", __func__);
254 bsg_reply->result = DID_ERROR << 16;
255 rval = -EIO;
256 } else {
257 bsg_reply->reply_payload_rcv_len =
258 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
259 bsg_job->reply_payload.sg_cnt,
260 nvram, len);
261 bsg_reply->result = DID_OK << 16;
262 }
263
264 bsg_job_done(bsg_job, bsg_reply->result,
265 bsg_reply->reply_payload_rcv_len);
266 dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
267leave:
268 return rval;
269}
270
271static int
272qla4xxx_update_nvram(struct bsg_job *bsg_job)
273{
274 struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
275 struct scsi_qla_host *ha = to_qla_host(host);
276 struct iscsi_bsg_request *bsg_req = bsg_job->request;
277 struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
278 uint32_t offset = 0;
279 uint32_t len = 0;
280 uint32_t total_len = 0;
281 dma_addr_t nvram_dma;
282 uint8_t *nvram = NULL;
283 int rval = -EINVAL;
284
285 bsg_reply->reply_payload_rcv_len = 0;
286
287 if (unlikely(pci_channel_offline(ha->pdev)))
288 goto leave;
289
290 if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
291 goto leave;
292
293 if (ql4xxx_reset_active(ha)) {
294 ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
295 rval = -EBUSY;
296 goto leave;
297 }
298
299 offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
300 len = bsg_job->request_payload.payload_len;
301 total_len = offset + len;
302
303 /* total len should not be greater than max NVRAM size */
304 if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
305 ((is_qla4022(ha) || is_qla4032(ha)) &&
306 total_len > QL40X2_NVRAM_SIZE)) {
307 ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
308 " nvram size, offset=%d len=%d\n",
309 __func__, offset, len);
310 goto leave;
311 }
312
313 nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
314 GFP_KERNEL);
315 if (!nvram) {
316 ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram "
317 "data\n", __func__);
318 rval = -ENOMEM;
319 goto leave;
320 }
321
322 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
323 bsg_job->request_payload.sg_cnt, nvram, len);
324
325 rval = qla4xxx_set_nvram(ha, nvram_dma, offset, len);
326 if (rval) {
327 ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
328 bsg_reply->result = DID_ERROR << 16;
329 rval = -EIO;
330 } else
331 bsg_reply->result = DID_OK << 16;
332
333 bsg_job_done(bsg_job, bsg_reply->result,
334 bsg_reply->reply_payload_rcv_len);
335 dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
336leave:
337 return rval;
338}
339
340static int
341qla4xxx_restore_defaults(struct bsg_job *bsg_job)
342{
343 struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
344 struct scsi_qla_host *ha = to_qla_host(host);
345 struct iscsi_bsg_request *bsg_req = bsg_job->request;
346 struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
347 uint32_t region = 0;
348 uint32_t field0 = 0;
349 uint32_t field1 = 0;
350 int rval = -EINVAL;
351
352 bsg_reply->reply_payload_rcv_len = 0;
353
354 if (unlikely(pci_channel_offline(ha->pdev)))
355 goto leave;
356
357 if (is_qla4010(ha))
358 goto leave;
359
360 if (ql4xxx_reset_active(ha)) {
361 ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
362 rval = -EBUSY;
363 goto leave;
364 }
365
366 region = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
367 field0 = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
368 field1 = bsg_req->rqst_data.h_vendor.vendor_cmd[3];
369
370 rval = qla4xxx_restore_factory_defaults(ha, region, field0, field1);
371 if (rval) {
372 ql4_printk(KERN_ERR, ha, "%s: restore defaults failed\n", __func__);
373 bsg_reply->result = DID_ERROR << 16;
374 rval = -EIO;
375 } else
376 bsg_reply->result = DID_OK << 16;
377
378 bsg_job_done(bsg_job, bsg_reply->result,
379 bsg_reply->reply_payload_rcv_len);
380leave:
381 return rval;
382}
383
384static int
385qla4xxx_bsg_get_acb(struct bsg_job *bsg_job)
386{
387 struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
388 struct scsi_qla_host *ha = to_qla_host(host);
389 struct iscsi_bsg_request *bsg_req = bsg_job->request;
390 struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
391 uint32_t acb_type = 0;
392 uint32_t len = 0;
393 dma_addr_t acb_dma;
394 uint8_t *acb = NULL;
395 int rval = -EINVAL;
396
397 bsg_reply->reply_payload_rcv_len = 0;
398
399 if (unlikely(pci_channel_offline(ha->pdev)))
400 goto leave;
401
402 /* Only 4022 and above adapters are supported */
403 if (is_qla4010(ha))
404 goto leave;
405
406 if (ql4xxx_reset_active(ha)) {
407 ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
408 rval = -EBUSY;
409 goto leave;
410 }
411
412 acb_type = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
413 len = bsg_job->reply_payload.payload_len;
414 if (len < sizeof(struct addr_ctrl_blk)) {
415 ql4_printk(KERN_ERR, ha, "%s: invalid acb len %d\n",
416 __func__, len);
417 rval = -EINVAL;
418 goto leave;
419 }
420
421 acb = dma_alloc_coherent(&ha->pdev->dev, len, &acb_dma, GFP_KERNEL);
422 if (!acb) {
423 ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for acb "
424 "data\n", __func__);
425 rval = -ENOMEM;
426 goto leave;
427 }
428
429 rval = qla4xxx_get_acb(ha, acb_dma, acb_type, len);
430 if (rval) {
431 ql4_printk(KERN_ERR, ha, "%s: get acb failed\n", __func__);
432 bsg_reply->result = DID_ERROR << 16;
433 rval = -EIO;
434 } else {
435 bsg_reply->reply_payload_rcv_len =
436 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
437 bsg_job->reply_payload.sg_cnt,
438 acb, len);
439 bsg_reply->result = DID_OK << 16;
440 }
441
442 bsg_job_done(bsg_job, bsg_reply->result,
443 bsg_reply->reply_payload_rcv_len);
444 dma_free_coherent(&ha->pdev->dev, len, acb, acb_dma);
445leave:
446 return rval;
447}
448
449/**
450 * qla4xxx_process_vendor_specific - handle vendor specific bsg request
451 * @bsg_job: iscsi_bsg_job to handle
452 */
453int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job)
454{
455 struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
456 struct iscsi_bsg_request *bsg_req = bsg_job->request;
457 struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
458 struct scsi_qla_host *ha = to_qla_host(host);
459
460 switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
461 case QLISCSI_VND_READ_FLASH:
462 return qla4xxx_read_flash(bsg_job);
463
464 case QLISCSI_VND_UPDATE_FLASH:
465 return qla4xxx_update_flash(bsg_job);
466
467 case QLISCSI_VND_GET_ACB_STATE:
468 return qla4xxx_get_acb_state(bsg_job);
469
470 case QLISCSI_VND_READ_NVRAM:
471 return qla4xxx_read_nvram(bsg_job);
472
473 case QLISCSI_VND_UPDATE_NVRAM:
474 return qla4xxx_update_nvram(bsg_job);
475
476 case QLISCSI_VND_RESTORE_DEFAULTS:
477 return qla4xxx_restore_defaults(bsg_job);
478
479 case QLISCSI_VND_GET_ACB:
480 return qla4xxx_bsg_get_acb(bsg_job);
481
482 default:
483 ql4_printk(KERN_ERR, ha, "%s: invalid BSG vendor command: "
484 "0x%x\n", __func__, bsg_req->msgcode);
485 bsg_reply->result = (DID_ERROR << 16);
486 bsg_reply->reply_payload_rcv_len = 0;
487 bsg_job_done(bsg_job, bsg_reply->result,
488 bsg_reply->reply_payload_rcv_len);
489 return -ENOSYS;
490 }
491}
492
493/**
494 * qla4xxx_bsg_request - handle bsg request from ISCSI transport
495 * @bsg_job: iscsi_bsg_job to handle
496 */
497int qla4xxx_bsg_request(struct bsg_job *bsg_job)
498{
499 struct iscsi_bsg_request *bsg_req = bsg_job->request;
500 struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
501 struct scsi_qla_host *ha = to_qla_host(host);
502
503 switch (bsg_req->msgcode) {
504 case ISCSI_BSG_HST_VENDOR:
505 return qla4xxx_process_vendor_specific(bsg_job);
506
507 default:
508 ql4_printk(KERN_ERR, ha, "%s: invalid BSG command: 0x%x\n",
509 __func__, bsg_req->msgcode);
510 }
511
512 return -ENOSYS;
513}
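
All of the vendor handlers above are reached through the iSCSI
transport's bsg node with an SG_IO ioctl on a struct sg_io_v4 header:
vendor_cmd[0] selects the operation, further vendor_cmd words carry
operands such as the flash offset, and the reply payload returns the
data. A hedged userspace sketch of QLISCSI_VND_READ_FLASH; the node
name /dev/bsg/iscsi_host0, the flash offset, the read length and the
locally mirrored request layout are all assumptions (a real tool would
include <scsi/scsi_bsg_iscsi.h> instead):

	/* Sketch only: read 4 KB of flash via the BSG vendor interface. */
	#include <fcntl.h>
	#include <linux/bsg.h>
	#include <scsi/sg.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	#define ISCSI_BSG_HST_VENDOR	0x800000FF	/* host class | vendor */
	#define QLISCSI_VND_READ_FLASH	1		/* from ql4_bsg.h */

	struct vnd_request {			/* mirrors iscsi_bsg_request */
		uint32_t msgcode;
		uint64_t vendor_id;
		uint32_t vendor_cmd[2];		/* [0]=opcode, [1]=offset */
	} __attribute__((packed));

	int main(void)
	{
		static uint8_t flash[4096], reply[256];
		struct vnd_request req;
		struct sg_io_v4 hdr;
		int fd = open("/dev/bsg/iscsi_host0", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		memset(&req, 0, sizeof(req));
		req.msgcode = ISCSI_BSG_HST_VENDOR;
		req.vendor_cmd[0] = QLISCSI_VND_READ_FLASH;
		req.vendor_cmd[1] = 0;			/* flash offset */

		memset(&hdr, 0, sizeof(hdr));
		hdr.guard = 'Q';
		hdr.protocol = BSG_PROTOCOL_SCSI;
		hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
		hdr.request = (uintptr_t)&req;
		hdr.request_len = sizeof(req);
		hdr.response = (uintptr_t)reply;	/* iscsi_bsg_reply */
		hdr.max_response_len = sizeof(reply);
		hdr.din_xferp = (uintptr_t)flash;	/* reply payload */
		hdr.din_xfer_len = sizeof(flash);

		if (ioctl(fd, SG_IO, &hdr) < 0)
			perror("SG_IO");
		close(fd);
		return 0;
	}
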
diff --git a/drivers/scsi/qla4xxx/ql4_bsg.h b/drivers/scsi/qla4xxx/ql4_bsg.h
new file mode 100644
index 00000000000..c6a0364509f
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_bsg.h
@@ -0,0 +1,19 @@
1/*
2 * QLogic iSCSI HBA Driver
3 * Copyright (c) 2011 QLogic Corporation
4 *
5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */
7#ifndef __QL4_BSG_H
8#define __QL4_BSG_H
9
10/* BSG Vendor specific commands */
11#define QLISCSI_VND_READ_FLASH 1
12#define QLISCSI_VND_UPDATE_FLASH 2
13#define QLISCSI_VND_GET_ACB_STATE 3
14#define QLISCSI_VND_READ_NVRAM 4
15#define QLISCSI_VND_UPDATE_NVRAM 5
16#define QLISCSI_VND_RESTORE_DEFAULTS 6
17#define QLISCSI_VND_GET_ACB 7
18
19#endif
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 473c5c872b3..ace637bf254 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -25,6 +25,7 @@
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/mutex.h> 26#include <linux/mutex.h>
27#include <linux/aer.h> 27#include <linux/aer.h>
28#include <linux/bsg-lib.h>
28 29
29#include <net/tcp.h> 30#include <net/tcp.h>
30#include <scsi/scsi.h> 31#include <scsi/scsi.h>
@@ -33,9 +34,14 @@
33#include <scsi/scsi_cmnd.h> 34#include <scsi/scsi_cmnd.h>
34#include <scsi/scsi_transport.h> 35#include <scsi/scsi_transport.h>
35#include <scsi/scsi_transport_iscsi.h> 36#include <scsi/scsi_transport_iscsi.h>
37#include <scsi/scsi_bsg_iscsi.h>
38#include <scsi/scsi_netlink.h>
39#include <scsi/libiscsi.h>
36 40
37#include "ql4_dbg.h" 41#include "ql4_dbg.h"
38#include "ql4_nx.h" 42#include "ql4_nx.h"
43#include "ql4_fw.h"
44#include "ql4_nvram.h"
39 45
40#ifndef PCI_DEVICE_ID_QLOGIC_ISP4010 46#ifndef PCI_DEVICE_ID_QLOGIC_ISP4010
41#define PCI_DEVICE_ID_QLOGIC_ISP4010 0x4010 47#define PCI_DEVICE_ID_QLOGIC_ISP4010 0x4010
@@ -109,7 +115,7 @@
109#define MAX_BUSES 1 115#define MAX_BUSES 1
110#define MAX_TARGETS MAX_DEV_DB_ENTRIES 116#define MAX_TARGETS MAX_DEV_DB_ENTRIES
111#define MAX_LUNS 0xffff 117#define MAX_LUNS 0xffff
112#define MAX_AEN_ENTRIES 256 /* should be > EXT_DEF_MAX_AEN_QUEUE */ 118#define MAX_AEN_ENTRIES MAX_DEV_DB_ENTRIES
113#define MAX_DDB_ENTRIES MAX_DEV_DB_ENTRIES 119#define MAX_DDB_ENTRIES MAX_DEV_DB_ENTRIES
114#define MAX_PDU_ENTRIES 32 120#define MAX_PDU_ENTRIES 32
115#define INVALID_ENTRY 0xFFFF 121#define INVALID_ENTRY 0xFFFF
@@ -166,6 +172,7 @@
166#define RELOGIN_TOV 18 172#define RELOGIN_TOV 18
167#define ISNS_DEREG_TOV 5 173#define ISNS_DEREG_TOV 5
168#define HBA_ONLINE_TOV 30 174#define HBA_ONLINE_TOV 30
175#define DISABLE_ACB_TOV 30
169 176
170#define MAX_RESET_HA_RETRIES 2 177#define MAX_RESET_HA_RETRIES 2
171 178
@@ -227,52 +234,12 @@ struct ql4_aen_log {
227 * Device Database (DDB) structure 234 * Device Database (DDB) structure
228 */ 235 */
229struct ddb_entry { 236struct ddb_entry {
230 struct list_head list; /* ddb list */
231 struct scsi_qla_host *ha; 237 struct scsi_qla_host *ha;
232 struct iscsi_cls_session *sess; 238 struct iscsi_cls_session *sess;
233 struct iscsi_cls_conn *conn; 239 struct iscsi_cls_conn *conn;
234 240
235 atomic_t state; /* DDB State */
236
237 unsigned long flags; /* DDB Flags */
238
239 uint16_t fw_ddb_index; /* DDB firmware index */ 241 uint16_t fw_ddb_index; /* DDB firmware index */
240 uint16_t options;
241 uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */ 242 uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */
242
243 uint32_t CmdSn;
244 uint16_t target_session_id;
245 uint16_t connection_id;
246 uint16_t exe_throttle; /* Max number of cmds outstanding
247 * simultaneously */
248 uint16_t task_mgmt_timeout; /* Min time for task mgmt cmds to
249 * complete */
250 uint16_t default_relogin_timeout; /* Max time to wait for
251 * relogin to complete */
252 uint16_t tcp_source_port_num;
253 uint32_t default_time2wait; /* Default Min time between
254 * relogins (+aens) */
255
256 atomic_t retry_relogin_timer; /* Min Time between relogins
257 * (4000 only) */
258 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
259 atomic_t relogin_retry_count; /* Num of times relogin has been
260 * retried */
261
262 uint16_t port;
263 uint32_t tpgt;
264 uint8_t ip_addr[IP_ADDR_LEN];
265 uint8_t iscsi_name[ISCSI_NAME_SIZE]; /* 72 x48 */
266 uint8_t iscsi_alias[0x20];
267 uint8_t isid[6];
268 uint16_t iscsi_max_burst_len;
269 uint16_t iscsi_max_outsnd_r2t;
270 uint16_t iscsi_first_burst_len;
271 uint16_t iscsi_max_rcv_data_seg_len;
272 uint16_t iscsi_max_snd_data_seg_len;
273
274 struct in6_addr remote_ipv6_addr;
275 struct in6_addr link_local_ipv6_addr;
276}; 243};
277 244
278/* 245/*
@@ -293,8 +260,6 @@ struct ddb_entry {
293#define DF_FO_MASKED 3 260#define DF_FO_MASKED 3
294 261
295 262
296#include "ql4_fw.h"
297#include "ql4_nvram.h"
298 263
299struct ql82xx_hw_data { 264struct ql82xx_hw_data {
300 /* Offsets for flash/nvram access (set to ~0 if not used). */ 265 /* Offsets for flash/nvram access (set to ~0 if not used). */
@@ -312,7 +277,10 @@ struct ql82xx_hw_data {
312 uint32_t flt_region_boot; 277 uint32_t flt_region_boot;
313 uint32_t flt_region_bootload; 278 uint32_t flt_region_bootload;
314 uint32_t flt_region_fw; 279 uint32_t flt_region_fw;
315 uint32_t reserved; 280
281 uint32_t flt_iscsi_param;
282 uint32_t flt_region_chap;
283 uint32_t flt_chap_size;
316}; 284};
317 285
318struct qla4_8xxx_legacy_intr_set { 286struct qla4_8xxx_legacy_intr_set {
@@ -357,6 +325,68 @@ struct isp_operations {
357 int (*get_sys_info) (struct scsi_qla_host *); 325 int (*get_sys_info) (struct scsi_qla_host *);
358}; 326};
359 327
328/* qla4xxx ipaddress configuration details */
329struct ipaddress_config {
330 uint16_t ipv4_options;
331 uint16_t tcp_options;
332 uint16_t ipv4_vlan_tag;
333 uint8_t ipv4_addr_state;
334 uint8_t ip_address[IP_ADDR_LEN];
335 uint8_t subnet_mask[IP_ADDR_LEN];
336 uint8_t gateway[IP_ADDR_LEN];
337 uint32_t ipv6_options;
338 uint32_t ipv6_addl_options;
339 uint8_t ipv6_link_local_state;
340 uint8_t ipv6_addr0_state;
341 uint8_t ipv6_addr1_state;
342 uint8_t ipv6_default_router_state;
343 uint16_t ipv6_vlan_tag;
344 struct in6_addr ipv6_link_local_addr;
345 struct in6_addr ipv6_addr0;
346 struct in6_addr ipv6_addr1;
347 struct in6_addr ipv6_default_router_addr;
348 uint16_t eth_mtu_size;
349 uint16_t ipv4_port;
350 uint16_t ipv6_port;
351};
352
353#define QL4_CHAP_MAX_NAME_LEN 256
354#define QL4_CHAP_MAX_SECRET_LEN 100
355#define LOCAL_CHAP 0
356#define BIDI_CHAP 1
357
358struct ql4_chap_format {
359 u8 intr_chap_name[QL4_CHAP_MAX_NAME_LEN];
360 u8 intr_secret[QL4_CHAP_MAX_SECRET_LEN];
361 u8 target_chap_name[QL4_CHAP_MAX_NAME_LEN];
362 u8 target_secret[QL4_CHAP_MAX_SECRET_LEN];
363 u16 intr_chap_name_length;
364 u16 intr_secret_length;
365 u16 target_chap_name_length;
366 u16 target_secret_length;
367};
368
369struct ip_address_format {
370 u8 ip_type;
371 u8 ip_address[16];
372};
373
374struct ql4_conn_info {
375 u16 dest_port;
376 struct ip_address_format dest_ipaddr;
377 struct ql4_chap_format chap;
378};
379
380struct ql4_boot_session_info {
381 u8 target_name[224];
382 struct ql4_conn_info conn_list[1];
383};
384
385struct ql4_boot_tgt_info {
386 struct ql4_boot_session_info boot_pri_sess;
387 struct ql4_boot_session_info boot_sec_sess;
388};
389
360/* 390/*
361 * Linux Host Adapter structure 391 * Linux Host Adapter structure
362 */ 392 */
@@ -451,10 +481,6 @@ struct scsi_qla_host {
451 /* --- From Init_FW --- */ 481 /* --- From Init_FW --- */
452 /* init_cb_t *init_cb; */ 482 /* init_cb_t *init_cb; */
453 uint16_t firmware_options; 483 uint16_t firmware_options;
454 uint16_t tcp_options;
455 uint8_t ip_address[IP_ADDR_LEN];
456 uint8_t subnet_mask[IP_ADDR_LEN];
457 uint8_t gateway[IP_ADDR_LEN];
458 uint8_t alias[32]; 484 uint8_t alias[32];
459 uint8_t name_string[256]; 485 uint8_t name_string[256];
460 uint8_t heartbeat_interval; 486 uint8_t heartbeat_interval;
@@ -462,7 +488,7 @@ struct scsi_qla_host {
462 /* --- From FlashSysInfo --- */ 488 /* --- From FlashSysInfo --- */
463 uint8_t my_mac[MAC_ADDR_LEN]; 489 uint8_t my_mac[MAC_ADDR_LEN];
464 uint8_t serial_number[16]; 490 uint8_t serial_number[16];
465 491 uint16_t port_num;
466 /* --- From GetFwState --- */ 492 /* --- From GetFwState --- */
467 uint32_t firmware_state; 493 uint32_t firmware_state;
468 uint32_t addl_fw_state; 494 uint32_t addl_fw_state;
@@ -524,31 +550,13 @@ struct scsi_qla_host {
524 volatile uint8_t mbox_status_count; 550 volatile uint8_t mbox_status_count;
525 volatile uint32_t mbox_status[MBOX_REG_COUNT]; 551 volatile uint32_t mbox_status[MBOX_REG_COUNT];
526 552
527 /* local device database list (contains internal ddb entries) */ 553 /* FW ddb index map */
528 struct list_head ddb_list;
529
530 /* Map ddb_list entry by FW ddb index */
531 struct ddb_entry *fw_ddb_index_map[MAX_DDB_ENTRIES]; 554 struct ddb_entry *fw_ddb_index_map[MAX_DDB_ENTRIES];
532 555
533 /* Saved srb for status continuation entry processing */ 556 /* Saved srb for status continuation entry processing */
534 struct srb *status_srb; 557 struct srb *status_srb;
535 558
536 /* IPv6 support info from InitFW */
537 uint8_t acb_version; 559 uint8_t acb_version;
538 uint8_t ipv4_addr_state;
539 uint16_t ipv4_options;
540
541 uint32_t resvd2;
542 uint32_t ipv6_options;
543 uint32_t ipv6_addl_options;
544 uint8_t ipv6_link_local_state;
545 uint8_t ipv6_addr0_state;
546 uint8_t ipv6_addr1_state;
547 uint8_t ipv6_default_router_state;
548 struct in6_addr ipv6_link_local_addr;
549 struct in6_addr ipv6_addr0;
550 struct in6_addr ipv6_addr1;
551 struct in6_addr ipv6_default_router_addr;
552 560
553 /* qla82xx specific fields */ 561 /* qla82xx specific fields */
554 struct device_reg_82xx __iomem *qla4_8xxx_reg; /* Base I/O address */ 562 struct device_reg_82xx __iomem *qla4_8xxx_reg; /* Base I/O address */
@@ -584,6 +592,11 @@ struct scsi_qla_host {
584 592
585 struct completion mbx_intr_comp; 593 struct completion mbx_intr_comp;
586 594
595 struct ipaddress_config ip_config;
596 struct iscsi_iface *iface_ipv4;
597 struct iscsi_iface *iface_ipv6_0;
598 struct iscsi_iface *iface_ipv6_1;
599
587 /* --- From About Firmware --- */ 600 /* --- From About Firmware --- */
588 uint16_t iscsi_major; 601 uint16_t iscsi_major;
589 uint16_t iscsi_minor; 602 uint16_t iscsi_minor;
@@ -591,16 +604,60 @@ struct scsi_qla_host {
591 uint16_t bootload_minor; 604 uint16_t bootload_minor;
592 uint16_t bootload_patch; 605 uint16_t bootload_patch;
593 uint16_t bootload_build; 606 uint16_t bootload_build;
607
608 uint32_t flash_state;
609#define QLFLASH_WAITING 0
610#define QLFLASH_READING 1
611#define QLFLASH_WRITING 2
612 struct dma_pool *chap_dma_pool;
613 uint8_t *chap_list; /* CHAP table cache */
614 struct mutex chap_sem;
615#define CHAP_DMA_BLOCK_SIZE 512
616 struct workqueue_struct *task_wq;
617 unsigned long ddb_idx_map[MAX_DDB_ENTRIES / BITS_PER_LONG];
618#define SYSFS_FLAG_FW_SEL_BOOT 2
619 struct iscsi_boot_kset *boot_kset;
620 struct ql4_boot_tgt_info boot_tgt;
621 uint16_t phy_port_num;
622 uint16_t phy_port_cnt;
623 uint16_t iscsi_pci_func_cnt;
624 uint8_t model_name[16];
625 struct completion disable_acb_comp;
626};
627
628struct ql4_task_data {
629 struct scsi_qla_host *ha;
630 uint8_t iocb_req_cnt;
631 dma_addr_t data_dma;
632 void *req_buffer;
633 dma_addr_t req_dma;
634 uint32_t req_len;
635 void *resp_buffer;
636 dma_addr_t resp_dma;
637 uint32_t resp_len;
638 struct iscsi_task *task;
639 struct passthru_status sts;
640 struct work_struct task_work;
641};
642
643struct qla_endpoint {
644 struct Scsi_Host *host;
645 struct sockaddr dst_addr;
646};
647
648struct qla_conn {
649 struct qla_endpoint *qla_ep;
594}; 650};
595 651
596static inline int is_ipv4_enabled(struct scsi_qla_host *ha) 652static inline int is_ipv4_enabled(struct scsi_qla_host *ha)
597{ 653{
598 return ((ha->ipv4_options & IPOPT_IPv4_PROTOCOL_ENABLE) != 0); 654 return ((ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE) != 0);
599} 655}
600 656
601static inline int is_ipv6_enabled(struct scsi_qla_host *ha) 657static inline int is_ipv6_enabled(struct scsi_qla_host *ha)
602{ 658{
603 return ((ha->ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE) != 0); 659 return ((ha->ip_config.ipv6_options &
660 IPV6_OPT_IPV6_PROTOCOL_ENABLE) != 0);
604} 661}
605 662
606static inline int is_qla4010(struct scsi_qla_host *ha) 663static inline int is_qla4010(struct scsi_qla_host *ha)
@@ -618,6 +675,11 @@ static inline int is_qla4032(struct scsi_qla_host *ha)
618 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4032; 675 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4032;
619} 676}
620 677
678static inline int is_qla40XX(struct scsi_qla_host *ha)
679{
680 return is_qla4032(ha) || is_qla4022(ha) || is_qla4010(ha);
681}
682
621static inline int is_qla8022(struct scsi_qla_host *ha) 683static inline int is_qla8022(struct scsi_qla_host *ha)
622{ 684{
623 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022; 685 return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022;
@@ -640,7 +702,7 @@ static inline int adapter_up(struct scsi_qla_host *ha)
640 702
641static inline struct scsi_qla_host* to_qla_host(struct Scsi_Host *shost) 703static inline struct scsi_qla_host* to_qla_host(struct Scsi_Host *shost)
642{ 704{
643 return (struct scsi_qla_host *)shost->hostdata; 705 return (struct scsi_qla_host *)iscsi_host_priv(shost);
644} 706}
645 707
646static inline void __iomem* isp_semaphore(struct scsi_qla_host *ha) 708static inline void __iomem* isp_semaphore(struct scsi_qla_host *ha)
@@ -760,6 +822,16 @@ static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a)
760 ql4xxx_sem_unlock(a, QL4022_DRVR_SEM_MASK); 822 ql4xxx_sem_unlock(a, QL4022_DRVR_SEM_MASK);
761} 823}
762 824
825static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
826{
827 return test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
828 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
829 test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
830 test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
831 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
832 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
833
834}
763/*---------------------------------------------------------------------------*/ 835/*---------------------------------------------------------------------------*/
764 836
765/* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */ 837/* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 01082aa7709..cbd5a20dbbd 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -146,6 +146,13 @@ struct isp_reg {
146#define QL4022_NVRAM_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (10+16)) 146#define QL4022_NVRAM_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (10+16))
147#define QL4022_FLASH_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (13+16)) 147#define QL4022_FLASH_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (13+16))
148 148
149/* nvram address for 4032 */
150#define NVRAM_PORT0_BOOT_MODE 0x03b1
151#define NVRAM_PORT0_BOOT_PRI_TGT 0x03b2
152#define NVRAM_PORT0_BOOT_SEC_TGT 0x03bb
153#define NVRAM_PORT1_BOOT_MODE 0x07b1
154#define NVRAM_PORT1_BOOT_PRI_TGT 0x07b2
155#define NVRAM_PORT1_BOOT_SEC_TGT 0x07bb
149 156
150 157
151/* Page # defines for 4022 */ 158/* Page # defines for 4022 */
@@ -194,6 +201,9 @@ static inline uint32_t clr_rmask(uint32_t val)
194/* ISP 4022 nvram definitions */ 201/* ISP 4022 nvram definitions */
195#define NVR_WRITE_ENABLE 0x00000010 /* 4022 */ 202#define NVR_WRITE_ENABLE 0x00000010 /* 4022 */
196 203
204#define QL4010_NVRAM_SIZE 0x200
205#define QL40X2_NVRAM_SIZE 0x800
206
197/* ISP port_status definitions */ 207/* ISP port_status definitions */
198 208
199/* ISP Semaphore definitions */ 209/* ISP Semaphore definitions */
@@ -241,6 +251,8 @@ union external_hw_config_reg {
241#define FA_BOOT_CODE_ADDR_82 0x20000 251#define FA_BOOT_CODE_ADDR_82 0x20000
242#define FA_RISC_CODE_ADDR_82 0x40000 252#define FA_RISC_CODE_ADDR_82 0x40000
243#define FA_GOLD_RISC_CODE_ADDR_82 0x80000 253#define FA_GOLD_RISC_CODE_ADDR_82 0x80000
254#define FA_FLASH_ISCSI_CHAP 0x540000
255#define FA_FLASH_CHAP_SIZE 0xC0000
244 256
245/* Flash Description Table */ 257/* Flash Description Table */
246struct qla_fdt_layout { 258struct qla_fdt_layout {
@@ -296,8 +308,11 @@ struct qla_flt_header {
296#define FLT_REG_FLT 0x1c 308#define FLT_REG_FLT 0x1c
297#define FLT_REG_BOOTLOAD_82 0x72 309#define FLT_REG_BOOTLOAD_82 0x72
298#define FLT_REG_FW_82 0x74 310#define FLT_REG_FW_82 0x74
311#define FLT_REG_FW_82_1 0x97
299#define FLT_REG_GOLD_FW_82 0x75 312#define FLT_REG_GOLD_FW_82 0x75
300#define FLT_REG_BOOT_CODE_82 0x78 313#define FLT_REG_BOOT_CODE_82 0x78
314#define FLT_REG_ISCSI_PARAM 0x65
315#define FLT_REG_ISCSI_CHAP 0x63
301 316
302struct qla_flt_region { 317struct qla_flt_region {
303 uint32_t code; 318 uint32_t code;
@@ -331,9 +346,11 @@ struct qla_flt_region {
331#define MBOX_CMD_WRITE_FLASH 0x0025 346#define MBOX_CMD_WRITE_FLASH 0x0025
332#define MBOX_CMD_READ_FLASH 0x0026 347#define MBOX_CMD_READ_FLASH 0x0026
333#define MBOX_CMD_CLEAR_DATABASE_ENTRY 0x0031 348#define MBOX_CMD_CLEAR_DATABASE_ENTRY 0x0031
349#define MBOX_CMD_CONN_OPEN 0x0074
334#define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT 0x0056 350#define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT 0x0056
335#define LOGOUT_OPTION_CLOSE_SESSION 0x01 351#define LOGOUT_OPTION_CLOSE_SESSION 0x0002
336#define LOGOUT_OPTION_RELOGIN 0x02 352#define LOGOUT_OPTION_RELOGIN 0x0004
353#define LOGOUT_OPTION_FREE_DDB 0x0008
337#define MBOX_CMD_EXECUTE_IOCB_A64 0x005A 354#define MBOX_CMD_EXECUTE_IOCB_A64 0x005A
338#define MBOX_CMD_INITIALIZE_FIRMWARE 0x0060 355#define MBOX_CMD_INITIALIZE_FIRMWARE 0x0060
339#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK 0x0061 356#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK 0x0061
@@ -342,12 +359,15 @@ struct qla_flt_region {
342#define MBOX_CMD_GET_DATABASE_ENTRY 0x0064 359#define MBOX_CMD_GET_DATABASE_ENTRY 0x0064
343#define DDB_DS_UNASSIGNED 0x00 360#define DDB_DS_UNASSIGNED 0x00
344#define DDB_DS_NO_CONNECTION_ACTIVE 0x01 361#define DDB_DS_NO_CONNECTION_ACTIVE 0x01
362#define DDB_DS_DISCOVERY 0x02
345#define DDB_DS_SESSION_ACTIVE 0x04 363#define DDB_DS_SESSION_ACTIVE 0x04
346#define DDB_DS_SESSION_FAILED 0x06 364#define DDB_DS_SESSION_FAILED 0x06
347#define DDB_DS_LOGIN_IN_PROCESS 0x07 365#define DDB_DS_LOGIN_IN_PROCESS 0x07
348#define MBOX_CMD_GET_FW_STATE 0x0069 366#define MBOX_CMD_GET_FW_STATE 0x0069
349#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK_DEFAULTS 0x006A 367#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK_DEFAULTS 0x006A
350#define MBOX_CMD_GET_SYS_INFO 0x0078 368#define MBOX_CMD_GET_SYS_INFO 0x0078
369#define MBOX_CMD_GET_NVRAM 0x0078 /* For 40xx */
370#define MBOX_CMD_SET_NVRAM 0x0079 /* For 40xx */
351#define MBOX_CMD_RESTORE_FACTORY_DEFAULTS 0x0087 371#define MBOX_CMD_RESTORE_FACTORY_DEFAULTS 0x0087
352#define MBOX_CMD_SET_ACB 0x0088 372#define MBOX_CMD_SET_ACB 0x0088
353#define MBOX_CMD_GET_ACB 0x0089 373#define MBOX_CMD_GET_ACB 0x0089
@@ -375,7 +395,10 @@ struct qla_flt_region {
375#define FW_ADDSTATE_DHCPv4_LEASE_EXPIRED 0x0008 395#define FW_ADDSTATE_DHCPv4_LEASE_EXPIRED 0x0008
376#define FW_ADDSTATE_LINK_UP 0x0010 396#define FW_ADDSTATE_LINK_UP 0x0010
377#define FW_ADDSTATE_ISNS_SVC_ENABLED 0x0020 397#define FW_ADDSTATE_ISNS_SVC_ENABLED 0x0020
398
378#define MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS 0x006B 399#define MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS 0x006B
400#define IPV6_DEFAULT_DDB_ENTRY 0x0001
401
379#define MBOX_CMD_CONN_OPEN_SESS_LOGIN 0x0074 402#define MBOX_CMD_CONN_OPEN_SESS_LOGIN 0x0074
380#define MBOX_CMD_GET_CRASH_RECORD 0x0076 /* 4010 only */ 403#define MBOX_CMD_GET_CRASH_RECORD 0x0076 /* 4010 only */
381#define MBOX_CMD_GET_CONN_EVENT_LOG 0x0077 404#define MBOX_CMD_GET_CONN_EVENT_LOG 0x0077
@@ -434,6 +457,14 @@ struct qla_flt_region {
434#define ACB_STATE_VALID 0x05 457#define ACB_STATE_VALID 0x05
435#define ACB_STATE_DISABLING 0x06 458#define ACB_STATE_DISABLING 0x06
436 459
460/* FLASH offsets */
461#define FLASH_SEGMENT_IFCB 0x04000000
462
463#define FLASH_OPT_RMW_HOLD 0
464#define FLASH_OPT_RMW_INIT 1
465#define FLASH_OPT_COMMIT 2
466#define FLASH_OPT_RMW_COMMIT 3
467
437/*************************************************************************/ 468/*************************************************************************/
438 469
439/* Host Adapter Initialization Control Block (from host) */ 470/* Host Adapter Initialization Control Block (from host) */
@@ -455,7 +486,8 @@ struct addr_ctrl_blk {
455 uint8_t res0; /* 07 */ 486 uint8_t res0; /* 07 */
456 uint16_t eth_mtu_size; /* 08-09 */ 487 uint16_t eth_mtu_size; /* 08-09 */
457 uint16_t add_fw_options; /* 0A-0B */ 488 uint16_t add_fw_options; /* 0A-0B */
458#define SERIALIZE_TASK_MGMT 0x0400 489#define ADFWOPT_SERIALIZE_TASK_MGMT 0x0400
490#define ADFWOPT_AUTOCONN_DISABLE 0x0002
459 491
460 uint8_t hb_interval; /* 0C */ 492 uint8_t hb_interval; /* 0C */
461 uint8_t inst_num; /* 0D */ 493 uint8_t inst_num; /* 0D */
@@ -473,8 +505,10 @@ struct addr_ctrl_blk {
473 505
474 uint16_t iscsi_opts; /* 30-31 */ 506 uint16_t iscsi_opts; /* 30-31 */
475 uint16_t ipv4_tcp_opts; /* 32-33 */ 507 uint16_t ipv4_tcp_opts; /* 32-33 */
508#define TCPOPT_DHCP_ENABLE 0x0200
476 uint16_t ipv4_ip_opts; /* 34-35 */ 509 uint16_t ipv4_ip_opts; /* 34-35 */
477#define IPOPT_IPv4_PROTOCOL_ENABLE 0x8000 510#define IPOPT_IPV4_PROTOCOL_ENABLE 0x8000
511#define IPOPT_VLAN_TAGGING_ENABLE 0x2000
478 512
479 uint16_t iscsi_max_pdu_size; /* 36-37 */ 513 uint16_t iscsi_max_pdu_size; /* 36-37 */
480 uint8_t ipv4_tos; /* 38 */ 514 uint8_t ipv4_tos; /* 38 */
@@ -526,6 +560,7 @@ struct addr_ctrl_blk {
526 uint16_t ipv6_port; /* 204-205 */ 560 uint16_t ipv6_port; /* 204-205 */
527 uint16_t ipv6_opts; /* 206-207 */ 561 uint16_t ipv6_opts; /* 206-207 */
528#define IPV6_OPT_IPV6_PROTOCOL_ENABLE 0x8000 562#define IPV6_OPT_IPV6_PROTOCOL_ENABLE 0x8000
563#define IPV6_OPT_VLAN_TAGGING_ENABLE 0x2000
529 564
530 uint16_t ipv6_addtl_opts; /* 208-209 */ 565 uint16_t ipv6_addtl_opts; /* 208-209 */
531#define IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE 0x0002 /* Pri ACB 566#define IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE 0x0002 /* Pri ACB
@@ -574,13 +609,105 @@ struct init_fw_ctrl_blk {
574/* struct addr_ctrl_blk sec;*/ 609/* struct addr_ctrl_blk sec;*/
575}; 610};
576 611
612#define PRIMARI_ACB 0
613#define SECONDARY_ACB 1
614
615struct addr_ctrl_blk_def {
616 uint8_t reserved1[1]; /* 00 */
617 uint8_t control; /* 01 */
618 uint8_t reserved2[11]; /* 02-0C */
619 uint8_t inst_num; /* 0D */
620 uint8_t reserved3[34]; /* 0E-2F */
621 uint16_t iscsi_opts; /* 30-31 */
622 uint16_t ipv4_tcp_opts; /* 32-33 */
623 uint16_t ipv4_ip_opts; /* 34-35 */
624 uint16_t iscsi_max_pdu_size; /* 36-37 */
625 uint8_t ipv4_tos; /* 38 */
626 uint8_t ipv4_ttl; /* 39 */
627 uint8_t reserved4[2]; /* 3A-3B */
628 uint16_t def_timeout; /* 3C-3D */
629 uint16_t iscsi_fburst_len; /* 3E-3F */
630 uint8_t reserved5[4]; /* 40-43 */
631 uint16_t iscsi_max_outstnd_r2t; /* 44-45 */
632 uint8_t reserved6[2]; /* 46-47 */
633 uint16_t ipv4_port; /* 48-49 */
634 uint16_t iscsi_max_burst_len; /* 4A-4B */
635 uint8_t reserved7[4]; /* 4C-4F */
636 uint8_t ipv4_addr[4]; /* 50-53 */
637 uint16_t ipv4_vlan_tag; /* 54-55 */
638 uint8_t ipv4_addr_state; /* 56 */
639 uint8_t ipv4_cacheid; /* 57 */
640 uint8_t reserved8[8]; /* 58-5F */
641 uint8_t ipv4_subnet[4]; /* 60-63 */
642 uint8_t reserved9[12]; /* 64-6F */
643 uint8_t ipv4_gw_addr[4]; /* 70-73 */
644 uint8_t reserved10[84]; /* 74-C7 */
645 uint8_t abort_timer; /* C8 */
646 uint8_t ipv4_tcp_wsf; /* C9 */
647 uint8_t reserved11[10]; /* CA-D3 */
648 uint8_t ipv4_dhcp_vid_len; /* D4 */
649 uint8_t ipv4_dhcp_vid[11]; /* D5-DF */
650 uint8_t reserved12[20]; /* E0-F3 */
651 uint8_t ipv4_dhcp_alt_cid_len; /* F4 */
652 uint8_t ipv4_dhcp_alt_cid[11]; /* F5-FF */
653 uint8_t iscsi_name[224]; /* 100-1DF */
654 uint8_t reserved13[32]; /* 1E0-1FF */
655 uint32_t cookie; /* 200-203 */
656 uint16_t ipv6_port; /* 204-205 */
657 uint16_t ipv6_opts; /* 206-207 */
658 uint16_t ipv6_addtl_opts; /* 208-209 */
659 uint16_t ipv6_tcp_opts; /* 20A-20B */
660 uint8_t ipv6_tcp_wsf; /* 20C */
661 uint16_t ipv6_flow_lbl; /* 20D-20F */
662 uint8_t ipv6_dflt_rtr_addr[16]; /* 210-21F */
663 uint16_t ipv6_vlan_tag; /* 220-221 */
664 uint8_t ipv6_lnk_lcl_addr_state; /* 222 */
665 uint8_t ipv6_addr0_state; /* 223 */
666 uint8_t ipv6_addr1_state; /* 224 */
667 uint8_t ipv6_dflt_rtr_state; /* 225 */
668 uint8_t ipv6_traffic_class; /* 226 */
669 uint8_t ipv6_hop_limit; /* 227 */
670 uint8_t ipv6_if_id[8]; /* 228-22F */
671 uint8_t ipv6_addr0[16]; /* 230-23F */
672 uint8_t ipv6_addr1[16]; /* 240-24F */
673 uint32_t ipv6_nd_reach_time; /* 250-253 */
674 uint32_t ipv6_nd_rexmit_timer; /* 254-257 */
675 uint32_t ipv6_nd_stale_timeout; /* 258-25B */
676 uint8_t ipv6_dup_addr_detect_count; /* 25C */
677 uint8_t ipv6_cache_id; /* 25D */
678 uint8_t reserved14[18]; /* 25E-26F */
679 uint32_t ipv6_gw_advrt_mtu; /* 270-273 */
680 uint8_t reserved15[140]; /* 274-2FF */
681};
682
577/*************************************************************************/ 683/*************************************************************************/
578 684
685#define MAX_CHAP_ENTRIES_40XX 128
686#define MAX_CHAP_ENTRIES_82XX 1024
687#define MAX_RESRV_CHAP_IDX 3
688#define FLASH_CHAP_OFFSET 0x06000000
689
690struct ql4_chap_table {
691 uint16_t link;
692 uint8_t flags;
693 uint8_t secret_len;
694#define MIN_CHAP_SECRET_LEN 12
695#define MAX_CHAP_SECRET_LEN 100
696 uint8_t secret[MAX_CHAP_SECRET_LEN];
697#define MAX_CHAP_NAME_LEN 256
698 uint8_t name[MAX_CHAP_NAME_LEN];
699 uint16_t reserved;
700#define CHAP_VALID_COOKIE 0x4092
701#define CHAP_INVALID_COOKIE 0xFFEE
702 uint16_t cookie;
703};
704
579struct dev_db_entry { 705struct dev_db_entry {
580 uint16_t options; /* 00-01 */ 706 uint16_t options; /* 00-01 */
581#define DDB_OPT_DISC_SESSION 0x10 707#define DDB_OPT_DISC_SESSION 0x10
582#define DDB_OPT_TARGET 0x02 /* device is a target */ 708#define DDB_OPT_TARGET 0x02 /* device is a target */
583#define DDB_OPT_IPV6_DEVICE 0x100 709#define DDB_OPT_IPV6_DEVICE 0x100
710#define DDB_OPT_AUTO_SENDTGTS_DISABLE 0x40
584#define DDB_OPT_IPV6_NULL_LINK_LOCAL 0x800 /* post connection */ 711#define DDB_OPT_IPV6_NULL_LINK_LOCAL 0x800 /* post connection */
585#define DDB_OPT_IPV6_FW_DEFINED_LINK_LOCAL 0x800 /* pre connection */ 712#define DDB_OPT_IPV6_FW_DEFINED_LINK_LOCAL 0x800 /* pre connection */
586 713
@@ -591,6 +718,7 @@ struct dev_db_entry {
591 uint16_t tcp_options; /* 0A-0B */ 718 uint16_t tcp_options; /* 0A-0B */
592 uint16_t ip_options; /* 0C-0D */ 719 uint16_t ip_options; /* 0C-0D */
593 uint16_t iscsi_max_rcv_data_seg_len; /* 0E-0F */ 720 uint16_t iscsi_max_rcv_data_seg_len; /* 0E-0F */
721#define BYTE_UNITS 512
594 uint32_t res1; /* 10-13 */ 722 uint32_t res1; /* 10-13 */
595 uint16_t iscsi_max_snd_data_seg_len; /* 14-15 */ 723 uint16_t iscsi_max_snd_data_seg_len; /* 14-15 */
596 uint16_t iscsi_first_burst_len; /* 16-17 */ 724 uint16_t iscsi_first_burst_len; /* 16-17 */
@@ -627,7 +755,10 @@ struct dev_db_entry {
627 uint8_t tcp_rcv_wsf; /* 1C7 */ 755 uint8_t tcp_rcv_wsf; /* 1C7 */
628 uint32_t stat_sn; /* 1C8-1CB */ 756 uint32_t stat_sn; /* 1C8-1CB */
629 uint32_t exp_stat_sn; /* 1CC-1CF */ 757 uint32_t exp_stat_sn; /* 1CC-1CF */
630 uint8_t res6[0x30]; /* 1D0-1FF */ 758 uint8_t res6[0x2b]; /* 1D0-1FB */
759#define DDB_VALID_COOKIE 0x9034
760 uint16_t cookie; /* 1FC-1FD */
761 uint16_t len; /* 1FE-1FF */
631}; 762};
632 763
633/*************************************************************************/ 764/*************************************************************************/
@@ -639,6 +770,14 @@ struct dev_db_entry {
639#define FLASH_EOF_OFFSET (FLASH_DEFAULTBLOCKSIZE-8) /* 4 bytes 770#define FLASH_EOF_OFFSET (FLASH_DEFAULTBLOCKSIZE-8) /* 4 bytes
640 * for EOF 771 * for EOF
641 * signature */ 772 * signature */
773#define FLASH_RAW_ACCESS_ADDR 0x8e000000
774
775#define BOOT_PARAM_OFFSET_PORT0 0x3b0
776#define BOOT_PARAM_OFFSET_PORT1 0x7b0
777
778#define FLASH_OFFSET_DB_INFO 0x05000000
779#define FLASH_OFFSET_DB_END (FLASH_OFFSET_DB_INFO + 0x7fff)
780
642 781
643struct sys_info_phys_addr { 782struct sys_info_phys_addr {
644 uint8_t address[6]; /* 00-05 */ 783 uint8_t address[6]; /* 00-05 */
@@ -774,6 +913,7 @@ struct qla4_header {
774 913
775 uint8_t entryStatus; 914 uint8_t entryStatus;
776 uint8_t systemDefined; 915 uint8_t systemDefined;
916#define SD_ISCSI_PDU 0x01
777 uint8_t entryCount; 917 uint8_t entryCount;
778 918
779 /* SystemDefined definition */ 919 /* SystemDefined definition */
@@ -931,21 +1071,22 @@ struct passthru0 {
931 struct qla4_header hdr; /* 00-03 */ 1071 struct qla4_header hdr; /* 00-03 */
932 uint32_t handle; /* 04-07 */ 1072 uint32_t handle; /* 04-07 */
933 uint16_t target; /* 08-09 */ 1073 uint16_t target; /* 08-09 */
934 uint16_t connectionID; /* 0A-0B */ 1074 uint16_t connection_id; /* 0A-0B */
935#define ISNS_DEFAULT_SERVER_CONN_ID ((uint16_t)0x8000) 1075#define ISNS_DEFAULT_SERVER_CONN_ID ((uint16_t)0x8000)
936 1076
937 uint16_t controlFlags; /* 0C-0D */ 1077 uint16_t control_flags; /* 0C-0D */
938#define PT_FLAG_ETHERNET_FRAME 0x8000 1078#define PT_FLAG_ETHERNET_FRAME 0x8000
939#define PT_FLAG_ISNS_PDU 0x8000 1079#define PT_FLAG_ISNS_PDU 0x8000
940#define PT_FLAG_SEND_BUFFER 0x0200 1080#define PT_FLAG_SEND_BUFFER 0x0200
941#define PT_FLAG_WAIT_4_RESPONSE 0x0100 1081#define PT_FLAG_WAIT_4_RESPONSE 0x0100
1082#define PT_FLAG_ISCSI_PDU 0x1000
942 1083
943 uint16_t timeout; /* 0E-0F */ 1084 uint16_t timeout; /* 0E-0F */
944#define PT_DEFAULT_TIMEOUT 30 /* seconds */ 1085#define PT_DEFAULT_TIMEOUT 30 /* seconds */
945 1086
946 struct data_seg_a64 outDataSeg64; /* 10-1B */ 1087 struct data_seg_a64 out_dsd; /* 10-1B */
947 uint32_t res1; /* 1C-1F */ 1088 uint32_t res1; /* 1C-1F */
948 struct data_seg_a64 inDataSeg64; /* 20-2B */ 1089 struct data_seg_a64 in_dsd; /* 20-2B */
949 uint8_t res2[20]; /* 2C-3F */ 1090 uint8_t res2[20]; /* 2C-3F */
950}; 1091};
951 1092
@@ -978,4 +1119,43 @@ struct response {
978#define RESPONSE_PROCESSED 0xDEADDEAD /* Signature */ 1119#define RESPONSE_PROCESSED 0xDEADDEAD /* Signature */
979}; 1120};
980 1121
1122struct ql_iscsi_stats {
1123 uint8_t reserved1[656]; /* 0000-028F */
1124 uint32_t tx_cmd_pdu; /* 0290-0293 */
1125 uint32_t tx_resp_pdu; /* 0294-0297 */
1126 uint32_t rx_cmd_pdu; /* 0298-029B */
1127 uint32_t rx_resp_pdu; /* 029C-029F */
1128
1129 uint64_t tx_data_octets; /* 02A0-02A7 */
1130 uint64_t rx_data_octets; /* 02A8-02AF */
1131
1132 uint32_t hdr_digest_err; /* 02B0-02B3 */
1133 uint32_t data_digest_err; /* 02B4-02B7 */
1134 uint32_t conn_timeout_err; /* 02B8-02BB */
1135 uint32_t framing_err; /* 02BC-02BF */
1136
1137 uint32_t tx_nopout_pdus; /* 02C0-02C3 */
1138 uint32_t tx_scsi_cmd_pdus; /* 02C4-02C7 */
1139 uint32_t tx_tmf_cmd_pdus; /* 02C8-02CB */
1140 uint32_t tx_login_cmd_pdus; /* 02CC-02CF */
1141 uint32_t tx_text_cmd_pdus; /* 02D0-02D3 */
1142 uint32_t tx_scsi_write_pdus; /* 02D4-02D7 */
1143 uint32_t tx_logout_cmd_pdus; /* 02D8-02DB */
1144 uint32_t tx_snack_req_pdus; /* 02DC-02DF */
1145
1146 uint32_t rx_nopin_pdus; /* 02E0-02E3 */
1147 uint32_t rx_scsi_resp_pdus; /* 02E4-02E7 */
1148 uint32_t rx_tmf_resp_pdus; /* 02E8-02EB */
1149 uint32_t rx_login_resp_pdus; /* 02EC-02EF */
1150 uint32_t rx_text_resp_pdus; /* 02F0-02F3 */
1151 uint32_t rx_scsi_read_pdus; /* 02F4-02F7 */
1152 uint32_t rx_logout_resp_pdus; /* 02F8-02FB */
1153
1154 uint32_t rx_r2t_pdus; /* 02FC-02FF */
1155 uint32_t rx_async_pdus; /* 0300-0303 */
1156 uint32_t rx_reject_pdus; /* 0304-0307 */
1157
1158 uint8_t reserved2[264]; /* 0308-040F */
1159};
1160
981#endif /* _QLA4X_FW_H */ 1161#endif /* _QLA4X_FW_H */
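
The CHAP additions above give enough layout to locate an entry in the
driver's cached table: entries are an array of struct ql4_chap_table,
bounded by MAX_CHAP_ENTRIES_40XX or MAX_CHAP_ENTRIES_82XX, and a slot
is live only when its cookie reads CHAP_VALID_COOKIE. A sketch of that
lookup, assuming a little-endian view of the cached copy (the walk
itself is illustrative, not driver code):

	/* Sketch: find CHAP entry idx in a cached table and validate its
	 * cookie, using the layout and cookie value from the hunk above. */
	#include <stddef.h>
	#include <stdint.h>

	#define MAX_CHAP_SECRET_LEN	100
	#define MAX_CHAP_NAME_LEN	256
	#define CHAP_VALID_COOKIE	0x4092

	struct ql4_chap_table {
		uint16_t link;
		uint8_t  flags;
		uint8_t  secret_len;
		uint8_t  secret[MAX_CHAP_SECRET_LEN];
		uint8_t  name[MAX_CHAP_NAME_LEN];
		uint16_t reserved;
		uint16_t cookie;
	};

	/* Return entry idx if it carries the valid cookie, else NULL. */
	static struct ql4_chap_table *chap_entry(void *chap_list, int idx,
						 int max_entries)
	{
		struct ql4_chap_table *e = chap_list;

		if (idx < 0 || idx >= max_entries)
			return NULL;
		e += idx;
		return e->cookie == CHAP_VALID_COOKIE ? e : NULL;
	}
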
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index a53a256c1f8..160db9d5ea2 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -12,20 +12,15 @@ struct iscsi_cls_conn;
12 12
13int qla4xxx_hw_reset(struct scsi_qla_host *ha); 13int qla4xxx_hw_reset(struct scsi_qla_host *ha);
14int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a); 14int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
15int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port);
16int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb); 15int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb);
17int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, 16int qla4xxx_initialize_adapter(struct scsi_qla_host *ha);
18 uint8_t renew_ddb_list);
19int qla4xxx_soft_reset(struct scsi_qla_host *ha); 17int qla4xxx_soft_reset(struct scsi_qla_host *ha);
20irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id); 18irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id);
21 19
22void qla4xxx_free_ddb_list(struct scsi_qla_host *ha);
23void qla4xxx_free_ddb(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry); 20void qla4xxx_free_ddb(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry);
24void qla4xxx_process_aen(struct scsi_qla_host *ha, uint8_t process_aen); 21void qla4xxx_process_aen(struct scsi_qla_host *ha, uint8_t process_aen);
25 22
26int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host *ha); 23int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host *ha);
27int qla4xxx_relogin_device(struct scsi_qla_host *ha,
28 struct ddb_entry *ddb_entry);
29int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb); 24int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb);
30int qla4xxx_reset_lun(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry, 25int qla4xxx_reset_lun(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry,
31 int lun); 26 int lun);
@@ -51,15 +46,24 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
			    uint16_t *connection_id);
 
 int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
-			  dma_addr_t fw_ddb_entry_dma);
-
-void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
-				 struct ddb_entry *ddb_entry);
+			  dma_addr_t fw_ddb_entry_dma, uint32_t *mbx_sts);
+uint8_t qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+			 uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma);
+int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha,
+				   uint16_t fw_ddb_index,
+				   uint16_t connection_id,
+				   uint16_t option);
+int qla4xxx_disable_acb(struct scsi_qla_host *ha);
+int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+		    uint32_t *mbox_sts, dma_addr_t acb_dma);
+int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma,
+		    uint32_t acb_type, uint32_t len);
+int qla4xxx_get_ip_state(struct scsi_qla_host *ha, uint32_t acb_idx,
+			 uint32_t ip_idx, uint32_t *sts);
+void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session);
 u16 rd_nvram_word(struct scsi_qla_host *ha, int offset);
+u8 rd_nvram_byte(struct scsi_qla_host *ha, int offset);
 void qla4xxx_get_crash_record(struct scsi_qla_host *ha);
-struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha);
-int qla4xxx_add_sess(struct ddb_entry *);
-void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry);
 int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host *ha);
 int qla4xxx_about_firmware(struct scsi_qla_host *ha);
 void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha,
@@ -68,14 +72,13 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha);
 void qla4xxx_srb_compl(struct kref *ref);
 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
		uint32_t index);
-int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host *ha);
 int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
		uint32_t state, uint32_t conn_error);
 void qla4xxx_dump_buffer(void *b, uint32_t size);
 int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
	struct ddb_entry *ddb_entry, int lun, uint16_t mrkr_mod);
-int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err);
-
+int qla4xxx_set_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr,
+		      uint32_t offset, uint32_t length, uint32_t options);
 int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
	uint8_t outCount, uint32_t *mbx_cmd, uint32_t *mbx_sts);
 
@@ -95,6 +98,11 @@ void qla4xxx_wake_dpc(struct scsi_qla_host *ha);
 void qla4xxx_get_conn_event_log(struct scsi_qla_host *ha);
 void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha);
 void qla4xxx_dump_registers(struct scsi_qla_host *ha);
+uint8_t qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
+				  uint32_t *mbox_cmd,
+				  uint32_t *mbox_sts,
+				  struct addr_ctrl_blk *init_fw_cb,
+				  dma_addr_t init_fw_cb_dma);
 
 void qla4_8xxx_pci_config(struct scsi_qla_host *);
 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha);
@@ -134,6 +142,37 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha);
 void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha);
 void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha);
 void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha);
+int qla4xxx_conn_open(struct scsi_qla_host *ha, uint16_t fw_ddb_index);
+int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
+			       struct ddb_entry *ddb_entry,
+			       struct iscsi_cls_conn *cls_conn,
+			       uint32_t *mbx_sts);
+int qla4xxx_session_logout_ddb(struct scsi_qla_host *ha,
+			       struct ddb_entry *ddb_entry, int options);
+int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+			  uint32_t *mbx_sts);
+int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index);
+int qla4xxx_send_passthru0(struct iscsi_task *task);
+int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
+			  uint16_t stats_size, dma_addr_t stats_dma);
+void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
+				       struct ddb_entry *ddb_entry);
+int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
+			    struct dev_db_entry *fw_ddb_entry,
+			    dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
+int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username,
+		     char *password, uint16_t idx);
+int qla4xxx_get_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
+		      uint32_t offset, uint32_t size);
+int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
+		      uint32_t offset, uint32_t size);
+int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
+				     uint32_t region, uint32_t field0,
+				     uint32_t field1);
+
+/* BSG Functions */
+int qla4xxx_bsg_request(struct bsg_job *bsg_job);
+int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
 
 extern int ql4xextended_error_logging;
 extern int ql4xdontresethba;
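
The prototype churn above tracks two themes of this series: DDB bookkeeping moves from driver-private lists to the iSCSI transport class (qla4xxx_mark_device_missing is now keyed by an iscsi_cls_session), and most mailbox helpers grow an optional mailbox-status out-parameter. A minimal, userspace-compilable sketch of that status-plus-*mbx_sts convention follows; it is illustrative only, and the constants are stand-ins rather than the driver's values:

/* Sketch only -- mimics the "driver status + optional mailbox 0" pattern. */
#include <stdint.h>
#include <stdio.h>

#define QLA_SUCCESS 0
#define QLA_ERROR   1
#define MBOX_STS_COMMAND_COMPLETE 0x4000	/* stand-in value */

static int example_mbx_op(uint32_t fw_ddb_index, uint32_t *mbx_sts)
{
	/* Pretend this is mailbox register 0 after command completion. */
	uint32_t mbox0 = (fw_ddb_index < 64) ? MBOX_STS_COMMAND_COMPLETE
					     : 0x4005;

	if (mbx_sts)	/* callers that don't care may pass NULL */
		*mbx_sts = mbox0;

	return (mbox0 == MBOX_STS_COMMAND_COMPLETE) ? QLA_SUCCESS : QLA_ERROR;
}

int main(void)
{
	uint32_t sts;

	if (example_mbx_op(3, &sts) == QLA_SUCCESS)
		printf("ddb set, mbx0=0x%x\n", sts);
	return 0;
}

Returning the driver-level status while optionally exposing mailbox register 0 appears to be the intent of the new qla4xxx_set_ddb_entry() signature: callers interested only in success can pass NULL, while login paths can inspect the raw firmware status.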
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 42ed5db2d53..3075fbaef55 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -11,9 +11,6 @@
 #include "ql4_dbg.h"
 #include "ql4_inline.h"
 
-static struct ddb_entry *qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
-					   uint32_t fw_ddb_index);
-
 static void ql4xxx_set_mac_number(struct scsi_qla_host *ha)
 {
	uint32_t value;
@@ -48,41 +45,15 @@ static void ql4xxx_set_mac_number(struct scsi_qla_host *ha)
  * @ha: pointer to host adapter structure.
  * @ddb_entry: pointer to device database entry
  *
- * This routine deallocates and unlinks the specified ddb_entry from the
- * adapter's
+ * This routine marks a DDB entry INVALID
  **/
 void qla4xxx_free_ddb(struct scsi_qla_host *ha,
		       struct ddb_entry *ddb_entry)
 {
-	/* Remove device entry from list */
-	list_del_init(&ddb_entry->list);
-
 	/* Remove device pointer from index mapping arrays */
 	ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] =
		(struct ddb_entry *) INVALID_ENTRY;
 	ha->tot_ddbs--;
-
-	/* Free memory and scsi-ml struct for device entry */
-	qla4xxx_destroy_sess(ddb_entry);
-}
-
-/**
- * qla4xxx_free_ddb_list - deallocate all ddbs
- * @ha: pointer to host adapter structure.
- *
- * This routine deallocates and removes all devices on the specified adapter.
- **/
-void qla4xxx_free_ddb_list(struct scsi_qla_host *ha)
-{
-	struct list_head *ptr;
-	struct ddb_entry *ddb_entry;
-
-	while (!list_empty(&ha->ddb_list)) {
-		ptr = ha->ddb_list.next;
-		/* Free memory for device entry and remove */
-		ddb_entry = list_entry(ptr, struct ddb_entry, list);
-		qla4xxx_free_ddb(ha, ddb_entry);
-	}
 }
 
 /**
@@ -236,38 +207,44 @@ qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
		       FW_ADDSTATE_DHCPv4_LEASE_ACQUIRED) == 0)) {
		ipv4_wait = 1;
	}
-	if (((ha->ipv6_addl_options &
+	if (((ha->ip_config.ipv6_addl_options &
	      IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) != 0) &&
-	    ((ha->ipv6_link_local_state == IP_ADDRSTATE_ACQUIRING) ||
-	     (ha->ipv6_addr0_state == IP_ADDRSTATE_ACQUIRING) ||
-	     (ha->ipv6_addr1_state == IP_ADDRSTATE_ACQUIRING))) {
+	    ((ha->ip_config.ipv6_link_local_state ==
+	      IP_ADDRSTATE_ACQUIRING) ||
+	     (ha->ip_config.ipv6_addr0_state ==
+	      IP_ADDRSTATE_ACQUIRING) ||
+	     (ha->ip_config.ipv6_addr1_state ==
+	      IP_ADDRSTATE_ACQUIRING))) {
 
		ipv6_wait = 1;
 
-		if ((ha->ipv6_link_local_state ==
+		if ((ha->ip_config.ipv6_link_local_state ==
		     IP_ADDRSTATE_PREFERRED) ||
-		    (ha->ipv6_addr0_state == IP_ADDRSTATE_PREFERRED) ||
-		    (ha->ipv6_addr1_state == IP_ADDRSTATE_PREFERRED)) {
+		    (ha->ip_config.ipv6_addr0_state ==
+		     IP_ADDRSTATE_PREFERRED) ||
+		    (ha->ip_config.ipv6_addr1_state ==
+		     IP_ADDRSTATE_PREFERRED)) {
			DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
				      "Preferred IP configured."
				      " Don't wait!\n", ha->host_no,
				      __func__));
			ipv6_wait = 0;
		}
-		if (memcmp(&ha->ipv6_default_router_addr, ip_address,
-			   IPv6_ADDR_LEN) == 0) {
+		if (memcmp(&ha->ip_config.ipv6_default_router_addr,
+			   ip_address, IPv6_ADDR_LEN) == 0) {
			DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
				      "No Router configured. "
				      "Don't wait!\n", ha->host_no,
				      __func__));
			ipv6_wait = 0;
		}
-		if ((ha->ipv6_default_router_state ==
+		if ((ha->ip_config.ipv6_default_router_state ==
		     IPV6_RTRSTATE_MANUAL) &&
-		    (ha->ipv6_link_local_state ==
+		    (ha->ip_config.ipv6_link_local_state ==
		     IP_ADDRSTATE_TENTATIVE) &&
-		    (memcmp(&ha->ipv6_link_local_addr,
-			    &ha->ipv6_default_router_addr, 4) == 0)) {
+		    (memcmp(&ha->ip_config.ipv6_link_local_addr,
+			    &ha->ip_config.ipv6_default_router_addr, 4) ==
+			    0)) {
			DEBUG2(printk("scsi%ld: %s: LinkLocal Router & "
				      "IP configured. Don't wait!\n",
				      ha->host_no, __func__));
@@ -279,11 +256,14 @@ qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
			       "IP(s) \"", ha->host_no, __func__));
		if (ipv4_wait)
			DEBUG2(printk("IPv4 "));
-		if (ha->ipv6_link_local_state == IP_ADDRSTATE_ACQUIRING)
+		if (ha->ip_config.ipv6_link_local_state ==
+		    IP_ADDRSTATE_ACQUIRING)
			DEBUG2(printk("IPv6LinkLocal "));
-		if (ha->ipv6_addr0_state == IP_ADDRSTATE_ACQUIRING)
+		if (ha->ip_config.ipv6_addr0_state ==
+		    IP_ADDRSTATE_ACQUIRING)
			DEBUG2(printk("IPv6Addr0 "));
-		if (ha->ipv6_addr1_state == IP_ADDRSTATE_ACQUIRING)
+		if (ha->ip_config.ipv6_addr1_state ==
+		    IP_ADDRSTATE_ACQUIRING)
			DEBUG2(printk("IPv6Addr1 "));
		DEBUG2(printk("\"\n"));
	}
@@ -466,486 +446,19 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
 	return qla4xxx_get_firmware_status(ha);
 }
 
-static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
-					       uint32_t fw_ddb_index,
-					       uint32_t *new_tgt)
-{
-	struct dev_db_entry *fw_ddb_entry = NULL;
-	dma_addr_t fw_ddb_entry_dma;
-	struct ddb_entry *ddb_entry = NULL;
-	int found = 0;
-	uint32_t device_state;
-
-	*new_tgt = 0;
-	/* Make sure the dma buffer is valid */
-	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
-					  sizeof(*fw_ddb_entry),
-					  &fw_ddb_entry_dma, GFP_KERNEL);
-	if (fw_ddb_entry == NULL) {
-		DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
-			      ha->host_no, __func__));
-		goto exit_get_ddb_entry_no_free;
-	}
-
-	if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
-				    fw_ddb_entry_dma, NULL, NULL,
-				    &device_state, NULL, NULL, NULL) ==
-	    QLA_ERROR) {
-		DEBUG2(printk("scsi%ld: %s: failed get_ddb_entry for "
-			      "fw_ddb_index %d\n", ha->host_no, __func__,
-			      fw_ddb_index));
-		goto exit_get_ddb_entry;
-	}
-
-	/* Allocate DDB if not already allocated. */
-	DEBUG2(printk("scsi%ld: %s: Looking for ddb[%d]\n", ha->host_no,
-		      __func__, fw_ddb_index));
-	list_for_each_entry(ddb_entry, &ha->ddb_list, list) {
-		if ((memcmp(ddb_entry->iscsi_name, fw_ddb_entry->iscsi_name,
-			    ISCSI_NAME_SIZE) == 0) &&
-		    (ddb_entry->tpgt ==
-		     le32_to_cpu(fw_ddb_entry->tgt_portal_grp)) &&
-		    (memcmp(ddb_entry->isid, fw_ddb_entry->isid,
-			    sizeof(ddb_entry->isid)) == 0)) {
-			found++;
-			break;
-		}
-	}
-
-	/* if not found allocate new ddb */
-	if (!found) {
-		DEBUG2(printk("scsi%ld: %s: ddb[%d] not found - allocating "
-			      "new ddb\n", ha->host_no, __func__,
-			      fw_ddb_index));
-		*new_tgt = 1;
-		ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index);
-	}
-
-exit_get_ddb_entry:
-	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
-			  fw_ddb_entry_dma);
-
-exit_get_ddb_entry_no_free:
-	return ddb_entry;
-}
-
-/**
- * qla4xxx_update_ddb_entry - update driver's internal ddb
- * @ha: pointer to host adapter structure.
- * @ddb_entry: pointer to device database structure to be filled
- * @fw_ddb_index: index of the ddb entry in fw ddb table
- *
- * This routine updates the driver's internal device database entry
- * with information retrieved from the firmware's device database
- * entry for the specified device. The ddb_entry->fw_ddb_index field
- * must be initialized prior to calling this routine
- *
- **/
-static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
-				    struct ddb_entry *ddb_entry,
-				    uint32_t fw_ddb_index)
-{
-	struct dev_db_entry *fw_ddb_entry = NULL;
-	dma_addr_t fw_ddb_entry_dma;
-	int status = QLA_ERROR;
-	uint32_t conn_err;
-
-	if (ddb_entry == NULL) {
-		DEBUG2(printk("scsi%ld: %s: ddb_entry is NULL\n", ha->host_no,
-			      __func__));
-
-		goto exit_update_ddb_no_free;
-	}
-
-	/* Make sure the dma buffer is valid */
-	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
-					  sizeof(*fw_ddb_entry),
-					  &fw_ddb_entry_dma, GFP_KERNEL);
-	if (fw_ddb_entry == NULL) {
-		DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
-			      ha->host_no, __func__));
-
-		goto exit_update_ddb_no_free;
-	}
-
-	if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
-				    fw_ddb_entry_dma, NULL, NULL,
-				    &ddb_entry->fw_ddb_device_state, &conn_err,
-				    &ddb_entry->tcp_source_port_num,
-				    &ddb_entry->connection_id) ==
-	    QLA_ERROR) {
-		DEBUG2(printk("scsi%ld: %s: failed get_ddb_entry for "
-			      "fw_ddb_index %d\n", ha->host_no, __func__,
-			      fw_ddb_index));
-
-		goto exit_update_ddb;
-	}
-
-	status = QLA_SUCCESS;
-	ddb_entry->options = le16_to_cpu(fw_ddb_entry->options);
-	ddb_entry->target_session_id = le16_to_cpu(fw_ddb_entry->tsid);
-	ddb_entry->task_mgmt_timeout =
-		le16_to_cpu(fw_ddb_entry->def_timeout);
-	ddb_entry->CmdSn = 0;
-	ddb_entry->exe_throttle = le16_to_cpu(fw_ddb_entry->exec_throttle);
-	ddb_entry->default_relogin_timeout =
-		le16_to_cpu(fw_ddb_entry->def_timeout);
-	ddb_entry->default_time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
-
-	/* Update index in case it changed */
-	ddb_entry->fw_ddb_index = fw_ddb_index;
-	ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
-
-	ddb_entry->port = le16_to_cpu(fw_ddb_entry->port);
-	ddb_entry->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
-	memcpy(ddb_entry->isid, fw_ddb_entry->isid, sizeof(ddb_entry->isid));
-
-	memcpy(&ddb_entry->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
-	       min(sizeof(ddb_entry->iscsi_name),
-	       sizeof(fw_ddb_entry->iscsi_name)));
-	memcpy(&ddb_entry->iscsi_alias[0], &fw_ddb_entry->iscsi_alias[0],
-	       min(sizeof(ddb_entry->iscsi_alias),
-	       sizeof(fw_ddb_entry->iscsi_alias)));
-	memcpy(&ddb_entry->ip_addr[0], &fw_ddb_entry->ip_addr[0],
-	       min(sizeof(ddb_entry->ip_addr), sizeof(fw_ddb_entry->ip_addr)));
-
-	ddb_entry->iscsi_max_burst_len = fw_ddb_entry->iscsi_max_burst_len;
-	ddb_entry->iscsi_max_outsnd_r2t = fw_ddb_entry->iscsi_max_outsnd_r2t;
-	ddb_entry->iscsi_first_burst_len = fw_ddb_entry->iscsi_first_burst_len;
-	ddb_entry->iscsi_max_rcv_data_seg_len =
-		fw_ddb_entry->iscsi_max_rcv_data_seg_len;
-	ddb_entry->iscsi_max_snd_data_seg_len =
-		fw_ddb_entry->iscsi_max_snd_data_seg_len;
-
-	if (ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
-		memcpy(&ddb_entry->remote_ipv6_addr,
-		       fw_ddb_entry->ip_addr,
-		       min(sizeof(ddb_entry->remote_ipv6_addr),
-			   sizeof(fw_ddb_entry->ip_addr)));
-		memcpy(&ddb_entry->link_local_ipv6_addr,
-		       fw_ddb_entry->link_local_ipv6_addr,
-		       min(sizeof(ddb_entry->link_local_ipv6_addr),
-			   sizeof(fw_ddb_entry->link_local_ipv6_addr)));
-
-		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: DDB[%d] State %04x"
-				  " ConnErr %08x IP %pI6 "
-				  ":%04d \"%s\"\n",
-				  __func__, fw_ddb_index,
-				  ddb_entry->fw_ddb_device_state,
-				  conn_err, fw_ddb_entry->ip_addr,
-				  le16_to_cpu(fw_ddb_entry->port),
-				  fw_ddb_entry->iscsi_name));
-	} else
-		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: DDB[%d] State %04x"
-				  " ConnErr %08x IP %pI4 "
-				  ":%04d \"%s\"\n",
-				  __func__, fw_ddb_index,
-				  ddb_entry->fw_ddb_device_state,
-				  conn_err, fw_ddb_entry->ip_addr,
-				  le16_to_cpu(fw_ddb_entry->port),
-				  fw_ddb_entry->iscsi_name));
-exit_update_ddb:
-	if (fw_ddb_entry)
-		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
-				  fw_ddb_entry, fw_ddb_entry_dma);
-
-exit_update_ddb_no_free:
-	return status;
-}
-
-/**
- * qla4xxx_alloc_ddb - allocate device database entry
- * @ha: Pointer to host adapter structure.
- * @fw_ddb_index: Firmware's device database index
- *
- * This routine allocates a ddb_entry, initializes some values, and
- * inserts it into the ddb list.
- **/
-static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
-					    uint32_t fw_ddb_index)
-{
-	struct ddb_entry *ddb_entry;
-
-	DEBUG2(printk("scsi%ld: %s: fw_ddb_index [%d]\n", ha->host_no,
-		      __func__, fw_ddb_index));
-
-	ddb_entry = qla4xxx_alloc_sess(ha);
-	if (ddb_entry == NULL) {
-		DEBUG2(printk("scsi%ld: %s: Unable to allocate memory "
-			      "to add fw_ddb_index [%d]\n",
-			      ha->host_no, __func__, fw_ddb_index));
-		return ddb_entry;
-	}
-
-	ddb_entry->fw_ddb_index = fw_ddb_index;
-	atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
-	atomic_set(&ddb_entry->relogin_timer, 0);
-	atomic_set(&ddb_entry->relogin_retry_count, 0);
-	atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
-	list_add_tail(&ddb_entry->list, &ha->ddb_list);
-	ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
-	ha->tot_ddbs++;
-
-	return ddb_entry;
-}
-
-/**
- * qla4_is_relogin_allowed - Are we allowed to login?
- * @ha: Pointer to host adapter structure.
- * @conn_err: Last connection error associated with the ddb
- *
- * This routine tests the given connection error to determine if
- * we are allowed to login.
- **/
-int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err)
-{
-	uint32_t err_code, login_rsp_sts_class;
-	int relogin = 1;
-
-	err_code = ((conn_err & 0x00ff0000) >> 16);
-	login_rsp_sts_class = ((conn_err & 0x0000ff00) >> 8);
-	if (err_code == 0x1c || err_code == 0x06) {
-		DEBUG2(ql4_printk(KERN_INFO, ha,
-				  ": conn_err=0x%08x, send target completed"
-				  " or access denied failure\n", conn_err));
-		relogin = 0;
-	}
-	if ((err_code == 0x08) && (login_rsp_sts_class == 0x02)) {
-		/* Login Response PDU returned an error.
-		   Login Response Status in Error Code Detail
-		   indicates login should not be retried.*/
-		DEBUG2(ql4_printk(KERN_INFO, ha,
-				  ": conn_err=0x%08x, do not retry relogin\n",
-				  conn_err));
-		relogin = 0;
-	}
-
-	return relogin;
-}
-
-static void qla4xxx_flush_AENS(struct scsi_qla_host *ha)
-{
-	unsigned long wtime;
-
-	/* Flush the 0x8014 AEN from the firmware as a result of
-	 * Auto connect. We are basically doing get_firmware_ddb()
-	 * to determine whether we need to log back in or not.
-	 * Trying to do a set ddb before we have processed 0x8014
-	 * will result in another set_ddb() for the same ddb. In other
-	 * words there will be stale entries in the aen_q.
-	 */
-	wtime = jiffies + (2 * HZ);
-	do {
-		if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS)
-			if (ha->firmware_state & (BIT_2 | BIT_0))
-				return;
-
-		if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
-			qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
-
-		msleep(1000);
-	} while (!time_after_eq(jiffies, wtime));
-}
-
-/**
- * qla4xxx_build_ddb_list - builds driver ddb list
- * @ha: Pointer to host adapter structure.
- *
- * This routine searches for all valid firmware ddb entries and builds
- * an internal ddb list. Ddbs that are considered valid are those with
- * a device state of SESSION_ACTIVE.
- * A relogin (set_ddb) is issued for DDBs that are not online.
- **/
-static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
-{
-	int status = QLA_ERROR;
-	uint32_t fw_ddb_index = 0;
-	uint32_t next_fw_ddb_index = 0;
-	uint32_t ddb_state;
-	uint32_t conn_err;
-	struct ddb_entry *ddb_entry;
-	struct dev_db_entry *fw_ddb_entry = NULL;
-	dma_addr_t fw_ddb_entry_dma;
-	uint32_t ipv6_device;
-	uint32_t new_tgt;
-
-	qla4xxx_flush_AENS(ha);
-
-	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
-					  &fw_ddb_entry_dma, GFP_KERNEL);
-	if (fw_ddb_entry == NULL) {
-		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: DMA alloc failed\n",
-				  __func__));
-
-		goto exit_build_ddb_list_no_free;
-	}
-
-	ql4_printk(KERN_INFO, ha, "Initializing DDBs ...\n");
-	for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES;
-	     fw_ddb_index = next_fw_ddb_index) {
-		/* First, let's see if a device exists here */
-		if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
-					    0, NULL, &next_fw_ddb_index,
-					    &ddb_state, &conn_err,
-					    NULL, NULL) ==
-		    QLA_ERROR) {
-			DEBUG2(printk("scsi%ld: %s: get_ddb_entry, "
-				      "fw_ddb_index %d failed", ha->host_no,
-				      __func__, fw_ddb_index));
-			goto exit_build_ddb_list;
-		}
-
-		DEBUG2(printk("scsi%ld: %s: Getting DDB[%d] ddbstate=0x%x, "
-			      "next_fw_ddb_index=%d.\n", ha->host_no, __func__,
-			      fw_ddb_index, ddb_state, next_fw_ddb_index));
-
-		/* Issue relogin, if necessary. */
-		if (ddb_state == DDB_DS_SESSION_FAILED ||
-		    ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) {
-			/* Try and login to device */
-			DEBUG2(printk("scsi%ld: %s: Login to DDB[%d]\n",
-				      ha->host_no, __func__, fw_ddb_index));
-			ipv6_device = le16_to_cpu(fw_ddb_entry->options) &
-				      DDB_OPT_IPV6_DEVICE;
-			if (qla4_is_relogin_allowed(ha, conn_err) &&
-			    ((!ipv6_device &&
-			      *((uint32_t *)fw_ddb_entry->ip_addr))
-			     || ipv6_device)) {
-				qla4xxx_set_ddb_entry(ha, fw_ddb_index, 0);
-				if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index,
-							    NULL, 0, NULL,
-							    &next_fw_ddb_index,
-							    &ddb_state, &conn_err,
-							    NULL, NULL)
-				    == QLA_ERROR) {
-					DEBUG2(printk("scsi%ld: %s:"
-						      "get_ddb_entry %d failed\n",
-						      ha->host_no,
-						      __func__, fw_ddb_index));
-					goto exit_build_ddb_list;
-				}
-			}
-		}
-
-		if (ddb_state != DDB_DS_SESSION_ACTIVE)
-			goto next_one;
-		/*
-		 * if fw_ddb with session active state found,
-		 * add to ddb_list
-		 */
-		DEBUG2(printk("scsi%ld: %s: DDB[%d] added to list\n",
-			      ha->host_no, __func__, fw_ddb_index));
-
-		/* Add DDB to our internal ddb list. */
-		ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index, &new_tgt);
-		if (ddb_entry == NULL) {
-			DEBUG2(printk("scsi%ld: %s: Unable to allocate memory "
-				      "for device at fw_ddb_index %d\n",
-				      ha->host_no, __func__, fw_ddb_index));
-			goto exit_build_ddb_list;
-		}
-		/* Fill in the device structure */
-		if (qla4xxx_update_ddb_entry(ha, ddb_entry, fw_ddb_index) ==
-		    QLA_ERROR) {
-			ha->fw_ddb_index_map[fw_ddb_index] =
-				(struct ddb_entry *)INVALID_ENTRY;
-
-			DEBUG2(printk("scsi%ld: %s: update_ddb_entry failed "
-				      "for fw_ddb_index %d.\n",
-				      ha->host_no, __func__, fw_ddb_index));
-			goto exit_build_ddb_list;
-		}
-
-next_one:
-		/* We know we've reached the last device when
-		 * next_fw_ddb_index is 0 */
-		if (next_fw_ddb_index == 0)
-			break;
-	}
-
-	status = QLA_SUCCESS;
-	ql4_printk(KERN_INFO, ha, "DDB list done..\n");
-
-exit_build_ddb_list:
-	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
-			  fw_ddb_entry_dma);
-
-exit_build_ddb_list_no_free:
-	return status;
-}
-
-static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha)
-{
-	uint16_t fw_ddb_index;
-	int status = QLA_SUCCESS;
-
-	/* free the ddb list if it is not empty */
-	if (!list_empty(&ha->ddb_list))
-		qla4xxx_free_ddb_list(ha);
-
-	for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES; fw_ddb_index++)
-		ha->fw_ddb_index_map[fw_ddb_index] =
-			(struct ddb_entry *)INVALID_ENTRY;
-
-	ha->tot_ddbs = 0;
-
-	/* Perform device discovery and build ddb list. */
-	status = qla4xxx_build_ddb_list(ha);
-
-	return status;
-}
-
-/**
- * qla4xxx_reinitialize_ddb_list - update the driver ddb list
- * @ha: pointer to host adapter structure.
- *
- * This routine obtains device information from the F/W database after
- * firmware or adapter resets. The device table is preserved.
- **/
-int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host *ha)
-{
-	int status = QLA_SUCCESS;
-	struct ddb_entry *ddb_entry, *detemp;
-
-	/* Update the device information for all devices. */
-	list_for_each_entry_safe(ddb_entry, detemp, &ha->ddb_list, list) {
-		qla4xxx_update_ddb_entry(ha, ddb_entry,
-					 ddb_entry->fw_ddb_index);
-		if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
-			atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
-			DEBUG2(printk("scsi%ld: %s: ddb index [%d] marked "
-				      "ONLINE\n", ha->host_no, __func__,
-				      ddb_entry->fw_ddb_index));
-			iscsi_unblock_session(ddb_entry->sess);
-		} else if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
-			qla4xxx_mark_device_missing(ha, ddb_entry);
-	}
-	return status;
-}
-
-/**
- * qla4xxx_relogin_device - re-establish session
- * @ha: Pointer to host adapter structure.
- * @ddb_entry: Pointer to device database entry
- *
- * This routine does a session relogin with the specified device.
- * The ddb entry must be assigned prior to making this call.
- **/
-int qla4xxx_relogin_device(struct scsi_qla_host *ha,
-			   struct ddb_entry * ddb_entry)
-{
-	uint16_t relogin_timer;
-
-	relogin_timer = max(ddb_entry->default_relogin_timeout,
-			    (uint16_t)RELOGIN_TOV);
-	atomic_set(&ddb_entry->relogin_timer, relogin_timer);
-
-	DEBUG2(printk("scsi%ld: Relogin ddb [%d]. TOV=%d\n", ha->host_no,
-		      ddb_entry->fw_ddb_index, relogin_timer));
-
-	qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index, 0);
-
-	return QLA_SUCCESS;
-}
+static void qla4xxx_set_model_info(struct scsi_qla_host *ha)
+{
+	uint16_t board_id_string[8];
+	int i;
+	int size = sizeof(ha->nvram->isp4022.boardIdStr);
+	int offset = offsetof(struct eeprom_data, isp4022.boardIdStr) / 2;
+
+	for (i = 0; i < (size / 2) ; i++) {
+		board_id_string[i] = rd_nvram_word(ha, offset);
+		offset += 1;
+	}
+
+	memcpy(ha->model_name, board_id_string, size);
+}
 
 static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
@@ -983,6 +496,12 @@ static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
 	else
 		return QLA_ERROR;
 	}
+
+	if (is_qla4022(ha) || is_qla4032(ha))
+		qla4xxx_set_model_info(ha);
+	else
+		strcpy(ha->model_name, "QLA4010");
+
 	DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n",
 		     ha->host_no, __func__, extHwConfig.Asuint32_t));
 
@@ -1246,23 +765,56 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
 	}
 	return status;
 }
+/**
+ * qla4xxx_free_ddb_index - Free DDBs reserved by firmware
+ * @ha: pointer to adapter structure
+ *
+ * Since firmware is not running in autoconnect mode the DDB indices should
+ * be freed so that when login happens from user space there are free DDB
+ * indices available.
+ **/
+static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
+{
+	int max_ddbs;
+	int ret;
+	uint32_t idx = 0, next_idx = 0;
+	uint32_t state = 0, conn_err = 0;
+
+	max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
+				    MAX_DEV_DB_ENTRIES;
+
+	for (idx = 0; idx < max_ddbs; idx = next_idx) {
+		ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
+					      &next_idx, &state, &conn_err,
+					      NULL, NULL);
+		if (ret == QLA_ERROR)
+			continue;
+		if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+		    state == DDB_DS_SESSION_FAILED) {
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "Freeing DDB index = 0x%x\n", idx));
+			ret = qla4xxx_clear_ddb_entry(ha, idx);
+			if (ret == QLA_ERROR)
+				ql4_printk(KERN_ERR, ha,
+					   "Unable to clear DDB index = "
+					   "0x%x\n", idx);
+		}
+		if (next_idx == 0)
+			break;
+	}
+}
 
 
 /**
- * qla4xxx_initialize_adapter - initializes hba
+ * qla4xxx_initialize_adapter - initializes hba
  * @ha: Pointer to host adapter structure.
- * @renew_ddb_list: Indicates what to do with the adapter's ddb list
- *	after adapter recovery has completed.
- *	0=preserve ddb list, 1=destroy and rebuild ddb list
  *
  * This routine performs all of the steps necessary to initialize the adapter.
  *
 **/
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
-			       uint8_t renew_ddb_list)
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
 {
 	int status = QLA_ERROR;
-	int8_t ip_address[IP_ADDR_LEN] = {0} ;
 
 	ha->eeprom_cmd_data = 0;
 
@@ -1288,47 +840,8 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
 	if (status == QLA_ERROR)
 		goto exit_init_hba;
 
-	/*
-	 * FW is waiting to get an IP address from DHCP server: Skip building
-	 * the ddb_list and wait for DHCP lease acquired aen to come in
-	 * followed by 0x8014 aen" to trigger the tgt discovery process.
-	 */
-	if (ha->firmware_state & FW_STATE_CONFIGURING_IP)
-		goto exit_init_online;
-
-	/* Skip device discovery if ip and subnet is zero */
-	if (memcmp(ha->ip_address, ip_address, IP_ADDR_LEN) == 0 ||
-	    memcmp(ha->subnet_mask, ip_address, IP_ADDR_LEN) == 0)
-		goto exit_init_online;
+	qla4xxx_free_ddb_index(ha);
 
-	if (renew_ddb_list == PRESERVE_DDB_LIST) {
-		/*
-		 * We want to preserve lun states (i.e. suspended, etc.)
-		 * for recovery initiated by the driver. So just update
-		 * the device states for the existing ddb_list.
-		 */
-		qla4xxx_reinitialize_ddb_list(ha);
-	} else if (renew_ddb_list == REBUILD_DDB_LIST) {
-		/*
-		 * We want to build the ddb_list from scratch during
-		 * driver initialization and recovery initiated by the
-		 * INT_HBA_RESET IOCTL.
-		 */
-		status = qla4xxx_initialize_ddb_list(ha);
-		if (status == QLA_ERROR) {
-			DEBUG2(printk("%s(%ld) Error occurred during build"
-				      "ddb list\n", __func__, ha->host_no));
-			goto exit_init_hba;
-		}
-
-	}
-	if (!ha->tot_ddbs) {
-		DEBUG2(printk("scsi%ld: Failed to initialize devices or none "
-			      "present in Firmware device database\n",
-			      ha->host_no));
-	}
-
-exit_init_online:
 	set_bit(AF_ONLINE, &ha->flags);
 exit_init_hba:
 	if (is_qla8022(ha) && (status == QLA_ERROR)) {
@@ -1343,61 +856,6 @@ exit_init_hba:
 }
 
 /**
- * qla4xxx_add_device_dynamically - ddb addition due to an AEN
- * @ha:  Pointer to host adapter structure.
- * @fw_ddb_index:  Firmware's device database index
- *
- * This routine adds a device as a result of an 8014h AEN.
- **/
-static void qla4xxx_add_device_dynamically(struct scsi_qla_host *ha,
-					   uint32_t fw_ddb_index)
-{
-	struct ddb_entry * ddb_entry;
-	uint32_t new_tgt;
-
-	/* First allocate a device structure */
-	ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index, &new_tgt);
-	if (ddb_entry == NULL) {
-		DEBUG2(printk(KERN_WARNING
-			      "scsi%ld: Unable to allocate memory to add "
-			      "fw_ddb_index %d\n", ha->host_no, fw_ddb_index));
-		return;
-	}
-
-	if (!new_tgt && (ddb_entry->fw_ddb_index != fw_ddb_index)) {
-		/* Target has been bound to a new fw_ddb_index */
-		qla4xxx_free_ddb(ha, ddb_entry);
-		ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index);
-		if (ddb_entry == NULL) {
-			DEBUG2(printk(KERN_WARNING
-				      "scsi%ld: Unable to allocate memory"
-				      " to add fw_ddb_index %d\n",
-				      ha->host_no, fw_ddb_index));
-			return;
-		}
-	}
-	if (qla4xxx_update_ddb_entry(ha, ddb_entry, fw_ddb_index) ==
-	    QLA_ERROR) {
-		ha->fw_ddb_index_map[fw_ddb_index] =
-			(struct ddb_entry *)INVALID_ENTRY;
-		DEBUG2(printk(KERN_WARNING
-			      "scsi%ld: failed to add new device at index "
-			      "[%d]\n Unable to retrieve fw ddb entry\n",
-			      ha->host_no, fw_ddb_index));
-		qla4xxx_free_ddb(ha, ddb_entry);
-		return;
-	}
-
-	if (qla4xxx_add_sess(ddb_entry)) {
-		DEBUG2(printk(KERN_WARNING
-			      "scsi%ld: failed to add new device at index "
-			      "[%d]\n Unable to add connection and session\n",
-			      ha->host_no, fw_ddb_index));
-		qla4xxx_free_ddb(ha, ddb_entry);
-	}
-}
-
-/**
  * qla4xxx_process_ddb_changed - process ddb state change
  * @ha - Pointer to host adapter structure.
  * @fw_ddb_index - Firmware's device database index
@@ -1409,88 +867,94 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
 				uint32_t state, uint32_t conn_err)
 {
 	struct ddb_entry * ddb_entry;
+	uint32_t old_fw_ddb_device_state;
+	int status = QLA_ERROR;
 
 	/* check for out of range index */
 	if (fw_ddb_index >= MAX_DDB_ENTRIES)
-		return QLA_ERROR;
+		goto exit_ddb_event;
 
 	/* Get the corresponding ddb entry */
 	ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
 	/* Device does not currently exist in our database. */
 	if (ddb_entry == NULL) {
-		if (state == DDB_DS_SESSION_ACTIVE)
-			qla4xxx_add_device_dynamically(ha, fw_ddb_index);
-		return QLA_SUCCESS;
+		ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
+			   __func__, fw_ddb_index);
+
+		if (state == DDB_DS_NO_CONNECTION_ACTIVE)
+			clear_bit(fw_ddb_index, ha->ddb_idx_map);
+
+		goto exit_ddb_event;
 	}
 
-	/* Device already exists in our database. */
-	DEBUG2(printk("scsi%ld: %s DDB - old state= 0x%x, new state=0x%x for "
-		      "index [%d]\n", ha->host_no, __func__,
-		      ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
+	old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
+	DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "%s: DDB - old state = 0x%x, new state = 0x%x for "
+			  "index [%d]\n", __func__,
+			  ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
 
 	ddb_entry->fw_ddb_device_state = state;
-	/* Device is back online. */
-	if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
-	    (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
-		atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
-		atomic_set(&ddb_entry->relogin_retry_count, 0);
-		atomic_set(&ddb_entry->relogin_timer, 0);
-		clear_bit(DF_RELOGIN, &ddb_entry->flags);
-		iscsi_unblock_session(ddb_entry->sess);
-		iscsi_session_event(ddb_entry->sess,
-				    ISCSI_KEVENT_CREATE_SESSION);
-		/*
-		 * Change the lun state to READY in case the lun TIMEOUT before
-		 * the device came back.
-		 */
-	} else if (ddb_entry->fw_ddb_device_state != DDB_DS_SESSION_ACTIVE) {
-		/* Device went away, mark device missing */
-		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) {
-			DEBUG2(ql4_printk(KERN_INFO, ha, "%s mark missing "
-					  "ddb_entry 0x%p sess 0x%p conn 0x%p\n",
-					  __func__, ddb_entry,
-					  ddb_entry->sess, ddb_entry->conn));
-			qla4xxx_mark_device_missing(ha, ddb_entry);
-		}
-
-		/*
-		 * Relogin if device state changed to a not active state.
-		 * However, do not relogin if a RELOGIN is in process, or
-		 * we are not allowed to relogin to this DDB.
-		 */
-		if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_FAILED &&
-		    !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
-		    qla4_is_relogin_allowed(ha, conn_err)) {
+
+	switch (old_fw_ddb_device_state) {
+	case DDB_DS_LOGIN_IN_PROCESS:
+		switch (state) {
+		case DDB_DS_SESSION_ACTIVE:
+		case DDB_DS_DISCOVERY:
+			iscsi_conn_start(ddb_entry->conn);
+			iscsi_conn_login_event(ddb_entry->conn,
+					       ISCSI_CONN_STATE_LOGGED_IN);
+			qla4xxx_update_session_conn_param(ha, ddb_entry);
+			status = QLA_SUCCESS;
+			break;
+		case DDB_DS_SESSION_FAILED:
+		case DDB_DS_NO_CONNECTION_ACTIVE:
+			iscsi_conn_login_event(ddb_entry->conn,
+					       ISCSI_CONN_STATE_FREE);
+			status = QLA_SUCCESS;
+			break;
+		}
+		break;
+	case DDB_DS_SESSION_ACTIVE:
+		switch (state) {
+		case DDB_DS_SESSION_FAILED:
 			/*
-			 * This triggers a relogin. After the relogin_timer
-			 * expires, the relogin gets scheduled. We must wait a
-			 * minimum amount of time since receiving an 0x8014 AEN
-			 * with failed device_state or a logout response before
-			 * we can issue another relogin.
+			 * iscsi_session failure will cause userspace to
+			 * stop the connection which in turn would block the
+			 * iscsi_session and start relogin
 			 */
-			/* Firmware pads this timeout: (time2wait +1).
-			 * Driver retry to login should be longer than F/W.
-			 * Otherwise F/W will fail
-			 * set_ddb() mbx cmd with 0x4005 since it still
-			 * counting down its time2wait.
-			 */
-			atomic_set(&ddb_entry->relogin_timer, 0);
-			atomic_set(&ddb_entry->retry_relogin_timer,
-				   ddb_entry->default_time2wait + 4);
-			DEBUG(printk("scsi%ld: %s: ddb[%d] "
-				     "initiate relogin after %d seconds\n",
-				     ha->host_no, __func__,
-				     ddb_entry->fw_ddb_index,
-				     ddb_entry->default_time2wait + 4));
-		} else {
-			DEBUG(printk("scsi%ld: %s: ddb[%d] "
-				     "relogin not initiated, state = %d, "
-				     "ddb_entry->flags = 0x%lx\n",
-				     ha->host_no, __func__,
-				     ddb_entry->fw_ddb_index,
-				     ddb_entry->fw_ddb_device_state,
-				     ddb_entry->flags));
+			iscsi_session_failure(ddb_entry->sess->dd_data,
+					      ISCSI_ERR_CONN_FAILED);
+			status = QLA_SUCCESS;
+			break;
+		case DDB_DS_NO_CONNECTION_ACTIVE:
+			clear_bit(fw_ddb_index, ha->ddb_idx_map);
+			status = QLA_SUCCESS;
+			break;
+		}
+		break;
+	case DDB_DS_SESSION_FAILED:
+		switch (state) {
+		case DDB_DS_SESSION_ACTIVE:
+		case DDB_DS_DISCOVERY:
+			iscsi_conn_start(ddb_entry->conn);
+			iscsi_conn_login_event(ddb_entry->conn,
+					       ISCSI_CONN_STATE_LOGGED_IN);
+			qla4xxx_update_session_conn_param(ha, ddb_entry);
+			status = QLA_SUCCESS;
+			break;
+		case DDB_DS_SESSION_FAILED:
+			iscsi_session_failure(ddb_entry->sess->dd_data,
+					      ISCSI_ERR_CONN_FAILED);
+			status = QLA_SUCCESS;
+			break;
 		}
+		break;
+	default:
+		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n",
+				  __func__));
+		break;
 	}
-	return QLA_SUCCESS;
+
+exit_ddb_event:
+	return status;
 }
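
The rewritten qla4xxx_process_ddb_changed() above is effectively a two-level state machine keyed on (old firmware DDB state, new firmware DDB state). A compressed, userspace-compilable model of the transitions it implements follows; the enum values are illustrative stand-ins, not the firmware's DDB_DS_* codes:

/* Model of the DDB state-transition handling; values are stand-ins. */
#include <stdio.h>

enum ddb_state { DS_NO_CONN, DS_DISCOVERY, DS_LOGIN_IN_PROCESS,
		 DS_SESSION_ACTIVE, DS_SESSION_FAILED };

enum action { ACT_NONE, ACT_CONN_START_LOGGED_IN, ACT_CONN_STATE_FREE,
	      ACT_SESSION_FAILURE, ACT_CLEAR_IDX_BIT };

static enum action classify(enum ddb_state old_state, enum ddb_state new_state)
{
	switch (old_state) {
	case DS_LOGIN_IN_PROCESS:
		if (new_state == DS_SESSION_ACTIVE ||
		    new_state == DS_DISCOVERY)
			return ACT_CONN_START_LOGGED_IN;
		if (new_state == DS_SESSION_FAILED || new_state == DS_NO_CONN)
			return ACT_CONN_STATE_FREE;
		break;
	case DS_SESSION_ACTIVE:
		if (new_state == DS_SESSION_FAILED)
			return ACT_SESSION_FAILURE;
		if (new_state == DS_NO_CONN)
			return ACT_CLEAR_IDX_BIT;
		break;
	case DS_SESSION_FAILED:
		if (new_state == DS_SESSION_ACTIVE ||
		    new_state == DS_DISCOVERY)
			return ACT_CONN_START_LOGGED_IN;
		if (new_state == DS_SESSION_FAILED)
			return ACT_SESSION_FAILURE;
		break;
	default:
		break;
	}
	return ACT_NONE;	/* the driver's "Unknown Event" path */
}

int main(void)
{
	printf("%d\n", classify(DS_SESSION_ACTIVE, DS_SESSION_FAILED));
	return 0;
}

The notable design shift visible in the diff: instead of driving relogin timers itself, the driver now reports failures upward (iscsi_session_failure), letting the userspace iSCSI tools stop the connection and initiate relogin.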
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index 75fcd82a8fc..41066935190 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -313,10 +313,8 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
 	cmd_entry->hdr.entryType = ET_COMMAND;
 	cmd_entry->handle = cpu_to_le32(index);
 	cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
-	cmd_entry->connection_id = cpu_to_le16(ddb_entry->connection_id);
 
 	int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
-	cmd_entry->cmdSeqNum = cpu_to_le32(ddb_entry->CmdSn);
 	cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd));
 	memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
 	cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
@@ -381,3 +379,69 @@ queuing_error:
 	return QLA_ERROR;
 }
 
+int qla4xxx_send_passthru0(struct iscsi_task *task)
+{
+	struct passthru0 *passthru_iocb;
+	struct iscsi_session *sess = task->conn->session;
+	struct ddb_entry *ddb_entry = sess->dd_data;
+	struct scsi_qla_host *ha = ddb_entry->ha;
+	struct ql4_task_data *task_data = task->dd_data;
+	uint16_t ctrl_flags = 0;
+	unsigned long flags;
+	int ret = QLA_ERROR;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	task_data->iocb_req_cnt = 1;
+	/* Put the IOCB on the request queue */
+	if (!qla4xxx_space_in_req_ring(ha, task_data->iocb_req_cnt))
+		goto queuing_error;
+
+	passthru_iocb = (struct passthru0 *) ha->request_ptr;
+
+	memset(passthru_iocb, 0, sizeof(struct passthru0));
+	passthru_iocb->hdr.entryType = ET_PASSTHRU0;
+	passthru_iocb->hdr.systemDefined = SD_ISCSI_PDU;
+	passthru_iocb->hdr.entryCount = task_data->iocb_req_cnt;
+	passthru_iocb->handle = task->itt;
+	passthru_iocb->target = cpu_to_le16(ddb_entry->fw_ddb_index);
+	passthru_iocb->timeout = cpu_to_le16(PT_DEFAULT_TIMEOUT);
+
+	/* Setup the out & in DSDs */
+	if (task_data->req_len) {
+		memcpy((uint8_t *)task_data->req_buffer +
+		       sizeof(struct iscsi_hdr), task->data, task->data_count);
+		ctrl_flags |= PT_FLAG_SEND_BUFFER;
+		passthru_iocb->out_dsd.base.addrLow =
+					cpu_to_le32(LSDW(task_data->req_dma));
+		passthru_iocb->out_dsd.base.addrHigh =
+					cpu_to_le32(MSDW(task_data->req_dma));
+		passthru_iocb->out_dsd.count =
+			cpu_to_le32(task->data_count +
+				    sizeof(struct iscsi_hdr));
+	}
+	if (task_data->resp_len) {
+		passthru_iocb->in_dsd.base.addrLow =
+					cpu_to_le32(LSDW(task_data->resp_dma));
+		passthru_iocb->in_dsd.base.addrHigh =
+					cpu_to_le32(MSDW(task_data->resp_dma));
+		passthru_iocb->in_dsd.count =
+			cpu_to_le32(task_data->resp_len);
+	}
+
+	ctrl_flags |= (PT_FLAG_ISCSI_PDU | PT_FLAG_WAIT_4_RESPONSE);
+	passthru_iocb->control_flags = cpu_to_le16(ctrl_flags);
+
+	/* Update the request pointer */
+	qla4xxx_advance_req_ring_ptr(ha);
+	wmb();
+
+	/* Track IOCB used */
+	ha->iocb_cnt += task_data->iocb_req_cnt;
+	ha->req_q_count -= task_data->iocb_req_cnt;
+	ha->isp_ops->queue_iocb(ha);
+	ret = QLA_SUCCESS;
+
+queuing_error:
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return ret;
+}
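
qla4xxx_send_passthru0() splits each 64-bit DMA address across the IOCB's addrLow/addrHigh pair with LSDW()/MSDW(). A hedged sketch of what those macros presumably expand to (the driver's own definitions live in its private headers, so this is an assumption); the double 16-bit shift is the usual trick for staying well-defined when dma_addr_t is only 32 bits wide:

/* Sketch of the low/high 32-bit DMA address split; macros are assumed. */
#include <stdint.h>
#include <stdio.h>

#define LSDW(x) ((uint32_t)((uint64_t)(x)))
#define MSDW(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))

int main(void)
{
	uint64_t dma = 0x000000012345abcdULL;	/* hypothetical bus address */

	/* addrLow/addrHigh as a little-endian IOCB would carry them */
	printf("addrLow=0x%08x addrHigh=0x%08x\n", LSDW(dma), MSDW(dma));
	return 0;
}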
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 0e72921c752..827e93078b9 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -224,8 +224,8 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
 		 * I/O to this device. We should get a ddb state change
 		 * AEN soon.
 		 */
-		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
-			qla4xxx_mark_device_missing(ha, ddb_entry);
+		if (iscsi_is_session_online(ddb_entry->sess))
+			qla4xxx_mark_device_missing(ddb_entry->sess);
 		break;
 
 	case SCS_DATA_UNDERRUN:
@@ -306,8 +306,8 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
 			 * send I/O to this device. We should get a ddb
 			 * state change AEN soon.
 			 */
-			if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
-				qla4xxx_mark_device_missing(ha, ddb_entry);
+			if (iscsi_is_session_online(ddb_entry->sess))
+				qla4xxx_mark_device_missing(ddb_entry->sess);
 
 			cmd->result = DID_TRANSPORT_DISRUPTED << 16;
 			break;
@@ -341,6 +341,51 @@ status_entry_exit:
 }
 
 /**
+ * qla4xxx_passthru_status_entry - processes passthru status IOCBs (0x3C)
+ * @ha: Pointer to host adapter structure.
+ * @sts_entry: Pointer to status entry structure.
+ **/
+static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha,
+					  struct passthru_status *sts_entry)
+{
+	struct iscsi_task *task;
+	struct ddb_entry *ddb_entry;
+	struct ql4_task_data *task_data;
+	struct iscsi_cls_conn *cls_conn;
+	struct iscsi_conn *conn;
+	itt_t itt;
+	uint32_t fw_ddb_index;
+
+	itt = sts_entry->handle;
+	fw_ddb_index = le32_to_cpu(sts_entry->target);
+
+	ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
+
+	if (ddb_entry == NULL) {
+		ql4_printk(KERN_ERR, ha, "%s: Invalid target index = 0x%x\n",
+			   __func__, sts_entry->target);
+		return;
+	}
+
+	cls_conn = ddb_entry->conn;
+	conn = cls_conn->dd_data;
+	spin_lock(&conn->session->lock);
+	task = iscsi_itt_to_task(conn, itt);
+	spin_unlock(&conn->session->lock);
+
+	if (task == NULL) {
+		ql4_printk(KERN_ERR, ha, "%s: Task is NULL\n", __func__);
+		return;
+	}
+
+	task_data = task->dd_data;
+	memcpy(&task_data->sts, sts_entry, sizeof(struct passthru_status));
+	ha->req_q_count += task_data->iocb_req_cnt;
+	ha->iocb_cnt -= task_data->iocb_req_cnt;
+	queue_work(ha->task_wq, &task_data->task_work);
+}
+
+/**
  * qla4xxx_process_response_queue - process response queue completions
  * @ha: Pointer to host adapter structure.
  *
@@ -375,6 +420,14 @@ void qla4xxx_process_response_queue(struct scsi_qla_host *ha)
 			break;
 
 		case ET_PASSTHRU_STATUS:
+			if (sts_entry->hdr.systemDefined == SD_ISCSI_PDU)
+				qla4xxx_passthru_status_entry(ha,
+					(struct passthru_status *)sts_entry);
+			else
+				ql4_printk(KERN_ERR, ha,
+					   "%s: Invalid status received\n",
+					   __func__);
+
 			break;
 
 		case ET_STATUS_CONTINUATION:
@@ -566,6 +619,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
 		else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
 			 (mbox_sts[2] == ACB_STATE_VALID))
 			set_bit(DPC_RESET_HA, &ha->dpc_flags);
+		else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED))
+			complete(&ha->disable_acb_comp);
 		break;
 
 	case MBOX_ASTS_MAC_ADDRESS_CHANGED:
@@ -1009,23 +1064,23 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
 
 		switch (mbox_sts[0]) {
 		case MBOX_ASTS_DATABASE_CHANGED:
-			if (process_aen == FLUSH_DDB_CHANGED_AENS) {
+			switch (process_aen) {
+			case FLUSH_DDB_CHANGED_AENS:
 				DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
 					      "[%d] state=%04x FLUSHED!\n",
 					      ha->host_no, ha->aen_out,
 					      mbox_sts[0], mbox_sts[2],
 					      mbox_sts[3]));
 				break;
+			case PROCESS_ALL_AENS:
+			default:
+				/* Specific device. */
+				if (mbox_sts[1] == 1)
+					qla4xxx_process_ddb_changed(ha,
+						mbox_sts[2], mbox_sts[3],
+						mbox_sts[4]);
+				break;
 			}
-		case PROCESS_ALL_AENS:
-		default:
-			if (mbox_sts[1] == 0) {	/* Global DB change. */
-				qla4xxx_reinitialize_ddb_list(ha);
-			} else if (mbox_sts[1] == 1) {	/* Specific device. */
-				qla4xxx_process_ddb_changed(ha, mbox_sts[2],
-						mbox_sts[3], mbox_sts[4]);
-			}
-			break;
 		}
 		spin_lock_irqsave(&ha->hardware_lock, flags);
 	}
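
The reworked qla4xxx_process_aen() above replaces the old if/fall-through with an explicit switch on the processing mode, and drops the global-database-rebuild (mbox_sts[1] == 0) path entirely. A small stand-alone model of the resulting dispatch (constants and printouts are stand-ins for the driver's):

/* Model of the AEN dispatch after the rework; values are stand-ins. */
#include <stdio.h>

enum aen_mode { FLUSH_DDB_CHANGED_AENS, PROCESS_ALL_AENS };

static void process_db_changed_aen(enum aen_mode mode, unsigned int scope,
				   unsigned int idx, unsigned int state)
{
	switch (mode) {
	case FLUSH_DDB_CHANGED_AENS:
		printf("AEN index [%u] state=%04x FLUSHED!\n", idx, state);
		break;
	case PROCESS_ALL_AENS:
	default:
		if (scope == 1)	/* specific device; global rebuilds are gone */
			printf("process_ddb_changed(idx=%u, state=%04x)\n",
			       idx, state);
		break;
	}
}

int main(void)
{
	process_db_changed_aen(PROCESS_ALL_AENS, 1, 2, 0x04);
	return 0;
}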
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index fce8289e975..4c2b8487039 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -303,7 +303,7 @@ qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
 	return QLA_SUCCESS;
 }
 
-static uint8_t
+uint8_t
 qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
 		 uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma)
 {
@@ -327,43 +327,69 @@ qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
 
 static void
 qla4xxx_update_local_ip(struct scsi_qla_host *ha,
			struct addr_ctrl_blk *init_fw_cb)
 {
+	ha->ip_config.tcp_options = le16_to_cpu(init_fw_cb->ipv4_tcp_opts);
+	ha->ip_config.ipv4_options = le16_to_cpu(init_fw_cb->ipv4_ip_opts);
+	ha->ip_config.ipv4_addr_state =
+				le16_to_cpu(init_fw_cb->ipv4_addr_state);
+	ha->ip_config.eth_mtu_size =
+				le16_to_cpu(init_fw_cb->eth_mtu_size);
+	ha->ip_config.ipv4_port = le16_to_cpu(init_fw_cb->ipv4_port);
+
+	if (ha->acb_version == ACB_SUPPORTED) {
+		ha->ip_config.ipv6_options = le16_to_cpu(init_fw_cb->ipv6_opts);
+		ha->ip_config.ipv6_addl_options =
+				le16_to_cpu(init_fw_cb->ipv6_addtl_opts);
+	}
+
 	/* Save IPv4 Address Info */
-	memcpy(ha->ip_address, init_fw_cb->ipv4_addr,
-	       min(sizeof(ha->ip_address), sizeof(init_fw_cb->ipv4_addr)));
-	memcpy(ha->subnet_mask, init_fw_cb->ipv4_subnet,
-	       min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->ipv4_subnet)));
-	memcpy(ha->gateway, init_fw_cb->ipv4_gw_addr,
-	       min(sizeof(ha->gateway), sizeof(init_fw_cb->ipv4_gw_addr)));
+	memcpy(ha->ip_config.ip_address, init_fw_cb->ipv4_addr,
+	       min(sizeof(ha->ip_config.ip_address),
+		   sizeof(init_fw_cb->ipv4_addr)));
+	memcpy(ha->ip_config.subnet_mask, init_fw_cb->ipv4_subnet,
+	       min(sizeof(ha->ip_config.subnet_mask),
+		   sizeof(init_fw_cb->ipv4_subnet)));
+	memcpy(ha->ip_config.gateway, init_fw_cb->ipv4_gw_addr,
+	       min(sizeof(ha->ip_config.gateway),
+		   sizeof(init_fw_cb->ipv4_gw_addr)));
+
+	ha->ip_config.ipv4_vlan_tag = be16_to_cpu(init_fw_cb->ipv4_vlan_tag);
 
 	if (is_ipv6_enabled(ha)) {
 		/* Save IPv6 Address */
-		ha->ipv6_link_local_state = init_fw_cb->ipv6_lnk_lcl_addr_state;
-		ha->ipv6_addr0_state = init_fw_cb->ipv6_addr0_state;
-		ha->ipv6_addr1_state = init_fw_cb->ipv6_addr1_state;
-		ha->ipv6_default_router_state = init_fw_cb->ipv6_dflt_rtr_state;
-		ha->ipv6_link_local_addr.in6_u.u6_addr8[0] = 0xFE;
-		ha->ipv6_link_local_addr.in6_u.u6_addr8[1] = 0x80;
-
-		memcpy(&ha->ipv6_link_local_addr.in6_u.u6_addr8[8],
-		       init_fw_cb->ipv6_if_id,
-		       min(sizeof(ha->ipv6_link_local_addr)/2,
-			   sizeof(init_fw_cb->ipv6_if_id)));
-		memcpy(&ha->ipv6_addr0, init_fw_cb->ipv6_addr0,
-		       min(sizeof(ha->ipv6_addr0),
-			   sizeof(init_fw_cb->ipv6_addr0)));
-		memcpy(&ha->ipv6_addr1, init_fw_cb->ipv6_addr1,
-		       min(sizeof(ha->ipv6_addr1),
-			   sizeof(init_fw_cb->ipv6_addr1)));
-		memcpy(&ha->ipv6_default_router_addr,
-		       init_fw_cb->ipv6_dflt_rtr_addr,
-		       min(sizeof(ha->ipv6_default_router_addr),
-			   sizeof(init_fw_cb->ipv6_dflt_rtr_addr)));
+		ha->ip_config.ipv6_link_local_state =
+			le16_to_cpu(init_fw_cb->ipv6_lnk_lcl_addr_state);
+		ha->ip_config.ipv6_addr0_state =
+			le16_to_cpu(init_fw_cb->ipv6_addr0_state);
+		ha->ip_config.ipv6_addr1_state =
+			le16_to_cpu(init_fw_cb->ipv6_addr1_state);
+		ha->ip_config.ipv6_default_router_state =
+			le16_to_cpu(init_fw_cb->ipv6_dflt_rtr_state);
+		ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[0] = 0xFE;
+		ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[1] = 0x80;
+
+		memcpy(&ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[8],
+		       init_fw_cb->ipv6_if_id,
+		       min(sizeof(ha->ip_config.ipv6_link_local_addr)/2,
+			   sizeof(init_fw_cb->ipv6_if_id)));
+		memcpy(&ha->ip_config.ipv6_addr0, init_fw_cb->ipv6_addr0,
+		       min(sizeof(ha->ip_config.ipv6_addr0),
+			   sizeof(init_fw_cb->ipv6_addr0)));
+		memcpy(&ha->ip_config.ipv6_addr1, init_fw_cb->ipv6_addr1,
+		       min(sizeof(ha->ip_config.ipv6_addr1),
+			   sizeof(init_fw_cb->ipv6_addr1)));
+		memcpy(&ha->ip_config.ipv6_default_router_addr,
+		       init_fw_cb->ipv6_dflt_rtr_addr,
+		       min(sizeof(ha->ip_config.ipv6_default_router_addr),
+			   sizeof(init_fw_cb->ipv6_dflt_rtr_addr)));
+		ha->ip_config.ipv6_vlan_tag =
+				be16_to_cpu(init_fw_cb->ipv6_vlan_tag);
+		ha->ip_config.ipv6_port = le16_to_cpu(init_fw_cb->ipv6_port);
 	}
 }
 
-static uint8_t
+uint8_t
 qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
			  uint32_t *mbox_cmd,
			  uint32_t *mbox_sts,
@@ -383,9 +409,6 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
 	/* Save some info in adapter structure. */
 	ha->acb_version = init_fw_cb->acb_version;
 	ha->firmware_options = le16_to_cpu(init_fw_cb->fw_options);
-	ha->tcp_options = le16_to_cpu(init_fw_cb->ipv4_tcp_opts);
-	ha->ipv4_options = le16_to_cpu(init_fw_cb->ipv4_ip_opts);
-	ha->ipv4_addr_state = le16_to_cpu(init_fw_cb->ipv4_addr_state);
 	ha->heartbeat_interval = init_fw_cb->hb_interval;
 	memcpy(ha->name_string, init_fw_cb->iscsi_name,
 	       min(sizeof(ha->name_string),
@@ -393,10 +416,6 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
 	/*memcpy(ha->alias, init_fw_cb->Alias,
 	       min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
 
-	if (ha->acb_version == ACB_SUPPORTED) {
-		ha->ipv6_options = init_fw_cb->ipv6_opts;
-		ha->ipv6_addl_options = init_fw_cb->ipv6_addtl_opts;
-	}
 	qla4xxx_update_local_ip(ha, init_fw_cb);
 
 	return QLA_SUCCESS;
@@ -462,10 +481,11 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
 
 	init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
 
-	/* Set bit for "serialize task mgmt" all other bits need to be zero */
 	init_fw_cb->add_fw_options = 0;
 	init_fw_cb->add_fw_options |=
-			__constant_cpu_to_le16(SERIALIZE_TASK_MGMT);
+			__constant_cpu_to_le16(ADFWOPT_SERIALIZE_TASK_MGMT);
+	init_fw_cb->add_fw_options |=
+			__constant_cpu_to_le16(ADFWOPT_AUTOCONN_DISABLE);
 
 	if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)
 	    != QLA_SUCCESS) {
@@ -691,19 +711,38 @@ exit_get_fwddb:
691 return status; 711 return status;
692} 712}
693 713
714int qla4xxx_conn_open(struct scsi_qla_host *ha, uint16_t fw_ddb_index)
715{
716 uint32_t mbox_cmd[MBOX_REG_COUNT];
717 uint32_t mbox_sts[MBOX_REG_COUNT];
718 int status;
719
720 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
721 memset(&mbox_sts, 0, sizeof(mbox_sts));
722
723 mbox_cmd[0] = MBOX_CMD_CONN_OPEN;
724 mbox_cmd[1] = fw_ddb_index;
725
726 status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
727 &mbox_sts[0]);
728 DEBUG2(ql4_printk(KERN_INFO, ha,
729 "%s: status = %d mbx0 = 0x%x mbx1 = 0x%x\n",
730 __func__, status, mbox_sts[0], mbox_sts[1]));
731 return status;
732}
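
Every mailbox helper added by this patch follows the same calling
convention seen in qla4xxx_conn_open() above: zero both register arrays,
load the opcode into mbox_cmd[0] and parameters into the registers after
it, then call qla4xxx_mailbox_command() with the counts of registers to
send and to read back. A minimal sketch of a hypothetical wrapper built
on that convention (the opcode and function name are illustrative, not
part of this patch):

int qla4xxx_example_cmd(struct scsi_qla_host *ha, uint16_t fw_ddb_index)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_EXAMPLE;	/* hypothetical opcode */
	mbox_cmd[1] = fw_ddb_index;

	/* send 2 registers, read 2 back (status + extended status) */
	status = qla4xxx_mailbox_command(ha, 2, 2, &mbox_cmd[0], &mbox_sts[0]);
	if (status != QLA_SUCCESS)
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: failed sts %04X\n",
				  __func__, mbox_sts[0]));
	return status;
}
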
733
694/** 734/**
 695 * qla4xxx_set_ddb_entry - sets a ddb entry. 735 * qla4xxx_set_ddb_entry - sets a ddb entry.
696 * @ha: Pointer to host adapter structure. 736 * @ha: Pointer to host adapter structure.
697 * @fw_ddb_index: Firmware's device database index 737 * @fw_ddb_index: Firmware's device database index
 698 * @fw_ddb_entry: Pointer to firmware's ddb entry structure, or NULL. 738 * @fw_ddb_entry_dma: DMA address of the ddb entry
 739 * @mbx_sts: where to return mailbox register 0, or NULL
699 * 740 *
700 * This routine initializes or updates the adapter's device database 741 * This routine initializes or updates the adapter's device database
701 * entry for the specified device. It also triggers a login for the 742 * entry for the specified device.
702 * specified device. Therefore, it may also be used as a secondary
703 * login routine when a NULL pointer is specified for the fw_ddb_entry.
704 **/ 743 **/
705int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index, 744int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
706 dma_addr_t fw_ddb_entry_dma) 745 dma_addr_t fw_ddb_entry_dma, uint32_t *mbx_sts)
707{ 746{
708 uint32_t mbox_cmd[MBOX_REG_COUNT]; 747 uint32_t mbox_cmd[MBOX_REG_COUNT];
709 uint32_t mbox_sts[MBOX_REG_COUNT]; 748 uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -722,13 +761,41 @@ int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
722 mbox_cmd[4] = sizeof(struct dev_db_entry); 761 mbox_cmd[4] = sizeof(struct dev_db_entry);
723 762
724 status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], 763 status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0],
725 &mbox_sts[0]); 764 &mbox_sts[0]);
765 if (mbx_sts)
766 *mbx_sts = mbox_sts[0];
726 DEBUG2(printk("scsi%ld: %s: status=%d mbx0=0x%x mbx4=0x%x\n", 767 DEBUG2(printk("scsi%ld: %s: status=%d mbx0=0x%x mbx4=0x%x\n",
727 ha->host_no, __func__, status, mbox_sts[0], mbox_sts[4]);) 768 ha->host_no, __func__, status, mbox_sts[0], mbox_sts[4]);)
728 769
729 return status; 770 return status;
730} 771}
731 772
773int qla4xxx_session_logout_ddb(struct scsi_qla_host *ha,
774 struct ddb_entry *ddb_entry, int options)
775{
776 int status;
777 uint32_t mbox_cmd[MBOX_REG_COUNT];
778 uint32_t mbox_sts[MBOX_REG_COUNT];
779
780 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
781 memset(&mbox_sts, 0, sizeof(mbox_sts));
782
783 mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT;
784 mbox_cmd[1] = ddb_entry->fw_ddb_index;
785 mbox_cmd[3] = options;
786
787 status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
788 &mbox_sts[0]);
789 if (status != QLA_SUCCESS) {
790 DEBUG2(ql4_printk(KERN_INFO, ha,
791 "%s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT "
792 "failed sts %04X %04X", __func__,
793 mbox_sts[0], mbox_sts[1]));
794 }
795
796 return status;
797}
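
The options word lands in mbox_cmd[3] above, so the caller selects the
logout behavior per invocation. A sketch, assuming the driver's usual
flag name (LOGOUT_OPTION_CLOSE_SESSION is not shown in this hunk, so
treat it as illustrative):

	if (qla4xxx_session_logout_ddb(ha, ddb_entry,
				       LOGOUT_OPTION_CLOSE_SESSION) != QLA_SUCCESS)
		ql4_printk(KERN_WARNING, ha, "logout of ddb %d failed\n",
			   ddb_entry->fw_ddb_index);
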
798
732/** 799/**
733 * qla4xxx_get_crash_record - retrieves crash record. 800 * qla4xxx_get_crash_record - retrieves crash record.
734 * @ha: Pointer to host adapter structure. 801 * @ha: Pointer to host adapter structure.
@@ -805,7 +872,6 @@ void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha)
805 uint32_t max_event_log_entries; 872 uint32_t max_event_log_entries;
806 uint8_t i; 873 uint8_t i;
807 874
808
809 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 875 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
810 memset(&mbox_sts, 0, sizeof(mbox_cmd)); 876 memset(&mbox_sts, 0, sizeof(mbox_cmd));
811 877
@@ -1104,7 +1170,7 @@ exit_about_fw:
1104 return status; 1170 return status;
1105} 1171}
1106 1172
1107static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, 1173static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options,
1108 dma_addr_t dma_addr) 1174 dma_addr_t dma_addr)
1109{ 1175{
1110 uint32_t mbox_cmd[MBOX_REG_COUNT]; 1176 uint32_t mbox_cmd[MBOX_REG_COUNT];
@@ -1114,6 +1180,7 @@ static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha,
1114 memset(&mbox_sts, 0, sizeof(mbox_sts)); 1180 memset(&mbox_sts, 0, sizeof(mbox_sts));
1115 1181
1116 mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS; 1182 mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS;
1183 mbox_cmd[1] = options;
1117 mbox_cmd[2] = LSDW(dma_addr); 1184 mbox_cmd[2] = LSDW(dma_addr);
1118 mbox_cmd[3] = MSDW(dma_addr); 1185 mbox_cmd[3] = MSDW(dma_addr);
1119 1186
@@ -1126,8 +1193,10 @@ static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha,
1126 return QLA_SUCCESS; 1193 return QLA_SUCCESS;
1127} 1194}
1128 1195
1129static int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t *ddb_index) 1196int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t ddb_index,
1197 uint32_t *mbx_sts)
1130{ 1198{
1199 int status;
1131 uint32_t mbox_cmd[MBOX_REG_COUNT]; 1200 uint32_t mbox_cmd[MBOX_REG_COUNT];
1132 uint32_t mbox_sts[MBOX_REG_COUNT]; 1201 uint32_t mbox_sts[MBOX_REG_COUNT];
1133 1202
@@ -1135,75 +1204,646 @@ static int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t *ddb_index)
1135 memset(&mbox_sts, 0, sizeof(mbox_sts)); 1204 memset(&mbox_sts, 0, sizeof(mbox_sts));
1136 1205
1137 mbox_cmd[0] = MBOX_CMD_REQUEST_DATABASE_ENTRY; 1206 mbox_cmd[0] = MBOX_CMD_REQUEST_DATABASE_ENTRY;
1138 mbox_cmd[1] = MAX_PRST_DEV_DB_ENTRIES; 1207 mbox_cmd[1] = ddb_index;
1139 1208
1140 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0], &mbox_sts[0]) != 1209 status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
1141 QLA_SUCCESS) { 1210 &mbox_sts[0]);
1142 if (mbox_sts[0] == MBOX_STS_COMMAND_ERROR) { 1211 if (status != QLA_SUCCESS) {
1143 *ddb_index = mbox_sts[2]; 1212 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n",
1213 __func__, mbox_sts[0]));
1214 }
1215
1216 *mbx_sts = mbox_sts[0];
1217 return status;
1218}
1219
1220int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t ddb_index)
1221{
1222 int status;
1223 uint32_t mbox_cmd[MBOX_REG_COUNT];
1224 uint32_t mbox_sts[MBOX_REG_COUNT];
1225
1226 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1227 memset(&mbox_sts, 0, sizeof(mbox_sts));
1228
1229 mbox_cmd[0] = MBOX_CMD_CLEAR_DATABASE_ENTRY;
1230 mbox_cmd[1] = ddb_index;
1231
1232 status = qla4xxx_mailbox_command(ha, 2, 1, &mbox_cmd[0],
1233 &mbox_sts[0]);
1234 if (status != QLA_SUCCESS) {
1235 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n",
1236 __func__, mbox_sts[0]));
1237 }
1238
1239 return status;
1240}
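
Together with qla4xxx_set_ddb_entry() above, the reworked request helper
and the new clear helper give callers a reserve/commit/release flow for
firmware DDB slots. A hypothetical chaining (error handling trimmed):

	uint32_t mbx_sts;

	if (qla4xxx_req_ddb_entry(ha, ddb_index, &mbx_sts) != QLA_SUCCESS)
		return QLA_ERROR;	/* slot unavailable */

	if (qla4xxx_set_ddb_entry(ha, ddb_index, fw_ddb_entry_dma,
				  &mbx_sts) != QLA_SUCCESS)
		qla4xxx_clear_ddb_entry(ha, ddb_index);	/* release the slot */
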
1241
1242int qla4xxx_set_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr,
1243 uint32_t offset, uint32_t length, uint32_t options)
1244{
1245 uint32_t mbox_cmd[MBOX_REG_COUNT];
1246 uint32_t mbox_sts[MBOX_REG_COUNT];
1247 int status = QLA_SUCCESS;
1248
1249 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1250 memset(&mbox_sts, 0, sizeof(mbox_sts));
1251
1252 mbox_cmd[0] = MBOX_CMD_WRITE_FLASH;
1253 mbox_cmd[1] = LSDW(dma_addr);
1254 mbox_cmd[2] = MSDW(dma_addr);
1255 mbox_cmd[3] = offset;
1256 mbox_cmd[4] = length;
1257 mbox_cmd[5] = options;
1258
1259 status = qla4xxx_mailbox_command(ha, 6, 2, &mbox_cmd[0], &mbox_sts[0]);
1260 if (status != QLA_SUCCESS) {
1261 DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_WRITE_FLASH "
1262 "failed w/ status %04X, mbx1 %04X\n",
1263 __func__, mbox_sts[0], mbox_sts[1]));
1264 }
1265 return status;
1266}
1267
1268int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
1269 struct dev_db_entry *fw_ddb_entry,
1270 dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index)
1271{
1272 uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO;
1273 uint32_t dev_db_end_offset;
1274 int status = QLA_ERROR;
1275
1276 memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));
1277
1278 dev_db_start_offset += (ddb_index * sizeof(*fw_ddb_entry));
1279 dev_db_end_offset = FLASH_OFFSET_DB_END;
1280
1281 if (dev_db_start_offset > dev_db_end_offset) {
1282 DEBUG2(ql4_printk(KERN_ERR, ha,
1283 "%s:Invalid DDB index %d", __func__,
1284 ddb_index));
1285 goto exit_bootdb_failed;
1286 }
1287
1288 if (qla4xxx_get_flash(ha, fw_ddb_entry_dma, dev_db_start_offset,
1289 sizeof(*fw_ddb_entry)) != QLA_SUCCESS) {
 1290 ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash "
 1291 "failed\n", ha->host_no, __func__);
1292 goto exit_bootdb_failed;
1293 }
1294
1295 if (fw_ddb_entry->cookie == DDB_VALID_COOKIE)
1296 status = QLA_SUCCESS;
1297
1298exit_bootdb_failed:
1299 return status;
1300}
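
The flash layout assumed here is a fixed-stride array: entry n starts at
FLASH_OFFSET_DB_INFO + n * sizeof(struct dev_db_entry), bounded by
FLASH_OFFSET_DB_END. A standalone sketch of the arithmetic, with invented
constant values (the real ones live in the driver headers):

#include <stdio.h>
#include <stdint.h>

#define FLASH_OFFSET_DB_INFO	0x14000	/* illustrative */
#define FLASH_OFFSET_DB_END	0x18000	/* illustrative */
#define DEV_DB_ENTRY_SIZE	0x300	/* stand-in for sizeof(struct dev_db_entry) */

int main(void)
{
	uint16_t ddb_index = 4;
	uint32_t start = FLASH_OFFSET_DB_INFO + ddb_index * DEV_DB_ENTRY_SIZE;

	if (start > FLASH_OFFSET_DB_END)
		printf("index %u is out of range\n", ddb_index);
	else
		printf("DDB %u starts at flash offset 0x%x\n", ddb_index, start);
	return 0;
}
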
1301
1302int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password,
1303 uint16_t idx)
1304{
1305 int ret = 0;
1306 int rval = QLA_ERROR;
1307 uint32_t offset = 0, chap_size;
1308 struct ql4_chap_table *chap_table;
1309 dma_addr_t chap_dma;
1310
1311 chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
1312 if (chap_table == NULL) {
1313 ret = -ENOMEM;
1314 goto exit_get_chap;
1315 }
1316
1317 chap_size = sizeof(struct ql4_chap_table);
1318 memset(chap_table, 0, chap_size);
1319
1320 if (is_qla40XX(ha))
1321 offset = FLASH_CHAP_OFFSET | (idx * chap_size);
1322 else {
1323 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
 1324 /* flt_chap_size is the CHAP table size for both ports, so
 1325 * divide it by 2 to get the second port's starting offset.
 1326 */
1327 if (ha->port_num == 1)
1328 offset += (ha->hw.flt_chap_size / 2);
1329 offset += (idx * chap_size);
1330 }
1331
1332 rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
1333 if (rval != QLA_SUCCESS) {
1334 ret = -EINVAL;
1335 goto exit_get_chap;
1336 }
1337
 1338 DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: 0x%x\n",
1339 __le16_to_cpu(chap_table->cookie)));
1340
1341 if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) {
1342 ql4_printk(KERN_ERR, ha, "No valid chap entry found\n");
1343 goto exit_get_chap;
1344 }
1345
1346 strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
1347 strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
1348 chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);
1349
1350exit_get_chap:
1351 dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
1352 return ret;
1353}
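
The per-port offset logic above is worth spelling out: on ISP8022 parts
the FLT CHAP region holds both ports' tables back to back, so port 1
starts half-way into the region, and each entry is one table-entry stride
further in. A standalone sketch with invented constants:

#include <stdio.h>
#include <stdint.h>

#define FLASH_RAW_ACCESS_ADDR	0x8000000	/* illustrative */
#define CHAP_ENTRY_SIZE		96	/* stand-in for sizeof(struct ql4_chap_table) */

static uint32_t chap_offset(uint32_t flt_region_chap, uint32_t flt_chap_size,
			    int port_num, uint16_t idx)
{
	uint32_t offset = FLASH_RAW_ACCESS_ADDR + (flt_region_chap << 2);

	if (port_num == 1)	/* second port uses the upper half */
		offset += flt_chap_size / 2;
	return offset + idx * CHAP_ENTRY_SIZE;
}

int main(void)
{
	printf("port 0, idx 3 -> 0x%x\n", chap_offset(0x1000, 0x6000, 0, 3));
	printf("port 1, idx 3 -> 0x%x\n", chap_offset(0x1000, 0x6000, 1, 3));
	return 0;
}
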
1354
1355static int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username,
1356 char *password, uint16_t idx, int bidi)
1357{
1358 int ret = 0;
1359 int rval = QLA_ERROR;
1360 uint32_t offset = 0;
1361 struct ql4_chap_table *chap_table;
1362 dma_addr_t chap_dma;
1363
1364 chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
1365 if (chap_table == NULL) {
1366 ret = -ENOMEM;
1367 goto exit_set_chap;
1368 }
1369
1370 memset(chap_table, 0, sizeof(struct ql4_chap_table));
1371 if (bidi)
1372 chap_table->flags |= BIT_6; /* peer */
1373 else
1374 chap_table->flags |= BIT_7; /* local */
1375 chap_table->secret_len = strlen(password);
1376 strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN);
1377 strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN);
1378 chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);
1379 offset = FLASH_CHAP_OFFSET | (idx * sizeof(struct ql4_chap_table));
1380 rval = qla4xxx_set_flash(ha, chap_dma, offset,
1381 sizeof(struct ql4_chap_table),
1382 FLASH_OPT_RMW_COMMIT);
1383
1384 if (rval == QLA_SUCCESS && ha->chap_list) {
1385 /* Update ha chap_list cache */
1386 memcpy((struct ql4_chap_table *)ha->chap_list + idx,
1387 chap_table, sizeof(struct ql4_chap_table));
1388 }
1389 dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma);
1390 if (rval != QLA_SUCCESS)
1391 ret = -EINVAL;
1392
1393exit_set_chap:
1394 return ret;
1395}
1396
1397/**
1398 * qla4xxx_get_chap_index - Get chap index given username and secret
1399 * @ha: pointer to adapter structure
1400 * @username: CHAP username to be searched
1401 * @password: CHAP password to be searched
1402 * @bidi: Is this a BIDI CHAP
1403 * @chap_index: CHAP index to be returned
1404 *
1405 * Match the username and password in the chap_list, return the index if a
1406 * match is found. If a match is not found then add the entry in FLASH and
1407 * return the index at which entry is written in the FLASH.
1408 **/
1409static int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username,
1410 char *password, int bidi, uint16_t *chap_index)
1411{
1412 int i, rval;
1413 int free_index = -1;
1414 int found_index = 0;
1415 int max_chap_entries = 0;
1416 struct ql4_chap_table *chap_table;
1417
1418 if (is_qla8022(ha))
1419 max_chap_entries = (ha->hw.flt_chap_size / 2) /
1420 sizeof(struct ql4_chap_table);
1421 else
1422 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
1423
1424 if (!ha->chap_list) {
1425 ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
1426 return QLA_ERROR;
1427 }
1428
1429 mutex_lock(&ha->chap_sem);
1430 for (i = 0; i < max_chap_entries; i++) {
1431 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
1432 if (chap_table->cookie !=
1433 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
1434 if (i > MAX_RESRV_CHAP_IDX && free_index == -1)
1435 free_index = i;
1436 continue;
1437 }
1438 if (bidi) {
1439 if (chap_table->flags & BIT_7)
1440 continue;
1144 } else { 1441 } else {
1145 DEBUG2(printk("scsi%ld: %s: failed status %04X\n", 1442 if (chap_table->flags & BIT_6)
1146 ha->host_no, __func__, mbox_sts[0])); 1443 continue;
1147 return QLA_ERROR; 1444 }
1445 if (!strncmp(chap_table->secret, password,
1446 MAX_CHAP_SECRET_LEN) &&
1447 !strncmp(chap_table->name, username,
1448 MAX_CHAP_NAME_LEN)) {
1449 *chap_index = i;
1450 found_index = 1;
1451 break;
1148 } 1452 }
1149 } else {
1150 *ddb_index = MAX_PRST_DEV_DB_ENTRIES;
1151 } 1453 }
1152 1454
 1153 return QLA_SUCCESS; 1455 /* If the chap entry is not present and a free index is
 1456 * available, write the entry to flash.
 1457 */
1458 if (!found_index && free_index != -1) {
1459 rval = qla4xxx_set_chap(ha, username, password,
1460 free_index, bidi);
1461 if (!rval) {
1462 *chap_index = free_index;
1463 found_index = 1;
1464 }
1465 }
1466
1467 mutex_unlock(&ha->chap_sem);
1468
1469 if (found_index)
1470 return QLA_SUCCESS;
1471 return QLA_ERROR;
1154} 1472}
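
The lookup is consumed later in this patch by qla4xxx_set_param_ddbentry();
the calling pattern is simply:

	uint16_t idx;

	if (qla4xxx_get_chap_index(ha, sess->username, sess->password,
				   LOCAL_CHAP, &idx) == QLA_SUCCESS)
		fw_ddb_entry->chap_tbl_idx = cpu_to_le16(idx);
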
1155 1473
1474int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha,
1475 uint16_t fw_ddb_index,
1476 uint16_t connection_id,
1477 uint16_t option)
1478{
1479 uint32_t mbox_cmd[MBOX_REG_COUNT];
1480 uint32_t mbox_sts[MBOX_REG_COUNT];
1481 int status = QLA_SUCCESS;
1482
1483 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1484 memset(&mbox_sts, 0, sizeof(mbox_sts));
1485
1486 mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT;
1487 mbox_cmd[1] = fw_ddb_index;
1488 mbox_cmd[2] = connection_id;
1489 mbox_cmd[3] = option;
1156 1490
1157int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port) 1491 status = qla4xxx_mailbox_command(ha, 4, 2, &mbox_cmd[0], &mbox_sts[0]);
1492 if (status != QLA_SUCCESS) {
1493 DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_CONN_CLOSE "
1494 "option %04x failed w/ status %04X %04X\n",
1495 __func__, option, mbox_sts[0], mbox_sts[1]));
1496 }
1497 return status;
1498}
1499
1500int qla4xxx_disable_acb(struct scsi_qla_host *ha)
1158{ 1501{
1159 struct dev_db_entry *fw_ddb_entry; 1502 uint32_t mbox_cmd[MBOX_REG_COUNT];
1160 dma_addr_t fw_ddb_entry_dma; 1503 uint32_t mbox_sts[MBOX_REG_COUNT];
1161 uint32_t ddb_index; 1504 int status = QLA_SUCCESS;
1162 int ret_val = QLA_SUCCESS; 1505
1506 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1507 memset(&mbox_sts, 0, sizeof(mbox_sts));
1508
1509 mbox_cmd[0] = MBOX_CMD_DISABLE_ACB;
1163 1510
1511 status = qla4xxx_mailbox_command(ha, 8, 5, &mbox_cmd[0], &mbox_sts[0]);
1512 if (status != QLA_SUCCESS) {
1513 DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_DISABLE_ACB "
1514 "failed w/ status %04X %04X %04X", __func__,
1515 mbox_sts[0], mbox_sts[1], mbox_sts[2]));
1516 }
1517 return status;
1518}
1519
1520int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma,
1521 uint32_t acb_type, uint32_t len)
1522{
1523 uint32_t mbox_cmd[MBOX_REG_COUNT];
1524 uint32_t mbox_sts[MBOX_REG_COUNT];
1525 int status = QLA_SUCCESS;
1164 1526
1165 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, 1527 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1166 sizeof(*fw_ddb_entry), 1528 memset(&mbox_sts, 0, sizeof(mbox_sts));
1529
1530 mbox_cmd[0] = MBOX_CMD_GET_ACB;
1531 mbox_cmd[1] = acb_type;
1532 mbox_cmd[2] = LSDW(acb_dma);
1533 mbox_cmd[3] = MSDW(acb_dma);
1534 mbox_cmd[4] = len;
1535
1536 status = qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]);
1537 if (status != QLA_SUCCESS) {
1538 DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_GET_ACB "
1539 "failed w/ status %04X\n", __func__,
1540 mbox_sts[0]));
1541 }
1542 return status;
1543}
1544
1545int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
1546 uint32_t *mbox_sts, dma_addr_t acb_dma)
1547{
1548 int status = QLA_SUCCESS;
1549
1550 memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
1551 memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
1552 mbox_cmd[0] = MBOX_CMD_SET_ACB;
1553 mbox_cmd[1] = 0; /* Primary ACB */
1554 mbox_cmd[2] = LSDW(acb_dma);
1555 mbox_cmd[3] = MSDW(acb_dma);
1556 mbox_cmd[4] = sizeof(struct addr_ctrl_blk);
1557
1558 status = qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]);
1559 if (status != QLA_SUCCESS) {
1560 DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_SET_ACB "
1561 "failed w/ status %04X\n", __func__,
1562 mbox_sts[0]));
1563 }
1564 return status;
1565}
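
Taken together, the ACB helpers support a read-modify-write
reconfiguration cycle; the iface_set_param path added later in this patch
works along these lines (sketch only, error handling trimmed):

	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	if (qla4xxx_get_acb(ha, acb_dma, 0 /* primary ACB */,
			    sizeof(struct addr_ctrl_blk)) != QLA_SUCCESS)
		return;

	/* ... edit the addr_ctrl_blk in the DMA buffer ... */

	if (qla4xxx_disable_acb(ha) == QLA_SUCCESS)
		qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
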
1566
1567int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
1568 struct ddb_entry *ddb_entry,
1569 struct iscsi_cls_conn *cls_conn,
1570 uint32_t *mbx_sts)
1571{
1572 struct dev_db_entry *fw_ddb_entry;
1573 struct iscsi_conn *conn;
1574 struct iscsi_session *sess;
1575 struct qla_conn *qla_conn;
1576 struct sockaddr *dst_addr;
1577 dma_addr_t fw_ddb_entry_dma;
1578 int status = QLA_SUCCESS;
1579 int rval = 0;
1580 struct sockaddr_in *addr;
1581 struct sockaddr_in6 *addr6;
1582 char *ip;
1583 uint16_t iscsi_opts = 0;
1584 uint32_t options = 0;
1585 uint16_t idx;
1586
1587 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1167 &fw_ddb_entry_dma, GFP_KERNEL); 1588 &fw_ddb_entry_dma, GFP_KERNEL);
1168 if (!fw_ddb_entry) { 1589 if (!fw_ddb_entry) {
1169 DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", 1590 DEBUG2(ql4_printk(KERN_ERR, ha,
1170 ha->host_no, __func__)); 1591 "%s: Unable to allocate dma buffer.\n",
1171 ret_val = QLA_ERROR; 1592 __func__));
1172 goto exit_send_tgts_no_free; 1593 rval = -ENOMEM;
1594 goto exit_set_param_no_free;
1173 } 1595 }
1174 1596
1175 ret_val = qla4xxx_get_default_ddb(ha, fw_ddb_entry_dma); 1597 conn = cls_conn->dd_data;
1176 if (ret_val != QLA_SUCCESS) 1598 qla_conn = conn->dd_data;
1177 goto exit_send_tgts; 1599 sess = conn->session;
1600 dst_addr = &qla_conn->qla_ep->dst_addr;
1178 1601
1179 ret_val = qla4xxx_req_ddb_entry(ha, &ddb_index); 1602 if (dst_addr->sa_family == AF_INET6)
1180 if (ret_val != QLA_SUCCESS) 1603 options |= IPV6_DEFAULT_DDB_ENTRY;
1181 goto exit_send_tgts;
1182 1604
1183 memset(fw_ddb_entry->iscsi_alias, 0, 1605 status = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma);
1184 sizeof(fw_ddb_entry->iscsi_alias)); 1606 if (status == QLA_ERROR) {
1607 rval = -EINVAL;
1608 goto exit_set_param;
1609 }
1185 1610
1186 memset(fw_ddb_entry->iscsi_name, 0, 1611 iscsi_opts = le16_to_cpu(fw_ddb_entry->iscsi_options);
1187 sizeof(fw_ddb_entry->iscsi_name)); 1612 memset(fw_ddb_entry->iscsi_alias, 0, sizeof(fw_ddb_entry->iscsi_alias));
1613
1614 memset(fw_ddb_entry->iscsi_name, 0, sizeof(fw_ddb_entry->iscsi_name));
1615
1616 if (sess->targetname != NULL) {
1617 memcpy(fw_ddb_entry->iscsi_name, sess->targetname,
1618 min(strlen(sess->targetname),
1619 sizeof(fw_ddb_entry->iscsi_name)));
1620 }
1188 1621
1189 memset(fw_ddb_entry->ip_addr, 0, sizeof(fw_ddb_entry->ip_addr)); 1622 memset(fw_ddb_entry->ip_addr, 0, sizeof(fw_ddb_entry->ip_addr));
1190 memset(fw_ddb_entry->tgt_addr, 0, 1623 memset(fw_ddb_entry->tgt_addr, 0, sizeof(fw_ddb_entry->tgt_addr));
1191 sizeof(fw_ddb_entry->tgt_addr)); 1624
1625 fw_ddb_entry->options = DDB_OPT_TARGET | DDB_OPT_AUTO_SENDTGTS_DISABLE;
1626
1627 if (dst_addr->sa_family == AF_INET) {
1628 addr = (struct sockaddr_in *)dst_addr;
1629 ip = (char *)&addr->sin_addr;
1630 memcpy(fw_ddb_entry->ip_addr, ip, IP_ADDR_LEN);
1631 fw_ddb_entry->port = cpu_to_le16(ntohs(addr->sin_port));
1632 DEBUG2(ql4_printk(KERN_INFO, ha,
1633 "%s: Destination Address [%pI4]: index [%d]\n",
1634 __func__, fw_ddb_entry->ip_addr,
1635 ddb_entry->fw_ddb_index));
1636 } else if (dst_addr->sa_family == AF_INET6) {
1637 addr6 = (struct sockaddr_in6 *)dst_addr;
1638 ip = (char *)&addr6->sin6_addr;
1639 memcpy(fw_ddb_entry->ip_addr, ip, IPv6_ADDR_LEN);
1640 fw_ddb_entry->port = cpu_to_le16(ntohs(addr6->sin6_port));
1641 fw_ddb_entry->options |= DDB_OPT_IPV6_DEVICE;
1642 DEBUG2(ql4_printk(KERN_INFO, ha,
1643 "%s: Destination Address [%pI6]: index [%d]\n",
1644 __func__, fw_ddb_entry->ip_addr,
1645 ddb_entry->fw_ddb_index));
1646 } else {
1647 ql4_printk(KERN_ERR, ha,
1648 "%s: Failed to get IP Address\n",
1649 __func__);
1650 rval = -EINVAL;
1651 goto exit_set_param;
1652 }
1653
1654 /* CHAP */
1655 if (sess->username != NULL && sess->password != NULL) {
1656 if (strlen(sess->username) && strlen(sess->password)) {
1657 iscsi_opts |= BIT_7;
1658
1659 rval = qla4xxx_get_chap_index(ha, sess->username,
1660 sess->password,
1661 LOCAL_CHAP, &idx);
1662 if (rval)
1663 goto exit_set_param;
1664
1665 fw_ddb_entry->chap_tbl_idx = cpu_to_le16(idx);
1666 }
1667 }
1668
1669 if (sess->username_in != NULL && sess->password_in != NULL) {
1670 /* Check if BIDI CHAP */
1671 if (strlen(sess->username_in) && strlen(sess->password_in)) {
1672 iscsi_opts |= BIT_4;
1673
1674 rval = qla4xxx_get_chap_index(ha, sess->username_in,
1675 sess->password_in,
1676 BIDI_CHAP, &idx);
1677 if (rval)
1678 goto exit_set_param;
1679 }
1680 }
1681
1682 if (sess->initial_r2t_en)
1683 iscsi_opts |= BIT_10;
1684
1685 if (sess->imm_data_en)
1686 iscsi_opts |= BIT_11;
1687
1688 fw_ddb_entry->iscsi_options = cpu_to_le16(iscsi_opts);
1689
1690 if (conn->max_recv_dlength)
1691 fw_ddb_entry->iscsi_max_rcv_data_seg_len =
1692 __constant_cpu_to_le16((conn->max_recv_dlength / BYTE_UNITS));
1192 1693
1193 fw_ddb_entry->options = (DDB_OPT_DISC_SESSION | DDB_OPT_TARGET); 1694 if (sess->max_r2t)
1194 fw_ddb_entry->port = cpu_to_le16(ntohs(port)); 1695 fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t);
1195 1696
1196 fw_ddb_entry->ip_addr[0] = *ip; 1697 if (sess->first_burst)
1197 fw_ddb_entry->ip_addr[1] = *(ip + 1); 1698 fw_ddb_entry->iscsi_first_burst_len =
1198 fw_ddb_entry->ip_addr[2] = *(ip + 2); 1699 __constant_cpu_to_le16((sess->first_burst / BYTE_UNITS));
1199 fw_ddb_entry->ip_addr[3] = *(ip + 3);
1200 1700
1201 ret_val = qla4xxx_set_ddb_entry(ha, ddb_index, fw_ddb_entry_dma); 1701 if (sess->max_burst)
1702 fw_ddb_entry->iscsi_max_burst_len =
1703 __constant_cpu_to_le16((sess->max_burst / BYTE_UNITS));
1202 1704
1203exit_send_tgts: 1705 if (sess->time2wait)
1706 fw_ddb_entry->iscsi_def_time2wait =
1707 cpu_to_le16(sess->time2wait);
1708
1709 if (sess->time2retain)
1710 fw_ddb_entry->iscsi_def_time2retain =
1711 cpu_to_le16(sess->time2retain);
1712
1713 status = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index,
1714 fw_ddb_entry_dma, mbx_sts);
1715
1716 if (status != QLA_SUCCESS)
1717 rval = -EINVAL;
1718exit_set_param:
1204 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), 1719 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1205 fw_ddb_entry, fw_ddb_entry_dma); 1720 fw_ddb_entry, fw_ddb_entry_dma);
1206exit_send_tgts_no_free: 1721exit_set_param_no_free:
1207 return ret_val; 1722 return rval;
1723}
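
The iscsi_options packing above is plain bit manipulation on the
firmware's 16-bit option word. A standalone sketch with stand-in macro
names (the driver uses raw BIT_n constants: BIT_7 for CHAP auth, BIT_4
for bidirectional CHAP, BIT_10 for initial R2T, BIT_11 for immediate
data):

#include <stdio.h>
#include <stdint.h>

#define OPT_BIDI_CHAP	(1 << 4)
#define OPT_CHAP_AUTH	(1 << 7)
#define OPT_INITIAL_R2T	(1 << 10)
#define OPT_IMM_DATA	(1 << 11)

int main(void)
{
	uint16_t iscsi_opts = 0;
	int initial_r2t_en = 1, imm_data_en = 1;

	if (initial_r2t_en)
		iscsi_opts |= OPT_INITIAL_R2T;
	if (imm_data_en)
		iscsi_opts |= OPT_IMM_DATA;

	printf("iscsi_options = 0x%04x\n", iscsi_opts);	/* 0x0c00 */
	return 0;
}
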
1724
1725int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
1726 uint16_t stats_size, dma_addr_t stats_dma)
1727{
1728 int status = QLA_SUCCESS;
1729 uint32_t mbox_cmd[MBOX_REG_COUNT];
1730 uint32_t mbox_sts[MBOX_REG_COUNT];
1731
1732 memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
1733 memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
1734 mbox_cmd[0] = MBOX_CMD_GET_MANAGEMENT_DATA;
1735 mbox_cmd[1] = fw_ddb_index;
1736 mbox_cmd[2] = LSDW(stats_dma);
1737 mbox_cmd[3] = MSDW(stats_dma);
1738 mbox_cmd[4] = stats_size;
1739
1740 status = qla4xxx_mailbox_command(ha, 5, 1, &mbox_cmd[0], &mbox_sts[0]);
1741 if (status != QLA_SUCCESS) {
1742 DEBUG2(ql4_printk(KERN_WARNING, ha,
1743 "%s: MBOX_CMD_GET_MANAGEMENT_DATA "
1744 "failed w/ status %04X\n", __func__,
1745 mbox_sts[0]));
1746 }
1747 return status;
1208} 1748}
1209 1749
1750int qla4xxx_get_ip_state(struct scsi_qla_host *ha, uint32_t acb_idx,
1751 uint32_t ip_idx, uint32_t *sts)
1752{
1753 uint32_t mbox_cmd[MBOX_REG_COUNT];
1754 uint32_t mbox_sts[MBOX_REG_COUNT];
1755 int status = QLA_SUCCESS;
1756
1757 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1758 memset(&mbox_sts, 0, sizeof(mbox_sts));
1759 mbox_cmd[0] = MBOX_CMD_GET_IP_ADDR_STATE;
1760 mbox_cmd[1] = acb_idx;
1761 mbox_cmd[2] = ip_idx;
1762
1763 status = qla4xxx_mailbox_command(ha, 3, 8, &mbox_cmd[0], &mbox_sts[0]);
1764 if (status != QLA_SUCCESS) {
1765 DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: "
1766 "MBOX_CMD_GET_IP_ADDR_STATE failed w/ "
1767 "status %04X\n", __func__, mbox_sts[0]));
1768 }
1769 memcpy(sts, mbox_sts, sizeof(mbox_sts));
1770 return status;
1771}
1772
1773int qla4xxx_get_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
1774 uint32_t offset, uint32_t size)
1775{
1776 int status = QLA_SUCCESS;
1777 uint32_t mbox_cmd[MBOX_REG_COUNT];
1778 uint32_t mbox_sts[MBOX_REG_COUNT];
1779
1780 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1781 memset(&mbox_sts, 0, sizeof(mbox_sts));
1782
1783 mbox_cmd[0] = MBOX_CMD_GET_NVRAM;
1784 mbox_cmd[1] = LSDW(nvram_dma);
1785 mbox_cmd[2] = MSDW(nvram_dma);
1786 mbox_cmd[3] = offset;
1787 mbox_cmd[4] = size;
1788
1789 status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
1790 &mbox_sts[0]);
1791 if (status != QLA_SUCCESS) {
1792 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
1793 "status %04X\n", ha->host_no, __func__,
1794 mbox_sts[0]));
1795 }
1796 return status;
1797}
1798
1799int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
1800 uint32_t offset, uint32_t size)
1801{
1802 int status = QLA_SUCCESS;
1803 uint32_t mbox_cmd[MBOX_REG_COUNT];
1804 uint32_t mbox_sts[MBOX_REG_COUNT];
1805
1806 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1807 memset(&mbox_sts, 0, sizeof(mbox_sts));
1808
1809 mbox_cmd[0] = MBOX_CMD_SET_NVRAM;
1810 mbox_cmd[1] = LSDW(nvram_dma);
1811 mbox_cmd[2] = MSDW(nvram_dma);
1812 mbox_cmd[3] = offset;
1813 mbox_cmd[4] = size;
1814
1815 status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
1816 &mbox_sts[0]);
1817 if (status != QLA_SUCCESS) {
1818 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
1819 "status %04X\n", ha->host_no, __func__,
1820 mbox_sts[0]));
1821 }
1822 return status;
1823}
1824
1825int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
1826 uint32_t region, uint32_t field0,
1827 uint32_t field1)
1828{
1829 int status = QLA_SUCCESS;
1830 uint32_t mbox_cmd[MBOX_REG_COUNT];
1831 uint32_t mbox_sts[MBOX_REG_COUNT];
1832
1833 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
1834 memset(&mbox_sts, 0, sizeof(mbox_sts));
1835
1836 mbox_cmd[0] = MBOX_CMD_RESTORE_FACTORY_DEFAULTS;
1837 mbox_cmd[3] = region;
1838 mbox_cmd[4] = field0;
1839 mbox_cmd[5] = field1;
1840
1841 status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0],
1842 &mbox_sts[0]);
1843 if (status != QLA_SUCCESS) {
1844 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
1845 "status %04X\n", ha->host_no, __func__,
1846 mbox_sts[0]));
1847 }
1848 return status;
1849}
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c
index b4b859b2d47..7851f314ba9 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.c
+++ b/drivers/scsi/qla4xxx/ql4_nvram.c
@@ -156,6 +156,27 @@ u16 rd_nvram_word(struct scsi_qla_host * ha, int offset)
156 return val; 156 return val;
157} 157}
158 158
159u8 rd_nvram_byte(struct scsi_qla_host *ha, int offset)
160{
161 u16 val = 0;
162 u8 rval = 0;
163 int index = 0;
164
165 if (offset & 0x1)
166 index = (offset - 1) / 2;
167 else
168 index = offset / 2;
169
170 val = le16_to_cpu(rd_nvram_word(ha, index));
171
172 if (offset & 0x1)
173 rval = (u8)((val & 0xff00) >> 8);
174 else
175 rval = (u8)((val & 0x00ff));
176
177 return rval;
178}
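
rd_nvram_byte() maps a byte offset onto the 16-bit word array: the word
index is offset/2, even offsets take the low byte, odd offsets the high
byte. A standalone demonstration of the extraction:

#include <stdio.h>
#include <stdint.h>

static uint8_t byte_from_word(uint16_t word, int offset)
{
	/* even offset -> low byte, odd offset -> high byte */
	return (offset & 0x1) ? (uint8_t)(word >> 8) : (uint8_t)(word & 0xff);
}

int main(void)
{
	uint16_t word = 0xbeef;	/* the word at NVRAM index offset/2 */

	printf("offset 6 -> 0x%02x\n", byte_from_word(word, 6));	/* 0xef */
	printf("offset 7 -> 0x%02x\n", byte_from_word(word, 7));	/* 0xbe */
	return 0;
}
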
179
159int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host * ha) 180int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host * ha)
160{ 181{
161 int status = QLA_ERROR; 182 int status = QLA_ERROR;
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index fdfe27b3869..f484ff43819 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -2015,11 +2015,19 @@ qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr)
2015 hw->flt_region_boot = start; 2015 hw->flt_region_boot = start;
2016 break; 2016 break;
2017 case FLT_REG_FW_82: 2017 case FLT_REG_FW_82:
2018 case FLT_REG_FW_82_1:
2018 hw->flt_region_fw = start; 2019 hw->flt_region_fw = start;
2019 break; 2020 break;
2020 case FLT_REG_BOOTLOAD_82: 2021 case FLT_REG_BOOTLOAD_82:
2021 hw->flt_region_bootload = start; 2022 hw->flt_region_bootload = start;
2022 break; 2023 break;
2024 case FLT_REG_ISCSI_PARAM:
2025 hw->flt_iscsi_param = start;
2026 break;
2027 case FLT_REG_ISCSI_CHAP:
2028 hw->flt_region_chap = start;
2029 hw->flt_chap_size = le32_to_cpu(region->size);
2030 break;
2023 } 2031 }
2024 } 2032 }
2025 goto done; 2033 goto done;
@@ -2032,6 +2040,9 @@ no_flash_data:
2032 hw->flt_region_boot = FA_BOOT_CODE_ADDR_82; 2040 hw->flt_region_boot = FA_BOOT_CODE_ADDR_82;
2033 hw->flt_region_bootload = FA_BOOT_LOAD_ADDR_82; 2041 hw->flt_region_bootload = FA_BOOT_LOAD_ADDR_82;
2034 hw->flt_region_fw = FA_RISC_CODE_ADDR_82; 2042 hw->flt_region_fw = FA_RISC_CODE_ADDR_82;
2043 hw->flt_region_chap = FA_FLASH_ISCSI_CHAP;
2044 hw->flt_chap_size = FA_FLASH_CHAP_SIZE;
2045
2035done: 2046done:
2036 DEBUG2(ql4_printk(KERN_INFO, ha, "FLT[%s]: flt=0x%x fdt=0x%x " 2047 DEBUG2(ql4_printk(KERN_INFO, ha, "FLT[%s]: flt=0x%x fdt=0x%x "
2037 "boot=0x%x bootload=0x%x fw=0x%x\n", loc, hw->flt_region_flt, 2048 "boot=0x%x bootload=0x%x fw=0x%x\n", loc, hw->flt_region_flt,
@@ -2258,10 +2269,16 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
2258 } 2269 }
2259 2270
2260 /* Save M.A.C. address & serial_number */ 2271 /* Save M.A.C. address & serial_number */
2272 ha->port_num = sys_info->port_num;
2261 memcpy(ha->my_mac, &sys_info->mac_addr[0], 2273 memcpy(ha->my_mac, &sys_info->mac_addr[0],
2262 min(sizeof(ha->my_mac), sizeof(sys_info->mac_addr))); 2274 min(sizeof(ha->my_mac), sizeof(sys_info->mac_addr)));
2263 memcpy(ha->serial_number, &sys_info->serial_number, 2275 memcpy(ha->serial_number, &sys_info->serial_number,
2264 min(sizeof(ha->serial_number), sizeof(sys_info->serial_number))); 2276 min(sizeof(ha->serial_number), sizeof(sys_info->serial_number)));
2277 memcpy(ha->model_name, &sys_info->board_id_str,
2278 min(sizeof(ha->model_name), sizeof(sys_info->board_id_str)));
2279 ha->phy_port_cnt = sys_info->phys_port_cnt;
2280 ha->phy_port_num = sys_info->port_num;
2281 ha->iscsi_pci_func_cnt = sys_info->iscsi_pci_func_cnt;
2265 2282
2266 DEBUG2(printk("scsi%ld: %s: " 2283 DEBUG2(printk("scsi%ld: %s: "
2267 "mac %02x:%02x:%02x:%02x:%02x:%02x " 2284 "mac %02x:%02x:%02x:%02x:%02x:%02x "
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index f2364ec59f0..30f31b127f3 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -6,6 +6,8 @@
6 */ 6 */
7#include <linux/moduleparam.h> 7#include <linux/moduleparam.h>
8#include <linux/slab.h> 8#include <linux/slab.h>
9#include <linux/blkdev.h>
10#include <linux/iscsi_boot_sysfs.h>
9 11
10#include <scsi/scsi_tcq.h> 12#include <scsi/scsi_tcq.h>
11#include <scsi/scsicam.h> 13#include <scsi/scsicam.h>
@@ -63,6 +65,7 @@ MODULE_PARM_DESC(ql4xsess_recovery_tmo,
63 "Target Session Recovery Timeout.\n" 65 "Target Session Recovery Timeout.\n"
64 " Default: 30 sec."); 66 " Default: 30 sec.");
65 67
68static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
66/* 69/*
67 * SCSI host template entry points 70 * SCSI host template entry points
68 */ 71 */
@@ -71,18 +74,41 @@ static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha);
71/* 74/*
72 * iSCSI template entry points 75 * iSCSI template entry points
73 */ 76 */
74static int qla4xxx_tgt_dscvr(struct Scsi_Host *shost,
75 enum iscsi_tgt_dscvr type, uint32_t enable,
76 struct sockaddr *dst_addr);
77static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn, 77static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
78 enum iscsi_param param, char *buf); 78 enum iscsi_param param, char *buf);
79static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess,
80 enum iscsi_param param, char *buf);
81static int qla4xxx_host_get_param(struct Scsi_Host *shost, 79static int qla4xxx_host_get_param(struct Scsi_Host *shost,
82 enum iscsi_host_param param, char *buf); 80 enum iscsi_host_param param, char *buf);
83static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session); 81static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data,
82 uint32_t len);
83static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
84 enum iscsi_param_type param_type,
85 int param, char *buf);
84static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc); 86static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc);
85 87static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost,
88 struct sockaddr *dst_addr,
89 int non_blocking);
90static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms);
91static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep);
92static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
93 enum iscsi_param param, char *buf);
94static int qla4xxx_conn_start(struct iscsi_cls_conn *conn);
95static struct iscsi_cls_conn *
96qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx);
97static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
98 struct iscsi_cls_conn *cls_conn,
99 uint64_t transport_fd, int is_leading);
100static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn);
101static struct iscsi_cls_session *
102qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
103 uint16_t qdepth, uint32_t initial_cmdsn);
104static void qla4xxx_session_destroy(struct iscsi_cls_session *sess);
105static void qla4xxx_task_work(struct work_struct *wdata);
106static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t);
107static int qla4xxx_task_xmit(struct iscsi_task *);
108static void qla4xxx_task_cleanup(struct iscsi_task *);
109static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session);
110static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
111 struct iscsi_stats *stats);
86/* 112/*
87 * SCSI host template entry points 113 * SCSI host template entry points
88 */ 114 */
@@ -94,7 +120,8 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
94static int qla4xxx_slave_alloc(struct scsi_device *device); 120static int qla4xxx_slave_alloc(struct scsi_device *device);
95static int qla4xxx_slave_configure(struct scsi_device *device); 121static int qla4xxx_slave_configure(struct scsi_device *device);
96static void qla4xxx_slave_destroy(struct scsi_device *sdev); 122static void qla4xxx_slave_destroy(struct scsi_device *sdev);
97static void qla4xxx_scan_start(struct Scsi_Host *shost); 123static mode_t ql4_attr_is_visible(int param_type, int param);
124static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
98 125
99static struct qla4_8xxx_legacy_intr_set legacy_intr[] = 126static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
100 QLA82XX_LEGACY_INTR_CONFIG; 127 QLA82XX_LEGACY_INTR_CONFIG;
@@ -115,9 +142,6 @@ static struct scsi_host_template qla4xxx_driver_template = {
115 .slave_alloc = qla4xxx_slave_alloc, 142 .slave_alloc = qla4xxx_slave_alloc,
116 .slave_destroy = qla4xxx_slave_destroy, 143 .slave_destroy = qla4xxx_slave_destroy,
117 144
118 .scan_finished = iscsi_scan_finished,
119 .scan_start = qla4xxx_scan_start,
120
121 .this_id = -1, 145 .this_id = -1,
122 .cmd_per_lun = 3, 146 .cmd_per_lun = 3,
123 .use_clustering = ENABLE_CLUSTERING, 147 .use_clustering = ENABLE_CLUSTERING,
@@ -125,58 +149,396 @@ static struct scsi_host_template qla4xxx_driver_template = {
125 149
126 .max_sectors = 0xFFFF, 150 .max_sectors = 0xFFFF,
127 .shost_attrs = qla4xxx_host_attrs, 151 .shost_attrs = qla4xxx_host_attrs,
152 .host_reset = qla4xxx_host_reset,
153 .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC,
128}; 154};
129 155
130static struct iscsi_transport qla4xxx_iscsi_transport = { 156static struct iscsi_transport qla4xxx_iscsi_transport = {
131 .owner = THIS_MODULE, 157 .owner = THIS_MODULE,
132 .name = DRIVER_NAME, 158 .name = DRIVER_NAME,
133 .caps = CAP_FW_DB | CAP_SENDTARGETS_OFFLOAD | 159 .caps = CAP_TEXT_NEGO |
134 CAP_DATA_PATH_OFFLOAD, 160 CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST |
135 .param_mask = ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS | 161 CAP_DATADGST | CAP_LOGIN_OFFLOAD |
136 ISCSI_TARGET_NAME | ISCSI_TPGT | 162 CAP_MULTI_R2T,
137 ISCSI_TARGET_ALIAS, 163 .attr_is_visible = ql4_attr_is_visible,
138 .host_param_mask = ISCSI_HOST_HWADDRESS | 164 .create_session = qla4xxx_session_create,
139 ISCSI_HOST_IPADDRESS | 165 .destroy_session = qla4xxx_session_destroy,
140 ISCSI_HOST_INITIATOR_NAME, 166 .start_conn = qla4xxx_conn_start,
141 .tgt_dscvr = qla4xxx_tgt_dscvr, 167 .create_conn = qla4xxx_conn_create,
168 .bind_conn = qla4xxx_conn_bind,
169 .stop_conn = iscsi_conn_stop,
170 .destroy_conn = qla4xxx_conn_destroy,
171 .set_param = iscsi_set_param,
142 .get_conn_param = qla4xxx_conn_get_param, 172 .get_conn_param = qla4xxx_conn_get_param,
143 .get_session_param = qla4xxx_sess_get_param, 173 .get_session_param = iscsi_session_get_param,
174 .get_ep_param = qla4xxx_get_ep_param,
175 .ep_connect = qla4xxx_ep_connect,
176 .ep_poll = qla4xxx_ep_poll,
177 .ep_disconnect = qla4xxx_ep_disconnect,
178 .get_stats = qla4xxx_conn_get_stats,
179 .send_pdu = iscsi_conn_send_pdu,
180 .xmit_task = qla4xxx_task_xmit,
181 .cleanup_task = qla4xxx_task_cleanup,
182 .alloc_pdu = qla4xxx_alloc_pdu,
183
144 .get_host_param = qla4xxx_host_get_param, 184 .get_host_param = qla4xxx_host_get_param,
145 .session_recovery_timedout = qla4xxx_recovery_timedout, 185 .set_iface_param = qla4xxx_iface_set_param,
186 .get_iface_param = qla4xxx_get_iface_param,
187 .bsg_request = qla4xxx_bsg_request,
146}; 188};
147 189
148static struct scsi_transport_template *qla4xxx_scsi_transport; 190static struct scsi_transport_template *qla4xxx_scsi_transport;
149 191
150static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc) 192static mode_t ql4_attr_is_visible(int param_type, int param)
151{ 193{
152 struct iscsi_cls_session *session; 194 switch (param_type) {
153 struct ddb_entry *ddb_entry; 195 case ISCSI_HOST_PARAM:
196 switch (param) {
197 case ISCSI_HOST_PARAM_HWADDRESS:
198 case ISCSI_HOST_PARAM_IPADDRESS:
199 case ISCSI_HOST_PARAM_INITIATOR_NAME:
200 return S_IRUGO;
201 default:
202 return 0;
203 }
204 case ISCSI_PARAM:
205 switch (param) {
206 case ISCSI_PARAM_PERSISTENT_ADDRESS:
207 case ISCSI_PARAM_PERSISTENT_PORT:
208 case ISCSI_PARAM_CONN_ADDRESS:
209 case ISCSI_PARAM_CONN_PORT:
210 case ISCSI_PARAM_TARGET_NAME:
211 case ISCSI_PARAM_TPGT:
212 case ISCSI_PARAM_TARGET_ALIAS:
213 case ISCSI_PARAM_MAX_BURST:
214 case ISCSI_PARAM_MAX_R2T:
215 case ISCSI_PARAM_FIRST_BURST:
216 case ISCSI_PARAM_MAX_RECV_DLENGTH:
217 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
218 case ISCSI_PARAM_IFACE_NAME:
219 return S_IRUGO;
220 default:
221 return 0;
222 }
223 case ISCSI_NET_PARAM:
224 switch (param) {
225 case ISCSI_NET_PARAM_IPV4_ADDR:
226 case ISCSI_NET_PARAM_IPV4_SUBNET:
227 case ISCSI_NET_PARAM_IPV4_GW:
228 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
229 case ISCSI_NET_PARAM_IFACE_ENABLE:
230 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
231 case ISCSI_NET_PARAM_IPV6_ADDR:
232 case ISCSI_NET_PARAM_IPV6_ROUTER:
233 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
234 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
235 case ISCSI_NET_PARAM_VLAN_ID:
236 case ISCSI_NET_PARAM_VLAN_PRIORITY:
237 case ISCSI_NET_PARAM_VLAN_ENABLED:
238 case ISCSI_NET_PARAM_MTU:
239 case ISCSI_NET_PARAM_PORT:
240 return S_IRUGO;
241 default:
242 return 0;
243 }
244 }
154 245
155 session = starget_to_session(scsi_target(sc->device)); 246 return 0;
156 ddb_entry = session->dd_data; 247}
157 248
158 /* if we are not logged in then the LLD is going to clean up the cmd */ 249static int qla4xxx_get_iface_param(struct iscsi_iface *iface,
159 if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) 250 enum iscsi_param_type param_type,
160 return BLK_EH_RESET_TIMER; 251 int param, char *buf)
161 else 252{
162 return BLK_EH_NOT_HANDLED; 253 struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
254 struct scsi_qla_host *ha = to_qla_host(shost);
255 int len = -ENOSYS;
256
257 if (param_type != ISCSI_NET_PARAM)
258 return -ENOSYS;
259
260 switch (param) {
261 case ISCSI_NET_PARAM_IPV4_ADDR:
262 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
263 break;
264 case ISCSI_NET_PARAM_IPV4_SUBNET:
265 len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask);
266 break;
267 case ISCSI_NET_PARAM_IPV4_GW:
268 len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway);
269 break;
270 case ISCSI_NET_PARAM_IFACE_ENABLE:
271 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
272 len = sprintf(buf, "%s\n",
273 (ha->ip_config.ipv4_options &
274 IPOPT_IPV4_PROTOCOL_ENABLE) ?
275 "enabled" : "disabled");
276 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
277 len = sprintf(buf, "%s\n",
278 (ha->ip_config.ipv6_options &
279 IPV6_OPT_IPV6_PROTOCOL_ENABLE) ?
280 "enabled" : "disabled");
281 break;
282 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
283 len = sprintf(buf, "%s\n",
284 (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ?
285 "dhcp" : "static");
286 break;
287 case ISCSI_NET_PARAM_IPV6_ADDR:
288 if (iface->iface_num == 0)
289 len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0);
290 if (iface->iface_num == 1)
291 len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1);
292 break;
293 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
294 len = sprintf(buf, "%pI6\n",
295 &ha->ip_config.ipv6_link_local_addr);
296 break;
297 case ISCSI_NET_PARAM_IPV6_ROUTER:
298 len = sprintf(buf, "%pI6\n",
299 &ha->ip_config.ipv6_default_router_addr);
300 break;
301 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
302 len = sprintf(buf, "%s\n",
303 (ha->ip_config.ipv6_addl_options &
304 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ?
305 "nd" : "static");
306 break;
307 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
308 len = sprintf(buf, "%s\n",
309 (ha->ip_config.ipv6_addl_options &
310 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ?
311 "auto" : "static");
312 break;
313 case ISCSI_NET_PARAM_VLAN_ID:
314 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
315 len = sprintf(buf, "%d\n",
316 (ha->ip_config.ipv4_vlan_tag &
317 ISCSI_MAX_VLAN_ID));
318 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
319 len = sprintf(buf, "%d\n",
320 (ha->ip_config.ipv6_vlan_tag &
321 ISCSI_MAX_VLAN_ID));
322 break;
323 case ISCSI_NET_PARAM_VLAN_PRIORITY:
324 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
325 len = sprintf(buf, "%d\n",
326 ((ha->ip_config.ipv4_vlan_tag >> 13) &
327 ISCSI_MAX_VLAN_PRIORITY));
328 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
329 len = sprintf(buf, "%d\n",
330 ((ha->ip_config.ipv6_vlan_tag >> 13) &
331 ISCSI_MAX_VLAN_PRIORITY));
332 break;
333 case ISCSI_NET_PARAM_VLAN_ENABLED:
334 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
335 len = sprintf(buf, "%s\n",
336 (ha->ip_config.ipv4_options &
337 IPOPT_VLAN_TAGGING_ENABLE) ?
338 "enabled" : "disabled");
339 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
340 len = sprintf(buf, "%s\n",
341 (ha->ip_config.ipv6_options &
342 IPV6_OPT_VLAN_TAGGING_ENABLE) ?
343 "enabled" : "disabled");
344 break;
345 case ISCSI_NET_PARAM_MTU:
346 len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size);
347 break;
348 case ISCSI_NET_PARAM_PORT:
349 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
350 len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port);
351 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
352 len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port);
353 break;
354 default:
355 len = -ENOSYS;
356 }
357
358 return len;
163} 359}
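
The VLAN cases above split the stored 802.1Q tag with a mask and a 13-bit
shift: the VLAN ID sits in the low 12 bits, the priority in bits 15:13.
A standalone sketch (mask values assumed to match the ISCSI_MAX_*
definitions):

#include <stdio.h>
#include <stdint.h>

#define ISCSI_MAX_VLAN_ID	4095	/* assumed */
#define ISCSI_MAX_VLAN_PRIORITY	7	/* assumed */

int main(void)
{
	uint16_t vlan_tag = (5 << 13) | 100;	/* priority 5, VLAN ID 100 */

	printf("vlan id  : %d\n", vlan_tag & ISCSI_MAX_VLAN_ID);
	printf("priority : %d\n", (vlan_tag >> 13) & ISCSI_MAX_VLAN_PRIORITY);
	return 0;
}
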
164 360
165static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session) 361static struct iscsi_endpoint *
362qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
363 int non_blocking)
166{ 364{
167 struct ddb_entry *ddb_entry = session->dd_data; 365 int ret;
168 struct scsi_qla_host *ha = ddb_entry->ha; 366 struct iscsi_endpoint *ep;
367 struct qla_endpoint *qla_ep;
368 struct scsi_qla_host *ha;
369 struct sockaddr_in *addr;
370 struct sockaddr_in6 *addr6;
371
372 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
373 if (!shost) {
374 ret = -ENXIO;
375 printk(KERN_ERR "%s: shost is NULL\n",
376 __func__);
377 return ERR_PTR(ret);
378 }
379
380 ha = iscsi_host_priv(shost);
381
382 ep = iscsi_create_endpoint(sizeof(struct qla_endpoint));
383 if (!ep) {
384 ret = -ENOMEM;
385 return ERR_PTR(ret);
386 }
387
388 qla_ep = ep->dd_data;
389 memset(qla_ep, 0, sizeof(struct qla_endpoint));
390 if (dst_addr->sa_family == AF_INET) {
391 memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in));
392 addr = (struct sockaddr_in *)&qla_ep->dst_addr;
393 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__,
394 (char *)&addr->sin_addr));
395 } else if (dst_addr->sa_family == AF_INET6) {
396 memcpy(&qla_ep->dst_addr, dst_addr,
397 sizeof(struct sockaddr_in6));
398 addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr;
399 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__,
400 (char *)&addr6->sin6_addr));
401 }
402
403 qla_ep->host = shost;
404
405 return ep;
406}
407
408static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
409{
410 struct qla_endpoint *qla_ep;
411 struct scsi_qla_host *ha;
412 int ret = 0;
413
414 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
415 qla_ep = ep->dd_data;
416 ha = to_qla_host(qla_ep->host);
417
418 if (adapter_up(ha))
419 ret = 1;
420
421 return ret;
422}
423
424static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep)
425{
426 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
427 iscsi_destroy_endpoint(ep);
428}
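
These three callbacks give the iSCSI transport the endpoint lifecycle it
drives on behalf of userspace; roughly (sketch only, the transport
actually retries the poll from its own context):

	struct iscsi_endpoint *ep;

	ep = qla4xxx_ep_connect(shost, dst_addr, 0 /* blocking */);
	if (IS_ERR(ep))
		return PTR_ERR(ep);

	while (!qla4xxx_ep_poll(ep, 100 /* ms */))
		;	/* adapter not up yet */

	/* ... bind a connection to the endpoint, then log in ... */

	qla4xxx_ep_disconnect(ep);
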
429
430static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
431 enum iscsi_param param,
432 char *buf)
433{
434 struct qla_endpoint *qla_ep = ep->dd_data;
435 struct sockaddr *dst_addr;
169 436
170 if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) { 437 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
171 atomic_set(&ddb_entry->state, DDB_STATE_DEAD);
172 438
173 DEBUG2(printk("scsi%ld: %s: ddb [%d] session recovery timeout " 439 switch (param) {
174 "of (%d) secs exhausted, marking device DEAD.\n", 440 case ISCSI_PARAM_CONN_PORT:
175 ha->host_no, __func__, ddb_entry->fw_ddb_index, 441 case ISCSI_PARAM_CONN_ADDRESS:
176 ddb_entry->sess->recovery_tmo)); 442 if (!qla_ep)
443 return -ENOTCONN;
444
445 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
446 if (!dst_addr)
447 return -ENOTCONN;
448
449 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
450 &qla_ep->dst_addr, param, buf);
451 default:
452 return -ENOSYS;
177 } 453 }
178} 454}
179 455
456static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
457 struct iscsi_stats *stats)
458{
459 struct iscsi_session *sess;
460 struct iscsi_cls_session *cls_sess;
461 struct ddb_entry *ddb_entry;
462 struct scsi_qla_host *ha;
463 struct ql_iscsi_stats *ql_iscsi_stats;
464 int stats_size;
465 int ret;
466 dma_addr_t iscsi_stats_dma;
467
468 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
469
470 cls_sess = iscsi_conn_to_session(cls_conn);
471 sess = cls_sess->dd_data;
472 ddb_entry = sess->dd_data;
473 ha = ddb_entry->ha;
474
475 stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
476 /* Allocate memory */
477 ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
478 &iscsi_stats_dma, GFP_KERNEL);
479 if (!ql_iscsi_stats) {
480 ql4_printk(KERN_ERR, ha,
481 "Unable to allocate memory for iscsi stats\n");
482 goto exit_get_stats;
483 }
484
485 ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
486 iscsi_stats_dma);
487 if (ret != QLA_SUCCESS) {
488 ql4_printk(KERN_ERR, ha,
489 "Unable to retreive iscsi stats\n");
490 goto free_stats;
491 }
492
493 /* octets */
494 stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
495 stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
496 /* xmit pdus */
497 stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
498 stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
499 stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
500 stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
501 stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
502 stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
503 stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
504 stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
505 /* recv pdus */
506 stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
507 stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
508 stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
509 stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
510 stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
511 stats->logoutrsp_pdus =
512 le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
513 stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
514 stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
515 stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);
516
517free_stats:
518 dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
519 iscsi_stats_dma);
520exit_get_stats:
521 return;
522}
523
524static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
525{
526 struct iscsi_cls_session *session;
527 struct iscsi_session *sess;
528 unsigned long flags;
529 enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;
530
531 session = starget_to_session(scsi_target(sc->device));
532 sess = session->dd_data;
533
534 spin_lock_irqsave(&session->lock, flags);
535 if (session->state == ISCSI_SESSION_FAILED)
536 ret = BLK_EH_RESET_TIMER;
537 spin_unlock_irqrestore(&session->lock, flags);
538
539 return ret;
540}
541
180static int qla4xxx_host_get_param(struct Scsi_Host *shost, 542static int qla4xxx_host_get_param(struct Scsi_Host *shost,
181 enum iscsi_host_param param, char *buf) 543 enum iscsi_host_param param, char *buf)
182{ 544{
@@ -188,9 +550,7 @@ static int qla4xxx_host_get_param(struct Scsi_Host *shost,
188 len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN); 550 len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN);
189 break; 551 break;
190 case ISCSI_HOST_PARAM_IPADDRESS: 552 case ISCSI_HOST_PARAM_IPADDRESS:
191 len = sprintf(buf, "%d.%d.%d.%d\n", ha->ip_address[0], 553 len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address);
192 ha->ip_address[1], ha->ip_address[2],
193 ha->ip_address[3]);
194 break; 554 break;
195 case ISCSI_HOST_PARAM_INITIATOR_NAME: 555 case ISCSI_HOST_PARAM_INITIATOR_NAME:
196 len = sprintf(buf, "%s\n", ha->name_string); 556 len = sprintf(buf, "%s\n", ha->name_string);
@@ -202,154 +562,851 @@ static int qla4xxx_host_get_param(struct Scsi_Host *shost,
202 return len; 562 return len;
203} 563}
204 564
205static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess, 565static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha)
206 enum iscsi_param param, char *buf)
207{ 566{
208 struct ddb_entry *ddb_entry = sess->dd_data; 567 if (ha->iface_ipv4)
209 int len; 568 return;
210 569
211 switch (param) { 570 /* IPv4 */
212 case ISCSI_PARAM_TARGET_NAME: 571 ha->iface_ipv4 = iscsi_create_iface(ha->host,
213 len = snprintf(buf, PAGE_SIZE - 1, "%s\n", 572 &qla4xxx_iscsi_transport,
214 ddb_entry->iscsi_name); 573 ISCSI_IFACE_TYPE_IPV4, 0, 0);
574 if (!ha->iface_ipv4)
575 ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI "
576 "iface0.\n");
577}
578
579static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha)
580{
581 if (!ha->iface_ipv6_0)
582 /* IPv6 iface-0 */
583 ha->iface_ipv6_0 = iscsi_create_iface(ha->host,
584 &qla4xxx_iscsi_transport,
585 ISCSI_IFACE_TYPE_IPV6, 0,
586 0);
587 if (!ha->iface_ipv6_0)
588 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
589 "iface0.\n");
590
591 if (!ha->iface_ipv6_1)
592 /* IPv6 iface-1 */
593 ha->iface_ipv6_1 = iscsi_create_iface(ha->host,
594 &qla4xxx_iscsi_transport,
595 ISCSI_IFACE_TYPE_IPV6, 1,
596 0);
597 if (!ha->iface_ipv6_1)
598 ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI "
599 "iface1.\n");
600}
601
602static void qla4xxx_create_ifaces(struct scsi_qla_host *ha)
603{
604 if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE)
605 qla4xxx_create_ipv4_iface(ha);
606
607 if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE)
608 qla4xxx_create_ipv6_iface(ha);
609}
610
611static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha)
612{
613 if (ha->iface_ipv4) {
614 iscsi_destroy_iface(ha->iface_ipv4);
615 ha->iface_ipv4 = NULL;
616 }
617}
618
619static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha)
620{
621 if (ha->iface_ipv6_0) {
622 iscsi_destroy_iface(ha->iface_ipv6_0);
623 ha->iface_ipv6_0 = NULL;
624 }
625 if (ha->iface_ipv6_1) {
626 iscsi_destroy_iface(ha->iface_ipv6_1);
627 ha->iface_ipv6_1 = NULL;
628 }
629}
630
631static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha)
632{
633 qla4xxx_destroy_ipv4_iface(ha);
634 qla4xxx_destroy_ipv6_iface(ha);
635}
636
637static void qla4xxx_set_ipv6(struct scsi_qla_host *ha,
638 struct iscsi_iface_param_info *iface_param,
639 struct addr_ctrl_blk *init_fw_cb)
640{
641 /*
642 * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg.
643 * iface_num 1 is valid only for IPv6 Addr.
644 */
645 switch (iface_param->param) {
646 case ISCSI_NET_PARAM_IPV6_ADDR:
647 if (iface_param->iface_num & 0x1)
648 /* IPv6 Addr 1 */
649 memcpy(init_fw_cb->ipv6_addr1, iface_param->value,
650 sizeof(init_fw_cb->ipv6_addr1));
651 else
652 /* IPv6 Addr 0 */
653 memcpy(init_fw_cb->ipv6_addr0, iface_param->value,
654 sizeof(init_fw_cb->ipv6_addr0));
655 break;
656 case ISCSI_NET_PARAM_IPV6_LINKLOCAL:
657 if (iface_param->iface_num & 0x1)
658 break;
659 memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8],
660 sizeof(init_fw_cb->ipv6_if_id));
661 break;
662 case ISCSI_NET_PARAM_IPV6_ROUTER:
663 if (iface_param->iface_num & 0x1)
664 break;
665 memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value,
666 sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
667 break;
668 case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG:
669 /* Autocfg applies to even interface */
670 if (iface_param->iface_num & 0x1)
671 break;
672
673 if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE)
674 init_fw_cb->ipv6_addtl_opts &=
675 cpu_to_le16(
676 ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
677 else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE)
678 init_fw_cb->ipv6_addtl_opts |=
679 cpu_to_le16(
680 IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE);
681 else
682 ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
683 "IPv6 addr\n");
215 break; 684 break;
216 case ISCSI_PARAM_TPGT: 685 case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG:
217 len = sprintf(buf, "%u\n", ddb_entry->tpgt); 686 /* Autocfg applies to even interface */
687 if (iface_param->iface_num & 0x1)
688 break;
689
690 if (iface_param->value[0] ==
691 ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE)
692 init_fw_cb->ipv6_addtl_opts |= cpu_to_le16(
693 IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
694 else if (iface_param->value[0] ==
695 ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE)
696 init_fw_cb->ipv6_addtl_opts &= cpu_to_le16(
697 ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR);
698 else
699 ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for "
700 "IPv6 linklocal addr\n");
218 break; 701 break;
219 case ISCSI_PARAM_TARGET_ALIAS: 702 case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG:
220 len = snprintf(buf, PAGE_SIZE - 1, "%s\n", 703 /* Autocfg applies to even interface */
221 ddb_entry->iscsi_alias); 704 if (iface_param->iface_num & 0x1)
705 break;
706
707 if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE)
708 memset(init_fw_cb->ipv6_dflt_rtr_addr, 0,
709 sizeof(init_fw_cb->ipv6_dflt_rtr_addr));
710 break;
711 case ISCSI_NET_PARAM_IFACE_ENABLE:
712 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
713 init_fw_cb->ipv6_opts |=
714 cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE);
715 qla4xxx_create_ipv6_iface(ha);
716 } else {
717 init_fw_cb->ipv6_opts &=
718 cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE &
719 0xFFFF);
720 qla4xxx_destroy_ipv6_iface(ha);
721 }
722 break;
723 case ISCSI_NET_PARAM_VLAN_TAG:
724 if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag))
725 break;
726 init_fw_cb->ipv6_vlan_tag =
727 cpu_to_be16(*(uint16_t *)iface_param->value);
728 break;
729 case ISCSI_NET_PARAM_VLAN_ENABLED:
730 if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
731 init_fw_cb->ipv6_opts |=
732 cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE);
733 else
734 init_fw_cb->ipv6_opts &=
735 cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE);
736 break;
737 case ISCSI_NET_PARAM_MTU:
738 init_fw_cb->eth_mtu_size =
739 cpu_to_le16(*(uint16_t *)iface_param->value);
740 break;
741 case ISCSI_NET_PARAM_PORT:
742 /* Autocfg applies to even interface */
743 if (iface_param->iface_num & 0x1)
744 break;
745
746 init_fw_cb->ipv6_port =
747 cpu_to_le16(*(uint16_t *)iface_param->value);
222 748 break;
223 749 default:
224 return -ENOSYS;
750 ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n",
751 iface_param->param);
752 break;
225 753 }
754 }
226 755
227 return len;
756 static void qla4xxx_set_ipv4(struct scsi_qla_host *ha,
757 struct iscsi_iface_param_info *iface_param,
758 struct addr_ctrl_blk *init_fw_cb)
759 {
760 switch (iface_param->param) {
761 case ISCSI_NET_PARAM_IPV4_ADDR:
762 memcpy(init_fw_cb->ipv4_addr, iface_param->value,
763 sizeof(init_fw_cb->ipv4_addr));
764 break;
765 case ISCSI_NET_PARAM_IPV4_SUBNET:
766 memcpy(init_fw_cb->ipv4_subnet, iface_param->value,
767 sizeof(init_fw_cb->ipv4_subnet));
768 break;
769 case ISCSI_NET_PARAM_IPV4_GW:
770 memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value,
771 sizeof(init_fw_cb->ipv4_gw_addr));
772 break;
773 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
774 if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
775 init_fw_cb->ipv4_tcp_opts |=
776 cpu_to_le16(TCPOPT_DHCP_ENABLE);
777 else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
778 init_fw_cb->ipv4_tcp_opts &=
779 cpu_to_le16(~TCPOPT_DHCP_ENABLE);
780 else
781 ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n");
782 break;
783 case ISCSI_NET_PARAM_IFACE_ENABLE:
784 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) {
785 init_fw_cb->ipv4_ip_opts |=
786 cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE);
787 qla4xxx_create_ipv4_iface(ha);
788 } else {
789 init_fw_cb->ipv4_ip_opts &=
790 cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE &
791 0xFFFF);
792 qla4xxx_destroy_ipv4_iface(ha);
793 }
794 break;
795 case ISCSI_NET_PARAM_VLAN_TAG:
796 if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag))
797 break;
798 init_fw_cb->ipv4_vlan_tag =
799 cpu_to_be16(*(uint16_t *)iface_param->value);
800 break;
801 case ISCSI_NET_PARAM_VLAN_ENABLED:
802 if (iface_param->value[0] == ISCSI_VLAN_ENABLE)
803 init_fw_cb->ipv4_ip_opts |=
804 cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
805 else
806 init_fw_cb->ipv4_ip_opts &=
807 cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE);
808 break;
809 case ISCSI_NET_PARAM_MTU:
810 init_fw_cb->eth_mtu_size =
811 cpu_to_le16(*(uint16_t *)iface_param->value);
812 break;
813 case ISCSI_NET_PARAM_PORT:
814 init_fw_cb->ipv4_port =
815 cpu_to_le16(*(uint16_t *)iface_param->value);
816 break;
817 default:
818 ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n",
819 iface_param->param);
820 break;
821 }
228 822 }
229 823
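qla4xxx_set_ipv4() and qla4xxx_set_ipv6() above update little-endian firmware option words by OR-ing and AND-ing byte-swapped masks in place. A compact stand-alone sketch of that read-modify-write idiom, assuming a little-endian host and an invented flag bit (the driver's own constants differ):

    #include <stdint.h>
    #include <stdio.h>

    #define OPT_ENABLE 0x0004              /* hypothetical option bit */

    /* Identity on little-endian hosts; the kernel macro also handles BE. */
    static uint16_t to_le16(uint16_t v) { return v; }

    int main(void)
    {
        uint16_t opts = to_le16(0x0101);   /* firmware field, LE layout */

        opts |= to_le16(OPT_ENABLE);               /* set a flag */
        opts &= to_le16(~OPT_ENABLE & 0xFFFF);     /* clear a flag */

        printf("opts = 0x%04x\n", opts);
        return 0;
    }

The "& 0xFFFF" mirrors the driver: "~flag" promotes to a 32-bit int, so it is truncated back to 16 bits before the byte swap.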
230 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn,
824 static void
825 qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb)
826 {
827 struct addr_ctrl_blk_def *acb;
828 acb = (struct addr_ctrl_blk_def *)init_fw_cb;
829 memset(acb->reserved1, 0, sizeof(acb->reserved1));
830 memset(acb->reserved2, 0, sizeof(acb->reserved2));
831 memset(acb->reserved3, 0, sizeof(acb->reserved3));
832 memset(acb->reserved4, 0, sizeof(acb->reserved4));
833 memset(acb->reserved5, 0, sizeof(acb->reserved5));
834 memset(acb->reserved6, 0, sizeof(acb->reserved6));
835 memset(acb->reserved7, 0, sizeof(acb->reserved7));
836 memset(acb->reserved8, 0, sizeof(acb->reserved8));
837 memset(acb->reserved9, 0, sizeof(acb->reserved9));
838 memset(acb->reserved10, 0, sizeof(acb->reserved10));
839 memset(acb->reserved11, 0, sizeof(acb->reserved11));
840 memset(acb->reserved12, 0, sizeof(acb->reserved12));
841 memset(acb->reserved13, 0, sizeof(acb->reserved13));
842 memset(acb->reserved14, 0, sizeof(acb->reserved14));
843 memset(acb->reserved15, 0, sizeof(acb->reserved15));
844 }
845
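qla4xxx_initcb_to_acb() above reinterprets the init control block through a second structure layout and zeroes every field that layout marks reserved. A toy version of the cast-and-scrub pattern, with both structs invented for the demo (the kernel code relies on the two layouts sharing size and alignment):

    #include <stdint.h>
    #include <string.h>

    struct blk     { uint8_t bytes[64]; };
    struct blk_def { uint8_t used[8]; uint8_t reserved1[24]; uint8_t reserved2[32]; };

    /* Same storage, different field map: scrub what the alternate
     * layout calls reserved before handing the block onward. */
    static void blk_to_def(struct blk *b)
    {
        struct blk_def *d = (struct blk_def *)b;

        memset(d->reserved1, 0, sizeof(d->reserved1));
        memset(d->reserved2, 0, sizeof(d->reserved2));
    }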
846 static int
847 qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
848 {
849 struct scsi_qla_host *ha = to_qla_host(shost);
850 int rval = 0;
851 struct iscsi_iface_param_info *iface_param = NULL;
852 struct addr_ctrl_blk *init_fw_cb = NULL;
853 dma_addr_t init_fw_cb_dma;
854 uint32_t mbox_cmd[MBOX_REG_COUNT];
855 uint32_t mbox_sts[MBOX_REG_COUNT];
856 uint32_t rem = len;
857 struct nlattr *attr;
858
859 init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
860 sizeof(struct addr_ctrl_blk),
861 &init_fw_cb_dma, GFP_KERNEL);
862 if (!init_fw_cb) {
863 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n",
864 __func__);
865 return -ENOMEM;
866 }
867
868 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
869 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
870 memset(&mbox_sts, 0, sizeof(mbox_sts));
871
872 if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) {
873 ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__);
874 rval = -EIO;
875 goto exit_init_fw_cb;
876 }
877
878 nla_for_each_attr(attr, data, len, rem) {
879 iface_param = nla_data(attr);
880
881 if (iface_param->param_type != ISCSI_NET_PARAM)
882 continue;
883
884 switch (iface_param->iface_type) {
885 case ISCSI_IFACE_TYPE_IPV4:
886 switch (iface_param->iface_num) {
887 case 0:
888 qla4xxx_set_ipv4(ha, iface_param, init_fw_cb);
889 break;
890 default:
891 /* Cannot have more than one IPv4 interface */
892 ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface "
893 "number = %d\n",
894 iface_param->iface_num);
895 break;
896 }
897 break;
898 case ISCSI_IFACE_TYPE_IPV6:
899 switch (iface_param->iface_num) {
900 case 0:
901 case 1:
902 qla4xxx_set_ipv6(ha, iface_param, init_fw_cb);
903 break;
904 default:
905 /* Cannot have more than two IPv6 interface */
906 ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface "
907 "number = %d\n",
908 iface_param->iface_num);
909 break;
910 }
911 break;
912 default:
913 ql4_printk(KERN_ERR, ha, "Invalid iface type\n");
914 break;
915 }
916 }
917
918 init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A);
919
920 rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB,
921 sizeof(struct addr_ctrl_blk),
922 FLASH_OPT_RMW_COMMIT);
923 if (rval != QLA_SUCCESS) {
924 ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n",
925 __func__);
926 rval = -EIO;
927 goto exit_init_fw_cb;
928 }
929
930 qla4xxx_disable_acb(ha);
931
932 qla4xxx_initcb_to_acb(init_fw_cb);
933
934 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma);
935 if (rval != QLA_SUCCESS) {
936 ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n",
937 __func__);
938 rval = -EIO;
939 goto exit_init_fw_cb;
940 }
941
942 memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk));
943 qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb,
944 init_fw_cb_dma);
945
946 exit_init_fw_cb:
947 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk),
948 init_fw_cb, init_fw_cb_dma);
949
950 return rval;
951 }
952
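qla4xxx_iface_set_param() above iterates a netlink attribute stream with nla_for_each_attr() and dispatches on each attribute's payload. Reduced to portable C, the underlying length-prefixed TLV walk looks roughly like this (a simplified stand-in for the kernel's struct nlattr handling):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct tlv { uint16_t len; uint16_t type; /* payload follows */ };

    static void walk(const uint8_t *buf, size_t len)
    {
        size_t off = 0;

        while (off + sizeof(struct tlv) <= len) {
            struct tlv hdr;

            memcpy(&hdr, buf + off, sizeof(hdr));
            if (hdr.len < sizeof(hdr) || off + hdr.len > len)
                break;                     /* malformed attribute: stop */
            printf("type %u, payload %u bytes\n", hdr.type,
                   (unsigned)(hdr.len - sizeof(hdr)));
            off += (hdr.len + 3u) & ~3u;   /* attributes are 4-byte aligned */
        }
    }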
953 static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
231 954 enum iscsi_param param, char *buf)
232 955 {
233 struct iscsi_cls_session *session;
234 struct ddb_entry *ddb_entry;
235 int len;
956 struct iscsi_conn *conn;
957 struct qla_conn *qla_conn;
958 struct sockaddr *dst_addr;
959 int len = 0;
236 960
237 session = iscsi_dev_to_session(conn->dev.parent);
238 ddb_entry = session->dd_data;
961 conn = cls_conn->dd_data;
962 qla_conn = conn->dd_data;
963 dst_addr = &qla_conn->qla_ep->dst_addr;
239 964
240 965 switch (param) {
241 966 case ISCSI_PARAM_CONN_PORT:
242 len = sprintf(buf, "%hu\n", ddb_entry->port);
243 break;
244 967 case ISCSI_PARAM_CONN_ADDRESS:
245 /* TODO: what are the ipv6 bits */
246 len = sprintf(buf, "%pI4\n", &ddb_entry->ip_addr);
247 break;
968 return iscsi_conn_get_addr_param((struct sockaddr_storage *)
969 dst_addr, param, buf);
248 970 default:
249 return -ENOSYS;
971 return iscsi_conn_get_param(cls_conn, param, buf);
250 972 }
251 973
252 974 return len;
975
253 976 }
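The rewritten conn_get_param above hands address formatting to a transport helper that receives a struct sockaddr_storage, so one path serves IPv4 and IPv6 alike. Outside the kernel the same family dispatch looks like this (POSIX sockets; illustrative only):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>

    static int format_addr(const struct sockaddr_storage *ss,
                           char *buf, socklen_t len)
    {
        /* One storage blob covers both families; branch on ss_family. */
        if (ss->ss_family == AF_INET) {
            const struct sockaddr_in *v4 = (const struct sockaddr_in *)ss;
            return inet_ntop(AF_INET, &v4->sin_addr, buf, len) ? 0 : -1;
        }
        if (ss->ss_family == AF_INET6) {
            const struct sockaddr_in6 *v6 = (const struct sockaddr_in6 *)ss;
            return inet_ntop(AF_INET6, &v6->sin6_addr, buf, len) ? 0 : -1;
        }
        return -1;
    }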
254 977
255 static int qla4xxx_tgt_dscvr(struct Scsi_Host *shost,
256 enum iscsi_tgt_dscvr type, uint32_t enable,
257 struct sockaddr *dst_addr)
978 static struct iscsi_cls_session *
979 qla4xxx_session_create(struct iscsi_endpoint *ep,
980 uint16_t cmds_max, uint16_t qdepth,
981 uint32_t initial_cmdsn)
258 982 {
983 struct iscsi_cls_session *cls_sess;
259 984 struct scsi_qla_host *ha;
260 struct sockaddr_in *addr;
261 struct sockaddr_in6 *addr6;
985 struct qla_endpoint *qla_ep;
986 struct ddb_entry *ddb_entry;
987 uint32_t ddb_index;
988 uint32_t mbx_sts = 0;
989 struct iscsi_session *sess;
990 struct sockaddr *dst_addr;
991 int ret;
992
993 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
994 if (!ep) {
995 printk(KERN_ERR "qla4xxx: missing ep.\n");
996 return NULL;
997 }
998
999 qla_ep = ep->dd_data;
1000 dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
1001 ha = to_qla_host(qla_ep->host);
1002
1003 get_ddb_index:
1004 ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
1005
1006 if (ddb_index >= MAX_DDB_ENTRIES) {
1007 DEBUG2(ql4_printk(KERN_INFO, ha,
1008 "Free DDB index not available\n"));
1009 return NULL;
1010 }
1011
1012 if (test_and_set_bit(ddb_index, ha->ddb_idx_map))
1013 goto get_ddb_index;
1014
1015 DEBUG2(ql4_printk(KERN_INFO, ha,
1016 "Found a free DDB index at %d\n", ddb_index));
1017 ret = qla4xxx_req_ddb_entry(ha, ddb_index, &mbx_sts);
1018 if (ret == QLA_ERROR) {
1019 if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
1020 ql4_printk(KERN_INFO, ha,
1021 "DDB index = %d not available trying next\n",
1022 ddb_index);
1023 goto get_ddb_index;
1024 }
1025 DEBUG2(ql4_printk(KERN_INFO, ha,
1026 "Free FW DDB not available\n"));
1027 return NULL;
1028 }
1029
1030 cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
1031 cmds_max, sizeof(struct ddb_entry),
1032 sizeof(struct ql4_task_data),
1033 initial_cmdsn, ddb_index);
1034 if (!cls_sess)
1035 return NULL;
1036
1037 sess = cls_sess->dd_data;
1038 ddb_entry = sess->dd_data;
1039 ddb_entry->fw_ddb_index = ddb_index;
1040 ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
1041 ddb_entry->ha = ha;
1042 ddb_entry->sess = cls_sess;
1043 cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
1044 ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
1045 ha->tot_ddbs++;
1046
1047 return cls_sess;
1048 }
1049
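qla4xxx_session_create() above claims a free DDB slot with find_first_zero_bit() and test_and_set_bit(), looping back if another CPU wins the race. A single-threaded sketch of the same bookkeeping (the kernel helpers add the atomicity; MAX_ENTRIES is invented):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ENTRIES 64

    static uint64_t idx_map;               /* bit i set => index i in use */

    static int alloc_index(void)
    {
        for (int i = 0; i < MAX_ENTRIES; i++) {
            if (!(idx_map & (1ULL << i))) {
                idx_map |= 1ULL << i;      /* test_and_set_bit() in the driver */
                return i;
            }
        }
        return -1;                         /* no free index available */
    }

    int main(void)
    {
        printf("%d %d\n", alloc_index(), alloc_index());  /* prints: 0 1 */
        return 0;
    }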
1050 static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
1051 {
1052 struct iscsi_session *sess;
1053 struct ddb_entry *ddb_entry;
1054 struct scsi_qla_host *ha;
1055 unsigned long flags;
1056
1057 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1058 sess = cls_sess->dd_data;
1059 ddb_entry = sess->dd_data;
1060 ha = ddb_entry->ha;
1061
1062 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
1063
1064 spin_lock_irqsave(&ha->hardware_lock, flags);
1065 qla4xxx_free_ddb(ha, ddb_entry);
1066 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1067 iscsi_session_teardown(cls_sess);
1068 }
1069
1070 static struct iscsi_cls_conn *
1071 qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
1072 {
1073 struct iscsi_cls_conn *cls_conn;
1074 struct iscsi_session *sess;
1075 struct ddb_entry *ddb_entry;
1076
1077 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1078 cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
1079 conn_idx);
1080 sess = cls_sess->dd_data;
1081 ddb_entry = sess->dd_data;
1082 ddb_entry->conn = cls_conn;
1083
1084 return cls_conn;
1085 }
1086
1087 static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
1088 struct iscsi_cls_conn *cls_conn,
1089 uint64_t transport_fd, int is_leading)
1090 {
1091 struct iscsi_conn *conn;
1092 struct qla_conn *qla_conn;
1093 struct iscsi_endpoint *ep;
1094
1095 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1096
1097 if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
1098 return -EINVAL;
1099 ep = iscsi_lookup_endpoint(transport_fd);
1100 conn = cls_conn->dd_data;
1101 qla_conn = conn->dd_data;
1102 qla_conn->qla_ep = ep->dd_data;
1103 return 0;
1104 }
1105
1106 static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
1107 {
1108 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1109 struct iscsi_session *sess;
1110 struct ddb_entry *ddb_entry;
1111 struct scsi_qla_host *ha;
1112 struct dev_db_entry *fw_ddb_entry;
1113 dma_addr_t fw_ddb_entry_dma;
1114 uint32_t mbx_sts = 0;
262 1115 int ret = 0;
1116 int status = QLA_SUCCESS;
263 1117
264 ha = (struct scsi_qla_host *) shost->hostdata;
1118 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1119 sess = cls_sess->dd_data;
1120 ddb_entry = sess->dd_data;
1121 ha = ddb_entry->ha;
265 1122
266 switch (type) {
267 case ISCSI_TGT_DSCVR_SEND_TARGETS:
268 if (dst_addr->sa_family == AF_INET) {
269 addr = (struct sockaddr_in *)dst_addr;
270 if (qla4xxx_send_tgts(ha, (char *)&addr->sin_addr,
271 addr->sin_port) != QLA_SUCCESS)
272 ret = -EIO;
273 } else if (dst_addr->sa_family == AF_INET6) {
274 /*
275 * TODO: fix qla4xxx_send_tgts
276 */
277 addr6 = (struct sockaddr_in6 *)dst_addr;
278 if (qla4xxx_send_tgts(ha, (char *)&addr6->sin6_addr,
279 addr6->sin6_port) != QLA_SUCCESS)
280 ret = -EIO;
281 } else
282 ret = -ENOSYS;
283 break;
284 default:
285 ret = -ENOSYS;
1123 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1124 &fw_ddb_entry_dma, GFP_KERNEL);
1125 if (!fw_ddb_entry) {
1126 ql4_printk(KERN_ERR, ha,
1127 "%s: Unable to allocate dma buffer\n", __func__);
1128 return -ENOMEM;
1129 }
1130
1131 ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
1132 if (ret) {
1133 /* If iscsid is stopped and started then no need to do
1134 * set param again since ddb state will be already
1135 * active and FW does not allow set ddb to an
1136 * active session.
1137 */
1138 if (mbx_sts)
1139 if (ddb_entry->fw_ddb_device_state ==
1140 DDB_DS_SESSION_ACTIVE) {
1141 iscsi_conn_start(ddb_entry->conn);
1142 iscsi_conn_login_event(ddb_entry->conn,
1143 ISCSI_CONN_STATE_LOGGED_IN);
1144 goto exit_set_param;
1145 }
1146
1147 ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n",
1148 __func__, ddb_entry->fw_ddb_index);
1149 goto exit_conn_start;
1150 }
1151
1152 status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
1153 if (status == QLA_ERROR) {
1154 ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
1155 sess->targetname);
1156 ret = -EINVAL;
1157 goto exit_conn_start;
286 1158 }
1159
1160 if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE)
1161 ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
1162
1163 DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__,
1164 ddb_entry->fw_ddb_device_state));
1165
1166 exit_set_param:
1167 ret = 0;
1168
1169 exit_conn_start:
1170 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1171 fw_ddb_entry, fw_ddb_entry_dma);
287 1172 return ret;
288 1173 }
289 1174
290 void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry)
1175 static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn)
291 1176 {
292 if (!ddb_entry->sess)
293 return;
1177 struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
1178 struct iscsi_session *sess;
1179 struct scsi_qla_host *ha;
1180 struct ddb_entry *ddb_entry;
1181 int options;
294 1182
295 if (ddb_entry->conn) {
296 atomic_set(&ddb_entry->state, DDB_STATE_DEAD);
297 iscsi_remove_session(ddb_entry->sess);
1183 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1184 sess = cls_sess->dd_data;
1185 ddb_entry = sess->dd_data;
1186 ha = ddb_entry->ha;
1187
1188 options = LOGOUT_OPTION_CLOSE_SESSION;
1189 if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR)
1190 ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
1191 }
1192
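qla4xxx_task_work() below receives only the embedded work_struct and recovers its enclosing ql4_task_data with container_of(). The pointer arithmetic behind that macro, in stand-alone C with invented types:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work { int pending; };
    struct task_data { int id; struct work task_work; };

    int main(void)
    {
        struct task_data td = { .id = 42 };
        struct work *w = &td.task_work;        /* only the member escapes */
        struct task_data *back =
            container_of(w, struct task_data, task_work);

        printf("recovered id = %d\n", back->id);   /* prints 42 */
        return 0;
    }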
1193 static void qla4xxx_task_work(struct work_struct *wdata)
1194 {
1195 struct ql4_task_data *task_data;
1196 struct scsi_qla_host *ha;
1197 struct passthru_status *sts;
1198 struct iscsi_task *task;
1199 struct iscsi_hdr *hdr;
1200 uint8_t *data;
1201 uint32_t data_len;
1202 struct iscsi_conn *conn;
1203 int hdr_len;
1204 itt_t itt;
1205
1206 task_data = container_of(wdata, struct ql4_task_data, task_work);
1207 ha = task_data->ha;
1208 task = task_data->task;
1209 sts = &task_data->sts;
1210 hdr_len = sizeof(struct iscsi_hdr);
1211
1212 DEBUG3(printk(KERN_INFO "Status returned\n"));
1213 DEBUG3(qla4xxx_dump_buffer(sts, 64));
1214 DEBUG3(printk(KERN_INFO "Response buffer"));
1215 DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64));
1216
1217 conn = task->conn;
1218
1219 switch (sts->completionStatus) {
1220 case PASSTHRU_STATUS_COMPLETE:
1221 hdr = (struct iscsi_hdr *)task_data->resp_buffer;
1222 /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */
1223 itt = sts->handle;
1224 hdr->itt = itt;
1225 data = task_data->resp_buffer + hdr_len;
1226 data_len = task_data->resp_len - hdr_len;
1227 iscsi_complete_pdu(conn, hdr, data, data_len);
1228 break;
1229 default:
1230 ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n",
1231 sts->completionStatus);
1232 break;
298 1233 }
299 iscsi_free_session(ddb_entry->sess);
1234 return;
300 1235 }
301 1236
302 int qla4xxx_add_sess(struct ddb_entry *ddb_entry)
1237 static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
303 1238 {
304 int err;
1239 struct ql4_task_data *task_data;
1240 struct iscsi_session *sess;
1241 struct ddb_entry *ddb_entry;
1242 struct scsi_qla_host *ha;
1243 int hdr_len;
305 1244
306 ddb_entry->sess->recovery_tmo = ql4xsess_recovery_tmo;
1245 sess = task->conn->session;
1246 ddb_entry = sess->dd_data;
1247 ha = ddb_entry->ha;
1248 task_data = task->dd_data;
1249 memset(task_data, 0, sizeof(struct ql4_task_data));
307 1250
308 err = iscsi_add_session(ddb_entry->sess, ddb_entry->fw_ddb_index);
309 if (err) {
310 DEBUG2(printk(KERN_ERR "Could not add session.\n"));
311 return err;
1251 if (task->sc) {
1252 ql4_printk(KERN_INFO, ha,
1253 "%s: SCSI Commands not implemented\n", __func__);
1254 return -EINVAL;
312 1255 }
313 1256
314 ddb_entry->conn = iscsi_create_conn(ddb_entry->sess, 0, 0);
315 if (!ddb_entry->conn) {
316 iscsi_remove_session(ddb_entry->sess);
317 DEBUG2(printk(KERN_ERR "Could not add connection.\n"));
318 return -ENOMEM;
1257 hdr_len = sizeof(struct iscsi_hdr);
1258 task_data->ha = ha;
1259 task_data->task = task;
1260
1261 if (task->data_count) {
1262 task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
1263 task->data_count,
1264 PCI_DMA_TODEVICE);
319 1265 }
320 1266
321 /* finally ready to go */
322 iscsi_unblock_session(ddb_entry->sess);
1267 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
1268 __func__, task->conn->max_recv_dlength, hdr_len));
1269
1270 task_data->resp_len = task->conn->max_recv_dlength + hdr_len;
1271 task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev,
1272 task_data->resp_len,
1273 &task_data->resp_dma,
1274 GFP_ATOMIC);
1275 if (!task_data->resp_buffer)
1276 goto exit_alloc_pdu;
1277
1278 task_data->req_len = task->data_count + hdr_len;
1279 task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev,
1280 task_data->req_len,
1281 &task_data->req_dma,
1282 GFP_ATOMIC);
1283 if (!task_data->req_buffer)
1284 goto exit_alloc_pdu;
1285
1286 task->hdr = task_data->req_buffer;
1287
1288 INIT_WORK(&task_data->task_work, qla4xxx_task_work);
1289
323 1290 return 0;
1291
1292 exit_alloc_pdu:
1293 if (task_data->resp_buffer)
1294 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1295 task_data->resp_buffer, task_data->resp_dma);
1296
1297 if (task_data->req_buffer)
1298 dma_free_coherent(&ha->pdev->dev, task_data->req_len,
1299 task_data->req_buffer, task_data->req_dma);
1300 return -ENOMEM;
324 1301 }
325 1302
326 struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha)
1303 static void qla4xxx_task_cleanup(struct iscsi_task *task)
327 1304 {
1305 struct ql4_task_data *task_data;
1306 struct iscsi_session *sess;
328 1307 struct ddb_entry *ddb_entry;
329 struct iscsi_cls_session *sess;
330
1308 struct scsi_qla_host *ha;
1309 int hdr_len;
331 sess = iscsi_alloc_session(ha->host, &qla4xxx_iscsi_transport,
332 sizeof(struct ddb_entry));
333 if (!sess)
334 return NULL;
335 1310
1311 hdr_len = sizeof(struct iscsi_hdr);
1312 sess = task->conn->session;
336 1313 ddb_entry = sess->dd_data;
337 memset(ddb_entry, 0, sizeof(*ddb_entry));
338 ddb_entry->ha = ha;
339 ddb_entry->sess = sess;
340 return ddb_entry;
1314 ha = ddb_entry->ha;
1315 task_data = task->dd_data;
1316
1317 if (task->data_count) {
1318 dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
1319 task->data_count, PCI_DMA_TODEVICE);
1320 }
1321
1322 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
1323 __func__, task->conn->max_recv_dlength, hdr_len));
1324
1325 dma_free_coherent(&ha->pdev->dev, task_data->resp_len,
1326 task_data->resp_buffer, task_data->resp_dma);
1327 dma_free_coherent(&ha->pdev->dev, task_data->req_len,
1328 task_data->req_buffer, task_data->req_dma);
1329 return;
341 1330 }
342 1331
343 static void qla4xxx_scan_start(struct Scsi_Host *shost)
1332 static int qla4xxx_task_xmit(struct iscsi_task *task)
344 1333 {
345 struct scsi_qla_host *ha = shost_priv(shost);
346 struct ddb_entry *ddb_entry, *ddbtemp;
1334 struct scsi_cmnd *sc = task->sc;
1335 struct iscsi_session *sess = task->conn->session;
1336 struct ddb_entry *ddb_entry = sess->dd_data;
1337 struct scsi_qla_host *ha = ddb_entry->ha;
1338
1339 if (!sc)
1340 return qla4xxx_send_passthru0(task);
347 1341
348 /* finish setup of sessions that were already setup in firmware */
349 list_for_each_entry_safe(ddb_entry, ddbtemp, &ha->ddb_list, list) {
350 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE)
351 qla4xxx_add_sess(ddb_entry);
1342 ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n",
1343 __func__);
1344 return -ENOSYS;
1345 }
1346
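qla4xxx_alloc_pdu() earlier acquires two DMA buffers and, on failure, unwinds through one exit label that frees whatever was obtained. The same goto-cleanup shape in portable C, with malloc standing in for dma_alloc_coherent():

    #include <stdlib.h>

    struct bufs { void *resp, *req; };

    static int alloc_bufs(struct bufs *b, size_t resp_len, size_t req_len)
    {
        b->resp = b->req = NULL;   /* so the error path can free blindly */

        b->resp = malloc(resp_len);
        if (!b->resp)
            goto fail;
        b->req = malloc(req_len);
        if (!b->req)
            goto fail;
        return 0;

    fail:
        free(b->resp);             /* free(NULL) is a no-op, like the */
        free(b->req);              /* pointer checks in the driver    */
        return -1;
    }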
1347 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
1348 struct ddb_entry *ddb_entry)
1349 {
1350 struct iscsi_cls_session *cls_sess;
1351 struct iscsi_cls_conn *cls_conn;
1352 struct iscsi_session *sess;
1353 struct iscsi_conn *conn;
1354 uint32_t ddb_state;
1355 dma_addr_t fw_ddb_entry_dma;
1356 struct dev_db_entry *fw_ddb_entry;
1357
1358 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1359 &fw_ddb_entry_dma, GFP_KERNEL);
1360 if (!fw_ddb_entry) {
1361 ql4_printk(KERN_ERR, ha,
1362 "%s: Unable to allocate dma buffer\n", __func__);
1363 return;
352 1364 }
1365
1366 if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
1367 fw_ddb_entry_dma, NULL, NULL, &ddb_state,
1368 NULL, NULL, NULL) == QLA_ERROR) {
1369 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
1370 "get_ddb_entry for fw_ddb_index %d\n",
1371 ha->host_no, __func__,
1372 ddb_entry->fw_ddb_index));
1373 return;
1374 }
1375
1376 cls_sess = ddb_entry->sess;
1377 sess = cls_sess->dd_data;
1378
1379 cls_conn = ddb_entry->conn;
1380 conn = cls_conn->dd_data;
1381
1382 /* Update params */
1383 conn->max_recv_dlength = BYTE_UNITS *
1384 le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
1385
1386 conn->max_xmit_dlength = BYTE_UNITS *
1387 le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
1388
1389 sess->initial_r2t_en =
1390 (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1391
1392 sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
1393
1394 sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
1395
1396 sess->first_burst = BYTE_UNITS *
1397 le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
1398
1399 sess->max_burst = BYTE_UNITS *
1400 le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
1401
1402 sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
1403
1404 sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
1405
1406 sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
1407
1408 memcpy(sess->initiatorname, ha->name_string,
1409 min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
353 1410 }
354 1411
355 1412 /*
@@ -376,25 +1433,15 @@ static void qla4xxx_stop_timer(struct scsi_qla_host *ha)
376 1433 }
377 1434
378 1435 /***
379 * qla4xxx_mark_device_missing - mark a device as missing.
380 * @ha: Pointer to host adapter structure.
1436 * qla4xxx_mark_device_missing - blocks the session
1437 * @cls_session: Pointer to the session to be blocked
381 1438 * @ddb_entry: Pointer to device database entry
382 1439 *
383 1440 * This routine marks a device missing and close connection.
384 1441 **/
385 void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
386 struct ddb_entry *ddb_entry)
1442 void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session)
387 1443 {
388 if ((atomic_read(&ddb_entry->state) != DDB_STATE_DEAD)) {
389 atomic_set(&ddb_entry->state, DDB_STATE_MISSING);
390 DEBUG2(printk("scsi%ld: ddb [%d] marked MISSING\n",
391 ha->host_no, ddb_entry->fw_ddb_index));
392 } else
393 DEBUG2(printk("scsi%ld: ddb [%d] DEAD\n", ha->host_no,
394 ddb_entry->fw_ddb_index))
395
396 iscsi_block_session(ddb_entry->sess);
397 iscsi_conn_error_event(ddb_entry->conn, ISCSI_ERR_CONN_FAILED);
1444 iscsi_block_session(cls_session);
398 1445 }
399 1446
400 1447 /**
@@ -405,10 +1452,7 @@ void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
405 1452 **/
406 1453 void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha)
407 1454 {
408 struct ddb_entry *ddb_entry, *ddbtemp;
409 list_for_each_entry_safe(ddb_entry, ddbtemp, &ha->ddb_list, list) {
410 qla4xxx_mark_device_missing(ha, ddb_entry);
411 }
1455 iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing);
412 1456 }
413 1457
414 1458 static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha,
@@ -495,20 +1539,13 @@ static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
495 1539 goto qc_fail_command;
496 1540 }
497 1541
498 if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
499 if (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD) {
500 cmd->result = DID_NO_CONNECT << 16;
501 goto qc_fail_command;
502 }
503 return SCSI_MLQUEUE_TARGET_BUSY;
504 }
505
506 1542 if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
507 1543 test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
508 1544 test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
509 1545 test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
510 1546 test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
511 1547 !test_bit(AF_ONLINE, &ha->flags) ||
1548 !test_bit(AF_LINK_UP, &ha->flags) ||
512 1549 test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
513 1550 goto qc_host_busy;
514 1551
@@ -563,6 +1600,13 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
563 1600
564 1601 ha->srb_mempool = NULL;
565 1602
1603 if (ha->chap_dma_pool)
1604 dma_pool_destroy(ha->chap_dma_pool);
1605
1606 if (ha->chap_list)
1607 vfree(ha->chap_list);
1608 ha->chap_list = NULL;
1609
566 1610 /* release io space registers */
567 1611 if (is_qla8022(ha)) {
568 1612 if (ha->nx_pcibase)
@@ -636,6 +1680,15 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
636 1680 goto mem_alloc_error_exit;
637 1681 }
638 1682
1683 ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev,
1684 CHAP_DMA_BLOCK_SIZE, 8, 0);
1685
1686 if (ha->chap_dma_pool == NULL) {
1687 ql4_printk(KERN_WARNING, ha,
1688 "%s: chap_dma_pool allocation failed..\n", __func__);
1689 goto mem_alloc_error_exit;
1690 }
1691
639 1692 return QLA_SUCCESS;
640 1693
641 1694 mem_alloc_error_exit:
@@ -753,7 +1806,6 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
753 1806 **/
754 1807 static void qla4xxx_timer(struct scsi_qla_host *ha)
755 1808 {
756 struct ddb_entry *ddb_entry, *dtemp;
757 1809 int start_dpc = 0;
758 1810 uint16_t w;
759 1811
@@ -773,69 +1825,6 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
773 1825 qla4_8xxx_watchdog(ha);
774 1826 }
775 1827
776 /* Search for relogin's to time-out and port down retry. */
777 list_for_each_entry_safe(ddb_entry, dtemp, &ha->ddb_list, list) {
778 /* Count down time between sending relogins */
779 if (adapter_up(ha) &&
780 !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
781 atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
782 if (atomic_read(&ddb_entry->retry_relogin_timer) !=
783 INVALID_ENTRY) {
784 if (atomic_read(&ddb_entry->retry_relogin_timer)
785 == 0) {
786 atomic_set(&ddb_entry->
787 retry_relogin_timer,
788 INVALID_ENTRY);
789 set_bit(DPC_RELOGIN_DEVICE,
790 &ha->dpc_flags);
791 set_bit(DF_RELOGIN, &ddb_entry->flags);
792 DEBUG2(printk("scsi%ld: %s: ddb [%d]"
793 " login device\n",
794 ha->host_no, __func__,
795 ddb_entry->fw_ddb_index));
796 } else
797 atomic_dec(&ddb_entry->
798 retry_relogin_timer);
799 }
800 }
801
802 /* Wait for relogin to timeout */
803 if (atomic_read(&ddb_entry->relogin_timer) &&
804 (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
805 /*
806 * If the relogin times out and the device is
807 * still NOT ONLINE then try and relogin again.
808 */
809 if (atomic_read(&ddb_entry->state) !=
810 DDB_STATE_ONLINE &&
811 ddb_entry->fw_ddb_device_state ==
812 DDB_DS_SESSION_FAILED) {
813 /* Reset retry relogin timer */
814 atomic_inc(&ddb_entry->relogin_retry_count);
815 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
816 " timed out-retrying"
817 " relogin (%d)\n",
818 ha->host_no,
819 ddb_entry->fw_ddb_index,
820 atomic_read(&ddb_entry->
821 relogin_retry_count))
822 );
823 start_dpc++;
824 DEBUG(printk("scsi%ld:%d:%d: ddb [%d] "
825 "initiate relogin after"
826 " %d seconds\n",
827 ha->host_no, ddb_entry->bus,
828 ddb_entry->target,
829 ddb_entry->fw_ddb_index,
830 ddb_entry->default_time2wait + 4)
831 );
832
833 atomic_set(&ddb_entry->retry_relogin_timer,
834 ddb_entry->default_time2wait + 4);
835 }
836 }
837 }
838
839 1828 if (!is_qla8022(ha)) {
840 1829 /* Check for heartbeat interval. */
841 1830 if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE &&
@@ -1081,6 +2070,17 @@ void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha)
1081 2070 clear_bit(AF_INIT_DONE, &ha->flags);
1082 2071 }
1083 2072
2073 static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
2074 {
2075 struct iscsi_session *sess;
2076 struct ddb_entry *ddb_entry;
2077
2078 sess = cls_session->dd_data;
2079 ddb_entry = sess->dd_data;
2080 ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
2081 iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
2082 }
2083
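Recovery now funnels per-session work through iscsi_host_for_each_session() callbacks such as qla4xxx_fail_session() above (applied in the reset path below). The shape of that iteration, reduced to plain C with invented types:

    #include <stdio.h>

    struct session { int id; int failed; };

    static void for_each_session(struct session *s, int n,
                                 void (*fn)(struct session *))
    {
        for (int i = 0; i < n; i++)
            fn(&s[i]);             /* same action applied to every session */
    }

    static void fail_session(struct session *s)
    {
        s->failed = 1;
        printf("session %d marked failed\n", s->id);
    }

    int main(void)
    {
        struct session all[2] = { { .id = 0 }, { .id = 1 } };

        for_each_session(all, 2, fail_session);
        return 0;
    }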
1084 2084 /**
1085 2085 * qla4xxx_recover_adapter - recovers adapter after a fatal error
1086 2086 * @ha: Pointer to host adapter structure.
@@ -1093,11 +2093,14 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
1093 2093 /* Stall incoming I/O until we are done */
1094 2094 scsi_block_requests(ha->host);
1095 2095 clear_bit(AF_ONLINE, &ha->flags);
2096 clear_bit(AF_LINK_UP, &ha->flags);
1096 2097
1097 2098 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__));
1098 2099
1099 2100 set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
1100 2101
2102 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
2103
1101 2104 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
1102 2105 reset_chip = 1;
1103 2106
@@ -1160,7 +2163,7 @@ recover_ha_init_adapter:
1160 2163
1161 2164 /* NOTE: AF_ONLINE flag set upon successful completion of
1162 2165 * qla4xxx_initialize_adapter */
1163 status = qla4xxx_initialize_adapter(ha, PRESERVE_DDB_LIST);
2166 status = qla4xxx_initialize_adapter(ha);
1164 2167 }
1165 2168
1166 2169 /* Retry failed adapter initialization, if necessary
@@ -1225,27 +2228,34 @@ recover_ha_init_adapter:
1225 2228 return status;
1226 2229 }
1227 2230
1228 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
2231 static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
1229 2232 {
1230 struct ddb_entry *ddb_entry, *dtemp;
2233 struct iscsi_session *sess;
2234 struct ddb_entry *ddb_entry;
2235 struct scsi_qla_host *ha;
1231 2236
1232 list_for_each_entry_safe(ddb_entry, dtemp, &ha->ddb_list, list) {
1233 if ((atomic_read(&ddb_entry->state) == DDB_STATE_MISSING) ||
1234 (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD)) {
1235 if (ddb_entry->fw_ddb_device_state ==
1236 DDB_DS_SESSION_ACTIVE) {
1237 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
1238 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
1239 " marked ONLINE\n", ha->host_no, __func__,
1240 ddb_entry->fw_ddb_index);
1241
1242 iscsi_unblock_session(ddb_entry->sess);
1243 } else
1244 qla4xxx_relogin_device(ha, ddb_entry);
2237 sess = cls_session->dd_data;
2238 ddb_entry = sess->dd_data;
2239 ha = ddb_entry->ha;
2240 if (!iscsi_is_session_online(cls_session)) {
2241 if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
2242 ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
2243 " unblock session\n", ha->host_no, __func__,
2244 ddb_entry->fw_ddb_index);
2245 iscsi_unblock_session(ddb_entry->sess);
2246 } else {
2247 /* Trigger relogin */
2248 iscsi_session_failure(cls_session->dd_data,
2249 ISCSI_ERR_CONN_FAILED);
1245 2250 }
1246 2251 }
1247 2252 }
1248 2253
2254 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
2255 {
2256 iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
2257 }
2258
1249 2259 void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
1250 2260 {
1251 2261 if (ha->dpc_thread)
@@ -1267,7 +2277,6 @@ static void qla4xxx_do_dpc(struct work_struct *work)
1267 2277 {
1268 2278 struct scsi_qla_host *ha =
1269 2279 container_of(work, struct scsi_qla_host, dpc_work);
1270 struct ddb_entry *ddb_entry, *dtemp;
1271 2280 int status = QLA_ERROR;
1272 2281
1273 2282 DEBUG2(printk("scsi%ld: %s: DPC handler waking up."
@@ -1363,31 +2372,6 @@ dpc_post_reset_ha:
1363 2372 qla4xxx_relogin_all_devices(ha);
1364 2373 }
1365 2374 }
1366
1367 /* ---- relogin device? --- */
1368 if (adapter_up(ha) &&
1369 test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
1370 list_for_each_entry_safe(ddb_entry, dtemp,
1371 &ha->ddb_list, list) {
1372 if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
1373 atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)
1374 qla4xxx_relogin_device(ha, ddb_entry);
1375
1376 /*
1377 * If mbx cmd times out there is no point
1378 * in continuing further.
1379 * With large no of targets this can hang
1380 * the system.
1381 */
1382 if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
1383 printk(KERN_WARNING "scsi%ld: %s: "
1384 "need to reset hba\n",
1385 ha->host_no, __func__);
1386 break;
1387 }
1388 }
1389 }
1390
1391 2375 }
1392 2376
1393 2377 /**
@@ -1410,6 +2394,10 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
1410 2394 if (ha->dpc_thread)
1411 2395 destroy_workqueue(ha->dpc_thread);
1412 2396
2397 /* Kill the kernel thread for this host */
2398 if (ha->task_wq)
2399 destroy_workqueue(ha->task_wq);
2400
1413 2401 /* Put firmware in known state */
1414 2402 ha->isp_ops->reset_firmware(ha);
1415 2403
@@ -1601,6 +2589,594 @@ uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha)
1601 2589 return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->rsp_q_in));
1602 2590 }
1603 2591
2592 static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
2593 {
2594 struct scsi_qla_host *ha = data;
2595 char *str = buf;
2596 int rc;
2597
2598 switch (type) {
2599 case ISCSI_BOOT_ETH_FLAGS:
2600 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
2601 break;
2602 case ISCSI_BOOT_ETH_INDEX:
2603 rc = sprintf(str, "0\n");
2604 break;
2605 case ISCSI_BOOT_ETH_MAC:
2606 rc = sysfs_format_mac(str, ha->my_mac,
2607 MAC_ADDR_LEN);
2608 break;
2609 default:
2610 rc = -ENOSYS;
2611 break;
2612 }
2613 return rc;
2614 }
2615
2616 static mode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
2617 {
2618 int rc;
2619
2620 switch (type) {
2621 case ISCSI_BOOT_ETH_FLAGS:
2622 case ISCSI_BOOT_ETH_MAC:
2623 case ISCSI_BOOT_ETH_INDEX:
2624 rc = S_IRUGO;
2625 break;
2626 default:
2627 rc = 0;
2628 break;
2629 }
2630 return rc;
2631 }
2632
2633 static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
2634 {
2635 struct scsi_qla_host *ha = data;
2636 char *str = buf;
2637 int rc;
2638
2639 switch (type) {
2640 case ISCSI_BOOT_INI_INITIATOR_NAME:
2641 rc = sprintf(str, "%s\n", ha->name_string);
2642 break;
2643 default:
2644 rc = -ENOSYS;
2645 break;
2646 }
2647 return rc;
2648 }
2649
2650 static mode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
2651 {
2652 int rc;
2653
2654 switch (type) {
2655 case ISCSI_BOOT_INI_INITIATOR_NAME:
2656 rc = S_IRUGO;
2657 break;
2658 default:
2659 rc = 0;
2660 break;
2661 }
2662 return rc;
2663 }
2664
2665 static ssize_t
2666 qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
2667 char *buf)
2668 {
2669 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
2670 char *str = buf;
2671 int rc;
2672
2673 switch (type) {
2674 case ISCSI_BOOT_TGT_NAME:
2675 rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name);
2676 break;
2677 case ISCSI_BOOT_TGT_IP_ADDR:
2678 if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1)
2679 rc = sprintf(buf, "%pI4\n",
2680 &boot_conn->dest_ipaddr.ip_address);
2681 else
2682 rc = sprintf(str, "%pI6\n",
2683 &boot_conn->dest_ipaddr.ip_address);
2684 break;
2685 case ISCSI_BOOT_TGT_PORT:
2686 rc = sprintf(str, "%d\n", boot_conn->dest_port);
2687 break;
2688 case ISCSI_BOOT_TGT_CHAP_NAME:
2689 rc = sprintf(str, "%.*s\n",
2690 boot_conn->chap.target_chap_name_length,
2691 (char *)&boot_conn->chap.target_chap_name);
2692 break;
2693 case ISCSI_BOOT_TGT_CHAP_SECRET:
2694 rc = sprintf(str, "%.*s\n",
2695 boot_conn->chap.target_secret_length,
2696 (char *)&boot_conn->chap.target_secret);
2697 break;
2698 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
2699 rc = sprintf(str, "%.*s\n",
2700 boot_conn->chap.intr_chap_name_length,
2701 (char *)&boot_conn->chap.intr_chap_name);
2702 break;
2703 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
2704 rc = sprintf(str, "%.*s\n",
2705 boot_conn->chap.intr_secret_length,
2706 (char *)&boot_conn->chap.intr_secret);
2707 break;
2708 case ISCSI_BOOT_TGT_FLAGS:
2709 rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
2710 break;
2711 case ISCSI_BOOT_TGT_NIC_ASSOC:
2712 rc = sprintf(str, "0\n");
2713 break;
2714 default:
2715 rc = -ENOSYS;
2716 break;
2717 }
2718 return rc;
2719 }
2720
2721 static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf)
2722 {
2723 struct scsi_qla_host *ha = data;
2724 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_pri_sess);
2725
2726 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
2727 }
2728
2729 static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
2730 {
2731 struct scsi_qla_host *ha = data;
2732 struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess);
2733
2734 return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
2735 }
2736
2737 static mode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
2738 {
2739 int rc;
2740
2741 switch (type) {
2742 case ISCSI_BOOT_TGT_NAME:
2743 case ISCSI_BOOT_TGT_IP_ADDR:
2744 case ISCSI_BOOT_TGT_PORT:
2745 case ISCSI_BOOT_TGT_CHAP_NAME:
2746 case ISCSI_BOOT_TGT_CHAP_SECRET:
2747 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
2748 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
2749 case ISCSI_BOOT_TGT_NIC_ASSOC:
2750 case ISCSI_BOOT_TGT_FLAGS:
2751 rc = S_IRUGO;
2752 break;
2753 default:
2754 rc = 0;
2755 break;
2756 }
2757 return rc;
2758 }
2759
2760 static void qla4xxx_boot_release(void *data)
2761 {
2762 struct scsi_qla_host *ha = data;
2763
2764 scsi_host_put(ha->host);
2765 }
2766
2767 static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
2768 {
2769 dma_addr_t buf_dma;
2770 uint32_t addr, pri_addr, sec_addr;
2771 uint32_t offset;
2772 uint16_t func_num;
2773 uint8_t val;
2774 uint8_t *buf = NULL;
2775 size_t size = 13 * sizeof(uint8_t);
2776 int ret = QLA_SUCCESS;
2777
2778 func_num = PCI_FUNC(ha->pdev->devfn);
2779
2780 ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n",
2781 __func__, ha->pdev->device, func_num);
2782
2783 if (is_qla40XX(ha)) {
2784 if (func_num == 1) {
2785 addr = NVRAM_PORT0_BOOT_MODE;
2786 pri_addr = NVRAM_PORT0_BOOT_PRI_TGT;
2787 sec_addr = NVRAM_PORT0_BOOT_SEC_TGT;
2788 } else if (func_num == 3) {
2789 addr = NVRAM_PORT1_BOOT_MODE;
2790 pri_addr = NVRAM_PORT1_BOOT_PRI_TGT;
2791 sec_addr = NVRAM_PORT1_BOOT_SEC_TGT;
2792 } else {
2793 ret = QLA_ERROR;
2794 goto exit_boot_info;
2795 }
2796
2797 /* Check Boot Mode */
2798 val = rd_nvram_byte(ha, addr);
2799 if (!(val & 0x07)) {
2800 DEBUG2(ql4_printk(KERN_ERR, ha,
2801 "%s: Failed Boot options : 0x%x\n",
2802 __func__, val));
2803 ret = QLA_ERROR;
2804 goto exit_boot_info;
2805 }
2806
2807 /* get primary valid target index */
2808 val = rd_nvram_byte(ha, pri_addr);
2809 if (val & BIT_7)
2810 ddb_index[0] = (val & 0x7f);
2811
2812 /* get secondary valid target index */
2813 val = rd_nvram_byte(ha, sec_addr);
2814 if (val & BIT_7)
2815 ddb_index[1] = (val & 0x7f);
2816
2817 } else if (is_qla8022(ha)) {
2818 buf = dma_alloc_coherent(&ha->pdev->dev, size,
2819 &buf_dma, GFP_KERNEL);
2820 if (!buf) {
2821 DEBUG2(ql4_printk(KERN_ERR, ha,
2822 "%s: Unable to allocate dma buffer\n",
2823 __func__));
2824 ret = QLA_ERROR;
2825 goto exit_boot_info;
2826 }
2827
2828 if (ha->port_num == 0)
2829 offset = BOOT_PARAM_OFFSET_PORT0;
2830 else if (ha->port_num == 1)
2831 offset = BOOT_PARAM_OFFSET_PORT1;
2832 else {
2833 ret = QLA_ERROR;
2834 goto exit_boot_info_free;
2835 }
2836 addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) +
2837 offset;
2838 if (qla4xxx_get_flash(ha, buf_dma, addr,
2839 13 * sizeof(uint8_t)) != QLA_SUCCESS) {
2840 DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash"
2841 "failed\n", ha->host_no, __func__));
2842 ret = QLA_ERROR;
2843 goto exit_boot_info_free;
2844 }
2845 /* Check Boot Mode */
2846 if (!(buf[1] & 0x07)) {
2847 DEBUG2(ql4_printk(KERN_INFO, ha,
2848 "Failed: Boot options : 0x%x\n",
2849 buf[1]));
2850 ret = QLA_ERROR;
2851 goto exit_boot_info_free;
2852 }
2853
2854 /* get primary valid target index */
2855 if (buf[2] & BIT_7)
2856 ddb_index[0] = buf[2] & 0x7f;
2857
2858 /* get secondary valid target index */
2859 if (buf[11] & BIT_7)
2860 ddb_index[1] = buf[11] & 0x7f;
2861 } else {
2862 ret = QLA_ERROR;
2863 goto exit_boot_info;
2864 }
2865
2866 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary"
2867 " target ID %d\n", __func__, ddb_index[0],
2868 ddb_index[1]));
2869
2870 exit_boot_info_free:
2871 dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
2872 exit_boot_info:
2873 return ret;
2874 }
2875
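get_fw_boot_info() above packs a validity flag and a 7-bit target index into one NVRAM byte: BIT_7 marks the entry valid and the low bits carry the DDB index. Decoding that layout (the sample byte value is made up):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT_7 0x80

    int main(void)
    {
        uint8_t val = 0x85;                 /* hypothetical NVRAM byte */

        if (val & BIT_7)                    /* high bit: entry is valid */
            printf("boot target index %u\n", val & 0x7f);
        else
            printf("no boot target configured\n");
        return 0;
    }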
2876 /**
2877 * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password
2878 * @ha: pointer to adapter structure
2879 * @username: CHAP username to be returned
2880 * @password: CHAP password to be returned
2881 *
2882 * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP
2883 * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/.
2884 * So from the CHAP cache find the first BIDI CHAP entry and set it
2885 * to the boot record in sysfs.
2886 **/
2887 static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
2888 char *password)
2889 {
2890 int i, ret = -EINVAL;
2891 int max_chap_entries = 0;
2892 struct ql4_chap_table *chap_table;
2893
2894 if (is_qla8022(ha))
2895 max_chap_entries = (ha->hw.flt_chap_size / 2) /
2896 sizeof(struct ql4_chap_table);
2897 else
2898 max_chap_entries = MAX_CHAP_ENTRIES_40XX;
2899
2900 if (!ha->chap_list) {
2901 ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n");
2902 return ret;
2903 }
2904
2905 mutex_lock(&ha->chap_sem);
2906 for (i = 0; i < max_chap_entries; i++) {
2907 chap_table = (struct ql4_chap_table *)ha->chap_list + i;
2908 if (chap_table->cookie !=
2909 __constant_cpu_to_le16(CHAP_VALID_COOKIE)) {
2910 continue;
2911 }
2912
2913 if (chap_table->flags & BIT_7) /* local */
2914 continue;
2915
2916 if (!(chap_table->flags & BIT_6)) /* Not BIDI */
2917 continue;
2918
2919 strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
2920 strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
2921 ret = 0;
2922 break;
2923 }
2924 mutex_unlock(&ha->chap_sem);
2925
2926 return ret;
2927 }
2928
2929
2930 static int qla4xxx_get_boot_target(struct scsi_qla_host *ha,
2931 struct ql4_boot_session_info *boot_sess,
2932 uint16_t ddb_index)
2933 {
2934 struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0];
2935 struct dev_db_entry *fw_ddb_entry;
2936 dma_addr_t fw_ddb_entry_dma;
2937 uint16_t idx;
2938 uint16_t options;
2939 int ret = QLA_SUCCESS;
2940
2941 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
2942 &fw_ddb_entry_dma, GFP_KERNEL);
2943 if (!fw_ddb_entry) {
2944 DEBUG2(ql4_printk(KERN_ERR, ha,
2945 "%s: Unable to allocate dma buffer.\n",
2946 __func__));
2947 ret = QLA_ERROR;
2948 return ret;
2949 }
2950
2951 if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry,
2952 fw_ddb_entry_dma, ddb_index)) {
2953 DEBUG2(ql4_printk(KERN_ERR, ha,
2954 "%s: Flash DDB read Failed\n", __func__));
2955 ret = QLA_ERROR;
2956 goto exit_boot_target;
2957 }
2958
2959 /* Update target name and IP from DDB */
2960 memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name,
2961 min(sizeof(boot_sess->target_name),
2962 sizeof(fw_ddb_entry->iscsi_name)));
2963
2964 options = le16_to_cpu(fw_ddb_entry->options);
2965 if (options & DDB_OPT_IPV6_DEVICE) {
2966 memcpy(&boot_conn->dest_ipaddr.ip_address,
2967 &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN);
2968 } else {
2969 boot_conn->dest_ipaddr.ip_type = 0x1;
2970 memcpy(&boot_conn->dest_ipaddr.ip_address,
2971 &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN);
2972 }
2973
2974 boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port);
2975
2976 /* update chap information */
2977 idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
2978
2979 if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
2980
2981 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n"));
2982
2983 ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap.
2984 target_chap_name,
2985 (char *)&boot_conn->chap.target_secret,
2986 idx);
2987 if (ret) {
2988 ql4_printk(KERN_ERR, ha, "Failed to set chap\n");
2989 ret = QLA_ERROR;
2990 goto exit_boot_target;
2991 }
2992
2993 boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
2994 boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN;
2995 }
2996
2997 if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) {
2998
2999 DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n"));
3000
3001 ret = qla4xxx_get_bidi_chap(ha,
3002 (char *)&boot_conn->chap.intr_chap_name,
3003 (char *)&boot_conn->chap.intr_secret);
3004
3005 if (ret) {
3006 ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n");
3007 ret = QLA_ERROR;
3008 goto exit_boot_target;
3009 }
3010
3011 boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN;
3012 boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN;
3013 }
3014
3015 exit_boot_target:
3016 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
3017 fw_ddb_entry, fw_ddb_entry_dma);
3018 return ret;
3019 }
3020
3021 static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
3022 {
3023 uint16_t ddb_index[2];
3024 int ret = QLA_ERROR;
3025 int rval;
3026
3027 memset(ddb_index, 0, sizeof(ddb_index));
3028 ddb_index[0] = 0xffff;
3029 ddb_index[1] = 0xffff;
3030 ret = get_fw_boot_info(ha, ddb_index);
3031 if (ret != QLA_SUCCESS) {
3032 DEBUG2(ql4_printk(KERN_ERR, ha,
3033 "%s: Failed to set boot info.\n", __func__));
3034 return ret;
3035 }
3036
3037 if (ddb_index[0] == 0xffff)
3038 goto sec_target;
3039
3040 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess),
3041 ddb_index[0]);
3042 if (rval != QLA_SUCCESS) {
3043 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get "
3044 "primary target\n", __func__));
3045 } else
3046 ret = QLA_SUCCESS;
3047
3048 sec_target:
3049 if (ddb_index[1] == 0xffff)
3050 goto exit_get_boot_info;
3051
3052 rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess),
3053 ddb_index[1]);
3054 if (rval != QLA_SUCCESS) {
3055 DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get "
3056 "secondary target\n", __func__));
3057 } else
3058 ret = QLA_SUCCESS;
3059
3060 exit_get_boot_info:
3061 return ret;
3062 }
3063
3064 static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
3065 {
3066 struct iscsi_boot_kobj *boot_kobj;
3067
3068 if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
3069 return 0;
3070
3071 ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
3072 if (!ha->boot_kset)
3073 goto kset_free;
3074
3075 if (!scsi_host_get(ha->host))
3076 goto kset_free;
3077 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha,
3078 qla4xxx_show_boot_tgt_pri_info,
3079 qla4xxx_tgt_get_attr_visibility,
3080 qla4xxx_boot_release);
3081 if (!boot_kobj)
3082 goto put_host;
3083
3084 if (!scsi_host_get(ha->host))
3085 goto kset_free;
3086 boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha,
3087 qla4xxx_show_boot_tgt_sec_info,
3088 qla4xxx_tgt_get_attr_visibility,
3089 qla4xxx_boot_release);
3090 if (!boot_kobj)
3091 goto put_host;
3092
3093 if (!scsi_host_get(ha->host))
3094 goto kset_free;
3095 boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha,
3096 qla4xxx_show_boot_ini_info,
3097 qla4xxx_ini_get_attr_visibility,
3098 qla4xxx_boot_release);
3099 if (!boot_kobj)
3100 goto put_host;
3101
3102 if (!scsi_host_get(ha->host))
3103 goto kset_free;
3104 boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha,
3105 qla4xxx_show_boot_eth_info,
3106 qla4xxx_eth_get_attr_visibility,
3107 qla4xxx_boot_release);
3108 if (!boot_kobj)
3109 goto put_host;
3110
3111 return 0;
3112
3113 put_host:
3114 scsi_host_put(ha->host);
3115 kset_free:
3116 iscsi_boot_destroy_kset(ha->boot_kset);
3117 return -ENOMEM;
3118 }
3119
3120
3121 /**
3122 * qla4xxx_create_chap_list - Create CHAP list from FLASH
3123 * @ha: pointer to adapter structure
3124 *
3125 * Read flash and make a list of CHAP entries, during login when a CHAP entry
3126 * is received, it will be checked in this list. If entry exist then the CHAP
3127 * entry index is set in the DDB. If CHAP entry does not exist in this list
3128 * then a new entry is added in FLASH in CHAP table and the index obtained is
3129 * used in the DDB.
3130 **/
3131 static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
3132 {
3133 int rval = 0;
3134 uint8_t *chap_flash_data = NULL;
3135 uint32_t offset;
3136 dma_addr_t chap_dma;
3137 uint32_t chap_size = 0;
3138
3139 if (is_qla40XX(ha))
3140 chap_size = MAX_CHAP_ENTRIES_40XX *
3141 sizeof(struct ql4_chap_table);
3142 else /* Single region contains CHAP info for both
3143 * ports which is divided into half for each port.
3144 */
3145 chap_size = ha->hw.flt_chap_size / 2;
3146
3147 chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size,
3148 &chap_dma, GFP_KERNEL);
3149 if (!chap_flash_data) {
3150 ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n");
3151 return;
3152 }
3153 if (is_qla40XX(ha))
3154 offset = FLASH_CHAP_OFFSET;
3155 else {
3156 offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2);
3157 if (ha->port_num == 1)
3158 offset += chap_size;
3159 }
3160
3161 rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size);
3162 if (rval != QLA_SUCCESS)
3163 goto exit_chap_list;
3164
3165 if (ha->chap_list == NULL)
3166 ha->chap_list = vmalloc(chap_size);
3167 if (ha->chap_list == NULL) {
3168 ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n");
3169 goto exit_chap_list;
3170 }
3171
3172 memcpy(ha->chap_list, chap_flash_data, chap_size);
3173
3174 exit_chap_list:
3175 dma_free_coherent(&ha->pdev->dev, chap_size,
3176 chap_flash_data, chap_dma);
3177 return;
3178 }
3179
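qla4xxx_create_chap_list() above reads a single flash region that holds CHAP tables for both ports, taking the upper half for port 1. The per-port slice computation, with invented constants:

    #include <stdint.h>
    #include <stdio.h>

    #define FLASH_BASE     0x80000u    /* hypothetical raw-access base */
    #define CHAP_REGION_SZ 0x4000u     /* hypothetical whole-region size */

    int main(void)
    {
        unsigned port_num = 1;                   /* second port */
        uint32_t half = CHAP_REGION_SZ / 2;      /* per-port share */
        uint32_t offset = FLASH_BASE + (port_num == 1 ? half : 0);

        printf("read %u bytes at 0x%x\n", half, offset);
        return 0;
    }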
1604 3180 /**
1605 3181 * qla4xxx_probe_adapter - callback function to probe HBA
1606 3182 * @pdev: pointer to pci_dev structure
@@ -1624,7 +3200,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1624 3200 if (pci_enable_device(pdev))
1625 3201 return -1;
1626 3202
1627 host = scsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha));
3203 host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0);
1628 3204 if (host == NULL) {
1629 3205 printk(KERN_WARNING
1630 3206 "qla4xxx: Couldn't allocate host from scsi layer!\n");
@@ -1632,7 +3208,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1632 3208 }
1633 3209
1634 3210 /* Clear our data area */
1635 ha = (struct scsi_qla_host *) host->hostdata;
3211 ha = to_qla_host(host);
1636 3212 memset(ha, 0, sizeof(*ha));
1637 3213
1638 3214 /* Save the information from PCI BIOS. */
@@ -1675,11 +3251,12 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1675 3251 qla4xxx_config_dma_addressing(ha);
1676 3252
1677 3253 /* Initialize lists and spinlocks. */
1678 INIT_LIST_HEAD(&ha->ddb_list);
1679 3254 INIT_LIST_HEAD(&ha->free_srb_q);
1680 3255
1681 3256 mutex_init(&ha->mbox_sem);
3257 mutex_init(&ha->chap_sem);
1682 3258 init_completion(&ha->mbx_intr_comp);
3259 init_completion(&ha->disable_acb_comp);
1683 3260
1684 3261 spin_lock_init(&ha->hardware_lock);
1685 3262
@@ -1692,6 +3269,27 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1692 3269 goto probe_failed;
1693 3270 }
1694 3271
3272 host->cmd_per_lun = 3;
3273 host->max_channel = 0;
3274 host->max_lun = MAX_LUNS - 1;
3275 host->max_id = MAX_TARGETS;
3276 host->max_cmd_len = IOCB_MAX_CDB_LEN;
3277 host->can_queue = MAX_SRBS ;
3278 host->transportt = qla4xxx_scsi_transport;
3279
3280 ret = scsi_init_shared_tag_map(host, MAX_SRBS);
3281 if (ret) {
3282 ql4_printk(KERN_WARNING, ha,
3283 "%s: scsi_init_shared_tag_map failed\n", __func__);
3284 goto probe_failed;
3285 }
3286
3287 pci_set_drvdata(pdev, ha);
3288
3289 ret = scsi_add_host(host, &pdev->dev);
3290 if (ret)
3291 goto probe_failed;
3292
1695 3293 if (is_qla8022(ha))
1696 3294 (void) qla4_8xxx_get_flash_info(ha);
1697 3295
@@ -1700,7 +3298,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1700 * firmware 3298 * firmware
1701 * NOTE: interrupts enabled upon successful completion 3299 * NOTE: interrupts enabled upon successful completion
1702 */ 3300 */
1703 status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST); 3301 status = qla4xxx_initialize_adapter(ha);
1704 while ((!test_bit(AF_ONLINE, &ha->flags)) && 3302 while ((!test_bit(AF_ONLINE, &ha->flags)) &&
1705 init_retry_count++ < MAX_INIT_RETRIES) { 3303 init_retry_count++ < MAX_INIT_RETRIES) {
1706 3304
@@ -1721,7 +3319,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1721 if (ha->isp_ops->reset_chip(ha) == QLA_ERROR) 3319 if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
1722 continue; 3320 continue;
1723 3321
1724 status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST); 3322 status = qla4xxx_initialize_adapter(ha);
1725 } 3323 }
1726 3324
1727 if (!test_bit(AF_ONLINE, &ha->flags)) { 3325 if (!test_bit(AF_ONLINE, &ha->flags)) {
@@ -1736,24 +3334,9 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1736 qla4_8xxx_idc_unlock(ha); 3334 qla4_8xxx_idc_unlock(ha);
1737 } 3335 }
1738 ret = -ENODEV; 3336 ret = -ENODEV;
1739 goto probe_failed; 3337 goto remove_host;
1740 } 3338 }
1741 3339
1742 host->cmd_per_lun = 3;
1743 host->max_channel = 0;
1744 host->max_lun = MAX_LUNS - 1;
1745 host->max_id = MAX_TARGETS;
1746 host->max_cmd_len = IOCB_MAX_CDB_LEN;
1747 host->can_queue = MAX_SRBS ;
1748 host->transportt = qla4xxx_scsi_transport;
1749
1750 ret = scsi_init_shared_tag_map(host, MAX_SRBS);
1751 if (ret) {
1752 ql4_printk(KERN_WARNING, ha,
1753 "scsi_init_shared_tag_map failed\n");
1754 goto probe_failed;
1755 }
1756
1757 /* Startup the kernel thread for this host adapter. */ 3340 /* Startup the kernel thread for this host adapter. */
1758 DEBUG2(printk("scsi: %s: Starting kernel thread for " 3341 DEBUG2(printk("scsi: %s: Starting kernel thread for "
1759 "qla4xxx_dpc\n", __func__)); 3342 "qla4xxx_dpc\n", __func__));
@@ -1762,10 +3345,18 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1762 if (!ha->dpc_thread) { 3345 if (!ha->dpc_thread) {
1763 ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n"); 3346 ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n");
1764 ret = -ENODEV; 3347 ret = -ENODEV;
1765 goto probe_failed; 3348 goto remove_host;
1766 } 3349 }
1767 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc); 3350 INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc);
1768 3351
3352 sprintf(buf, "qla4xxx_%lu_task", ha->host_no);
3353 ha->task_wq = alloc_workqueue(buf, WQ_MEM_RECLAIM, 1);
3354 if (!ha->task_wq) {
3355 ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n");
3356 ret = -ENODEV;
3357 goto remove_host;
3358 }
3359
1769 /* For ISP-82XX, request_irqs is called in qla4_8xxx_load_risc 3360 /* For ISP-82XX, request_irqs is called in qla4_8xxx_load_risc
1770 * (which is called indirectly by qla4xxx_initialize_adapter), 3361 * (which is called indirectly by qla4xxx_initialize_adapter),
1771 * so that irqs will be registered after crbinit but before 3362 * so that irqs will be registered after crbinit but before
@@ -1776,7 +3367,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1776 if (ret) { 3367 if (ret) {
1777 ql4_printk(KERN_WARNING, ha, "Failed to reserve " 3368 ql4_printk(KERN_WARNING, ha, "Failed to reserve "
1778 "interrupt %d already in use.\n", pdev->irq); 3369 "interrupt %d already in use.\n", pdev->irq);
1779 goto probe_failed; 3370 goto remove_host;
1780 } 3371 }
1781 } 3372 }
1782 3373
@@ -1788,21 +3379,25 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
1788 3379
1789 set_bit(AF_INIT_DONE, &ha->flags); 3380 set_bit(AF_INIT_DONE, &ha->flags);
1790 3381
1791 pci_set_drvdata(pdev, ha);
1792
1793 ret = scsi_add_host(host, &pdev->dev);
1794 if (ret)
1795 goto probe_failed;
1796
1797 printk(KERN_INFO 3382 printk(KERN_INFO
1798 " QLogic iSCSI HBA Driver version: %s\n" 3383 " QLogic iSCSI HBA Driver version: %s\n"
1799 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", 3384 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
1800 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev), 3385 qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
1801 ha->host_no, ha->firmware_version[0], ha->firmware_version[1], 3386 ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
1802 ha->patch_number, ha->build_number); 3387 ha->patch_number, ha->build_number);
1803 scsi_scan_host(host); 3388
3389 qla4xxx_create_chap_list(ha);
3390
3391 if (qla4xxx_setup_boot_info(ha))
3392 ql4_printk(KERN_ERR, ha, "%s: ISCSI boot info setup failed\n",
3393 __func__);
3394
3395 qla4xxx_create_ifaces(ha);
1804 return 0; 3396 return 0;
1805 3397
3398remove_host:
3399 scsi_remove_host(ha->host);
3400
1806probe_failed: 3401probe_failed:
1807 qla4xxx_free_adapter(ha); 3402 qla4xxx_free_adapter(ha);
1808 3403
@@ -1867,8 +3462,11 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
1867 if (!is_qla8022(ha)) 3462 if (!is_qla8022(ha))
1868 qla4xxx_prevent_other_port_reinit(ha); 3463 qla4xxx_prevent_other_port_reinit(ha);
1869 3464
1870 /* remove devs from iscsi_sessions to scsi_devices */ 3465 /* destroy iface from sysfs */
1871 qla4xxx_free_ddb_list(ha); 3466 qla4xxx_destroy_ifaces(ha);
3467
3468 if (ha->boot_kset)
3469 iscsi_boot_destroy_kset(ha->boot_kset);
1872 3470
1873 scsi_remove_host(ha->host); 3471 scsi_remove_host(ha->host);
1874 3472
@@ -1907,10 +3505,15 @@ static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
1907 3505
1908static int qla4xxx_slave_alloc(struct scsi_device *sdev) 3506static int qla4xxx_slave_alloc(struct scsi_device *sdev)
1909{ 3507{
1910 struct iscsi_cls_session *sess = starget_to_session(sdev->sdev_target); 3508 struct iscsi_cls_session *cls_sess;
1911 struct ddb_entry *ddb = sess->dd_data; 3509 struct iscsi_session *sess;
3510 struct ddb_entry *ddb;
1912 int queue_depth = QL4_DEF_QDEPTH; 3511 int queue_depth = QL4_DEF_QDEPTH;
1913 3512
3513 cls_sess = starget_to_session(sdev->sdev_target);
3514 sess = cls_sess->dd_data;
3515 ddb = sess->dd_data;
3516
1914 sdev->hostdata = ddb; 3517 sdev->hostdata = ddb;
1915 sdev->tagged_supported = 1; 3518 sdev->tagged_supported = 1;
1916 3519
@@ -2248,7 +3851,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
2248 int return_status = FAILED; 3851 int return_status = FAILED;
2249 struct scsi_qla_host *ha; 3852 struct scsi_qla_host *ha;
2250 3853
2251 ha = (struct scsi_qla_host *) cmd->device->host->hostdata; 3854 ha = to_qla_host(cmd->device->host);
2252 3855
2253 if (ql4xdontresethba) { 3856 if (ql4xdontresethba) {
2254 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", 3857 DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
@@ -2284,6 +3887,110 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
2284 return return_status; 3887 return return_status;
2285} 3888}
2286 3889
3890static int qla4xxx_context_reset(struct scsi_qla_host *ha)
3891{
3892 uint32_t mbox_cmd[MBOX_REG_COUNT];
3893 uint32_t mbox_sts[MBOX_REG_COUNT];
3894 struct addr_ctrl_blk_def *acb = NULL;
3895 uint32_t acb_len = sizeof(struct addr_ctrl_blk_def);
3896 int rval = QLA_SUCCESS;
3897 dma_addr_t acb_dma;
3898
3899 acb = dma_alloc_coherent(&ha->pdev->dev,
3900 sizeof(struct addr_ctrl_blk_def),
3901 &acb_dma, GFP_KERNEL);
3902 if (!acb) {
3903 ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
3904 __func__);
3905 rval = -ENOMEM;
3906 goto exit_port_reset;
3907 }
3908
3909 memset(acb, 0, acb_len);
3910
3911 rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len);
3912 if (rval != QLA_SUCCESS) {
3913 rval = -EIO;
3914 goto exit_free_acb;
3915 }
3916
3917 rval = qla4xxx_disable_acb(ha);
3918 if (rval != QLA_SUCCESS) {
3919 rval = -EIO;
3920 goto exit_free_acb;
3921 }
3922
3923 wait_for_completion_timeout(&ha->disable_acb_comp,
3924 DISABLE_ACB_TOV * HZ);
3925
3926 rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
3927 if (rval != QLA_SUCCESS) {
3928 rval = -EIO;
3929 goto exit_free_acb;
3930 }
3931
3932exit_free_acb:
3933 dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def),
3934 acb, acb_dma);
3935exit_port_reset:
3936 DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__,
3937 rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
3938 return rval;
3939}
3940
3941static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
3942{
3943 struct scsi_qla_host *ha = to_qla_host(shost);
3944 int rval = QLA_SUCCESS;
3945
3946 if (ql4xdontresethba) {
3947 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n",
3948 __func__));
3949 rval = -EPERM;
3950 goto exit_host_reset;
3951 }
3952
3953 rval = qla4xxx_wait_for_hba_online(ha);
3954 if (rval != QLA_SUCCESS) {
3955 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unable to reset host "
3956 "adapter\n", __func__));
3957 rval = -EIO;
3958 goto exit_host_reset;
3959 }
3960
3961 if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
3962 goto recover_adapter;
3963
3964 switch (reset_type) {
3965 case SCSI_ADAPTER_RESET:
3966 set_bit(DPC_RESET_HA, &ha->dpc_flags);
3967 break;
3968 case SCSI_FIRMWARE_RESET:
3969 if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
3970 if (is_qla8022(ha))
3971 /* set firmware context reset */
3972 set_bit(DPC_RESET_HA_FW_CONTEXT,
3973 &ha->dpc_flags);
3974 else {
3975 rval = qla4xxx_context_reset(ha);
3976 goto exit_host_reset;
3977 }
3978 }
3979 break;
3980 }
3981
3982recover_adapter:
3983 rval = qla4xxx_recover_adapter(ha);
3984 if (rval != QLA_SUCCESS) {
3985 DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter failed\n",
3986 __func__));
3987 rval = -EIO;
3988 }
3989
3990exit_host_reset:
3991 return rval;
3992}
3993
2287/* PCI AER driver recovers from all correctable errors w/o 3994/* PCI AER driver recovers from all correctable errors w/o
2288 * driver intervention. For uncorrectable errors PCI AER 3995 * driver intervention. For uncorrectable errors PCI AER
2289 * driver calls the following device driver's callbacks 3996 * driver calls the following device driver's callbacks
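The callbacks referred to here are the standard struct pci_error_handlers hooks. As a rough sketch of the wiring (handler names are illustrative, not taken from this driver):

	static struct pci_error_handlers foo_err_handler = {
		.error_detected	= foo_pci_error_detected, /* uncorrectable error reported */
		.mmio_enabled	= foo_pci_mmio_enabled,   /* MMIO usable again, pre-reset triage */
		.slot_reset	= foo_pci_slot_reset,     /* slot reset done, reinitialize device */
		.resume		= foo_pci_resume,         /* error handled, resume normal I/O */
	};

	static struct pci_driver foo_pci_driver = {
		/* ... */
		.err_handler	= &foo_err_handler,
	};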
@@ -2360,7 +4067,8 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
2360 4067
2361 if (test_bit(AF_ONLINE, &ha->flags)) { 4068 if (test_bit(AF_ONLINE, &ha->flags)) {
2362 clear_bit(AF_ONLINE, &ha->flags); 4069 clear_bit(AF_ONLINE, &ha->flags);
2363 qla4xxx_mark_all_devices_missing(ha); 4070 clear_bit(AF_LINK_UP, &ha->flags);
4071 iscsi_host_for_each_session(ha->host, qla4xxx_fail_session);
2364 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); 4072 qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
2365 } 4073 }
2366 4074
@@ -2407,7 +4115,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
2407 4115
2408 qla4_8xxx_idc_unlock(ha); 4116 qla4_8xxx_idc_unlock(ha);
2409 clear_bit(AF_FW_RECOVERY, &ha->flags); 4117 clear_bit(AF_FW_RECOVERY, &ha->flags);
2410 rval = qla4xxx_initialize_adapter(ha, PRESERVE_DDB_LIST); 4118 rval = qla4xxx_initialize_adapter(ha);
2411 qla4_8xxx_idc_lock(ha); 4119 qla4_8xxx_idc_lock(ha);
2412 4120
2413 if (rval != QLA_SUCCESS) { 4121 if (rval != QLA_SUCCESS) {
@@ -2443,8 +4151,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
2443 if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) == 4151 if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
2444 QLA82XX_DEV_READY)) { 4152 QLA82XX_DEV_READY)) {
2445 clear_bit(AF_FW_RECOVERY, &ha->flags); 4153 clear_bit(AF_FW_RECOVERY, &ha->flags);
2446 rval = qla4xxx_initialize_adapter(ha, 4154 rval = qla4xxx_initialize_adapter(ha);
2447 PRESERVE_DDB_LIST);
2448 if (rval == QLA_SUCCESS) { 4155 if (rval == QLA_SUCCESS) {
2449 ret = qla4xxx_request_irqs(ha); 4156 ret = qla4xxx_request_irqs(ha);
2450 if (ret) { 4157 if (ret) {
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 61049287725..c15347d3f53 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.02.00-k7" 8#define QLA4XXX_DRIVER_VERSION "5.02.00-k8"
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 9689d41c788..e40dc1cb09a 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -880,7 +880,7 @@ static inline void cmd_frob(struct Command_Entry *cmd, struct scsi_cmnd *Cmnd,
880 cmd->control_flags |= CFLAG_WRITE; 880 cmd->control_flags |= CFLAG_WRITE;
881 else 881 else
882 cmd->control_flags |= CFLAG_READ; 882 cmd->control_flags |= CFLAG_READ;
883 cmd->time_out = 30; 883 cmd->time_out = Cmnd->request->timeout/HZ;
884 memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len); 884 memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len);
885} 885}
886 886
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index a4b9cdbaaa0..dc6131e6a1b 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -293,8 +293,16 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
293 * so that we can deal with it there. 293 * so that we can deal with it there.
294 */ 294 */
295 if (scmd->device->expecting_cc_ua) { 295 if (scmd->device->expecting_cc_ua) {
296 scmd->device->expecting_cc_ua = 0; 296 /*
297 return NEEDS_RETRY; 297 * Because some devices do not queue unit
298 * attentions correctly, we carefully check
299 * the additional sense code and qualifier so as
300 * not to squash a media change unit attention.
301 */
302 if (sshdr.asc != 0x28 || sshdr.ascq != 0x00) {
303 scmd->device->expecting_cc_ua = 0;
304 return NEEDS_RETRY;
305 }
298 } 306 }
299 /* 307 /*
300 * if the device is in the process of becoming ready, we 308 * if the device is in the process of becoming ready, we
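For reference, ASC/ASCQ 0x28/0x00 is the T10-defined "not ready to ready change, medium may have changed" unit attention, so the check above lets a genuine media-change report through instead of consuming it as the expected one.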
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index e0bd3f790fc..04c2a278076 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -246,6 +246,43 @@ show_shost_active_mode(struct device *dev,
246 246
247static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL); 247static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL);
248 248
249static int check_reset_type(char *str)
250{
251 if (strncmp(str, "adapter", 10) == 0)
252 return SCSI_ADAPTER_RESET;
253 else if (strncmp(str, "firmware", 10) == 0)
254 return SCSI_FIRMWARE_RESET;
255 else
256 return 0;
257}
258
259static ssize_t
260store_host_reset(struct device *dev, struct device_attribute *attr,
261 const char *buf, size_t count)
262{
263 struct Scsi_Host *shost = class_to_shost(dev);
264 struct scsi_host_template *sht = shost->hostt;
265 int ret = -EINVAL;
266 char str[10];
267 int type;
268
269 sscanf(buf, "%9s", str);
270 type = check_reset_type(str);
271
272 if (!type)
273 goto exit_store_host_reset;
274
275 if (sht->host_reset)
276 ret = sht->host_reset(shost, type);
277
278exit_store_host_reset:
279 if (ret == 0)
280 ret = count;
281 return ret;
282}
283
284static DEVICE_ATTR(host_reset, S_IWUSR, NULL, store_host_reset);
285
249shost_rd_attr(unique_id, "%u\n"); 286shost_rd_attr(unique_id, "%u\n");
250shost_rd_attr(host_busy, "%hu\n"); 287shost_rd_attr(host_busy, "%hu\n");
251shost_rd_attr(cmd_per_lun, "%hd\n"); 288shost_rd_attr(cmd_per_lun, "%hd\n");
@@ -272,6 +309,7 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
272 &dev_attr_active_mode.attr, 309 &dev_attr_active_mode.attr,
273 &dev_attr_prot_capabilities.attr, 310 &dev_attr_prot_capabilities.attr,
274 &dev_attr_prot_guard_type.attr, 311 &dev_attr_prot_guard_type.attr,
312 &dev_attr_host_reset.attr,
275 NULL 313 NULL
276}; 314};
277 315
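A low-level driver opts into this attribute by filling the new host_reset hook in its template; writing "adapter" or "firmware" to /sys/class/scsi_host/hostN/host_reset then reaches that hook. A minimal sketch with a hypothetical driver (names are illustrative):

	static int foo_host_reset(struct Scsi_Host *shost, int reset_type)
	{
		switch (reset_type) {
		case SCSI_ADAPTER_RESET:	/* "adapter" was written */
			return foo_hard_reset(shost);
		case SCSI_FIRMWARE_RESET:	/* "firmware" was written */
			return foo_fw_reset(shost);
		}
		return -EINVAL;
	}

	static struct scsi_host_template foo_template = {
		/* ... */
		.host_reset	= foo_host_reset,
	};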
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 3fd16d7212d..1bcd65a509e 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -23,6 +23,8 @@
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/mutex.h> 24#include <linux/mutex.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/bsg-lib.h>
27#include <linux/idr.h>
26#include <net/tcp.h> 28#include <net/tcp.h>
27#include <scsi/scsi.h> 29#include <scsi/scsi.h>
28#include <scsi/scsi_host.h> 30#include <scsi/scsi_host.h>
@@ -31,10 +33,7 @@
31#include <scsi/scsi_transport_iscsi.h> 33#include <scsi/scsi_transport_iscsi.h>
32#include <scsi/iscsi_if.h> 34#include <scsi/iscsi_if.h>
33#include <scsi/scsi_cmnd.h> 35#include <scsi/scsi_cmnd.h>
34 36#include <scsi/scsi_bsg_iscsi.h>
35#define ISCSI_SESSION_ATTRS 23
36#define ISCSI_CONN_ATTRS 13
37#define ISCSI_HOST_ATTRS 4
38 37
39#define ISCSI_TRANSPORT_VERSION "2.0-870" 38#define ISCSI_TRANSPORT_VERSION "2.0-870"
40 39
@@ -76,16 +75,14 @@ struct iscsi_internal {
76 struct list_head list; 75 struct list_head list;
77 struct device dev; 76 struct device dev;
78 77
79 struct device_attribute *host_attrs[ISCSI_HOST_ATTRS + 1];
80 struct transport_container conn_cont; 78 struct transport_container conn_cont;
81 struct device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1];
82 struct transport_container session_cont; 79 struct transport_container session_cont;
83 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
84}; 80};
85 81
86static atomic_t iscsi_session_nr; /* sysfs session id for next new session */ 82static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
87static struct workqueue_struct *iscsi_eh_timer_workq; 83static struct workqueue_struct *iscsi_eh_timer_workq;
88 84
85static DEFINE_IDA(iscsi_sess_ida);
89/* 86/*
90 * list of registered transports and lock that must 87 * list of registered transports and lock that must
91 * be held while accessing list. The iscsi_transport_lock must 88 * be held while accessing list. The iscsi_transport_lock must
@@ -270,6 +267,291 @@ struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle)
270} 267}
271EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint); 268EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint);
272 269
270/*
271 * Interface to display network param to sysfs
272 */
273
274static void iscsi_iface_release(struct device *dev)
275{
276 struct iscsi_iface *iface = iscsi_dev_to_iface(dev);
277 struct device *parent = iface->dev.parent;
278
279 kfree(iface);
280 put_device(parent);
281}
282
283
284static struct class iscsi_iface_class = {
285 .name = "iscsi_iface",
286 .dev_release = iscsi_iface_release,
287};
288
289#define ISCSI_IFACE_ATTR(_prefix, _name, _mode, _show, _store) \
290struct device_attribute dev_attr_##_prefix##_##_name = \
291 __ATTR(_name, _mode, _show, _store)
292
293/* iface attrs show */
294#define iscsi_iface_attr_show(type, name, param_type, param) \
295static ssize_t \
296show_##type##_##name(struct device *dev, struct device_attribute *attr, \
297 char *buf) \
298{ \
299 struct iscsi_iface *iface = iscsi_dev_to_iface(dev); \
300 struct iscsi_transport *t = iface->transport; \
301 return t->get_iface_param(iface, param_type, param, buf); \
302} \
303
304#define iscsi_iface_net_attr(type, name, param) \
305 iscsi_iface_attr_show(type, name, ISCSI_NET_PARAM, param) \
306static ISCSI_IFACE_ATTR(type, name, S_IRUGO, show_##type##_##name, NULL);
307
308/* generic read-only ipv4 attributes */
309iscsi_iface_net_attr(ipv4_iface, ipaddress, ISCSI_NET_PARAM_IPV4_ADDR);
310iscsi_iface_net_attr(ipv4_iface, gateway, ISCSI_NET_PARAM_IPV4_GW);
311iscsi_iface_net_attr(ipv4_iface, subnet, ISCSI_NET_PARAM_IPV4_SUBNET);
312iscsi_iface_net_attr(ipv4_iface, bootproto, ISCSI_NET_PARAM_IPV4_BOOTPROTO);
313
314/* generic read-only ipv6 attributes */
315iscsi_iface_net_attr(ipv6_iface, ipaddress, ISCSI_NET_PARAM_IPV6_ADDR);
316iscsi_iface_net_attr(ipv6_iface, link_local_addr, ISCSI_NET_PARAM_IPV6_LINKLOCAL);
317iscsi_iface_net_attr(ipv6_iface, router_addr, ISCSI_NET_PARAM_IPV6_ROUTER);
318iscsi_iface_net_attr(ipv6_iface, ipaddr_autocfg,
319 ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG);
320iscsi_iface_net_attr(ipv6_iface, link_local_autocfg,
321 ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG);
322
323/* common read-only iface attributes */
324iscsi_iface_net_attr(iface, enabled, ISCSI_NET_PARAM_IFACE_ENABLE);
325iscsi_iface_net_attr(iface, vlan_id, ISCSI_NET_PARAM_VLAN_ID);
326iscsi_iface_net_attr(iface, vlan_priority, ISCSI_NET_PARAM_VLAN_PRIORITY);
327iscsi_iface_net_attr(iface, vlan_enabled, ISCSI_NET_PARAM_VLAN_ENABLED);
328iscsi_iface_net_attr(iface, mtu, ISCSI_NET_PARAM_MTU);
329iscsi_iface_net_attr(iface, port, ISCSI_NET_PARAM_PORT);
330
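For orientation, here is a hand expansion of one instance of the two macros above (ipv4_iface/ipaddress); this is roughly the preprocessor output, not code from the patch:

	static ssize_t
	show_ipv4_iface_ipaddress(struct device *dev,
				  struct device_attribute *attr, char *buf)
	{
		struct iscsi_iface *iface = iscsi_dev_to_iface(dev);
		struct iscsi_transport *t = iface->transport;

		/* the transport formats ISCSI_NET_PARAM_IPV4_ADDR into buf */
		return t->get_iface_param(iface, ISCSI_NET_PARAM,
					  ISCSI_NET_PARAM_IPV4_ADDR, buf);
	}

	static struct device_attribute dev_attr_ipv4_iface_ipaddress =
		__ATTR(ipaddress, S_IRUGO, show_ipv4_iface_ipaddress, NULL);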
331static mode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
332 struct attribute *attr, int i)
333{
334 struct device *dev = container_of(kobj, struct device, kobj);
335 struct iscsi_iface *iface = iscsi_dev_to_iface(dev);
336 struct iscsi_transport *t = iface->transport;
337 int param;
338
339 if (attr == &dev_attr_iface_enabled.attr)
340 param = ISCSI_NET_PARAM_IFACE_ENABLE;
341 else if (attr == &dev_attr_iface_vlan_id.attr)
342 param = ISCSI_NET_PARAM_VLAN_ID;
343 else if (attr == &dev_attr_iface_vlan_priority.attr)
344 param = ISCSI_NET_PARAM_VLAN_PRIORITY;
345 else if (attr == &dev_attr_iface_vlan_enabled.attr)
346 param = ISCSI_NET_PARAM_VLAN_ENABLED;
347 else if (attr == &dev_attr_iface_mtu.attr)
348 param = ISCSI_NET_PARAM_MTU;
349 else if (attr == &dev_attr_iface_port.attr)
350 param = ISCSI_NET_PARAM_PORT;
351 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) {
352 if (attr == &dev_attr_ipv4_iface_ipaddress.attr)
353 param = ISCSI_NET_PARAM_IPV4_ADDR;
354 else if (attr == &dev_attr_ipv4_iface_gateway.attr)
355 param = ISCSI_NET_PARAM_IPV4_GW;
356 else if (attr == &dev_attr_ipv4_iface_subnet.attr)
357 param = ISCSI_NET_PARAM_IPV4_SUBNET;
358 else if (attr == &dev_attr_ipv4_iface_bootproto.attr)
359 param = ISCSI_NET_PARAM_IPV4_BOOTPROTO;
360 else
361 return 0;
362 } else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) {
363 if (attr == &dev_attr_ipv6_iface_ipaddress.attr)
364 param = ISCSI_NET_PARAM_IPV6_ADDR;
365 else if (attr == &dev_attr_ipv6_iface_link_local_addr.attr)
366 param = ISCSI_NET_PARAM_IPV6_LINKLOCAL;
367 else if (attr == &dev_attr_ipv6_iface_router_addr.attr)
368 param = ISCSI_NET_PARAM_IPV6_ROUTER;
369 else if (attr == &dev_attr_ipv6_iface_ipaddr_autocfg.attr)
370 param = ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG;
371 else if (attr == &dev_attr_ipv6_iface_link_local_autocfg.attr)
372 param = ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG;
373 else
374 return 0;
375 } else {
376 WARN_ONCE(1, "Invalid iface attr");
377 return 0;
378 }
379
380 return t->attr_is_visible(ISCSI_NET_PARAM, param);
381}
382
383static struct attribute *iscsi_iface_attrs[] = {
384 &dev_attr_iface_enabled.attr,
385 &dev_attr_iface_vlan_id.attr,
386 &dev_attr_iface_vlan_priority.attr,
387 &dev_attr_iface_vlan_enabled.attr,
388 &dev_attr_ipv4_iface_ipaddress.attr,
389 &dev_attr_ipv4_iface_gateway.attr,
390 &dev_attr_ipv4_iface_subnet.attr,
391 &dev_attr_ipv4_iface_bootproto.attr,
392 &dev_attr_ipv6_iface_ipaddress.attr,
393 &dev_attr_ipv6_iface_link_local_addr.attr,
394 &dev_attr_ipv6_iface_router_addr.attr,
395 &dev_attr_ipv6_iface_ipaddr_autocfg.attr,
396 &dev_attr_ipv6_iface_link_local_autocfg.attr,
397 &dev_attr_iface_mtu.attr,
398 &dev_attr_iface_port.attr,
399 NULL,
400};
401
402static struct attribute_group iscsi_iface_group = {
403 .attrs = iscsi_iface_attrs,
404 .is_visible = iscsi_iface_attr_is_visible,
405};
406
407struct iscsi_iface *
408iscsi_create_iface(struct Scsi_Host *shost, struct iscsi_transport *transport,
409 uint32_t iface_type, uint32_t iface_num, int dd_size)
410{
411 struct iscsi_iface *iface;
412 int err;
413
414 iface = kzalloc(sizeof(*iface) + dd_size, GFP_KERNEL);
415 if (!iface)
416 return NULL;
417
418 iface->transport = transport;
419 iface->iface_type = iface_type;
420 iface->iface_num = iface_num;
421 iface->dev.release = iscsi_iface_release;
422 iface->dev.class = &iscsi_iface_class;
423 /* parent reference released in iscsi_iface_release */
424 iface->dev.parent = get_device(&shost->shost_gendev);
425 if (iface_type == ISCSI_IFACE_TYPE_IPV4)
426 dev_set_name(&iface->dev, "ipv4-iface-%u-%u", shost->host_no,
427 iface_num);
428 else
429 dev_set_name(&iface->dev, "ipv6-iface-%u-%u", shost->host_no,
430 iface_num);
431
432 err = device_register(&iface->dev);
433 if (err)
434 goto free_iface;
435
436 err = sysfs_create_group(&iface->dev.kobj, &iscsi_iface_group);
437 if (err)
438 goto unreg_iface;
439
440 if (dd_size)
441 iface->dd_data = &iface[1];
442 return iface;
443
444unreg_iface:
445 device_unregister(&iface->dev);
446 return NULL;
447
448free_iface:
449 put_device(iface->dev.parent);
450 kfree(iface);
451 return NULL;
452}
453EXPORT_SYMBOL_GPL(iscsi_create_iface);
454
455void iscsi_destroy_iface(struct iscsi_iface *iface)
456{
457 sysfs_remove_group(&iface->dev.kobj, &iscsi_iface_group);
458 device_unregister(&iface->dev);
459}
460EXPORT_SYMBOL_GPL(iscsi_destroy_iface);
461
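The dd_data handling in iscsi_create_iface() is the usual trailing-allocation idiom: one kzalloc() covers the core object plus dd_size bytes of LLD-private data, and &iface[1] is the first byte past the structure. The same pattern in isolation (generic sketch, not patch code):

	struct obj {
		int core_state;
		void *dd_data;	/* points into the trailing LLD area */
	};

	static struct obj *obj_alloc(int dd_size)
	{
		struct obj *o = kzalloc(sizeof(*o) + dd_size, GFP_KERNEL);

		if (o && dd_size)
			o->dd_data = &o[1];	/* memory right behind *o */
		return o;
	}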
462/*
463 * BSG support
464 */
465/**
466 * iscsi_bsg_host_dispatch - Dispatch command to LLD.
467 * @job: bsg job to be processed
468 */
469static int iscsi_bsg_host_dispatch(struct bsg_job *job)
470{
471 struct Scsi_Host *shost = iscsi_job_to_shost(job);
472 struct iscsi_bsg_request *req = job->request;
473 struct iscsi_bsg_reply *reply = job->reply;
474 struct iscsi_internal *i = to_iscsi_internal(shost->transportt);
475 int cmdlen = sizeof(uint32_t); /* start with length of msgcode */
476 int ret;
477
478 /* check if we have the msgcode value at least */
479 if (job->request_len < sizeof(uint32_t)) {
480 ret = -ENOMSG;
481 goto fail_host_msg;
482 }
483
484 /* Validate the host command */
485 switch (req->msgcode) {
486 case ISCSI_BSG_HST_VENDOR:
487 cmdlen += sizeof(struct iscsi_bsg_host_vendor);
488 if ((shost->hostt->vendor_id == 0L) ||
489 (req->rqst_data.h_vendor.vendor_id !=
490 shost->hostt->vendor_id)) {
491 ret = -ESRCH;
492 goto fail_host_msg;
493 }
494 break;
495 default:
496 ret = -EBADR;
497 goto fail_host_msg;
498 }
499
500 /* check if we really have all the request data needed */
501 if (job->request_len < cmdlen) {
502 ret = -ENOMSG;
503 goto fail_host_msg;
504 }
505
506 ret = i->iscsi_transport->bsg_request(job);
507 if (!ret)
508 return 0;
509
510fail_host_msg:
511 /* return the errno failure code as the only status */
512 BUG_ON(job->reply_len < sizeof(uint32_t));
513 reply->reply_payload_rcv_len = 0;
514 reply->result = ret;
515 job->reply_len = sizeof(uint32_t);
516 bsg_job_done(job, ret, 0);
517 return 0;
518}
519
520/**
521 * iscsi_bsg_host_add - Create and add the bsg hooks to receive requests
522 * @shost: shost for iscsi_host
523 * @cls_host: iscsi_cls_host adding the structures to
524 */
525static int
526iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost)
527{
528 struct device *dev = &shost->shost_gendev;
529 struct iscsi_internal *i = to_iscsi_internal(shost->transportt);
530 struct request_queue *q;
531 char bsg_name[20];
532 int ret;
533
534 if (!i->iscsi_transport->bsg_request)
535 return -ENOTSUPP;
536
537 snprintf(bsg_name, sizeof(bsg_name), "iscsi_host%d", shost->host_no);
538
539 q = __scsi_alloc_queue(shost, bsg_request_fn);
540 if (!q)
541 return -ENOMEM;
542
543 ret = bsg_setup_queue(dev, q, bsg_name, iscsi_bsg_host_dispatch, 0);
544 if (ret) {
545 shost_printk(KERN_ERR, shost, "bsg interface failed to "
546 "initialize - no request queue\n");
547 blk_cleanup_queue(q);
548 return ret;
549 }
550
551 ihost->bsg_q = q;
552 return 0;
553}
554
273static int iscsi_setup_host(struct transport_container *tc, struct device *dev, 555static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
274 struct device *cdev) 556 struct device *cdev)
275{ 557{
@@ -279,13 +561,30 @@ static int iscsi_setup_host(struct transport_container *tc, struct device *dev,
279 memset(ihost, 0, sizeof(*ihost)); 561 memset(ihost, 0, sizeof(*ihost));
280 atomic_set(&ihost->nr_scans, 0); 562 atomic_set(&ihost->nr_scans, 0);
281 mutex_init(&ihost->mutex); 563 mutex_init(&ihost->mutex);
564
565 iscsi_bsg_host_add(shost, ihost);
566 /* ignore any bsg add error - we just can't do sgio */
567
568 return 0;
569}
570
571static int iscsi_remove_host(struct transport_container *tc,
572 struct device *dev, struct device *cdev)
573{
574 struct Scsi_Host *shost = dev_to_shost(dev);
575 struct iscsi_cls_host *ihost = shost->shost_data;
576
577 if (ihost->bsg_q) {
578 bsg_remove_queue(ihost->bsg_q);
579 blk_cleanup_queue(ihost->bsg_q);
580 }
282 return 0; 581 return 0;
283} 582}
284 583
285static DECLARE_TRANSPORT_CLASS(iscsi_host_class, 584static DECLARE_TRANSPORT_CLASS(iscsi_host_class,
286 "iscsi_host", 585 "iscsi_host",
287 iscsi_setup_host, 586 iscsi_setup_host,
288 NULL, 587 iscsi_remove_host,
289 NULL); 588 NULL);
290 589
291static DECLARE_TRANSPORT_CLASS(iscsi_session_class, 590static DECLARE_TRANSPORT_CLASS(iscsi_session_class,
@@ -404,6 +703,19 @@ int iscsi_session_chkready(struct iscsi_cls_session *session)
404} 703}
405EXPORT_SYMBOL_GPL(iscsi_session_chkready); 704EXPORT_SYMBOL_GPL(iscsi_session_chkready);
406 705
706int iscsi_is_session_online(struct iscsi_cls_session *session)
707{
708 unsigned long flags;
709 int ret = 0;
710
711 spin_lock_irqsave(&session->lock, flags);
712 if (session->state == ISCSI_SESSION_LOGGED_IN)
713 ret = 1;
714 spin_unlock_irqrestore(&session->lock, flags);
715 return ret;
716}
717EXPORT_SYMBOL_GPL(iscsi_is_session_online);
718
407static void iscsi_session_release(struct device *dev) 719static void iscsi_session_release(struct device *dev)
408{ 720{
409 struct iscsi_cls_session *session = iscsi_dev_to_session(dev); 721 struct iscsi_cls_session *session = iscsi_dev_to_session(dev);
@@ -680,6 +992,7 @@ static void __iscsi_unbind_session(struct work_struct *work)
680 struct Scsi_Host *shost = iscsi_session_to_shost(session); 992 struct Scsi_Host *shost = iscsi_session_to_shost(session);
681 struct iscsi_cls_host *ihost = shost->shost_data; 993 struct iscsi_cls_host *ihost = shost->shost_data;
682 unsigned long flags; 994 unsigned long flags;
995 unsigned int target_id;
683 996
684 ISCSI_DBG_TRANS_SESSION(session, "Unbinding session\n"); 997 ISCSI_DBG_TRANS_SESSION(session, "Unbinding session\n");
685 998
@@ -691,10 +1004,15 @@ static void __iscsi_unbind_session(struct work_struct *work)
691 mutex_unlock(&ihost->mutex); 1004 mutex_unlock(&ihost->mutex);
692 return; 1005 return;
693 } 1006 }
1007
1008 target_id = session->target_id;
694 session->target_id = ISCSI_MAX_TARGET; 1009 session->target_id = ISCSI_MAX_TARGET;
695 spin_unlock_irqrestore(&session->lock, flags); 1010 spin_unlock_irqrestore(&session->lock, flags);
696 mutex_unlock(&ihost->mutex); 1011 mutex_unlock(&ihost->mutex);
697 1012
1013 if (session->ida_used)
1014 ida_simple_remove(&iscsi_sess_ida, target_id);
1015
698 scsi_remove_target(&session->dev); 1016 scsi_remove_target(&session->dev);
699 iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION); 1017 iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
700 ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n"); 1018 ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n");
@@ -735,59 +1053,36 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
735} 1053}
736EXPORT_SYMBOL_GPL(iscsi_alloc_session); 1054EXPORT_SYMBOL_GPL(iscsi_alloc_session);
737 1055
738static int iscsi_get_next_target_id(struct device *dev, void *data)
739{
740 struct iscsi_cls_session *session;
741 unsigned long flags;
742 int err = 0;
743
744 if (!iscsi_is_session_dev(dev))
745 return 0;
746
747 session = iscsi_dev_to_session(dev);
748 spin_lock_irqsave(&session->lock, flags);
749 if (*((unsigned int *) data) == session->target_id)
750 err = -EEXIST;
751 spin_unlock_irqrestore(&session->lock, flags);
752 return err;
753}
754
755int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) 1056int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
756{ 1057{
757 struct Scsi_Host *shost = iscsi_session_to_shost(session); 1058 struct Scsi_Host *shost = iscsi_session_to_shost(session);
758 struct iscsi_cls_host *ihost; 1059 struct iscsi_cls_host *ihost;
759 unsigned long flags; 1060 unsigned long flags;
760 unsigned int id = target_id; 1061 int id = 0;
761 int err; 1062 int err;
762 1063
763 ihost = shost->shost_data; 1064 ihost = shost->shost_data;
764 session->sid = atomic_add_return(1, &iscsi_session_nr); 1065 session->sid = atomic_add_return(1, &iscsi_session_nr);
765 1066
766 if (id == ISCSI_MAX_TARGET) { 1067 if (target_id == ISCSI_MAX_TARGET) {
767 for (id = 0; id < ISCSI_MAX_TARGET; id++) { 1068 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
768 err = device_for_each_child(&shost->shost_gendev, &id,
769 iscsi_get_next_target_id);
770 if (!err)
771 break;
772 }
773 1069
774 if (id == ISCSI_MAX_TARGET) { 1070 if (id < 0) {
775 iscsi_cls_session_printk(KERN_ERR, session, 1071 iscsi_cls_session_printk(KERN_ERR, session,
776 "Too many iscsi targets. Max " 1072 "Failure in Target ID Allocation\n");
777 "number of targets is %d.\n", 1073 return id;
778 ISCSI_MAX_TARGET - 1);
779 err = -EOVERFLOW;
780 goto release_host;
781 } 1074 }
782 } 1075 session->target_id = (unsigned int)id;
783 session->target_id = id; 1076 session->ida_used = true;
1077 } else
1078 session->target_id = target_id;
784 1079
785 dev_set_name(&session->dev, "session%u", session->sid); 1080 dev_set_name(&session->dev, "session%u", session->sid);
786 err = device_add(&session->dev); 1081 err = device_add(&session->dev);
787 if (err) { 1082 if (err) {
788 iscsi_cls_session_printk(KERN_ERR, session, 1083 iscsi_cls_session_printk(KERN_ERR, session,
789 "could not register session's dev\n"); 1084 "could not register session's dev\n");
790 goto release_host; 1085 goto release_ida;
791 } 1086 }
792 transport_register_device(&session->dev); 1087 transport_register_device(&session->dev);
793 1088
@@ -799,8 +1094,10 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
799 ISCSI_DBG_TRANS_SESSION(session, "Completed session adding\n"); 1094 ISCSI_DBG_TRANS_SESSION(session, "Completed session adding\n");
800 return 0; 1095 return 0;
801 1096
802release_host: 1097release_ida:
803 scsi_host_put(shost); 1098 if (session->ida_used)
1099 ida_simple_remove(&iscsi_sess_ida, session->target_id);
1100
804 return err; 1101 return err;
805} 1102}
806EXPORT_SYMBOL_GPL(iscsi_add_session); 1103EXPORT_SYMBOL_GPL(iscsi_add_session);
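The ida_simple_get()/ida_simple_remove() pair used above replaces the old linear walk over child session devices with a proper id allocator. The pattern in isolation (illustrative):

	#include <linux/idr.h>

	static DEFINE_IDA(example_ida);

	static int example_get_id(void)
	{
		/* smallest free id >= 0, or a negative errno on failure */
		return ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);
	}

	static void example_put_id(int id)
	{
		ida_simple_remove(&example_ida, id);
	}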
@@ -1144,6 +1441,40 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
1144} 1441}
1145EXPORT_SYMBOL_GPL(iscsi_conn_error_event); 1442EXPORT_SYMBOL_GPL(iscsi_conn_error_event);
1146 1443
1444void iscsi_conn_login_event(struct iscsi_cls_conn *conn,
1445 enum iscsi_conn_state state)
1446{
1447 struct nlmsghdr *nlh;
1448 struct sk_buff *skb;
1449 struct iscsi_uevent *ev;
1450 struct iscsi_internal *priv;
1451 int len = NLMSG_SPACE(sizeof(*ev));
1452
1453 priv = iscsi_if_transport_lookup(conn->transport);
1454 if (!priv)
1455 return;
1456
1457 skb = alloc_skb(len, GFP_ATOMIC);
1458 if (!skb) {
1459 iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored "
1460 "conn login (%d)\n", state);
1461 return;
1462 }
1463
1464 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
1465 ev = NLMSG_DATA(nlh);
1466 ev->transport_handle = iscsi_handle(conn->transport);
1467 ev->type = ISCSI_KEVENT_CONN_LOGIN_STATE;
1468 ev->r.conn_login.state = state;
1469 ev->r.conn_login.cid = conn->cid;
1470 ev->r.conn_login.sid = iscsi_conn_get_sid(conn);
1471 iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
1472
1473 iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn login (%d)\n",
1474 state);
1475}
1476EXPORT_SYMBOL_GPL(iscsi_conn_login_event);
1477
1147static int 1478static int
1148iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi, 1479iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
1149 void *payload, int size) 1480 void *payload, int size)
@@ -1558,6 +1889,29 @@ iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev)
1558} 1889}
1559 1890
1560static int 1891static int
1892iscsi_set_iface_params(struct iscsi_transport *transport,
1893 struct iscsi_uevent *ev, uint32_t len)
1894{
1895 char *data = (char *)ev + sizeof(*ev);
1896 struct Scsi_Host *shost;
1897 int err;
1898
1899 if (!transport->set_iface_param)
1900 return -ENOSYS;
1901
1902 shost = scsi_host_lookup(ev->u.set_iface_params.host_no);
1903 if (!shost) {
1904 printk(KERN_ERR "set_iface_params could not find host no %u\n",
1905 ev->u.set_iface_params.host_no);
1906 return -ENODEV;
1907 }
1908
1909 err = transport->set_iface_param(shost, data, len);
1910 scsi_host_put(shost);
1911 return err;
1912}
1913
1914static int
1561iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) 1915iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
1562{ 1916{
1563 int err = 0; 1917 int err = 0;
@@ -1696,6 +2050,10 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
1696 case ISCSI_UEVENT_PATH_UPDATE: 2050 case ISCSI_UEVENT_PATH_UPDATE:
1697 err = iscsi_set_path(transport, ev); 2051 err = iscsi_set_path(transport, ev);
1698 break; 2052 break;
2053 case ISCSI_UEVENT_SET_IFACE_PARAMS:
2054 err = iscsi_set_iface_params(transport, ev,
2055 nlmsg_attrlen(nlh, sizeof(*ev)));
2056 break;
1699 default: 2057 default:
1700 err = -ENOSYS; 2058 err = -ENOSYS;
1701 break; 2059 break;
@@ -1824,6 +2182,70 @@ static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, \
1824iscsi_conn_ep_attr(address, ISCSI_PARAM_CONN_ADDRESS); 2182iscsi_conn_ep_attr(address, ISCSI_PARAM_CONN_ADDRESS);
1825iscsi_conn_ep_attr(port, ISCSI_PARAM_CONN_PORT); 2183iscsi_conn_ep_attr(port, ISCSI_PARAM_CONN_PORT);
1826 2184
2185static struct attribute *iscsi_conn_attrs[] = {
2186 &dev_attr_conn_max_recv_dlength.attr,
2187 &dev_attr_conn_max_xmit_dlength.attr,
2188 &dev_attr_conn_header_digest.attr,
2189 &dev_attr_conn_data_digest.attr,
2190 &dev_attr_conn_ifmarker.attr,
2191 &dev_attr_conn_ofmarker.attr,
2192 &dev_attr_conn_address.attr,
2193 &dev_attr_conn_port.attr,
2194 &dev_attr_conn_exp_statsn.attr,
2195 &dev_attr_conn_persistent_address.attr,
2196 &dev_attr_conn_persistent_port.attr,
2197 &dev_attr_conn_ping_tmo.attr,
2198 &dev_attr_conn_recv_tmo.attr,
2199 NULL,
2200};
2201
2202static mode_t iscsi_conn_attr_is_visible(struct kobject *kobj,
2203 struct attribute *attr, int i)
2204{
2205 struct device *cdev = container_of(kobj, struct device, kobj);
2206 struct iscsi_cls_conn *conn = transport_class_to_conn(cdev);
2207 struct iscsi_transport *t = conn->transport;
2208 int param;
2209
2210 if (attr == &dev_attr_conn_max_recv_dlength.attr)
2211 param = ISCSI_PARAM_MAX_RECV_DLENGTH;
2212 else if (attr == &dev_attr_conn_max_xmit_dlength.attr)
2213 param = ISCSI_PARAM_MAX_XMIT_DLENGTH;
2214 else if (attr == &dev_attr_conn_header_digest.attr)
2215 param = ISCSI_PARAM_HDRDGST_EN;
2216 else if (attr == &dev_attr_conn_data_digest.attr)
2217 param = ISCSI_PARAM_DATADGST_EN;
2218 else if (attr == &dev_attr_conn_ifmarker.attr)
2219 param = ISCSI_PARAM_IFMARKER_EN;
2220 else if (attr == &dev_attr_conn_ofmarker.attr)
2221 param = ISCSI_PARAM_OFMARKER_EN;
2222 else if (attr == &dev_attr_conn_address.attr)
2223 param = ISCSI_PARAM_CONN_ADDRESS;
2224 else if (attr == &dev_attr_conn_port.attr)
2225 param = ISCSI_PARAM_CONN_PORT;
2226 else if (attr == &dev_attr_conn_exp_statsn.attr)
2227 param = ISCSI_PARAM_EXP_STATSN;
2228 else if (attr == &dev_attr_conn_persistent_address.attr)
2229 param = ISCSI_PARAM_PERSISTENT_ADDRESS;
2230 else if (attr == &dev_attr_conn_persistent_port.attr)
2231 param = ISCSI_PARAM_PERSISTENT_PORT;
2232 else if (attr == &dev_attr_conn_ping_tmo.attr)
2233 param = ISCSI_PARAM_PING_TMO;
2234 else if (attr == &dev_attr_conn_recv_tmo.attr)
2235 param = ISCSI_PARAM_RECV_TMO;
2236 else {
2237 WARN_ONCE(1, "Invalid conn attr");
2238 return 0;
2239 }
2240
2241 return t->attr_is_visible(ISCSI_PARAM, param);
2242}
2243
2244static struct attribute_group iscsi_conn_group = {
2245 .attrs = iscsi_conn_attrs,
2246 .is_visible = iscsi_conn_attr_is_visible,
2247};
2248
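With attribute groups, per-transport visibility moves from register-time attribute arrays to the group's is_visible callback, which in turn asks the transport's attr_is_visible hook. A minimal sketch of the hook a transport now supplies (hypothetical transport; the call signature is the one used above):

	static mode_t foo_attr_is_visible(int param_type, int param)
	{
		switch (param_type) {
		case ISCSI_PARAM:
			switch (param) {
			case ISCSI_PARAM_CONN_ADDRESS:
			case ISCSI_PARAM_CONN_PORT:
				return S_IRUGO;	/* expose read-only */
			}
			break;
		}
		return 0;	/* 0 hides the sysfs file entirely */
	}

Returning 0 suppresses creation of that sysfs file, so the fixed attribute arrays here still yield per-transport attribute sets.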
1827/* 2249/*
1828 * iSCSI session attrs 2250 * iSCSI session attrs
1829 */ 2251 */
@@ -1845,7 +2267,6 @@ show_session_param_##param(struct device *dev, \
1845 iscsi_session_attr_show(param, perm) \ 2267 iscsi_session_attr_show(param, perm) \
1846static ISCSI_CLASS_ATTR(sess, field, S_IRUGO, show_session_param_##param, \ 2268static ISCSI_CLASS_ATTR(sess, field, S_IRUGO, show_session_param_##param, \
1847 NULL); 2269 NULL);
1848
1849iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME, 0); 2270iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME, 0);
1850iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN, 0); 2271iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN, 0);
1851iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T, 0); 2272iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T, 0);
@@ -1922,6 +2343,100 @@ static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUSR, \
1922 store_priv_session_##field) 2343 store_priv_session_##field)
1923iscsi_priv_session_rw_attr(recovery_tmo, "%d"); 2344iscsi_priv_session_rw_attr(recovery_tmo, "%d");
1924 2345
2346static struct attribute *iscsi_session_attrs[] = {
2347 &dev_attr_sess_initial_r2t.attr,
2348 &dev_attr_sess_max_outstanding_r2t.attr,
2349 &dev_attr_sess_immediate_data.attr,
2350 &dev_attr_sess_first_burst_len.attr,
2351 &dev_attr_sess_max_burst_len.attr,
2352 &dev_attr_sess_data_pdu_in_order.attr,
2353 &dev_attr_sess_data_seq_in_order.attr,
2354 &dev_attr_sess_erl.attr,
2355 &dev_attr_sess_targetname.attr,
2356 &dev_attr_sess_tpgt.attr,
2357 &dev_attr_sess_password.attr,
2358 &dev_attr_sess_password_in.attr,
2359 &dev_attr_sess_username.attr,
2360 &dev_attr_sess_username_in.attr,
2361 &dev_attr_sess_fast_abort.attr,
2362 &dev_attr_sess_abort_tmo.attr,
2363 &dev_attr_sess_lu_reset_tmo.attr,
2364 &dev_attr_sess_tgt_reset_tmo.attr,
2365 &dev_attr_sess_ifacename.attr,
2366 &dev_attr_sess_initiatorname.attr,
2367 &dev_attr_sess_targetalias.attr,
2368 &dev_attr_priv_sess_recovery_tmo.attr,
2369 &dev_attr_priv_sess_state.attr,
2370 NULL,
2371};
2372
2373static mode_t iscsi_session_attr_is_visible(struct kobject *kobj,
2374 struct attribute *attr, int i)
2375{
2376 struct device *cdev = container_of(kobj, struct device, kobj);
2377 struct iscsi_cls_session *session = transport_class_to_session(cdev);
2378 struct iscsi_transport *t = session->transport;
2379 int param;
2380
2381 if (attr == &dev_attr_sess_initial_r2t.attr)
2382 param = ISCSI_PARAM_INITIAL_R2T_EN;
2383 else if (attr == &dev_attr_sess_max_outstanding_r2t.attr)
2384 param = ISCSI_PARAM_MAX_R2T;
2385 else if (attr == &dev_attr_sess_immediate_data.attr)
2386 param = ISCSI_PARAM_IMM_DATA_EN;
2387 else if (attr == &dev_attr_sess_first_burst_len.attr)
2388 param = ISCSI_PARAM_FIRST_BURST;
2389 else if (attr == &dev_attr_sess_max_burst_len.attr)
2390 param = ISCSI_PARAM_MAX_BURST;
2391 else if (attr == &dev_attr_sess_data_pdu_in_order.attr)
2392 param = ISCSI_PARAM_PDU_INORDER_EN;
2393 else if (attr == &dev_attr_sess_data_seq_in_order.attr)
2394 param = ISCSI_PARAM_DATASEQ_INORDER_EN;
2395 else if (attr == &dev_attr_sess_erl.attr)
2396 param = ISCSI_PARAM_ERL;
2397 else if (attr == &dev_attr_sess_targetname.attr)
2398 param = ISCSI_PARAM_TARGET_NAME;
2399 else if (attr == &dev_attr_sess_tpgt.attr)
2400 param = ISCSI_PARAM_TPGT;
2401 else if (attr == &dev_attr_sess_password.attr)
2402 param = ISCSI_PARAM_USERNAME;
2403 else if (attr == &dev_attr_sess_password_in.attr)
2404 param = ISCSI_PARAM_USERNAME_IN;
2405 else if (attr == &dev_attr_sess_username.attr)
2406 param = ISCSI_PARAM_PASSWORD;
2407 else if (attr == &dev_attr_sess_username_in.attr)
2408 param = ISCSI_PARAM_PASSWORD_IN;
2409 else if (attr == &dev_attr_sess_fast_abort.attr)
2410 param = ISCSI_PARAM_FAST_ABORT;
2411 else if (attr == &dev_attr_sess_abort_tmo.attr)
2412 param = ISCSI_PARAM_ABORT_TMO;
2413 else if (attr == &dev_attr_sess_lu_reset_tmo.attr)
2414 param = ISCSI_PARAM_LU_RESET_TMO;
2415 else if (attr == &dev_attr_sess_tgt_reset_tmo.attr)
2416 param = ISCSI_PARAM_TGT_RESET_TMO;
2417 else if (attr == &dev_attr_sess_ifacename.attr)
2418 param = ISCSI_PARAM_IFACE_NAME;
2419 else if (attr == &dev_attr_sess_initiatorname.attr)
2420 param = ISCSI_PARAM_INITIATOR_NAME;
2421 else if (attr == &dev_attr_sess_targetalias.attr)
2422 param = ISCSI_PARAM_TARGET_ALIAS;
2423 else if (attr == &dev_attr_priv_sess_recovery_tmo.attr)
2424 return S_IRUGO | S_IWUSR;
2425 else if (attr == &dev_attr_priv_sess_state.attr)
2426 return S_IRUGO;
2427 else {
2428 WARN_ONCE(1, "Invalid session attr");
2429 return 0;
2430 }
2431
2432 return t->attr_is_visible(ISCSI_PARAM, param);
2433}
2434
2435static struct attribute_group iscsi_session_group = {
2436 .attrs = iscsi_session_attrs,
2437 .is_visible = iscsi_session_attr_is_visible,
2438};
2439
1925/* 2440/*
1926 * iSCSI host attrs 2441 * iSCSI host attrs
1927 */ 2442 */
@@ -1945,41 +2460,42 @@ iscsi_host_attr(hwaddress, ISCSI_HOST_PARAM_HWADDRESS);
1945iscsi_host_attr(ipaddress, ISCSI_HOST_PARAM_IPADDRESS); 2460iscsi_host_attr(ipaddress, ISCSI_HOST_PARAM_IPADDRESS);
1946iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME); 2461iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME);
1947 2462
1948#define SETUP_PRIV_SESSION_RD_ATTR(field) \ 2463static struct attribute *iscsi_host_attrs[] = {
1949do { \ 2464 &dev_attr_host_netdev.attr,
1950 priv->session_attrs[count] = &dev_attr_priv_sess_##field; \ 2465 &dev_attr_host_hwaddress.attr,
1951 count++; \ 2466 &dev_attr_host_ipaddress.attr,
1952} while (0) 2467 &dev_attr_host_initiatorname.attr,
1953 2468 NULL,
1954#define SETUP_PRIV_SESSION_RW_ATTR(field) \ 2469};
1955do { \
1956 priv->session_attrs[count] = &dev_attr_priv_sess_##field; \
1957 count++; \
1958} while (0)
1959
1960#define SETUP_SESSION_RD_ATTR(field, param_flag) \
1961do { \
1962 if (tt->param_mask & param_flag) { \
1963 priv->session_attrs[count] = &dev_attr_sess_##field; \
1964 count++; \
1965 } \
1966} while (0)
1967 2470
1968#define SETUP_CONN_RD_ATTR(field, param_flag) \ 2471static mode_t iscsi_host_attr_is_visible(struct kobject *kobj,
1969do { \ 2472 struct attribute *attr, int i)
1970 if (tt->param_mask & param_flag) { \ 2473{
1971 priv->conn_attrs[count] = &dev_attr_conn_##field; \ 2474 struct device *cdev = container_of(kobj, struct device, kobj);
1972 count++; \ 2475 struct Scsi_Host *shost = transport_class_to_shost(cdev);
1973 } \ 2476 struct iscsi_internal *priv = to_iscsi_internal(shost->transportt);
1974} while (0) 2477 int param;
2478
2479 if (attr == &dev_attr_host_netdev.attr)
2480 param = ISCSI_HOST_PARAM_NETDEV_NAME;
2481 else if (attr == &dev_attr_host_hwaddress.attr)
2482 param = ISCSI_HOST_PARAM_HWADDRESS;
2483 else if (attr == &dev_attr_host_ipaddress.attr)
2484 param = ISCSI_HOST_PARAM_IPADDRESS;
2485 else if (attr == &dev_attr_host_initiatorname.attr)
2486 param = ISCSI_HOST_PARAM_INITIATOR_NAME;
2487 else {
2488 WARN_ONCE(1, "Invalid host attr");
2489 return 0;
2490 }
1975 2491
1976#define SETUP_HOST_RD_ATTR(field, param_flag) \ 2492 return priv->iscsi_transport->attr_is_visible(ISCSI_HOST_PARAM, param);
1977do { \ 2493}
1978 if (tt->host_param_mask & param_flag) { \ 2494
1979 priv->host_attrs[count] = &dev_attr_host_##field; \ 2495static struct attribute_group iscsi_host_group = {
1980 count++; \ 2496 .attrs = iscsi_host_attrs,
1981 } \ 2497 .is_visible = iscsi_host_attr_is_visible,
1982} while (0) 2498};
1983 2499
1984static int iscsi_session_match(struct attribute_container *cont, 2500static int iscsi_session_match(struct attribute_container *cont,
1985 struct device *dev) 2501 struct device *dev)
@@ -2051,7 +2567,7 @@ iscsi_register_transport(struct iscsi_transport *tt)
2051{ 2567{
2052 struct iscsi_internal *priv; 2568 struct iscsi_internal *priv;
2053 unsigned long flags; 2569 unsigned long flags;
2054 int count = 0, err; 2570 int err;
2055 2571
2056 BUG_ON(!tt); 2572 BUG_ON(!tt);
2057 2573
@@ -2078,77 +2594,24 @@ iscsi_register_transport(struct iscsi_transport *tt)
2078 goto unregister_dev; 2594 goto unregister_dev;
2079 2595
2080 /* host parameters */ 2596 /* host parameters */
2081 priv->t.host_attrs.ac.attrs = &priv->host_attrs[0];
2082 priv->t.host_attrs.ac.class = &iscsi_host_class.class; 2597 priv->t.host_attrs.ac.class = &iscsi_host_class.class;
2083 priv->t.host_attrs.ac.match = iscsi_host_match; 2598 priv->t.host_attrs.ac.match = iscsi_host_match;
2599 priv->t.host_attrs.ac.grp = &iscsi_host_group;
2084 priv->t.host_size = sizeof(struct iscsi_cls_host); 2600 priv->t.host_size = sizeof(struct iscsi_cls_host);
2085 transport_container_register(&priv->t.host_attrs); 2601 transport_container_register(&priv->t.host_attrs);
2086 2602
2087 SETUP_HOST_RD_ATTR(netdev, ISCSI_HOST_NETDEV_NAME);
2088 SETUP_HOST_RD_ATTR(ipaddress, ISCSI_HOST_IPADDRESS);
2089 SETUP_HOST_RD_ATTR(hwaddress, ISCSI_HOST_HWADDRESS);
2090 SETUP_HOST_RD_ATTR(initiatorname, ISCSI_HOST_INITIATOR_NAME);
2091 BUG_ON(count > ISCSI_HOST_ATTRS);
2092 priv->host_attrs[count] = NULL;
2093 count = 0;
2094
2095 /* connection parameters */ 2603 /* connection parameters */
2096 priv->conn_cont.ac.attrs = &priv->conn_attrs[0];
2097 priv->conn_cont.ac.class = &iscsi_connection_class.class; 2604 priv->conn_cont.ac.class = &iscsi_connection_class.class;
2098 priv->conn_cont.ac.match = iscsi_conn_match; 2605 priv->conn_cont.ac.match = iscsi_conn_match;
2606 priv->conn_cont.ac.grp = &iscsi_conn_group;
2099 transport_container_register(&priv->conn_cont); 2607 transport_container_register(&priv->conn_cont);
2100 2608
2101 SETUP_CONN_RD_ATTR(max_recv_dlength, ISCSI_MAX_RECV_DLENGTH);
2102 SETUP_CONN_RD_ATTR(max_xmit_dlength, ISCSI_MAX_XMIT_DLENGTH);
2103 SETUP_CONN_RD_ATTR(header_digest, ISCSI_HDRDGST_EN);
2104 SETUP_CONN_RD_ATTR(data_digest, ISCSI_DATADGST_EN);
2105 SETUP_CONN_RD_ATTR(ifmarker, ISCSI_IFMARKER_EN);
2106 SETUP_CONN_RD_ATTR(ofmarker, ISCSI_OFMARKER_EN);
2107 SETUP_CONN_RD_ATTR(address, ISCSI_CONN_ADDRESS);
2108 SETUP_CONN_RD_ATTR(port, ISCSI_CONN_PORT);
2109 SETUP_CONN_RD_ATTR(exp_statsn, ISCSI_EXP_STATSN);
2110 SETUP_CONN_RD_ATTR(persistent_address, ISCSI_PERSISTENT_ADDRESS);
2111 SETUP_CONN_RD_ATTR(persistent_port, ISCSI_PERSISTENT_PORT);
2112 SETUP_CONN_RD_ATTR(ping_tmo, ISCSI_PING_TMO);
2113 SETUP_CONN_RD_ATTR(recv_tmo, ISCSI_RECV_TMO);
2114
2115 BUG_ON(count > ISCSI_CONN_ATTRS);
2116 priv->conn_attrs[count] = NULL;
2117 count = 0;
2118
2119 /* session parameters */ 2609 /* session parameters */
2120 priv->session_cont.ac.attrs = &priv->session_attrs[0];
2121 priv->session_cont.ac.class = &iscsi_session_class.class; 2610 priv->session_cont.ac.class = &iscsi_session_class.class;
2122 priv->session_cont.ac.match = iscsi_session_match; 2611 priv->session_cont.ac.match = iscsi_session_match;
2612 priv->session_cont.ac.grp = &iscsi_session_group;
2123 transport_container_register(&priv->session_cont); 2613 transport_container_register(&priv->session_cont);
2124 2614
2125 SETUP_SESSION_RD_ATTR(initial_r2t, ISCSI_INITIAL_R2T_EN);
2126 SETUP_SESSION_RD_ATTR(max_outstanding_r2t, ISCSI_MAX_R2T);
2127 SETUP_SESSION_RD_ATTR(immediate_data, ISCSI_IMM_DATA_EN);
2128 SETUP_SESSION_RD_ATTR(first_burst_len, ISCSI_FIRST_BURST);
2129 SETUP_SESSION_RD_ATTR(max_burst_len, ISCSI_MAX_BURST);
2130 SETUP_SESSION_RD_ATTR(data_pdu_in_order, ISCSI_PDU_INORDER_EN);
2131 SETUP_SESSION_RD_ATTR(data_seq_in_order, ISCSI_DATASEQ_INORDER_EN);
2132 SETUP_SESSION_RD_ATTR(erl, ISCSI_ERL);
2133 SETUP_SESSION_RD_ATTR(targetname, ISCSI_TARGET_NAME);
2134 SETUP_SESSION_RD_ATTR(tpgt, ISCSI_TPGT);
2135 SETUP_SESSION_RD_ATTR(password, ISCSI_USERNAME);
2136 SETUP_SESSION_RD_ATTR(password_in, ISCSI_USERNAME_IN);
2137 SETUP_SESSION_RD_ATTR(username, ISCSI_PASSWORD);
2138 SETUP_SESSION_RD_ATTR(username_in, ISCSI_PASSWORD_IN);
2139 SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT);
2140 SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO);
2141 SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO);
2142 SETUP_SESSION_RD_ATTR(tgt_reset_tmo,ISCSI_TGT_RESET_TMO);
2143 SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME);
2144 SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME);
2145 SETUP_SESSION_RD_ATTR(targetalias, ISCSI_TARGET_ALIAS);
2146 SETUP_PRIV_SESSION_RW_ATTR(recovery_tmo);
2147 SETUP_PRIV_SESSION_RD_ATTR(state);
2148
2149 BUG_ON(count > ISCSI_SESSION_ATTRS);
2150 priv->session_attrs[count] = NULL;
2151
2152 spin_lock_irqsave(&iscsi_transport_lock, flags); 2615 spin_lock_irqsave(&iscsi_transport_lock, flags);
2153 list_add(&priv->list, &iscsi_transports); 2616 list_add(&priv->list, &iscsi_transports);
2154 spin_unlock_irqrestore(&iscsi_transport_lock, flags); 2617 spin_unlock_irqrestore(&iscsi_transport_lock, flags);
@@ -2210,10 +2673,14 @@ static __init int iscsi_transport_init(void)
2210 if (err) 2673 if (err)
2211 goto unregister_transport_class; 2674 goto unregister_transport_class;
2212 2675
2213 err = transport_class_register(&iscsi_host_class); 2676 err = class_register(&iscsi_iface_class);
2214 if (err) 2677 if (err)
2215 goto unregister_endpoint_class; 2678 goto unregister_endpoint_class;
2216 2679
2680 err = transport_class_register(&iscsi_host_class);
2681 if (err)
2682 goto unregister_iface_class;
2683
2217 err = transport_class_register(&iscsi_connection_class); 2684 err = transport_class_register(&iscsi_connection_class);
2218 if (err) 2685 if (err)
2219 goto unregister_host_class; 2686 goto unregister_host_class;
@@ -2243,6 +2710,8 @@ unregister_conn_class:
2243 transport_class_unregister(&iscsi_connection_class); 2710 transport_class_unregister(&iscsi_connection_class);
2244unregister_host_class: 2711unregister_host_class:
2245 transport_class_unregister(&iscsi_host_class); 2712 transport_class_unregister(&iscsi_host_class);
2713unregister_iface_class:
2714 class_unregister(&iscsi_iface_class);
2246unregister_endpoint_class: 2715unregister_endpoint_class:
2247 class_unregister(&iscsi_endpoint_class); 2716 class_unregister(&iscsi_endpoint_class);
2248unregister_transport_class: 2717unregister_transport_class:
@@ -2258,6 +2727,7 @@ static void __exit iscsi_transport_exit(void)
2258 transport_class_unregister(&iscsi_session_class); 2727 transport_class_unregister(&iscsi_session_class);
2259 transport_class_unregister(&iscsi_host_class); 2728 transport_class_unregister(&iscsi_host_class);
2260 class_unregister(&iscsi_endpoint_class); 2729 class_unregister(&iscsi_endpoint_class);
2730 class_unregister(&iscsi_iface_class);
2261 class_unregister(&iscsi_transport_class); 2731 class_unregister(&iscsi_transport_class);
2262} 2732}
2263 2733
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index c6fcf76cade..9d9330ae421 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1545,8 +1545,14 @@ int sas_rphy_add(struct sas_rphy *rphy)
1545 1545
1546 if (identify->device_type == SAS_END_DEVICE && 1546 if (identify->device_type == SAS_END_DEVICE &&
1547 rphy->scsi_target_id != -1) { 1547 rphy->scsi_target_id != -1) {
1548 scsi_scan_target(&rphy->dev, 0, 1548 int lun;
1549 rphy->scsi_target_id, SCAN_WILD_CARD, 0); 1549
1550 if (identify->target_port_protocols & SAS_PROTOCOL_SSP)
1551 lun = SCAN_WILD_CARD;
1552 else
1553 lun = 0;
1554
1555 scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, lun, 0);
1550 } 1556 }
1551 1557
1552 return 0; 1558 return 0;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 953773cb26d..a7942e5c8be 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1066,12 +1066,13 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
1066 unsigned int cmd, unsigned long arg) 1066 unsigned int cmd, unsigned long arg)
1067{ 1067{
1068 struct gendisk *disk = bdev->bd_disk; 1068 struct gendisk *disk = bdev->bd_disk;
1069 struct scsi_device *sdp = scsi_disk(disk)->device; 1069 struct scsi_disk *sdkp = scsi_disk(disk);
1070 struct scsi_device *sdp = sdkp->device;
1070 void __user *p = (void __user *)arg; 1071 void __user *p = (void __user *)arg;
1071 int error; 1072 int error;
1072 1073
1073 SCSI_LOG_IOCTL(1, printk("sd_ioctl: disk=%s, cmd=0x%x\n", 1074 SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
1074 disk->disk_name, cmd)); 1075 "cmd=0x%x\n", disk->disk_name, cmd));
1075 1076
1076 /* 1077 /*
1077 * If we are in the middle of error recovery, don't let anyone 1078 * If we are in the middle of error recovery, don't let anyone
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 02fa4697a0e..6957350e122 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -21,6 +21,8 @@
 #define dev_to_part(device)	container_of((device), struct hd_struct, __dev)
 #define disk_to_dev(disk)	(&(disk)->part0.__dev)
 #define part_to_dev(part)	(&((part)->__dev))
+#define alias_name(disk)	((disk)->alias ? (disk)->alias : \
+					 (disk)->disk_name)
 
 extern struct device_type part_type;
 extern struct kobject *block_depr;
@@ -58,6 +60,7 @@ enum {
 
 #define DISK_MAX_PARTS			256
 #define DISK_NAME_LEN			32
+#define ALIAS_LEN			256
 
 #include <linux/major.h>
 #include <linux/device.h>
@@ -162,6 +165,7 @@ struct gendisk {
 	 * disks that can't be partitioned. */
 
 	char disk_name[DISK_NAME_LEN];	/* name of major driver */
+	char *alias;			/* alias name of disk */
 	char *(*devnode)(struct gendisk *gd, mode_t *mode);
 
 	unsigned int events;		/* supported events */
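
alias_name() falls back to disk_name when no alias has been assigned, so call sites that only want a printable name can use it unconditionally. A one-line sketch of a hypothetical call site:

	/* prefer the user-assigned alias when logging, else "sda" etc. */
	pr_info("%s: capacity changed\n", alias_name(disk));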
diff --git a/include/linux/libata.h b/include/linux/libata.h
index efd6f980076..23fa829bf7a 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1052,6 +1052,8 @@ extern int ata_scsi_slave_config(struct scsi_device *sdev);
 extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
 extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
 				       int queue_depth, int reason);
+extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
+				    int queue_depth, int reason);
 extern struct ata_device *ata_dev_pair(struct ata_device *adev);
 extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
 extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
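
Exporting __ata_change_queue_depth lets SAS/SATA drivers that keep their own ata_port mapping reuse libata's queue-depth bookkeeping; the existing ata_scsi_change_queue_depth then plausibly reduces to a thin wrapper along these lines (a sketch, not necessarily the actual libata body):

int ata_scsi_change_queue_depth(struct scsi_device *sdev,
				int queue_depth, int reason)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);

	return __ata_change_queue_depth(ap, sdev, queue_depth, reason);
}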
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index ddb04568a50..2703e3bedbf 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -59,6 +59,7 @@ enum iscsi_uevent_e {
 	ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST	= UEVENT_BASE + 19,
 
 	ISCSI_UEVENT_PATH_UPDATE	= UEVENT_BASE + 20,
+	ISCSI_UEVENT_SET_IFACE_PARAMS	= UEVENT_BASE + 21,
 
 	/* up events */
 	ISCSI_KEVENT_RECV_PDU		= KEVENT_BASE + 1,
@@ -70,6 +71,7 @@ enum iscsi_uevent_e {
 
 	ISCSI_KEVENT_PATH_REQ		= KEVENT_BASE + 7,
 	ISCSI_KEVENT_IF_DOWN		= KEVENT_BASE + 8,
+	ISCSI_KEVENT_CONN_LOGIN_STATE	= KEVENT_BASE + 9,
 };
 
 enum iscsi_tgt_dscvr {
@@ -172,6 +174,10 @@ struct iscsi_uevent {
 		struct msg_set_path {
 			uint32_t	host_no;
 		} set_path;
+		struct msg_set_iface_params {
+			uint32_t	host_no;
+			uint32_t	count;
+		} set_iface_params;
 	} u;
 	union {
 		/* messages k -> u */
@@ -193,6 +199,11 @@ struct iscsi_uevent {
 			uint32_t	cid;
 			uint64_t	recv_handle;
 		} recv_req;
+		struct msg_conn_login {
+			uint32_t	sid;
+			uint32_t	cid;
+			uint32_t	state; /* enum iscsi_conn_state */
+		} conn_login;
 		struct msg_conn_error {
 			uint32_t	sid;
 			uint32_t	cid;
@@ -214,6 +225,21 @@ struct iscsi_uevent {
 	} r;
 } __attribute__ ((aligned (sizeof(uint64_t))));
 
+enum iscsi_param_type {
+	ISCSI_PARAM,		/* iscsi_param (session, conn, target, LU) */
+	ISCSI_HOST_PARAM,	/* iscsi_host_param */
+	ISCSI_NET_PARAM,	/* iscsi_net_param */
+};
+
+struct iscsi_iface_param_info {
+	uint32_t iface_num;	/* iface number, 0 - n */
+	uint32_t len;		/* Actual length of the param */
+	uint16_t param;		/* iscsi param value */
+	uint8_t iface_type;	/* IPv4 or IPv6 */
+	uint8_t param_type;	/* iscsi_param_type */
+	uint8_t value[0];	/* length sized value follows */
+} __packed;
+
 /*
  * To keep the struct iscsi_uevent size the same for userspace code
  * compatibility, the main structure for ISCSI_UEVENT_PATH_UPDATE and
@@ -237,6 +263,71 @@ struct iscsi_path {
 	uint16_t	pmtu;
 } __attribute__ ((aligned (sizeof(uint64_t))));
 
+/* iscsi iface enabled/disabled setting */
+#define ISCSI_IFACE_DISABLE	0x01
+#define ISCSI_IFACE_ENABLE	0x02
+
+/* ipv4 bootproto */
+#define ISCSI_BOOTPROTO_STATIC		0x01
+#define ISCSI_BOOTPROTO_DHCP		0x02
+
+/* ipv6 addr autoconfig type */
+#define ISCSI_IPV6_AUTOCFG_DISABLE		0x01
+#define ISCSI_IPV6_AUTOCFG_ND_ENABLE		0x02
+#define ISCSI_IPV6_AUTOCFG_DHCPV6_ENABLE	0x03
+
+/* ipv6 link local addr type */
+#define ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE	0x01
+#define ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE	0x02
+
+/* ipv6 router addr type */
+#define ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE	0x01
+#define ISCSI_IPV6_ROUTER_AUTOCFG_DISABLE	0x02
+
+#define ISCSI_IFACE_TYPE_IPV4		0x01
+#define ISCSI_IFACE_TYPE_IPV6		0x02
+
+#define ISCSI_MAX_VLAN_ID		4095
+#define ISCSI_MAX_VLAN_PRIORITY		7
+
+/* iscsi vlan enable/disabled setting */
+#define ISCSI_VLAN_DISABLE	0x01
+#define ISCSI_VLAN_ENABLE	0x02
+
+/* iSCSI network params */
+enum iscsi_net_param {
+	ISCSI_NET_PARAM_IPV4_ADDR		= 1,
+	ISCSI_NET_PARAM_IPV4_SUBNET		= 2,
+	ISCSI_NET_PARAM_IPV4_GW			= 3,
+	ISCSI_NET_PARAM_IPV4_BOOTPROTO		= 4,
+	ISCSI_NET_PARAM_MAC			= 5,
+	ISCSI_NET_PARAM_IPV6_LINKLOCAL		= 6,
+	ISCSI_NET_PARAM_IPV6_ADDR		= 7,
+	ISCSI_NET_PARAM_IPV6_ROUTER		= 8,
+	ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG	= 9,
+	ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG	= 10,
+	ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG	= 11,
+	ISCSI_NET_PARAM_IFACE_ENABLE		= 12,
+	ISCSI_NET_PARAM_VLAN_ID			= 13,
+	ISCSI_NET_PARAM_VLAN_PRIORITY		= 14,
+	ISCSI_NET_PARAM_VLAN_ENABLED		= 15,
+	ISCSI_NET_PARAM_VLAN_TAG		= 16,
+	ISCSI_NET_PARAM_IFACE_TYPE		= 17,
+	ISCSI_NET_PARAM_IFACE_NAME		= 18,
+	ISCSI_NET_PARAM_MTU			= 19,
+	ISCSI_NET_PARAM_PORT			= 20,
+};
+
+enum iscsi_conn_state {
+	ISCSI_CONN_STATE_FREE,
+	ISCSI_CONN_STATE_XPT_WAIT,
+	ISCSI_CONN_STATE_IN_LOGIN,
+	ISCSI_CONN_STATE_LOGGED_IN,
+	ISCSI_CONN_STATE_IN_LOGOUT,
+	ISCSI_CONN_STATE_LOGOUT_REQUESTED,
+	ISCSI_CONN_STATE_CLEANUP_WAIT,
+};
+
 /*
  * Common error codes
  */
@@ -319,44 +410,6 @@ enum iscsi_param {
 	ISCSI_PARAM_MAX,
 };
 
-#define ISCSI_MAX_RECV_DLENGTH		(1ULL << ISCSI_PARAM_MAX_RECV_DLENGTH)
-#define ISCSI_MAX_XMIT_DLENGTH		(1ULL << ISCSI_PARAM_MAX_XMIT_DLENGTH)
-#define ISCSI_HDRDGST_EN		(1ULL << ISCSI_PARAM_HDRDGST_EN)
-#define ISCSI_DATADGST_EN		(1ULL << ISCSI_PARAM_DATADGST_EN)
-#define ISCSI_INITIAL_R2T_EN		(1ULL << ISCSI_PARAM_INITIAL_R2T_EN)
-#define ISCSI_MAX_R2T			(1ULL << ISCSI_PARAM_MAX_R2T)
-#define ISCSI_IMM_DATA_EN		(1ULL << ISCSI_PARAM_IMM_DATA_EN)
-#define ISCSI_FIRST_BURST		(1ULL << ISCSI_PARAM_FIRST_BURST)
-#define ISCSI_MAX_BURST			(1ULL << ISCSI_PARAM_MAX_BURST)
-#define ISCSI_PDU_INORDER_EN		(1ULL << ISCSI_PARAM_PDU_INORDER_EN)
-#define ISCSI_DATASEQ_INORDER_EN	(1ULL << ISCSI_PARAM_DATASEQ_INORDER_EN)
-#define ISCSI_ERL			(1ULL << ISCSI_PARAM_ERL)
-#define ISCSI_IFMARKER_EN		(1ULL << ISCSI_PARAM_IFMARKER_EN)
-#define ISCSI_OFMARKER_EN		(1ULL << ISCSI_PARAM_OFMARKER_EN)
-#define ISCSI_EXP_STATSN		(1ULL << ISCSI_PARAM_EXP_STATSN)
-#define ISCSI_TARGET_NAME		(1ULL << ISCSI_PARAM_TARGET_NAME)
-#define ISCSI_TPGT			(1ULL << ISCSI_PARAM_TPGT)
-#define ISCSI_PERSISTENT_ADDRESS	(1ULL << ISCSI_PARAM_PERSISTENT_ADDRESS)
-#define ISCSI_PERSISTENT_PORT		(1ULL << ISCSI_PARAM_PERSISTENT_PORT)
-#define ISCSI_SESS_RECOVERY_TMO		(1ULL << ISCSI_PARAM_SESS_RECOVERY_TMO)
-#define ISCSI_CONN_PORT			(1ULL << ISCSI_PARAM_CONN_PORT)
-#define ISCSI_CONN_ADDRESS		(1ULL << ISCSI_PARAM_CONN_ADDRESS)
-#define ISCSI_USERNAME			(1ULL << ISCSI_PARAM_USERNAME)
-#define ISCSI_USERNAME_IN		(1ULL << ISCSI_PARAM_USERNAME_IN)
-#define ISCSI_PASSWORD			(1ULL << ISCSI_PARAM_PASSWORD)
-#define ISCSI_PASSWORD_IN		(1ULL << ISCSI_PARAM_PASSWORD_IN)
-#define ISCSI_FAST_ABORT		(1ULL << ISCSI_PARAM_FAST_ABORT)
-#define ISCSI_ABORT_TMO			(1ULL << ISCSI_PARAM_ABORT_TMO)
-#define ISCSI_LU_RESET_TMO		(1ULL << ISCSI_PARAM_LU_RESET_TMO)
-#define ISCSI_HOST_RESET_TMO		(1ULL << ISCSI_PARAM_HOST_RESET_TMO)
-#define ISCSI_PING_TMO			(1ULL << ISCSI_PARAM_PING_TMO)
-#define ISCSI_RECV_TMO			(1ULL << ISCSI_PARAM_RECV_TMO)
-#define ISCSI_IFACE_NAME		(1ULL << ISCSI_PARAM_IFACE_NAME)
-#define ISCSI_ISID			(1ULL << ISCSI_PARAM_ISID)
-#define ISCSI_INITIATOR_NAME		(1ULL << ISCSI_PARAM_INITIATOR_NAME)
-#define ISCSI_TGT_RESET_TMO		(1ULL << ISCSI_PARAM_TGT_RESET_TMO)
-#define ISCSI_TARGET_ALIAS		(1ULL << ISCSI_PARAM_TARGET_ALIAS)
-
 /* iSCSI HBA params */
 enum iscsi_host_param {
 	ISCSI_HOST_PARAM_HWADDRESS,
@@ -366,11 +419,6 @@ enum iscsi_host_param {
 	ISCSI_HOST_PARAM_MAX,
 };
 
-#define ISCSI_HOST_HWADDRESS		(1ULL << ISCSI_HOST_PARAM_HWADDRESS)
-#define ISCSI_HOST_INITIATOR_NAME	(1ULL << ISCSI_HOST_PARAM_INITIATOR_NAME)
-#define ISCSI_HOST_NETDEV_NAME		(1ULL << ISCSI_HOST_PARAM_NETDEV_NAME)
-#define ISCSI_HOST_IPADDRESS		(1ULL << ISCSI_HOST_PARAM_IPADDRESS)
-
 #define iscsi_ptr(_handle) ((void*)(unsigned long)_handle)
 #define iscsi_handle(_ptr) ((uint64_t)(unsigned long)_ptr)
 
@@ -392,6 +440,7 @@ enum iscsi_host_param {
 #define CAP_DIGEST_OFFLOAD	0x1000	/* offload hdr and data digests */
 #define CAP_PADDING_OFFLOAD	0x2000	/* offload padding insertion, removal,
 					   and verification */
+#define CAP_LOGIN_OFFLOAD	0x4000	/* offload session login */
 
 /*
  * These flags describes reason of stop_conn() call
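
An ISCSI_UEVENT_SET_IFACE_PARAMS payload carries `count` variable-length iscsi_iface_param_info records packed back to back. A sketch of how userspace might fill in one IPv4 address record; the buffer handling and the in_addr source are assumptions:

	struct iscsi_iface_param_info *info =
				(struct iscsi_iface_param_info *)buf;
	struct in_addr addr = { .s_addr = htonl(0xc0a80001) };	/* 192.168.0.1 */

	info->iface_num  = 0;				/* first interface */
	info->iface_type = ISCSI_IFACE_TYPE_IPV4;
	info->param      = ISCSI_NET_PARAM_IPV4_ADDR;
	info->param_type = ISCSI_NET_PARAM;
	info->len        = sizeof(addr.s_addr);		/* 4-byte IPv4 address */
	memcpy(info->value, &addr.s_addr, info->len);
	/* the next record would start at buf + sizeof(*info) + info->len */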
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 7d96829b0c0..5d1a758e059 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -281,9 +281,6 @@ struct fc_seq_els_data {
  * @timer:            The command timer
  * @tm_done:          Completion indicator
  * @wait_for_comp:    Indicator to wait for completion of the I/O (in jiffies)
- * @start_time:       Timestamp indicating the start of the I/O (in jiffies)
- * @end_time:         Timestamp indicating the end of the I/O (in jiffies)
- * @last_pkt_time:    Timestamp of the last frame received (in jiffies)
  * @data_len:         The length of the data
  * @cdb_cmd:          The CDB command
  * @xfer_len:         The transfer length
@@ -304,50 +301,46 @@ struct fc_seq_els_data {
  * @recov_seq:        The sequence for REC or SRR
  */
 struct fc_fcp_pkt {
-	/* Housekeeping information */
-	struct fc_lport   *lp;
-	u16		  state;
-	atomic_t	  ref_cnt;
 	spinlock_t	  scsi_pkt_lock;
+	atomic_t	  ref_cnt;
+
+	/* SCSI command and data transfer information */
+	u32		  data_len;
 
 	/* SCSI I/O related information */
 	struct scsi_cmnd  *cmd;
 	struct list_head  list;
 
-	/* Timeout related information */
-	struct timer_list timer;
-	struct completion tm_done;
-	int		  wait_for_comp;
-	unsigned long	  start_time;
-	unsigned long	  end_time;
-	unsigned long	  last_pkt_time;
-
-	/* SCSI command and data transfer information */
-	u32		  data_len;
-
-	/* Transport related veriables */
-	struct fcp_cmnd   cdb_cmd;
-	size_t		  xfer_len;
-	u16		  xfer_ddp;
-	u32		  xfer_contig_end;
-	u16		  max_payload;
+	/* Housekeeping information */
+	struct fc_lport   *lp;
+	u8		  state;
 
 	/* SCSI/FCP return status */
-	u32		  io_status;
 	u8		  cdb_status;
 	u8		  status_code;
 	u8		  scsi_comp_flags;
+	u32		  io_status;
 	u32		  req_flags;
 	u32		  scsi_resid;
 
+	/* Transport related veriables */
+	size_t		  xfer_len;
+	struct fcp_cmnd   cdb_cmd;
+	u32		  xfer_contig_end;
+	u16		  max_payload;
+	u16		  xfer_ddp;
+
 	/* Associated structures */
 	struct fc_rport	  *rport;
 	struct fc_seq	  *seq_ptr;
 
-	/* Error Processing information */
-	u8		  recov_retry;
+	/* Timeout/error related information */
+	struct timer_list timer;
+	int		  wait_for_comp;
+	u32		  recov_retry;
 	struct fc_seq	  *recov_seq;
-};
+	struct completion tm_done;
+} ____cacheline_aligned_in_smp;
 
 /*
  * Structure and function definitions for managing Fibre Channel Exchanges
@@ -413,35 +406,32 @@ struct fc_seq {
  * sequence allocation
  */
 struct fc_exch {
+	spinlock_t	    ex_lock;
+	atomic_t	    ex_refcnt;
+	enum fc_class	    class;
 	struct fc_exch_mgr  *em;
 	struct fc_exch_pool *pool;
-	u32		    state;
-	u16		    xid;
 	struct list_head    ex_list;
-	spinlock_t	    ex_lock;
-	atomic_t	    ex_refcnt;
-	struct delayed_work timeout_work;
 	struct fc_lport	    *lp;
+	u32		    esb_stat;
+	u8		    state;
+	u8		    fh_type;
+	u8		    seq_id;
+	u8		    encaps;
+	u16		    xid;
 	u16		    oxid;
 	u16		    rxid;
 	u32		    oid;
 	u32		    sid;
 	u32		    did;
-	u32		    esb_stat;
 	u32		    r_a_tov;
-	u8		    seq_id;
-	u8		    encaps;
 	u32		    f_ctl;
-	u8		    fh_type;
-	enum fc_class	    class;
-	struct fc_seq	    seq;
-
+	struct fc_seq	    seq;
 	void		    (*resp)(struct fc_seq *, struct fc_frame *, void *);
 	void		    *arg;
-
 	void		    (*destructor)(struct fc_seq *, void *);
-
-};
+	struct delayed_work timeout_work;
+} ____cacheline_aligned_in_smp;
 #define	fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
 
 
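
Both struct reorderings above group the fields that the I/O fast path touches together into the leading cache lines and tag the structure ____cacheline_aligned_in_smp, so that on SMP kernels each instance starts on a cache-line boundary. The idiom in isolation:

struct hot_object {
	spinlock_t	lock;		/* fast-path fields grouped first */
	atomic_t	refcnt;
	u32		state;
	/* rarely touched fields follow */
} ____cacheline_aligned_in_smp;		/* per-instance alignment on SMP */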
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index 8c1638b8c28..d1e95c6ac77 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -229,6 +229,11 @@ int fcoe_libfc_config(struct fc_lport *, struct fcoe_ctlr *,
 		      const struct libfc_function_template *, int init_fcp);
 u32 fcoe_fc_crc(struct fc_frame *fp);
 int fcoe_start_io(struct sk_buff *skb);
+int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
+void __fcoe_get_lesb(struct fc_lport *lport, struct fc_els_lesb *fc_lesb,
+		     struct net_device *netdev);
+void fcoe_wwn_to_str(u64 wwn, char *buf, int len);
+int fcoe_validate_vport_create(struct fc_vport *vport);
 
 /**
  * is_fip_mode() - returns true if FIP mode selected.
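
fcoe_wwn_to_str() formats a 64-bit WWN into a caller-supplied buffer. A sketch of a hypothetical call site (buffer size is an assumption):

	char buf[32];				/* assumed large enough */

	fcoe_wwn_to_str(lport->wwpn, buf, sizeof(buf));
	printk(KERN_INFO "lport wwpn: %s\n", buf);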
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index ee866060f8a..6a308d42d98 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -142,8 +142,11 @@ struct expander_device {
 	u16    ex_change_count;
 	u16    max_route_indexes;
 	u8     num_phys;
+
+	u8     t2t_supp:1;
 	u8     configuring:1;
 	u8     conf_route_table:1;
+
 	u8     enclosure_logical_id[8];
 
 	struct ex_phy *ex_phy;
@@ -386,6 +389,11 @@ sdev_to_domain_dev(struct scsi_device *sdev) {
 	return starget_to_domain_dev(sdev->sdev_target);
 }
 
+static inline struct ata_device *sas_to_ata_dev(struct domain_device *dev)
+{
+	return &dev->sata_dev.ap->link.device[0];
+}
+
 static inline struct domain_device *
 cmd_to_domain_dev(struct scsi_cmnd *cmd)
 {
@@ -405,6 +413,20 @@ static inline void sas_phy_disconnected(struct asd_sas_phy *phy)
 	phy->linkrate = SAS_LINK_RATE_UNKNOWN;
 }
 
+static inline unsigned int to_sas_gpio_od(int device, int bit)
+{
+	return 3 * device + bit;
+}
+
+#ifdef CONFIG_SCSI_SAS_HOST_SMP
+int try_test_sas_gpio_gp_bit(unsigned int od, u8 *data, u8 index, u8 count);
+#else
+static inline int try_test_sas_gpio_gp_bit(unsigned int od, u8 *data, u8 index, u8 count)
+{
+	return -1;
+}
+#endif
+
 /* ---------- Tasks ---------- */
 /*
       service_response |  SAS_TASK_COMPLETE  |  SAS_TASK_UNDELIVERED |
@@ -555,36 +577,14 @@ struct sas_task {
 	struct work_struct abort_work;
 };
 
-extern struct kmem_cache *sas_task_cache;
-
 #define SAS_TASK_STATE_PENDING      1
 #define SAS_TASK_STATE_DONE         2
 #define SAS_TASK_STATE_ABORTED      4
 #define SAS_TASK_NEED_DEV_RESET     8
 #define SAS_TASK_AT_INITIATOR       16
 
-static inline struct sas_task *sas_alloc_task(gfp_t flags)
-{
-	struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags);
-
-	if (task) {
-		INIT_LIST_HEAD(&task->list);
-		spin_lock_init(&task->task_state_lock);
-		task->task_state_flags = SAS_TASK_STATE_PENDING;
-		init_timer(&task->timer);
-		init_completion(&task->completion);
-	}
-
-	return task;
-}
-
-static inline void sas_free_task(struct sas_task *task)
-{
-	if (task) {
-		BUG_ON(!list_empty(&task->list));
-		kmem_cache_free(sas_task_cache, task);
-	}
-}
+extern struct sas_task *sas_alloc_task(gfp_t flags);
+extern void sas_free_task(struct sas_task *task);
 
 struct sas_domain_function_template {
 	/* The class calls these to notify the LLDD of an event. */
@@ -614,6 +614,10 @@ struct sas_domain_function_template {
 
 	/* Phy management */
 	int (*lldd_control_phy)(struct asd_sas_phy *, enum phy_func, void *);
+
+	/* GPIO support */
+	int (*lldd_write_gpio)(struct sas_ha_struct *, u8 reg_type,
+			       u8 reg_index, u8 reg_count, u8 *write_data);
 };
 
 extern int sas_register_ha(struct sas_ha_struct *);
@@ -652,7 +656,7 @@ int sas_discover_event(struct asd_sas_port *, enum discover_event ev);
 int sas_discover_sata(struct domain_device *);
 int sas_discover_end_dev(struct domain_device *);
 
-void sas_unregister_dev(struct domain_device *);
+void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *);
 
 void sas_init_dev(struct domain_device *);
 
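
The GPIO additions back SGPIO-style LED control: to_sas_gpio_od() maps a drive to its 3-bit (activity/locate/error) group in the GPIO bitstream, and try_test_sas_gpio_gp_bit() reads that bit out of the user-supplied register data, returning a negative value once the offset falls outside the written registers. A sketch of an LLDD hook; the driver types, the LED helper, the bit assignment, and the return convention are all assumptions:

static int my_lldd_write_gpio(struct sas_ha_struct *ha, u8 reg_type,
			      u8 reg_index, u8 reg_count, u8 *write_data)
{
	int d, on;

	if (reg_type != SAS_GPIO_REG_TX_GP)
		return -EINVAL;	/* only the TX_GP bitstream is sketched */

	for (d = 0; d < MY_MAX_DRIVES; d++) {
		/* assume bit 2 of each 3-bit group drives the locate LED */
		on = try_test_sas_gpio_gp_bit(to_sas_gpio_od(d, 2),
					      write_data, reg_index, reg_count);
		if (on < 0)
			break;	/* od falls outside the written registers */
		my_set_locate_led(d, on);	/* hypothetical helper */
	}
	return 0;	/* success; return convention is an assumption */
}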
diff --git a/include/scsi/sas.h b/include/scsi/sas.h
index e9fd0228138..3673d685e6a 100644
--- a/include/scsi/sas.h
+++ b/include/scsi/sas.h
@@ -108,6 +108,7 @@ enum sas_protocol {
 	SAS_PROTOCOL_STP		= 0x04,
 	SAS_PROTOCOL_SSP		= 0x08,
 	SAS_PROTOCOL_ALL		= 0x0E,
+	SAS_PROTOCOL_STP_ALL		= SAS_PROTOCOL_STP|SAS_PROTOCOL_SATA,
 };
 
 /* From the spec; local phys only */
@@ -121,6 +122,7 @@ enum phy_func {
 	PHY_FUNC_TX_SATA_PS_SIGNAL,
 	PHY_FUNC_RELEASE_SPINUP_HOLD = 0x10, /* LOCAL PORT ONLY! */
 	PHY_FUNC_SET_LINK_RATE,
+	PHY_FUNC_GET_EVENTS,
 };
 
 /* SAS LLDD would need to report only _very_few_ of those, like BROADCAST.
@@ -195,6 +197,14 @@ enum sas_open_rej_reason {
 	SAS_OREJ_RSVD_RETRY	= 18,
 };
 
+enum sas_gpio_reg_type {
+	SAS_GPIO_REG_CFG	= 0,
+	SAS_GPIO_REG_RX		= 1,
+	SAS_GPIO_REG_RX_GP	= 2,
+	SAS_GPIO_REG_TX		= 3,
+	SAS_GPIO_REG_TX_GP	= 4,
+};
+
 struct dev_to_host_fis {
 	u8     fis_type;	  /* 0x34 */
 	u8     flags;
@@ -341,7 +351,12 @@ struct report_general_resp {
 
 	u8	conf_route_table:1;
 	u8	configuring:1;
-	u8	_r_b:6;
+	u8	config_others:1;
+	u8	orej_retry_supp:1;
+	u8	stp_cont_awt:1;
+	u8	self_config:1;
+	u8	zone_config:1;
+	u8	t2t_supp:1;
 
 	u8	_r_c;
 
@@ -528,7 +543,12 @@ struct report_general_resp {
 	u8	_r_a;
 	u8	num_phys;
 
-	u8	_r_b:6;
+	u8	t2t_supp:1;
+	u8	zone_config:1;
+	u8	self_config:1;
+	u8	stp_cont_awt:1;
+	u8	orej_retry_supp:1;
+	u8	config_others:1;
 	u8	configuring:1;
 	u8	conf_route_table:1;
 
diff --git a/include/scsi/scsi_bsg_iscsi.h b/include/scsi/scsi_bsg_iscsi.h
new file mode 100644
index 00000000000..fd5689d4c05
--- /dev/null
+++ b/include/scsi/scsi_bsg_iscsi.h
@@ -0,0 +1,110 @@
+/*
+ *  iSCSI Transport BSG Interface
+ *
+ *  Copyright (C) 2009   James Smart, Emulex Corporation
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#ifndef SCSI_BSG_ISCSI_H
+#define SCSI_BSG_ISCSI_H
+
+/*
+ * This file intended to be included by both kernel and user space
+ */
+
+#include <scsi/scsi.h>
+
+/*
+ * iSCSI Transport SGIO v4 BSG Message Support
+ */
+
+/* Default BSG request timeout (in seconds) */
+#define ISCSI_DEFAULT_BSG_TIMEOUT	(10 * HZ)
+
+
+/*
+ * Request Message Codes supported by the iSCSI Transport
+ */
+
+/* define the class masks for the message codes */
+#define ISCSI_BSG_CLS_MASK	0xF0000000	/* find object class */
+#define ISCSI_BSG_HST_MASK	0x80000000	/* iscsi host class */
+
+/* iscsi host Message Codes */
+#define ISCSI_BSG_HST_VENDOR		(ISCSI_BSG_HST_MASK | 0x000000FF)
+
+
+/*
+ * iSCSI Host Messages
+ */
+
+/* ISCSI_BSG_HST_VENDOR : */
+
+/* Request:
+ * Note: When specifying vendor_id, be sure to read the Vendor Type and ID
+ *   formatting requirements specified in scsi_netlink.h
+ */
+struct iscsi_bsg_host_vendor {
+	/*
+	 * Identifies the vendor that the message is formatted for. This
+	 * should be the recipient of the message.
+	 */
+	uint64_t vendor_id;
+
+	/* start of vendor command area */
+	uint32_t vendor_cmd[0];
+};
+
+/* Response:
+ */
+struct iscsi_bsg_host_vendor_reply {
+	/* start of vendor response area */
+	uint32_t vendor_rsp[0];
+};
+
+
+/* request (CDB) structure of the sg_io_v4 */
+struct iscsi_bsg_request {
+	uint32_t msgcode;
+	union {
+		struct iscsi_bsg_host_vendor	h_vendor;
+	} rqst_data;
+} __attribute__((packed));
+
+
+/* response (request sense data) structure of the sg_io_v4 */
+struct iscsi_bsg_reply {
+	/*
+	 * The completion result. Result exists in two forms:
+	 * if negative, it is an -Exxx system errno value. There will
+	 * be no further reply information supplied.
+	 * else, it's the 4-byte scsi error result, with driver, host,
+	 * msg and status fields. The per-msgcode reply structure
+	 * will contain valid data.
+	 */
+	uint32_t result;
+
+	/* If there was reply_payload, how much was recevied ? */
+	uint32_t reply_payload_rcv_len;
+
+	union {
+		struct iscsi_bsg_host_vendor_reply	vendor_reply;
+	} reply_data;
+};
+
+
+#endif /* SCSI_BSG_ISCSI_H */
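
With the header shared between kernel and userspace, a vendor pass-through becomes an SG_IO v4 transaction against the host's BSG node. A userspace sketch; the device node name and vendor id are hypothetical:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/bsg.h>
#include <scsi/sg.h>
#include <scsi/scsi_bsg_iscsi.h>

int send_vendor_cmd(void)
{
	struct iscsi_bsg_request req = {
		.msgcode = ISCSI_BSG_HST_VENDOR,
		.rqst_data.h_vendor.vendor_id = 0x112233,	/* hypothetical */
	};
	struct iscsi_bsg_reply reply;
	struct sg_io_v4 sgio = {
		.guard		= 'Q',	/* required sg_io_v4 magic */
		.protocol	= BSG_PROTOCOL_SCSI,
		.subprotocol	= BSG_SUB_PROTOCOL_SCSI_TRANSPORT,
		.request	= (uintptr_t)&req,
		.request_len	= sizeof(req),
		.response	= (uintptr_t)&reply,
		.max_response_len = sizeof(reply),
	};
	int fd = open("/dev/bsg/iscsi_host5", O_RDWR);	/* hypothetical node */
	int rc;

	if (fd < 0)
		return -1;
	rc = ioctl(fd, SG_IO, &sgio);	/* reply.result holds the outcome */
	close(fd);
	return rc;
}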
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index d371c3ca90c..5591ed54dc9 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -197,6 +197,7 @@ struct scsi_device_handler {
 	int (*activate)(struct scsi_device *, activate_complete, void *);
 	int (*prep_fn)(struct scsi_device *, struct request *);
 	int (*set_params)(struct scsi_device *, const char *);
+	bool (*match)(struct scsi_device *);
 };
 
 struct scsi_dh_data {
@@ -471,6 +472,11 @@ static inline int scsi_device_protection(struct scsi_device *sdev)
 	return sdev->scsi_level > SCSI_2 && sdev->inquiry[5] & (1<<0);
 }
 
+static inline int scsi_device_tpgs(struct scsi_device *sdev)
+{
+	return sdev->inquiry ? (sdev->inquiry[5] >> 4) & 0x3 : 0;
+}
+
 #define MODULE_ALIAS_SCSI_DEVICE(type) \
 	MODULE_ALIAS("scsi:t-" __stringify(type) "*")
 #define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x"
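
scsi_device_tpgs() decodes the TPGS bits from byte 5 of the cached standard INQUIRY response, and the new ->match() hook lets a device handler attach by capability instead of by vendor/model string. A handler targeting every ALUA-capable device might look like this sketch:

static bool my_dh_match(struct scsi_device *sdev)
{
	/* attach whenever the device reports any TPGS support
	 * (implicit and/or explicit ALUA), whatever the vendor */
	return scsi_device_tpgs(sdev) != 0;
}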
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index f1f2644137b..50266c9405f 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -355,6 +355,19 @@ struct scsi_host_template {
 	 */
 	enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
 
+	/* This is an optional routine that allows transport to initiate
+	 * LLD adapter or firmware reset using sysfs attribute.
+	 *
+	 * Return values: 0 on success, -ve value on failure.
+	 *
+	 * Status: OPTIONAL
+	 */
+
+	int (*host_reset)(struct Scsi_Host *shost, int reset_type);
+#define SCSI_ADAPTER_RESET	1
+#define SCSI_FIRMWARE_RESET	2
+
+
 	/*
 	 * Name of proc directory
 	 */
@@ -791,7 +804,8 @@ static inline struct device *scsi_get_device(struct Scsi_Host *shost)
  **/
 static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
 {
-	return shost->shost_state == SHOST_RUNNING;
+	return shost->shost_state == SHOST_RUNNING ||
+	       shost->shost_state == SHOST_RECOVERY;
 }
 
 extern void scsi_unblock_requests(struct Scsi_Host *);
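
An LLD opting into the sysfs-initiated reset implements ->host_reset and switches on the requested type, returning a negative errno for anything it cannot do. A sketch with hypothetical driver helpers:

static int my_host_reset(struct Scsi_Host *shost, int reset_type)
{
	struct my_hba *hba = shost_priv(shost);	/* hypothetical priv data */

	switch (reset_type) {
	case SCSI_ADAPTER_RESET:
		return my_reset_controller(hba);	/* hypothetical */
	case SCSI_FIRMWARE_RESET:
		return my_reload_firmware(hba);		/* hypothetical */
	default:
		return -EOPNOTSUPP;
	}
}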
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index bf8f5296567..5994bcc1b01 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -37,6 +37,8 @@ struct iscsi_cls_conn;
 struct iscsi_conn;
 struct iscsi_task;
 struct sockaddr;
+struct iscsi_iface;
+struct bsg_job;
 
 /**
  * struct iscsi_transport - iSCSI Transport template
@@ -84,9 +86,7 @@ struct iscsi_transport {
 	struct module *owner;
 	char *name;
 	unsigned int caps;
-	/* LLD sets this to indicate what values it can export to sysfs */
-	uint64_t param_mask;
-	uint64_t host_param_mask;
+
 	struct iscsi_cls_session *(*create_session) (struct iscsi_endpoint *ep,
 					uint16_t cmds_max, uint16_t qdepth,
 					uint32_t sn);
@@ -137,6 +137,13 @@ struct iscsi_transport {
 	int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type,
 			  uint32_t enable, struct sockaddr *dst_addr);
 	int (*set_path) (struct Scsi_Host *shost, struct iscsi_path *params);
+	int (*set_iface_param) (struct Scsi_Host *shost, void *data,
+				uint32_t len);
+	int (*get_iface_param) (struct iscsi_iface *iface,
+				enum iscsi_param_type param_type,
+				int param, char *buf);
+	mode_t (*attr_is_visible)(int param_type, int param);
+	int (*bsg_request)(struct bsg_job *job);
 };
 
 /*
@@ -150,6 +157,8 @@ extern int iscsi_unregister_transport(struct iscsi_transport *tt);
  */
 extern void iscsi_conn_error_event(struct iscsi_cls_conn *conn,
 				   enum iscsi_err error);
+extern void iscsi_conn_login_event(struct iscsi_cls_conn *conn,
+				   enum iscsi_conn_state state);
 extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
 			  char *data, uint32_t data_size);
 
@@ -171,6 +180,9 @@ struct iscsi_cls_conn {
 #define iscsi_dev_to_conn(_dev) \
 	container_of(_dev, struct iscsi_cls_conn, dev)
 
+#define transport_class_to_conn(_cdev) \
+	iscsi_dev_to_conn(_cdev->parent)
+
 #define iscsi_conn_to_session(_conn) \
 	iscsi_dev_to_session(_conn->dev.parent)
 
@@ -197,6 +209,7 @@ struct iscsi_cls_session {
 	struct delayed_work recovery_work;
 
 	unsigned int target_id;
+	bool ida_used;
 
 	int state;
 	int sid;				/* session id */
@@ -207,6 +220,9 @@ struct iscsi_cls_session {
 #define iscsi_dev_to_session(_dev) \
 	container_of(_dev, struct iscsi_cls_session, dev)
 
+#define transport_class_to_session(_cdev) \
+	iscsi_dev_to_session(_cdev->parent)
+
 #define iscsi_session_to_shost(_session) \
 	dev_to_shost(_session->dev.parent)
 
@@ -216,8 +232,12 @@ struct iscsi_cls_session {
 struct iscsi_cls_host {
 	atomic_t nr_scans;
 	struct mutex mutex;
+	struct request_queue *bsg_q;
 };
 
+#define iscsi_job_to_shost(_job) \
+	dev_to_shost(_job->dev)
+
 extern void iscsi_host_for_each_session(struct Scsi_Host *shost,
 				void (*fn)(struct iscsi_cls_session *));
 
@@ -228,6 +248,20 @@ struct iscsi_endpoint {
 	struct iscsi_cls_conn *conn;
 };
 
+struct iscsi_iface {
+	struct device dev;
+	struct iscsi_transport *transport;
+	uint32_t iface_type;	/* IPv4 or IPv6 */
+	uint32_t iface_num;	/* iface number, 0 - n */
+	void *dd_data;		/* LLD private data */
+};
+
+#define iscsi_dev_to_iface(_dev) \
+	container_of(_dev, struct iscsi_iface, dev)
+
+#define iscsi_iface_to_shost(_iface) \
+	dev_to_shost(_iface->dev.parent)
+
 /*
  * session and connection functions that can be used by HW iSCSI LLDs
  */
@@ -238,6 +272,7 @@ struct iscsi_endpoint {
 		dev_printk(prefix, &(_cls_conn)->dev, fmt, ##a)
 
 extern int iscsi_session_chkready(struct iscsi_cls_session *session);
+extern int iscsi_is_session_online(struct iscsi_cls_session *session);
 extern struct iscsi_cls_session *iscsi_alloc_session(struct Scsi_Host *shost,
 				struct iscsi_transport *transport, int dd_size);
 extern int iscsi_add_session(struct iscsi_cls_session *session,
@@ -261,5 +296,11 @@ extern struct iscsi_endpoint *iscsi_create_endpoint(int dd_size);
 extern void iscsi_destroy_endpoint(struct iscsi_endpoint *ep);
 extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle);
 extern int iscsi_block_scsi_eh(struct scsi_cmnd *cmd);
+extern struct iscsi_iface *iscsi_create_iface(struct Scsi_Host *shost,
+					      struct iscsi_transport *t,
+					      uint32_t iface_type,
+					      uint32_t iface_num, int dd_size);
+extern void iscsi_destroy_iface(struct iscsi_iface *iface);
+extern struct iscsi_iface *iscsi_lookup_iface(int handle);
 
 #endif
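
With param_mask and host_param_mask gone from struct iscsi_transport, a driver now advertises its supported parameters through ->attr_is_visible, returning the sysfs mode for attributes it implements and 0 to hide the rest. A sketch:

static mode_t my_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_NET_PARAM:
		switch (param) {
		case ISCSI_NET_PARAM_IPV4_ADDR:
		case ISCSI_NET_PARAM_VLAN_ID:
			return S_IRUGO;		/* supported, read-only */
		}
		return 0;			/* hide everything else */
	case ISCSI_PARAM:
		return S_IRUGO;			/* expose all session params */
	}
	return 0;
}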