author		Linus Torvalds <torvalds@linux-foundation.org>	2015-09-02 15:22:54 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-02 15:22:54 -0400
commit		df910390e2db07a76c87f258475f6c96253cee6c (patch)
tree		d522f0f098688c330014c5d78be6b3e74de87b7e
parent		91a247d7d3694a161092931ea4e0b13c11b8e9a0 (diff)
parent		9f55bca2b82a77a3cc3204900db2fc40ab30019e (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull first round of SCSI updates from James Bottomley:
 "This includes one new driver: cxlflash plus the usual grab bag of
  updates for the major drivers: qla2xxx, ipr, storvsc, pm80xx, hptiop,
  plus a few assorted fixes. There's another tranche coming, but I want
  to incubate it another few days in the checkers, plus it includes a
  mpt2sas separated lifetime fix, which Avago won't get done testing
  until Friday"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (85 commits)
  aic94xx: set an error code on failure
  storvsc: Set the error code correctly in failure conditions
  storvsc: Allow write_same when host is windows 10
  storvsc: use storage protocol version to determine storage capabilities
  storvsc: use correct defaults for values determined by protocol negotiation
  storvsc: Untangle the storage protocol negotiation from the vmbus protocol negotiation.
  storvsc: Use a single value to track protocol versions
  storvsc: Rather than look for sets of specific protocol versions, make decisions based on ranges.
  cxlflash: Remove unused variable from queuecommand
  cxlflash: shift wrapping bug in afu_link_reset()
  cxlflash: off by one bug in cxlflash_show_port_status()
  cxlflash: Virtual LUN support
  cxlflash: Superpipe support
  cxlflash: Base error recovery support
  qla2xxx: Update driver version to 8.07.00.26-k
  qla2xxx: Add pci device id 0x2261.
  qla2xxx: Fix missing device login retries.
  qla2xxx: do not clear slot in outstanding cmd array
  qla2xxx: Remove decrement of sp reference count in abort handler.
  qla2xxx: Add support to show MPI and PEP FW version for ISP27xx.
  ...
-rw-r--r--	Documentation/ioctl/ioctl-number.txt | 1
-rw-r--r--	Documentation/powerpc/cxlflash.txt | 318
-rw-r--r--	MAINTAINERS | 2
-rw-r--r--	drivers/message/fusion/mptctl.c | 9
-rw-r--r--	drivers/scsi/Kconfig | 1
-rw-r--r--	drivers/scsi/Makefile | 1
-rw-r--r--	drivers/scsi/aic94xx/aic94xx_init.c | 1
-rw-r--r--	drivers/scsi/bfa/bfad_im.c | 2
-rw-r--r--	drivers/scsi/cxlflash/Kconfig | 11
-rw-r--r--	drivers/scsi/cxlflash/Makefile | 2
-rw-r--r--	drivers/scsi/cxlflash/common.h | 208
-rw-r--r--	drivers/scsi/cxlflash/lunmgt.c | 266
-rw-r--r--	drivers/scsi/cxlflash/main.c | 2494
-rw-r--r--	drivers/scsi/cxlflash/main.h | 108
-rw-r--r--	drivers/scsi/cxlflash/sislite.h | 472
-rw-r--r--	drivers/scsi/cxlflash/superpipe.c | 2084
-rw-r--r--	drivers/scsi/cxlflash/superpipe.h | 147
-rw-r--r--	drivers/scsi/cxlflash/vlun.c | 1243
-rw-r--r--	drivers/scsi/cxlflash/vlun.h | 86
-rw-r--r--	drivers/scsi/hpsa.c | 301
-rw-r--r--	drivers/scsi/hpsa.h | 16
-rw-r--r--	drivers/scsi/hpsa_cmd.h | 10
-rw-r--r--	drivers/scsi/hptiop.c | 97
-rw-r--r--	drivers/scsi/hptiop.h | 6
-rw-r--r--	drivers/scsi/ipr.c | 15
-rw-r--r--	drivers/scsi/ipr.h | 17
-rw-r--r--	drivers/scsi/libfc/fc_fcp.c | 2
-rw-r--r--	drivers/scsi/lpfc/lpfc_hbadisc.c | 2
-rw-r--r--	drivers/scsi/megaraid.c | 140
-rw-r--r--	drivers/scsi/megaraid/megaraid_sas_base.c | 544
-rw-r--r--	drivers/scsi/megaraid/megaraid_sas_fusion.c | 95
-rw-r--r--	drivers/scsi/mpt2sas/mpt2sas_base.c | 16
-rw-r--r--	drivers/scsi/mpt3sas/mpt3sas_base.c | 16
-rw-r--r--	drivers/scsi/mvsas/mv_init.c | 5
-rw-r--r--	drivers/scsi/pm8001/pm8001_defs.h | 4
-rw-r--r--	drivers/scsi/pm8001/pm8001_hwi.c | 4
-rw-r--r--	drivers/scsi/pm8001/pm8001_init.c | 5
-rw-r--r--	drivers/scsi/pm8001/pm8001_sas.c | 19
-rw-r--r--	drivers/scsi/pm8001/pm8001_sas.h | 12
-rw-r--r--	drivers/scsi/pm8001/pm80xx_hwi.c | 111
-rw-r--r--	drivers/scsi/pm8001/pm80xx_hwi.h | 5
-rw-r--r--	drivers/scsi/qla2xxx/qla_attr.c | 24
-rw-r--r--	drivers/scsi/qla2xxx/qla_bsg.c | 7
-rw-r--r--	drivers/scsi/qla2xxx/qla_dbg.c | 102
-rw-r--r--	drivers/scsi/qla2xxx/qla_def.h | 15
-rw-r--r--	drivers/scsi/qla2xxx/qla_gs.c | 52
-rw-r--r--	drivers/scsi/qla2xxx/qla_init.c | 162
-rw-r--r--	drivers/scsi/qla2xxx/qla_iocb.c | 132
-rw-r--r--	drivers/scsi/qla2xxx/qla_isr.c | 70
-rw-r--r--	drivers/scsi/qla2xxx/qla_mbx.c | 80
-rw-r--r--	drivers/scsi/qla2xxx/qla_mid.c | 3
-rw-r--r--	drivers/scsi/qla2xxx/qla_mr.c | 22
-rw-r--r--	drivers/scsi/qla2xxx/qla_nx.c | 165
-rw-r--r--	drivers/scsi/qla2xxx/qla_nx2.c | 20
-rw-r--r--	drivers/scsi/qla2xxx/qla_nx2.h | 6
-rw-r--r--	drivers/scsi/qla2xxx/qla_os.c | 41
-rw-r--r--	drivers/scsi/qla2xxx/qla_sup.c | 14
-rw-r--r--	drivers/scsi/qla2xxx/qla_target.c | 139
-rw-r--r--	drivers/scsi/qla2xxx/qla_tmpl.c | 27
-rw-r--r--	drivers/scsi/qla2xxx/qla_version.h | 2
-rw-r--r--	drivers/scsi/qla2xxx/tcm_qla2xxx.c | 6
-rw-r--r--	drivers/scsi/scsi_error.c | 9
-rw-r--r--	drivers/scsi/scsi_lib.c | 11
-rw-r--r--	drivers/scsi/scsi_transport_iscsi.c | 11
-rw-r--r--	drivers/scsi/st.c | 83
-rw-r--r--	drivers/scsi/storvsc_drv.c | 224
-rw-r--r--	include/scsi/scsi_device.h | 3
-rw-r--r--	include/scsi/scsi_transport_iscsi.h | 1
-rw-r--r--	include/uapi/scsi/Kbuild | 1
-rw-r--r--	include/uapi/scsi/cxlflash_ioctl.h | 174
70 files changed, 9177 insertions, 1327 deletions
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 141f847c7648..64df08db4657 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -316,6 +316,7 @@ Code Seq#(hex) Include File Comments
 0xB3	00	linux/mmc/ioctl.h
 0xC0	00-0F	linux/usb/iowarrior.h
 0xCA	00-0F	uapi/misc/cxl.h
+0xCA	80-8F	uapi/scsi/cxlflash_ioctl.h
 0xCB	00-1F	CBM serial IEC bus	in development:
 				<mailto:michael.klein@puffin.lb.shuttle.de>
 0xCD	01	linux/reiserfs_fs.h
diff --git a/Documentation/powerpc/cxlflash.txt b/Documentation/powerpc/cxlflash.txt
new file mode 100644
index 000000000000..4202d1bc583c
--- /dev/null
+++ b/Documentation/powerpc/cxlflash.txt
@@ -0,0 +1,318 @@
Introduction
============

    The IBM Power architecture provides support for CAPI (Coherent
    Accelerator Power Interface), which is available to certain PCIe slots
    on Power 8 systems. CAPI can be thought of as a special tunneling
    protocol through PCIe that allows PCIe adapters to look like special
    purpose co-processors which can read or write an application's
    memory and generate page faults. As a result, the host interface to
    an adapter running in CAPI mode does not require the data buffers to
    be mapped to the device's memory (IOMMU bypass) nor does it require
    memory to be pinned.

    On Linux, Coherent Accelerator (CXL) kernel services present CAPI
    devices as a PCI device by implementing a virtual PCI host bridge.
    This abstraction simplifies the infrastructure and programming
    model, allowing for drivers to look similar to other native PCI
    device drivers.

    CXL provides a mechanism by which user space applications can
    directly talk to a device (network or storage) bypassing the typical
    kernel/device driver stack. The CXL Flash Adapter Driver enables a
    user space application direct access to Flash storage.

    The CXL Flash Adapter Driver is a kernel module that sits in the
    SCSI stack as a low level device driver (below the SCSI disk and
    protocol drivers) for the IBM CXL Flash Adapter. This driver is
    responsible for the initialization of the adapter, setting up the
    special path for user space access, and performing error recovery. It
    communicates directly with the Flash Accelerator Functional Unit (AFU)
    as described in Documentation/powerpc/cxl.txt.

    The cxlflash driver supports two mutually exclusive modes of
    operation at the device (LUN) level:

        - Any flash device (LUN) can be configured to be accessed as a
          regular disk device (i.e.: /dev/sdc). This is the default mode.

        - Any flash device (LUN) can be configured to be accessed from
          user space with a special block library. This mode further
          specifies the means of accessing the device and provides for
          either raw access to the entire LUN (referred to as direct
          or physical LUN access) or access to a kernel/AFU-mediated
          partition of the LUN (referred to as virtual LUN access). The
          segmentation of a disk device into virtual LUNs is assisted
          by special translation services provided by the Flash AFU.
Overview
========

    The Coherent Accelerator Interface Architecture (CAIA) introduces a
    concept of a master context. A master typically has special privileges
    granted to it by the kernel or hypervisor allowing it to perform AFU
    wide management and control. The master may or may not be involved
    directly in each user I/O, but at the minimum is involved in the
    initial setup before the user application is allowed to send requests
    directly to the AFU.

    The CXL Flash Adapter Driver establishes a master context with the
    AFU. It uses memory mapped I/O (MMIO) for this control and setup. The
    Adapter Problem Space Memory Map looks like this:

                     +-------------------------------+
                     |    512 * 64 KB User MMIO      |
                     |        (per context)          |
                     |       User Accessible         |
                     +-------------------------------+
                     |    512 * 128 B per context    |
                     |    Provisioning and Control   |
                     |   Trusted Process accessible  |
                     +-------------------------------+
                     |         64 KB Global          |
                     |   Trusted Process accessible  |
                     +-------------------------------+

    This driver configures itself into the SCSI software stack as an
    adapter driver. The driver is the only entity that is considered a
    Trusted Process to program the Provisioning and Control and Global
    areas in the MMIO Space shown above. The master context driver
    discovers all LUNs attached to the CXL Flash adapter and instantiates
    scsi block devices (/dev/sdb, /dev/sdc etc.) for each unique LUN
    seen from each path.

    Once these scsi block devices are instantiated, an application
    written to a specification provided by the block library may get
    access to the Flash from user space (without requiring a system call).

    This master context driver also provides a series of ioctls for this
    block library to enable this user space access. The driver supports
    two modes for accessing the block device.

    The first mode is called the virtual mode. In this mode a single scsi
    block device (/dev/sdb) may be carved up into any number of distinct
    virtual LUNs. The virtual LUNs may be resized as long as the sum of
    the sizes of all the virtual LUNs, along with the meta-data associated
    with them, does not exceed the physical capacity.

    The second mode is called the physical mode. In this mode a single
    block device (/dev/sdb) may be opened directly by the block library
    and the entire space for the LUN is available to the application.

    Only the physical mode provides persistence of the data; i.e., data
    written to the block device will survive application exit and
    restart and also reboot. The virtual LUNs do not persist (i.e. do
    not survive after the application terminates or the system reboots).


Block library API
=================

    Applications intending to get access to the CXL Flash from user
    space should use the block library, as it abstracts the details of
    interfacing directly with the cxlflash driver that are necessary for
    performing administrative actions (i.e.: setup, tear down, resize).
    The block library can be thought of as a 'user' of services,
    implemented as IOCTLs, that are provided by the cxlflash driver
    specifically for devices (LUNs) operating in user space access
    mode. While it is not a requirement that applications understand
    the interface between the block library and the cxlflash driver,
    a high-level overview of each supported service (IOCTL) is provided
    below.

    The block library can be found on GitHub:
    http://www.github.com/mikehollinger/ibmcapikv


CXL Flash Driver IOCTLs
=======================

    Users, such as the block library, that wish to interface with a flash
    device (LUN) via user space access need to use the services provided
    by the cxlflash driver. As these services are implemented as ioctls,
    a file descriptor handle must first be obtained in order to establish
    the communication channel between a user and the kernel. This file
    descriptor is obtained by opening the device special file associated
    with the scsi disk device (/dev/sdb) that was created during LUN
    discovery. Because of the cxlflash driver's position within the
    SCSI protocol stack, this open is not actually seen by the cxlflash
    driver. Upon successful open, the user receives a file descriptor
    (herein referred to as fd1) that should be used for issuing the
    subsequent ioctls listed below.

    The structure definitions for these IOCTLs are available in:
    uapi/scsi/cxlflash_ioctl.h

DK_CXLFLASH_ATTACH
------------------

    This ioctl obtains, initializes, and starts a context using the CXL
    kernel services. These services specify a context id (u16) by which
    to uniquely identify the context and its allocated resources. The
    services additionally provide a second file descriptor (herein
    referred to as fd2) that is used by the block library to initiate
    memory mapped I/O (via mmap()) to the CXL flash device and poll for
    completion events. This file descriptor is intentionally installed by
    this driver and not the CXL kernel services to allow for intermediary
    notification and access in the event of a non-user-initiated close(),
    such as a killed process. This design point is described in further
    detail in the description for the DK_CXLFLASH_DETACH ioctl.

    There are a few important aspects regarding the "tokens" (context id
    and fd2) that are provided back to the user:

        - These tokens are only valid for the process under which they
          were created. The child of a forked process cannot continue
          to use the context id or file descriptor created by its parent
          (see DK_CXLFLASH_VLUN_CLONE for further details).

        - These tokens are only valid for the lifetime of the context and
          the process under which they were created. Once either is
          destroyed, the tokens are to be considered stale and subsequent
          usage will result in errors.

        - When a context is no longer needed, the user shall detach from
          the context via the DK_CXLFLASH_DETACH ioctl.

        - A close on fd2 will invalidate the tokens. This operation is not
          required by the user.

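    A minimal user space sketch of the attach flow follows. The ioctl
    name is as documented above; the structure name and the field names
    (context_id, adap_fd) as well as the 64 KB mapping length are
    assumptions based on uapi/scsi/cxlflash_ioctl.h and the memory map
    shown earlier:

        #include <fcntl.h>
        #include <sys/ioctl.h>
        #include <sys/mman.h>

        int fd1 = open("/dev/sdb", O_RDWR);      /* fd1: ioctl channel */
        struct dk_cxlflash_attach attach = { 0 };

        if (ioctl(fd1, DK_CXLFLASH_ATTACH, &attach) < 0)
                /* handle error */;

        int fd2 = attach.adap_fd;                /* fd2: mmap/poll fd */
        /* attach.context_id identifies this context and its resources */

        /* Map the per-context User MMIO area via fd2 */
        void *mmio = mmap(NULL, 64 * 1024, PROT_READ | PROT_WRITE,
                          MAP_SHARED, fd2, 0);
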
DK_CXLFLASH_USER_DIRECT
-----------------------
    This ioctl is responsible for transitioning the LUN to direct
    (physical) mode access and configuring the AFU for direct access from
    user space on a per-context basis. Additionally, the block size and
    last logical block address (LBA) are returned to the user.

    As mentioned previously, when operating in user space access mode,
    LUNs may be accessed in whole or in part. Only one mode is allowed
    at a time and if one mode is active (outstanding references exist),
    requests to use the LUN in a different mode are denied.

    The AFU is configured for direct access from user space by adding an
    entry to the AFU's resource handle table. The index of the entry is
    treated as a resource handle that is returned to the user. The user
    is then able to use the handle to reference the LUN during I/O.

DK_CXLFLASH_USER_VIRTUAL
------------------------
    This ioctl is responsible for transitioning the LUN to virtual mode
    of access and configuring the AFU for virtual access from user space
    on a per-context basis. Additionally, the block size and last logical
    block address (LBA) are returned to the user.

    As mentioned previously, when operating in user space access mode,
    LUNs may be accessed in whole or in part. Only one mode is allowed
    at a time and if one mode is active (outstanding references exist),
    requests to use the LUN in a different mode are denied.

    The AFU is configured for virtual access from user space by adding
    an entry to the AFU's resource handle table. The index of the entry
    is treated as a resource handle that is returned to the user. The
    user is then able to use the handle to reference the LUN during I/O.

    By default, the virtual LUN is created with a size of 0. The user
    would need to use the DK_CXLFLASH_VLUN_RESIZE ioctl to grow the
    virtual LUN to a desired size. To avoid having to perform this
    resize for the initial creation of the virtual LUN, the user has the
    option of specifying a size as part of the DK_CXLFLASH_USER_VIRTUAL
    ioctl, such that when success is returned to the user, the
    resource handle that is provided is already referencing provisioned
    storage. This is reflected by the last LBA being a non-zero value.

DK_CXLFLASH_VLUN_RESIZE
-----------------------
    This ioctl is responsible for resizing a previously created virtual
    LUN and will fail if invoked upon a LUN that is not in virtual
    mode. Upon success, an updated last LBA is returned to the user
    indicating the new size of the virtual LUN associated with the
    resource handle.

    The partitioning of virtual LUNs is jointly mediated by the cxlflash
    driver and the AFU. An allocation table is kept for each LUN that is
    operating in the virtual mode and used to program a LUN translation
    table that the AFU references when provided with a resource handle.

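    A sketch of creating and growing a virtual LUN (ioctl names as
    documented; the structure names and the fields rsrc_handle, req_size
    and last_lba are assumptions about uapi/scsi/cxlflash_ioctl.h; fd1
    is the descriptor from the attach sketch above):

        struct dk_cxlflash_uvirtual virt = { 0 };
        struct dk_cxlflash_resize resize = { 0 };

        /* Created with size 0 unless a size was requested (see above) */
        if (ioctl(fd1, DK_CXLFLASH_USER_VIRTUAL, &virt) < 0)
                /* handle error */;

        resize.rsrc_handle = virt.rsrc_handle;  /* handle from create */
        resize.req_size = 0x10000;              /* requested size in blocks */
        if (ioctl(fd1, DK_CXLFLASH_VLUN_RESIZE, &resize) < 0)
                /* handle error */;
        /* resize.last_lba now reflects the newly provisioned size */
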
DK_CXLFLASH_RELEASE
-------------------
    This ioctl is responsible for releasing a previously obtained
    reference to either a physical or virtual LUN. This can be
    thought of as the inverse of the DK_CXLFLASH_USER_DIRECT or
    DK_CXLFLASH_USER_VIRTUAL ioctls. Upon success, the resource handle
    is no longer valid and the entry in the resource handle table is
    made available to be used again.

    As part of the release process for virtual LUNs, the virtual LUN
    is first resized to 0 to clear out and free the translation tables
    associated with the virtual LUN reference.

DK_CXLFLASH_DETACH
------------------
    This ioctl is responsible for unregistering a context with the
    cxlflash driver and releasing outstanding resources that were
    not explicitly released via the DK_CXLFLASH_RELEASE ioctl. Upon
    success, all "tokens" which had been provided to the user from the
    DK_CXLFLASH_ATTACH onward are no longer valid.

DK_CXLFLASH_VLUN_CLONE
----------------------
    This ioctl is responsible for cloning a previously created
    context to a more recently created context. It exists solely to
    support maintaining user space access to storage after a process
    forks. Upon success, the child process (which invoked the ioctl)
    will have access to the same LUNs via the same resource handle(s)
    and fd2 as the parent, but under a different context.

    Context sharing across processes is not supported with CXL and
    therefore each fork must be met with establishing a new context
    for the child process. This ioctl simplifies the state management
    and playback required by a user in such a scenario. When a process
    forks, the child process can clone the parent's context by first
    creating a context (via DK_CXLFLASH_ATTACH) and then using this
    ioctl to perform the clone from the parent to the child, as shown
    in the sketch below.

    The clone itself is fairly simple. The resource handle and LUN
    translation tables are copied from the parent context to the child's
    and then synced with the AFU.

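    A sketch of the fork scenario just described (the flow follows the
    text; the structure and field names, and the parent_context_id value
    saved by the parent before forking, are assumptions):

        pid_t pid = fork();
        if (pid == 0) {
                /* Child: first establish its own context ... */
                struct dk_cxlflash_attach attach = { 0 };
                ioctl(fd1, DK_CXLFLASH_ATTACH, &attach);

                /* ... then clone resource handles and LUN translation
                 * tables from the parent's context. */
                struct dk_cxlflash_clone clone = { 0 };
                clone.context_id_src = parent_context_id;
                clone.context_id_dst = attach.context_id;
                ioctl(fd1, DK_CXLFLASH_VLUN_CLONE, &clone);
        }
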
DK_CXLFLASH_VERIFY
------------------
    This ioctl is used to detect various changes such as the capacity of
    the disk changing, the number of LUNs visible changing, etc. In cases
    where the changes affect the application (such as a LUN resize), the
    cxlflash driver will report the changed state to the application.

    The user calls in when they want to validate that a LUN hasn't been
    changed in response to a check condition. As the user is operating out
    of band from the kernel, they will see these types of events without
    the kernel's knowledge. When encountered, the user's architected
    behavior is to call in to this ioctl, indicating what they want to
    verify and passing along any appropriate information. For now, only
    verifying a LUN change (i.e. a size change) with sense data is
    supported.

DK_CXLFLASH_RECOVER_AFU
-----------------------
    This ioctl is used to drive recovery (if such an action is warranted)
    of a specified user context. Any state associated with the user context
    is re-established upon successful recovery.

    User contexts are put into an error condition when the device needs to
    be reset or is terminating. Users are notified of this error condition
    by seeing all 0xF's on an MMIO read. Upon encountering this, the
    architected behavior for a user is to call into this ioctl to recover
    their context. A user may also call into this ioctl at any time to
    check if the device is operating normally. If a failure is returned
    from this ioctl, the user is expected to gracefully clean up their
    context via release/detach ioctls. Until they do, the context they
    hold is not relinquished. The user may also optionally exit the process
    at which time the context/resources they held will be freed as part of
    the release fop.

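    A sketch of this architected behavior (the all-0xF read is as
    described above; mmio is the fd2 mapping from the attach sketch and
    the structure name is an assumption):

        if (*(volatile __u64 *)mmio == ~0ULL) {
                struct dk_cxlflash_recover_afu recover = { 0 };

                if (ioctl(fd1, DK_CXLFLASH_RECOVER_AFU, &recover) < 0) {
                        /* Recovery failed: clean up gracefully via
                         * DK_CXLFLASH_RELEASE / DK_CXLFLASH_DETACH,
                         * or exit and let the release fop free the
                         * context and its resources. */
                }
        }
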
DK_CXLFLASH_MANAGE_LUN
----------------------
    This ioctl is used to switch a LUN from a mode where it is available
    for file-system access (legacy), to a mode where it is set aside for
    exclusive user space access (superpipe). In case a LUN is visible
    across multiple ports and adapters, this ioctl is used to uniquely
    identify each LUN by its World Wide Node Name (WWNN).
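
    A sketch using the flag and WWID field referenced by this series'
    cxlflash_manage_lun() (the structure is defined in
    uapi/scsi/cxlflash_ioctl.h; the wwid buffer here is caller-provided):

        struct dk_cxlflash_manage_lun manage = { 0 };

        memcpy(manage.wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN);
        manage.hdr.flags = DK_CXLFLASH_MANAGE_LUN_ENABLE_SUPERPIPE;
        if (ioctl(fd1, DK_CXLFLASH_MANAGE_LUN, &manage) < 0)
                /* handle error */;
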
diff --git a/MAINTAINERS b/MAINTAINERS
index 68760a91d399..4be7e5e7e9f9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8098,7 +8098,7 @@ S: Supported
 F:	drivers/scsi/pmcraid.*
 
 PMC SIERRA PM8001 DRIVER
-M:	xjtuwjp@gmail.com
+M:	Jack Wang <jinpu.wang@profitbricks.com>
 M:	lindar_liu@usish.com
 L:	pmchba@pmcs.com
 L:	linux-scsi@vger.kernel.org
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 70bb7530b22c..fc7393729081 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1859,6 +1859,15 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
 	}
 	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
 
+	/* Basic sanity checks to prevent underflows or integer overflows */
+	if (karg.maxReplyBytes < 0 ||
+	    karg.dataInSize < 0 ||
+	    karg.dataOutSize < 0 ||
+	    karg.dataSgeOffset < 0 ||
+	    karg.maxSenseBytes < 0 ||
+	    karg.dataSgeOffset > ioc->req_sz / 4)
+		return -EINVAL;
+
 	/* Verify that the final request frame will not be too large.
 	 */
 	sz = karg.dataSgeOffset * 4;
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 456e1567841c..95f7a76cfafc 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -345,6 +345,7 @@ source "drivers/scsi/cxgbi/Kconfig"
 source "drivers/scsi/bnx2i/Kconfig"
 source "drivers/scsi/bnx2fc/Kconfig"
 source "drivers/scsi/be2iscsi/Kconfig"
+source "drivers/scsi/cxlflash/Kconfig"
 
 config SGIWD93_SCSI
 	tristate "SGI WD93C93 SCSI Driver"
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 91209e3d27e3..471d08791766 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -102,6 +102,7 @@ obj-$(CONFIG_SCSI_7000FASST) += wd7000.o
 obj-$(CONFIG_SCSI_EATA)		+= eata.o
 obj-$(CONFIG_SCSI_DC395x)	+= dc395x.o
 obj-$(CONFIG_SCSI_AM53C974)	+= esp_scsi.o am53c974.o
+obj-$(CONFIG_CXLFLASH)		+= cxlflash/
 obj-$(CONFIG_MEGARAID_LEGACY)	+= megaraid.o
 obj-$(CONFIG_MEGARAID_NEWGEN)	+= megaraid/
 obj-$(CONFIG_MEGARAID_SAS)	+= megaraid/
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 4b135cca42a1..31e8576cbaab 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -109,6 +109,7 @@ static int asd_map_memio(struct asd_ha_struct *asd_ha)
 		if (!io_handle->addr) {
 			asd_printk("couldn't map MBAR%d of %s\n", i==0?0:1,
 				   pci_name(asd_ha->pcidev));
+			err = -ENOMEM;
 			goto Err_unreq;
 		}
 	}
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 7223b0006740..8367c11d554b 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -851,6 +851,8 @@ bfad_im_module_exit(void)
 
 	if (bfad_im_scsi_vport_transport_template)
 		fc_release_transport(bfad_im_scsi_vport_transport_template);
+
+	idr_destroy(&bfad_im_port_index);
 }
 
 void
diff --git a/drivers/scsi/cxlflash/Kconfig b/drivers/scsi/cxlflash/Kconfig
new file mode 100644
index 000000000000..c052104e523e
--- /dev/null
+++ b/drivers/scsi/cxlflash/Kconfig
@@ -0,0 +1,11 @@
#
# IBM CXL-attached Flash Accelerator SCSI Driver
#

config CXLFLASH
	tristate "Support for IBM CAPI Flash"
	depends on PCI && SCSI && CXL && EEH
	default m
	help
	  Allows CAPI Accelerated IO to Flash
	  If unsure, say N.
diff --git a/drivers/scsi/cxlflash/Makefile b/drivers/scsi/cxlflash/Makefile
new file mode 100644
index 000000000000..9e39866d473b
--- /dev/null
+++ b/drivers/scsi/cxlflash/Makefile
@@ -0,0 +1,2 @@
obj-$(CONFIG_CXLFLASH) += cxlflash.o
cxlflash-y += main.o superpipe.o lunmgt.o vlun.o
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
new file mode 100644
index 000000000000..1c56037146e1
--- /dev/null
+++ b/drivers/scsi/cxlflash/common.h
@@ -0,0 +1,208 @@
/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _CXLFLASH_COMMON_H
#define _CXLFLASH_COMMON_H

#include <linux/list.h>
#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>


#define MAX_CONTEXT	CXLFLASH_MAX_CONTEXT	/* num contexts per afu */

#define CXLFLASH_BLOCK_SIZE	4096		/* 4K blocks */
#define CXLFLASH_MAX_XFER_SIZE	16777216	/* 16MB transfer */
#define CXLFLASH_MAX_SECTORS	(CXLFLASH_MAX_XFER_SIZE/512)	/* SCSI wants
								   max_sectors
								   in units of
								   512 byte
								   sectors
								*/

#define NUM_RRQ_ENTRY	16	/* for master issued cmds */
#define MAX_RHT_PER_CONTEXT	(PAGE_SIZE / sizeof(struct sisl_rht_entry))

/* AFU command retry limit */
#define MC_RETRY_CNT	5	/* sufficient for SCSI check and
				   certain AFU errors */

/* Command management definitions */
#define CXLFLASH_NUM_CMDS	(2 * CXLFLASH_MAX_CMDS)	/* Must be a pow2 for
							   alignment and more
							   efficient array
							   index derivation
							*/

#define CXLFLASH_MAX_CMDS		16
#define CXLFLASH_MAX_CMDS_PER_LUN	CXLFLASH_MAX_CMDS


static inline void check_sizes(void)
{
	BUILD_BUG_ON_NOT_POWER_OF_2(CXLFLASH_NUM_CMDS);
}

/* AFU defines a fixed size of 4K for command buffers (borrow 4K page define) */
#define CMD_BUFSIZE	SIZE_4K

/* flags in IOA status area for host use */
#define B_DONE		0x01
#define B_ERROR		0x02	/* set with B_DONE */
#define B_TIMEOUT	0x04	/* set with B_DONE & B_ERROR */

enum cxlflash_lr_state {
	LINK_RESET_INVALID,
	LINK_RESET_REQUIRED,
	LINK_RESET_COMPLETE
};

enum cxlflash_init_state {
	INIT_STATE_NONE,
	INIT_STATE_PCI,
	INIT_STATE_AFU,
	INIT_STATE_SCSI
};

enum cxlflash_state {
	STATE_NORMAL,	/* Normal running state, everything good */
	STATE_LIMBO,	/* Limbo running state, trying to reset/recover */
	STATE_FAILTERM	/* Failed/terminating state, error out users/threads */
};

/*
 * Each context has its own set of resource handles that is visible
 * only from that context.
 */

struct cxlflash_cfg {
	struct afu *afu;
	struct cxl_context *mcctx;

	struct pci_dev *dev;
	struct pci_device_id *dev_id;
	struct Scsi_Host *host;

	ulong cxlflash_regs_pci;

	struct work_struct work_q;
	enum cxlflash_init_state init_state;
	enum cxlflash_lr_state lr_state;
	int lr_port;

	struct cxl_afu *cxl_afu;

	struct pci_pool *cxlflash_cmd_pool;
	struct pci_dev *parent_dev;

	atomic_t recovery_threads;
	struct mutex ctx_recovery_mutex;
	struct mutex ctx_tbl_list_mutex;
	struct ctx_info *ctx_tbl[MAX_CONTEXT];
	struct list_head ctx_err_recovery; /* contexts w/ recovery pending */
	struct file_operations cxl_fops;

	atomic_t num_user_contexts;

	/* Parameters that are LUN table related */
	int last_lun_index[CXLFLASH_NUM_FC_PORTS];
	int promote_lun_index;
	struct list_head lluns; /* list of llun_info structs */

	wait_queue_head_t tmf_waitq;
	bool tmf_active;
	wait_queue_head_t limbo_waitq;
	enum cxlflash_state state;
};

struct afu_cmd {
	struct sisl_ioarcb rcb;	/* IOARCB (cache line aligned) */
	struct sisl_ioasa sa;	/* IOASA must follow IOARCB */
	spinlock_t slock;
	struct completion cevent;
	char *buf;		/* per command buffer */
	struct afu *parent;
	int slot;
	atomic_t free;

	u8 cmd_tmf:1;

	/* As per the SISLITE spec the IOARCB EA has to be 16-byte aligned.
	 * However for performance reasons the IOARCB/IOASA should be
	 * cache line aligned.
	 */
} __aligned(cache_line_size());

struct afu {
	/* Stuff requiring alignment go first. */

	u64 rrq_entry[NUM_RRQ_ENTRY];	/* 128B RRQ */
	/*
	 * Command & data for AFU commands.
	 */
	struct afu_cmd cmd[CXLFLASH_NUM_CMDS];

	/* Beware of alignment till here. Preferably introduce new
	 * fields after this point
	 */

	/* AFU HW */
	struct cxl_ioctl_start_work work;
	struct cxlflash_afu_map *afu_map;	/* entire MMIO map */
	struct sisl_host_map *host_map;		/* MC host map */
	struct sisl_ctrl_map *ctrl_map;		/* MC control map */

	ctx_hndl_t ctx_hndl;	/* master's context handle */
	u64 *hrrq_start;
	u64 *hrrq_end;
	u64 *hrrq_curr;
	bool toggle;
	bool read_room;
	atomic64_t room;
	u64 hb;
	u32 cmd_couts;		/* Number of command checkouts */
	u32 internal_lun;	/* User-desired LUN mode for this AFU */

	char version[8];
	u64 interface_version;

	struct cxlflash_cfg *parent; /* Pointer back to parent cxlflash_cfg */

};

static inline u64 lun_to_lunid(u64 lun)
{
	u64 lun_id;

	int_to_scsilun(lun, (struct scsi_lun *)&lun_id);
	return swab64(lun_id);
}

int cxlflash_send_cmd(struct afu *, struct afu_cmd *);
void cxlflash_wait_resp(struct afu *, struct afu_cmd *);
int cxlflash_afu_reset(struct cxlflash_cfg *);
struct afu_cmd *cxlflash_cmd_checkout(struct afu *);
void cxlflash_cmd_checkin(struct afu_cmd *);
int cxlflash_afu_sync(struct afu *, ctx_hndl_t, res_hndl_t, u8);
void cxlflash_list_init(void);
void cxlflash_term_global_luns(void);
void cxlflash_free_errpage(void);
int cxlflash_ioctl(struct scsi_device *, int, void __user *);
void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *);
int cxlflash_mark_contexts_error(struct cxlflash_cfg *);
void cxlflash_term_local_luns(struct cxlflash_cfg *);
void cxlflash_restore_luntable(struct cxlflash_cfg *);

#endif /* ifndef _CXLFLASH_COMMON_H */
diff --git a/drivers/scsi/cxlflash/lunmgt.c b/drivers/scsi/cxlflash/lunmgt.c
new file mode 100644
index 000000000000..d98ad0ff64c1
--- /dev/null
+++ b/drivers/scsi/cxlflash/lunmgt.c
@@ -0,0 +1,266 @@
/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <misc/cxl.h>
#include <asm/unaligned.h>

#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "sislite.h"
#include "common.h"
#include "vlun.h"
#include "superpipe.h"

/**
 * create_local() - allocate and initialize a local LUN information structure
 * @sdev:	SCSI device associated with LUN.
 * @wwid:	World Wide Node Name for LUN.
 *
 * Return: Allocated local llun_info structure on success, NULL on failure
 */
static struct llun_info *create_local(struct scsi_device *sdev, u8 *wwid)
{
	struct llun_info *lli = NULL;

	lli = kzalloc(sizeof(*lli), GFP_KERNEL);
	if (unlikely(!lli)) {
		pr_err("%s: could not allocate lli\n", __func__);
		goto out;
	}

	lli->sdev = sdev;
	lli->newly_created = true;
	lli->host_no = sdev->host->host_no;
	lli->in_table = false;

	memcpy(lli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN);
out:
	return lli;
}

/**
 * create_global() - allocate and initialize a global LUN information structure
 * @sdev:	SCSI device associated with LUN.
 * @wwid:	World Wide Node Name for LUN.
 *
 * Return: Allocated global glun_info structure on success, NULL on failure
 */
static struct glun_info *create_global(struct scsi_device *sdev, u8 *wwid)
{
	struct glun_info *gli = NULL;

	gli = kzalloc(sizeof(*gli), GFP_KERNEL);
	if (unlikely(!gli)) {
		pr_err("%s: could not allocate gli\n", __func__);
		goto out;
	}

	mutex_init(&gli->mutex);
	memcpy(gli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN);
out:
	return gli;
}

/**
 * refresh_local() - find and update local LUN information structure by WWID
 * @cfg:	Internal structure associated with the host.
 * @wwid:	WWID associated with LUN.
 *
 * When the LUN is found, mark it by updating its newly_created field.
 * If a LUN with the WWID is found in the list, refresh its state.
 *
 * Return: Found local lun_info structure on success, NULL on failure
 */
static struct llun_info *refresh_local(struct cxlflash_cfg *cfg, u8 *wwid)
{
	struct llun_info *lli, *temp;

	list_for_each_entry_safe(lli, temp, &cfg->lluns, list)
		if (!memcmp(lli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN)) {
			lli->newly_created = false;
			return lli;
		}

	return NULL;
}

/**
 * lookup_global() - find a global LUN information structure by WWID
 * @wwid:	WWID associated with LUN.
 *
 * Return: Found global lun_info structure on success, NULL on failure
 */
static struct glun_info *lookup_global(u8 *wwid)
{
	struct glun_info *gli, *temp;

	list_for_each_entry_safe(gli, temp, &global.gluns, list)
		if (!memcmp(gli->wwid, wwid, DK_CXLFLASH_MANAGE_LUN_WWID_LEN))
			return gli;

	return NULL;
}

/**
 * find_and_create_lun() - find or create a local LUN information structure
 * @sdev:	SCSI device associated with LUN.
 * @wwid:	WWID associated with LUN.
 *
 * The LUN is kept both in a local list (per adapter) and in a global list
 * (across all adapters). Certain attributes of the LUN are local to the
 * adapter (such as index, port selection mask etc.).
 * The block allocation map is shared across all adapters (i.e. associated
 * with the global list). Since different attributes are associated with
 * the per adapter and global entries, allocate two separate structures for each
 * LUN (one local, one global).
 *
 * Keep a pointer back from the local to the global entry.
 *
 * Return: Found/Allocated local lun_info structure on success, NULL on failure
 */
static struct llun_info *find_and_create_lun(struct scsi_device *sdev, u8 *wwid)
{
	struct llun_info *lli = NULL;
	struct glun_info *gli = NULL;
	struct Scsi_Host *shost = sdev->host;
	struct cxlflash_cfg *cfg = shost_priv(shost);

	mutex_lock(&global.mutex);
	if (unlikely(!wwid))
		goto out;

	lli = refresh_local(cfg, wwid);
	if (lli)
		goto out;

	lli = create_local(sdev, wwid);
	if (unlikely(!lli))
		goto out;

	gli = lookup_global(wwid);
	if (gli) {
		lli->parent = gli;
		list_add(&lli->list, &cfg->lluns);
		goto out;
	}

	gli = create_global(sdev, wwid);
	if (unlikely(!gli)) {
		kfree(lli);
		lli = NULL;
		goto out;
	}

	lli->parent = gli;
	list_add(&lli->list, &cfg->lluns);

	list_add(&gli->list, &global.gluns);

out:
	mutex_unlock(&global.mutex);
	pr_debug("%s: returning %p\n", __func__, lli);
	return lli;
}

/**
 * cxlflash_term_local_luns() - Delete all entries from local LUN list, free.
 * @cfg:	Internal structure associated with the host.
 */
void cxlflash_term_local_luns(struct cxlflash_cfg *cfg)
{
	struct llun_info *lli, *temp;

	mutex_lock(&global.mutex);
	list_for_each_entry_safe(lli, temp, &cfg->lluns, list) {
		list_del(&lli->list);
		kfree(lli);
	}
	mutex_unlock(&global.mutex);
}

/**
 * cxlflash_list_init() - initializes the global LUN list
 */
void cxlflash_list_init(void)
{
	INIT_LIST_HEAD(&global.gluns);
	mutex_init(&global.mutex);
	global.err_page = NULL;
}

/**
 * cxlflash_term_global_luns() - frees resources associated with global LUN list
 */
void cxlflash_term_global_luns(void)
{
	struct glun_info *gli, *temp;

	mutex_lock(&global.mutex);
	list_for_each_entry_safe(gli, temp, &global.gluns, list) {
		list_del(&gli->list);
		cxlflash_ba_terminate(&gli->blka.ba_lun);
		kfree(gli);
	}
	mutex_unlock(&global.mutex);
}

/**
 * cxlflash_manage_lun() - handles LUN management activities
 * @sdev:	SCSI device associated with LUN.
 * @manage:	Manage ioctl data structure.
 *
 * This routine is used to notify the driver about a LUN's WWID and associate
 * SCSI devices (sdev) with a global LUN instance. Additionally it serves to
 * change a LUN's operating mode: legacy or superpipe.
 *
 * Return: 0 on success, -errno on failure
 */
int cxlflash_manage_lun(struct scsi_device *sdev,
			struct dk_cxlflash_manage_lun *manage)
{
	int rc = 0;
	struct llun_info *lli = NULL;
	u64 flags = manage->hdr.flags;
	u32 chan = sdev->channel;

	lli = find_and_create_lun(sdev, manage->wwid);
	pr_debug("%s: ENTER: WWID = %016llX%016llX, flags = %016llX lli = %p\n",
		 __func__, get_unaligned_le64(&manage->wwid[0]),
		 get_unaligned_le64(&manage->wwid[8]),
		 manage->hdr.flags, lli);
	if (unlikely(!lli)) {
		rc = -ENOMEM;
		goto out;
	}

	if (flags & DK_CXLFLASH_MANAGE_LUN_ENABLE_SUPERPIPE) {
		if (lli->newly_created)
			lli->port_sel = CHAN2PORT(chan);
		else
			lli->port_sel = BOTH_PORTS;
		/* Store off lun in unpacked, AFU-friendly format */
		lli->lun_id[chan] = lun_to_lunid(sdev->lun);
		sdev->hostdata = lli;
	} else if (flags & DK_CXLFLASH_MANAGE_LUN_DISABLE_SUPERPIPE) {
		if (lli->parent->mode != MODE_NONE)
			rc = -EBUSY;
		else
			sdev->hostdata = NULL;
	}

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
new file mode 100644
index 000000000000..3e3ccf16e7c2
--- /dev/null
+++ b/drivers/scsi/cxlflash/main.c
@@ -0,0 +1,2494 @@
/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");


/**
 * cxlflash_cmd_checkout() - checks out an AFU command
 * @afu:	AFU to checkout from.
 *
 * Commands are checked out in a round-robin fashion. Note that since
 * the command pool is larger than the hardware queue, the majority of
 * times we will only loop once or twice before getting a command. The
 * buffer and CDB within the command are initialized (zeroed) prior to
 * returning.
 *
 * Return: The checked out command or NULL when command pool is empty.
 */
struct afu_cmd *cxlflash_cmd_checkout(struct afu *afu)
{
	int k, dec = CXLFLASH_NUM_CMDS;
	struct afu_cmd *cmd;

	while (dec--) {
		k = (afu->cmd_couts++ & (CXLFLASH_NUM_CMDS - 1));

		cmd = &afu->cmd[k];

		if (!atomic_dec_if_positive(&cmd->free)) {
			pr_debug("%s: returning found index=%d\n",
				 __func__, cmd->slot);
			memset(cmd->buf, 0, CMD_BUFSIZE);
			memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
			return cmd;
		}
	}

	return NULL;
}

/**
 * cxlflash_cmd_checkin() - checks in an AFU command
 * @cmd:	AFU command to checkin.
 *
 * Safe to pass commands that have already been checked in. Several
 * internal tracking fields are reset as part of the checkin. Note
 * that these are intentionally reset prior to toggling the free bit
 * to avoid clobbering values in the event that the command is checked
 * out right away.
 */
void cxlflash_cmd_checkin(struct afu_cmd *cmd)
{
	cmd->rcb.scp = NULL;
	cmd->rcb.timeout = 0;
	cmd->sa.ioasc = 0;
	cmd->cmd_tmf = false;
	cmd->sa.host_use[0] = 0; /* clears both completion and retry bytes */

	if (unlikely(atomic_inc_return(&cmd->free) != 1)) {
		pr_err("%s: Freeing cmd (%d) that is not in use!\n",
		       __func__, cmd->slot);
		return;
	}

	pr_debug("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
}

/**
 * process_cmd_err() - command error handler
 * @cmd:	AFU command that experienced the error.
 * @scp:	SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct sisl_ioarcb *ioarcb;
	struct sisl_ioasa *ioasa;

	if (unlikely(!cmd))
		return;

	ioarcb = &(cmd->rcb);
	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		pr_debug("%s: cmd underrun cmd = %p scp = %p\n",
			 __func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		pr_debug("%s: cmd overrun cmd = %p scp = %p\n",
			 __func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
		 "afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n",
		 __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
		 ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
		 ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then we will handle this error elsewhere.
				 * If not then we must handle it here.
				 * This is probably an AFU bug. We will
				 * attempt a retry to see if that resolves it.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_MEDIUM_ERROR << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}

/**
 * cmd_complete() - command completion handler
 * @cmd:	AFU command that has completed.
 *
 * Prepares and submits command that has either completed or timed out to
 * the SCSI stack. Checks AFU command back into command pool for non-internal
 * (rcb.scp populated) commands.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	u32 resid;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	bool cmd_is_tmf;

	spin_lock_irqsave(&cmd->slock, lock_flags);
	cmd->sa.host_use_b[0] |= B_DONE;
	spin_unlock_irqrestore(&cmd->slock, lock_flags);

	if (cmd->rcb.scp) {
		scp = cmd->rcb.scp;
		if (unlikely(cmd->sa.rc.afu_rc ||
			     cmd->sa.rc.scsi_rc ||
			     cmd->sa.rc.fc_rc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		resid = cmd->sa.resid;
		cmd_is_tmf = cmd->cmd_tmf;
		cxlflash_cmd_checkin(cmd); /* Don't use cmd after here */

		pr_debug("%s: calling scsi_set_resid, scp=%p "
			 "result=%X resid=%d\n", __func__,
			 scp, scp->result, resid);

		scsi_set_resid(scp, resid);
		scsi_dma_unmap(scp);
		scp->scsi_done(scp);

		if (cmd_is_tmf) {
			spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
			cfg->tmf_active = false;
			wake_up_all_locked(&cfg->tmf_waitq);
			spin_unlock_irqrestore(&cfg->tmf_waitq.lock,
					       lock_flags);
		}
	} else
		complete(&cmd->cevent);
}

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @afu:	AFU to checkout from.
 * @scp:	SCSI command from stack.
 * @tmfcmd:	TMF command to send.
 *
 * Return:
 *	0 on success
 *	SCSI_MLQUEUE_HOST_BUSY when host is busy
 */
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
	struct afu_cmd *cmd;

	u32 port_sel = scp->device->channel + 1;
	short lflag = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	ulong lock_flags;
	int rc = 0;

	cmd = cxlflash_cmd_checkout(afu);
	if (unlikely(!cmd)) {
		pr_err("%s: could not get a free command\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/* If a Task Management Function is active, do not send one more.
	 */
	spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_locked_irq(cfg->tmf_waitq,
						    !cfg->tmf_active);
	cfg->tmf_active = true;
	cmd->cmd_tmf = true;
	spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	lflag = SISL_REQ_FLAGS_TMF_CMD;

	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);

	/* Stash the scp in the reserved field, for reuse during interrupt */
	cmd->rcb.scp = scp;

	/* Copy the CDB from the cmd passed in */
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	/* Send the command */
	rc = cxlflash_send_cmd(afu, cmd);
	if (unlikely(rc)) {
		cxlflash_cmd_checkin(cmd);
		spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
	wait_event_interruptible_locked_irq(cfg->tmf_waitq, !cfg->tmf_active);
	spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
out:
	return rc;
}

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:	SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 *
 * Return:
 *	0 on success
 *	SCSI_MLQUEUE_HOST_BUSY when host is busy
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct afu *afu = cfg->afu;
	struct pci_dev *pdev = cfg->dev;
	struct afu_cmd *cmd;
	u32 port_sel = scp->device->channel + 1;
	int nseg, i, ncount;
	struct scatterlist *sg;
	ulong lock_flags;
	short lflag = 0;
	int rc = 0;

	pr_debug("%s: (scp=%p) %d/%d/%d/%llu cdb=(%08X-%08X-%08X-%08X)\n",
		 __func__, scp, host->host_no, scp->device->channel,
		 scp->device->id, scp->device->lun,
		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/* If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);

	switch (cfg->state) {
	case STATE_LIMBO:
		dev_dbg_ratelimited(&cfg->dev->dev, "%s: device in limbo!\n",
				    __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(&cfg->dev->dev, "%s: device has failed!\n",
				    __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		break;
	}

	cmd = cxlflash_cmd_checkout(afu);
	if (unlikely(!cmd)) {
		pr_err("%s: could not get a free command\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		lflag = SISL_REQ_FLAGS_HOST_WRITE;
	else
		lflag = SISL_REQ_FLAGS_HOST_READ;

	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);

	/* Stash the scp in the reserved field, for reuse during interrupt */
	cmd->rcb.scp = scp;

	nseg = scsi_dma_map(scp);
	if (unlikely(nseg < 0)) {
		dev_err(&pdev->dev, "%s: Fail DMA map! nseg=%d\n",
			__func__, nseg);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	ncount = scsi_sg_count(scp);
	scsi_for_each_sg(scp, sg, ncount, i) {
		cmd->rcb.data_len = sg_dma_len(sg);
		cmd->rcb.data_ea = sg_dma_address(sg);
	}

	/* Copy the CDB from the scsi_cmnd passed in */
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	/* Send the command */
	rc = cxlflash_send_cmd(afu, cmd);
	if (unlikely(rc)) {
		cxlflash_cmd_checkin(cmd);
		scsi_dma_unmap(scp);
	}

out:
	return rc;
}

/**
 * cxlflash_eh_device_reset_handler() - reset a single LUN
 * @scp:	SCSI command to send.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct afu *afu = cfg->afu;
	int rcr = 0;

	pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
		 host->host_no, scp->device->channel,
		 scp->device->id, scp->device->lun,
		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	switch (cfg->state) {
	case STATE_NORMAL:
		rcr = send_tmf(afu, scp, TMF_LUN_RESET);
		if (unlikely(rcr))
			rc = FAILED;
		break;
	case STATE_LIMBO:
		wait_event(cfg->limbo_waitq, cfg->state != STATE_LIMBO);
		if (cfg->state == STATE_NORMAL)
			break;
		/* fall through */
	default:
		rc = FAILED;
		break;
	}

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_eh_host_reset_handler() - reset the host adapter
 * @scp:	SCSI command from stack identifying host.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	int rcr = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;

	pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
		 host->host_no, scp->device->channel,
		 scp->device->id, scp->device->lun,
		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	switch (cfg->state) {
	case STATE_NORMAL:
		cfg->state = STATE_LIMBO;
		scsi_block_requests(cfg->host);
		cxlflash_mark_contexts_error(cfg);
		rcr = cxlflash_afu_reset(cfg);
		if (rcr) {
			rc = FAILED;
			cfg->state = STATE_FAILTERM;
		} else
			cfg->state = STATE_NORMAL;
		wake_up_all(&cfg->limbo_waitq);
		scsi_unblock_requests(cfg->host);
		break;
	case STATE_LIMBO:
		wait_event(cfg->limbo_waitq, cfg->state != STATE_LIMBO);
		if (cfg->state == STATE_NORMAL)
			break;
		/* fall through */
	default:
		rc = FAILED;
		break;
	}

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_change_queue_depth() - change the queue depth for the device
 * @sdev:	SCSI device destined for queue depth change.
 * @qdepth:	Requested queue depth value to set.
 *
 * The requested queue depth is capped to the maximum supported value.
 *
 * Return: The actual queue depth set.
 */
static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
{

	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;

	scsi_change_queue_depth(sdev, qdepth);
	return sdev->queue_depth;
}

/**
 * cxlflash_show_port_status() - queries and presents the current port status
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t cxlflash_show_port_status(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	char *disp_status;
	int rc;
	u32 port;
	u64 status;
	u64 *fc_regs;

	rc = kstrtouint((attr->attr.name + 4), 10, &port);
	if (rc || (port >= NUM_FC_PORTS))
		return 0;

	fc_regs = &afu->afu_map->global.fc_regs[port][0];
	status =
	    (readq_be(&fc_regs[FC_MTIP_STATUS / 8]) & FC_MTIP_STATUS_MASK);

	if (status == FC_MTIP_STATUS_ONLINE)
		disp_status = "online";
	else if (status == FC_MTIP_STATUS_OFFLINE)
		disp_status = "offline";
	else
		disp_status = "unknown";

	return snprintf(buf, PAGE_SIZE, "%s\n", disp_status);
}

/**
 * cxlflash_show_lun_mode() - presents the current LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the lun mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t cxlflash_show_lun_mode(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return snprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
}

625/**
626 * cxlflash_store_lun_mode() - sets the LUN mode of the host
627 * @dev: Generic device associated with the host.
628 * @attr: Device attribute representing the lun mode.
629 * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
630 * @count: Length of data residing in @buf.
631 *
632 * The CXL Flash AFU supports a dummy LUN mode where the external
633 * links and storage are not required. Space on the FPGA is used
634 * to create 1 or 2 small LUNs which are presented to the system
635 * as if they were a normal storage device. This feature is useful
636 * during development and also provides manufacturing with a way
637 * to test the AFU without an actual device.
638 *
639 * 0 = external LUN[s] (default)
640 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
641 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
642 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
643 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
644 *
645 * Return: The number of bytes consumed from @buf (i.e. @count).
646 */
647static ssize_t cxlflash_store_lun_mode(struct device *dev,
648 struct device_attribute *attr,
649 const char *buf, size_t count)
650{
651 struct Scsi_Host *shost = class_to_shost(dev);
652 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
653 struct afu *afu = cfg->afu;
654 int rc;
655 u32 lun_mode;
656
657 rc = kstrtouint(buf, 10, &lun_mode);
658 if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
659 afu->internal_lun = lun_mode;
660 cxlflash_afu_reset(cfg);
661 scsi_scan_host(cfg->host);
662 }
663
664 return count;
665}
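/*
 * A minimal user-space sketch of exercising the lun_mode attribute above,
 * assuming the adapter registered as host0; the sysfs path follows the
 * standard scsi_host class layout, but the host number will vary.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/scsi_host/host0/lun_mode";
	char buf[8];
	FILE *f;

	f = fopen(path, "w");
	if (!f)
		return 1;
	fputs("1\n", f);	/* select internal LUN mode 1 */
	fclose(f);

	f = fopen(path, "r");	/* read the mode back */
	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("lun_mode is now %s", buf);
	fclose(f);
	return 0;
}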
666
667/**
668 * cxlflash_show_ioctl_version() - presents the current ioctl version of the host
669 * @dev: Generic device associated with the host.
670 * @attr: Device attribute representing the ioctl version.
671 * @buf: Buffer of length PAGE_SIZE to report back the ioctl version.
672 *
673 * Return: The size of the ASCII string returned in @buf.
674 */
675static ssize_t cxlflash_show_ioctl_version(struct device *dev,
676 struct device_attribute *attr,
677 char *buf)
678{
679 return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
680}
681
682/**
683 * cxlflash_show_dev_mode() - presents the current mode of the device
684 * @dev: Generic device associated with the device.
685 * @attr: Device attribute representing the device mode.
686 * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
687 *
688 * Return: The size of the ASCII string returned in @buf.
689 */
690static ssize_t cxlflash_show_dev_mode(struct device *dev,
691 struct device_attribute *attr, char *buf)
692{
693 struct scsi_device *sdev = to_scsi_device(dev);
694
695 return snprintf(buf, PAGE_SIZE, "%s\n",
696 sdev->hostdata ? "superpipe" : "legacy");
697}
698
699/**
700 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
701 * @cfg: Internal structure associated with the host.
702 */
703static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
704{
705 struct pci_dev *pdev = cfg->dev;
706
707 if (pci_channel_offline(pdev))
708 wait_event_timeout(cfg->limbo_waitq,
709 !pci_channel_offline(pdev),
710 CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
711}
712
713/*
714 * Host attributes
715 */
716static DEVICE_ATTR(port0, S_IRUGO, cxlflash_show_port_status, NULL);
717static DEVICE_ATTR(port1, S_IRUGO, cxlflash_show_port_status, NULL);
718static DEVICE_ATTR(lun_mode, S_IRUGO | S_IWUSR, cxlflash_show_lun_mode,
719 cxlflash_store_lun_mode);
720static DEVICE_ATTR(ioctl_version, S_IRUGO, cxlflash_show_ioctl_version, NULL);
721
722static struct device_attribute *cxlflash_host_attrs[] = {
723 &dev_attr_port0,
724 &dev_attr_port1,
725 &dev_attr_lun_mode,
726 &dev_attr_ioctl_version,
727 NULL
728};
729
730/*
731 * Device attributes
732 */
733static DEVICE_ATTR(mode, S_IRUGO, cxlflash_show_dev_mode, NULL);
734
735static struct device_attribute *cxlflash_dev_attrs[] = {
736 &dev_attr_mode,
737 NULL
738};
739
740/*
741 * Host template
742 */
743static struct scsi_host_template driver_template = {
744 .module = THIS_MODULE,
745 .name = CXLFLASH_ADAPTER_NAME,
746 .info = cxlflash_driver_info,
747 .ioctl = cxlflash_ioctl,
748 .proc_name = CXLFLASH_NAME,
749 .queuecommand = cxlflash_queuecommand,
750 .eh_device_reset_handler = cxlflash_eh_device_reset_handler,
751 .eh_host_reset_handler = cxlflash_eh_host_reset_handler,
752 .change_queue_depth = cxlflash_change_queue_depth,
753 .cmd_per_lun = 16,
754 .can_queue = CXLFLASH_MAX_CMDS,
755 .this_id = -1,
756 .sg_tablesize = SG_NONE, /* No scatter gather support. */
757 .max_sectors = CXLFLASH_MAX_SECTORS,
758 .use_clustering = ENABLE_CLUSTERING,
759 .shost_attrs = cxlflash_host_attrs,
760 .sdev_attrs = cxlflash_dev_attrs,
761};
762
763/*
764 * Device dependent values
765 */
766static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS };
767
768/*
769 * PCI device binding table
770 */
771static struct pci_device_id cxlflash_pci_table[] = {
772 {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
773 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
774 {}
775};
776
777MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
778
779/**
780 * free_mem() - free memory associated with the AFU
781 * @cfg: Internal structure associated with the host.
782 */
783static void free_mem(struct cxlflash_cfg *cfg)
784{
785 int i;
786 char *buf = NULL;
787 struct afu *afu = cfg->afu;
788
789 if (cfg->afu) {
790 for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
791 buf = afu->cmd[i].buf;
792 if (!((u64)buf & (PAGE_SIZE - 1)))
793 free_page((ulong)buf);
794 }
795
796 free_pages((ulong)afu, get_order(sizeof(struct afu)));
797 cfg->afu = NULL;
798 }
799}
800
801/**
802 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
803 * @cfg: Internal structure associated with the host.
804 *
805 * Safe to call with AFU in a partially allocated/initialized state.
806 */
807static void stop_afu(struct cxlflash_cfg *cfg)
808{
809 int i;
810 struct afu *afu = cfg->afu;
811
812 if (likely(afu)) {
813 for (i = 0; i < CXLFLASH_NUM_CMDS; i++)
814 complete(&afu->cmd[i].cevent);
815
816 if (likely(afu->afu_map)) {
817 cxl_psa_unmap((void *)afu->afu_map);
818 afu->afu_map = NULL;
819 }
820 }
821}
822
823/**
824 * term_mc() - terminates the master context
825 * @cfg: Internal structure associated with the host.
826 * @level: Depth of allocation, where to begin waterfall tear down.
827 *
828 * Safe to call with AFU/MC in partially allocated/initialized state.
829 */
830static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level)
831{
832 int rc = 0;
833 struct afu *afu = cfg->afu;
834
835 if (!afu || !cfg->mcctx) {
836 pr_err("%s: returning from term_mc with NULL afu or MC\n",
837 __func__);
838 return;
839 }
840
841 switch (level) {
842 case UNDO_START:
843 rc = cxl_stop_context(cfg->mcctx);
844 BUG_ON(rc);
845 case UNMAP_THREE:
846 cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
847 case UNMAP_TWO:
848 cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
849 case UNMAP_ONE:
850 cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
851 case FREE_IRQ:
852 cxl_free_afu_irqs(cfg->mcctx);
853 case RELEASE_CONTEXT:
854 cfg->mcctx = NULL;
855 }
856}
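/*
 * The undo_level switch above is a deliberate fall-through waterfall:
 * entering at any level also runs every shallower teardown step below it.
 * A standalone sketch of the same pattern with illustrative stage names
 * (this enum is not the driver's):
 */
#include <stdio.h>

enum undo { UNDO_ALL, UNDO_IRQS, UNDO_CTX };

static void teardown(enum undo level)
{
	switch (level) {
	case UNDO_ALL:
		puts("stop context");		/* deepest step first */
		/* fall through */
	case UNDO_IRQS:
		puts("free irqs");
		/* fall through */
	case UNDO_CTX:
		puts("release context");	/* shallowest, always last */
	}
}

int main(void)
{
	teardown(UNDO_IRQS);	/* frees irqs, then releases the context */
	return 0;
}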
857
858/**
859 * term_afu() - terminates the AFU
860 * @cfg: Internal structure associated with the host.
861 *
862 * Safe to call with AFU/MC in partially allocated/initialized state.
863 */
864static void term_afu(struct cxlflash_cfg *cfg)
865{
866 term_mc(cfg, UNDO_START);
867
868 if (cfg->afu)
869 stop_afu(cfg);
870
871 pr_debug("%s: returning\n", __func__);
872}
873
874/**
875 * cxlflash_remove() - PCI entry point to tear down host
876 * @pdev: PCI device associated with the host.
877 *
878 * Safe to use as a cleanup in partially allocated/initialized state.
879 */
880static void cxlflash_remove(struct pci_dev *pdev)
881{
882 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
883 ulong lock_flags;
884
885 /* If a Task Management Function is active, wait for it to complete
886 * before continuing with remove.
887 */
888 spin_lock_irqsave(&cfg->tmf_waitq.lock, lock_flags);
889 if (cfg->tmf_active)
890 wait_event_interruptible_locked_irq(cfg->tmf_waitq,
891 !cfg->tmf_active);
892 spin_unlock_irqrestore(&cfg->tmf_waitq.lock, lock_flags);
893
894 cfg->state = STATE_FAILTERM;
895 cxlflash_stop_term_user_contexts(cfg);
896
897 switch (cfg->init_state) {
898 case INIT_STATE_SCSI:
899 cxlflash_term_local_luns(cfg);
900 scsi_remove_host(cfg->host);
901 scsi_host_put(cfg->host);
902 /* Fall through */
903 case INIT_STATE_AFU:
904 term_afu(cfg);
905 case INIT_STATE_PCI:
906 pci_release_regions(cfg->dev);
907 pci_disable_device(pdev);
908 case INIT_STATE_NONE:
909 flush_work(&cfg->work_q);
910 free_mem(cfg);
911 break;
912 }
913
914 pr_debug("%s: returning\n", __func__);
915}
916
917/**
918 * alloc_mem() - allocates the AFU and its command pool
919 * @cfg: Internal structure associated with the host.
920 *
921 * A partially allocated state remains on failure.
922 *
923 * Return:
924 * 0 on success
925 * -ENOMEM on failure to allocate memory
926 */
927static int alloc_mem(struct cxlflash_cfg *cfg)
928{
929 int rc = 0;
930 int i;
931 char *buf = NULL;
932
933 /* This allocation is about 12K, i.e. only 1 64k page
934 * and up to 4 4k pages
935 */
936 cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
937 get_order(sizeof(struct afu)));
938 if (unlikely(!cfg->afu)) {
939 pr_err("%s: cannot get %d free pages\n",
940 __func__, get_order(sizeof(struct afu)));
941 rc = -ENOMEM;
942 goto out;
943 }
944 cfg->afu->parent = cfg;
945 cfg->afu->afu_map = NULL;
946
947 for (i = 0; i < CXLFLASH_NUM_CMDS; buf += CMD_BUFSIZE, i++) {
948 if (!((u64)buf & (PAGE_SIZE - 1))) {
949 buf = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
950 if (unlikely(!buf)) {
951 pr_err("%s: Allocate command buffers fail!\n",
952 __func__);
953 rc = -ENOMEM;
954 free_mem(cfg);
955 goto out;
956 }
957 }
958
959 cfg->afu->cmd[i].buf = buf;
960 atomic_set(&cfg->afu->cmd[i].free, 1);
961 cfg->afu->cmd[i].slot = i;
962 }
963
964out:
965 return rc;
966}
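/*
 * alloc_mem() above packs CMD_BUFSIZE command buffers into whole pages:
 * a fresh page is taken only when the cursor lands on a page boundary,
 * and free_mem() later frees only the page-aligned cursors. A standalone
 * sketch of that packing, substituting aligned_alloc() for
 * __get_free_page() and using illustrative sizes:
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_SIZE	4096
#define DEMO_BUFSIZE	1024	/* four buffers per page */
#define DEMO_NUM_CMDS	10

int main(void)
{
	char *slots[DEMO_NUM_CMDS];
	char *buf = NULL;
	int i;

	for (i = 0; i < DEMO_NUM_CMDS; buf += DEMO_BUFSIZE, i++) {
		if (!((uintptr_t)buf & (DEMO_PAGE_SIZE - 1))) {
			buf = aligned_alloc(DEMO_PAGE_SIZE, DEMO_PAGE_SIZE);
			if (!buf)
				return 1;
		}
		slots[i] = buf;
	}

	/* free only the page-aligned slots, exactly as free_mem() does */
	for (i = 0; i < DEMO_NUM_CMDS; i++)
		if (!((uintptr_t)slots[i] & (DEMO_PAGE_SIZE - 1)))
			free(slots[i]);

	printf("packed %d buffers of %d bytes into %d-byte pages\n",
	       DEMO_NUM_CMDS, DEMO_BUFSIZE, DEMO_PAGE_SIZE);
	return 0;
}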
967
968/**
969 * init_pci() - initializes the host as a PCI device
970 * @cfg: Internal structure associated with the host.
971 *
972 * Return:
973 * 0 on success
974 * -EIO on unable to communicate with device
975 * A return code from the PCI sub-routines
976 */
977static int init_pci(struct cxlflash_cfg *cfg)
978{
979 struct pci_dev *pdev = cfg->dev;
980 int rc = 0;
981
982 cfg->cxlflash_regs_pci = pci_resource_start(pdev, 0);
983 rc = pci_request_regions(pdev, CXLFLASH_NAME);
984 if (rc < 0) {
985 dev_err(&pdev->dev,
986 "%s: Couldn't register memory range of registers\n",
987 __func__);
988 goto out;
989 }
990
991 rc = pci_enable_device(pdev);
992 if (rc || pci_channel_offline(pdev)) {
993 if (pci_channel_offline(pdev)) {
994 cxlflash_wait_for_pci_err_recovery(cfg);
995 rc = pci_enable_device(pdev);
996 }
997
998 if (rc) {
999 dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
1000 __func__);
1001 cxlflash_wait_for_pci_err_recovery(cfg);
1002 goto out_release_regions;
1003 }
1004 }
1005
1006 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1007 if (rc < 0) {
1008 dev_dbg(&pdev->dev, "%s: Failed to set 64 bit PCI DMA mask\n",
1009 __func__);
1010 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1011 }
1012
1013 if (rc < 0) {
1014 dev_err(&pdev->dev, "%s: Failed to set PCI DMA mask\n",
1015 __func__);
1016 goto out_disable;
1017 }
1018
1019 pci_set_master(pdev);
1020
1021 if (pci_channel_offline(pdev)) {
1022 cxlflash_wait_for_pci_err_recovery(cfg);
1023 if (pci_channel_offline(pdev)) {
1024 rc = -EIO;
1025 goto out_msi_disable;
1026 }
1027 }
1028
1029 rc = pci_save_state(pdev);
1030
1031 if (rc != PCIBIOS_SUCCESSFUL) {
1032 dev_err(&pdev->dev, "%s: Failed to save PCI config space\n",
1033 __func__);
1034 rc = -EIO;
1035 goto cleanup_nolog;
1036 }
1037
1038out:
1039 pr_debug("%s: returning rc=%d\n", __func__, rc);
1040 return rc;
1041
1042cleanup_nolog:
1043out_msi_disable:
1044 cxlflash_wait_for_pci_err_recovery(cfg);
1045out_disable:
1046 pci_disable_device(pdev);
1047out_release_regions:
1048 pci_release_regions(pdev);
1049 goto out;
1050
1051}
1052
1053/**
1054 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
1055 * @cfg: Internal structure associated with the host.
1056 *
1057 * Return:
1058 * 0 on success
1059 * A return code from adding the host
1060 */
1061static int init_scsi(struct cxlflash_cfg *cfg)
1062{
1063 struct pci_dev *pdev = cfg->dev;
1064 int rc = 0;
1065
1066 rc = scsi_add_host(cfg->host, &pdev->dev);
1067 if (rc) {
1068 dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n",
1069 __func__, rc);
1070 goto out;
1071 }
1072
1073 scsi_scan_host(cfg->host);
1074
1075out:
1076 pr_debug("%s: returning rc=%d\n", __func__, rc);
1077 return rc;
1078}
1079
1080/**
1081 * set_port_online() - transitions the specified host FC port to online state
1082 * @fc_regs: Top of MMIO region defined for specified port.
1083 *
1084 * The provided MMIO region must be mapped prior to call. Online state means
1085 * that the FC link layer has synced, completed the handshaking process, and
1086 * is ready for login to start.
1087 */
1088static void set_port_online(u64 *fc_regs)
1089{
1090 u64 cmdcfg;
1091
1092 cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
1093 cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE); /* clear OFF_LINE */
1094 cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE); /* set ON_LINE */
1095 writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
1096}
1097
1098/**
1099 * set_port_offline() - transitions the specified host FC port to offline state
1100 * @fc_regs: Top of MMIO region defined for specified port.
1101 *
1102 * The provided MMIO region must be mapped prior to call.
1103 */
1104static void set_port_offline(u64 *fc_regs)
1105{
1106 u64 cmdcfg;
1107
1108 cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
1109 cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE); /* clear ON_LINE */
1110 cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE); /* set OFF_LINE */
1111 writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
1112}
1113
1114/**
1115 * wait_port_online() - waits for the specified host FC port to come online
1116 * @fc_regs: Top of MMIO region defined for specified port.
1117 * @delay_us: Number of microseconds to delay between reading port status.
1118 * @nretry: Number of cycles to retry reading port status.
1119 *
1120 * The provided MMIO region must be mapped prior to call. This will time out
1121 * when the cable is not plugged in.
1122 *
1123 * Return:
1124 * TRUE (1) when the specified port is online
1125 * FALSE (0) when the specified port fails to come online after timeout
1126 * -EINVAL when @delay_us is less than 1000
1127 */
1128static int wait_port_online(u64 *fc_regs, u32 delay_us, u32 nretry)
1129{
1130 u64 status;
1131
1132 if (delay_us < 1000) {
1133 pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
1134 return -EINVAL;
1135 }
1136
1137 do {
1138 msleep(delay_us / 1000);
1139 status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
1140 } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
1141 nretry--);
1142
1143 return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
1144}
1145
1146/**
1147 * wait_port_offline() - waits for the specified host FC port to go offline
1148 * @fc_regs: Top of MMIO region defined for specified port.
1149 * @delay_us: Number of microseconds to delay between reading port status.
1150 * @nretry: Number of cycles to retry reading port status.
1151 *
1152 * The provided MMIO region must be mapped prior to call.
1153 *
1154 * Return:
1155 * TRUE (1) when the specified port is offline
1156 * FALSE (0) when the specified port fails to go offline after timeout
1157 * -EINVAL when @delay_us is less than 1000
1158 */
1159static int wait_port_offline(u64 *fc_regs, u32 delay_us, u32 nretry)
1160{
1161 u64 status;
1162
1163 if (delay_us < 1000) {
1164 pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
1165 return -EINVAL;
1166 }
1167
1168 do {
1169 msleep(delay_us / 1000);
1170 status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
1171 } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
1172 nretry--);
1173
1174 return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
1175}
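/*
 * Both wait routines above share one shape: sleep a fixed interval,
 * re-read a status register, give up after nretry cycles. A standalone
 * sketch of that bounded poll loop against a plain variable instead of
 * MMIO (poll_status and the target value are illustrative):
 */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static volatile unsigned long poll_status;

static bool wait_for_status(unsigned long want, unsigned int delay_us,
			    unsigned int nretry)
{
	unsigned long status;

	do {
		usleep(delay_us);	/* stand-in for msleep() */
		status = poll_status;	/* stand-in for readq_be() */
	} while (status != want && nretry--);

	return status == want;
}

int main(void)
{
	poll_status = 1;	/* pretend the port came online at once */
	printf("online: %d\n", wait_for_status(1, 1000, 10));
	return 0;
}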
1176
1177/**
1178 * afu_set_wwpn() - configures the WWPN for the specified host FC port
1179 * @afu: AFU associated with the host that owns the specified FC port.
1180 * @port: Port number being configured.
1181 * @fc_regs: Top of MMIO region defined for specified port.
1182 * @wwpn: The world-wide-port-number previously discovered for port.
1183 *
1184 * The provided MMIO region must be mapped prior to call. As part of the
1185 * sequence to configure the WWPN, the port is toggled offline and then back
1186 * online. This toggling action can cause this routine to delay up to a few
1187 * seconds. When configured to use the internal LUN feature of the AFU, a
1188 * failure to come online is overridden.
1189 *
1190 * Return:
1191 * 0 when the WWPN is successfully written and the port comes back online
1192 * -1 when the port fails to go offline or come back up online
1193 */
1194static int afu_set_wwpn(struct afu *afu, int port, u64 *fc_regs, u64 wwpn)
1195{
1196 int ret = 0;
1197
1198 set_port_offline(fc_regs);
1199
1200 if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1201 FC_PORT_STATUS_RETRY_CNT)) {
1202 pr_debug("%s: wait on port %d to go offline timed out\n",
1203 __func__, port);
1204 ret = -1; /* but continue on to leave the port back online */
1205 }
1206
1207 if (ret == 0)
1208 writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
1209
1210 set_port_online(fc_regs);
1211
1212 if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1213 FC_PORT_STATUS_RETRY_CNT)) {
1214 pr_debug("%s: wait on port %d to go online timed out\n",
1215 __func__, port);
1216 ret = -1;
1217
1218 /*
1219 * Override for internal lun!!!
1220 */
1221 if (afu->internal_lun) {
1222 pr_debug("%s: Overriding port %d online timeout!!!\n",
1223 __func__, port);
1224 ret = 0;
1225 }
1226 }
1227
1228 pr_debug("%s: returning rc=%d\n", __func__, ret);
1229
1230 return ret;
1231}
1232
1233/**
1234 * afu_link_reset() - resets the specified host FC port
1235 * @afu: AFU associated with the host that owns the specified FC port.
1236 * @port: Port number being configured.
1237 * @fc_regs: Top of MMIO region defined for specified port.
1238 *
1239 * The provided MMIO region must be mapped prior to call. The sequence to
1240 * reset the port involves toggling it offline and then back online. This
1241 * action can cause this routine to delay up to a few seconds. An effort
1242 * is made to maintain the link with the device by switching the host to
1243 * use the alternate port exclusively while the reset takes place.
1245 */
1246static void afu_link_reset(struct afu *afu, int port, u64 *fc_regs)
1247{
1248 u64 port_sel;
1249
1250 /* first switch the AFU to the other links, if any */
1251 port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
1252 port_sel &= ~(1ULL << port);
1253 writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1254 cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1255
1256 set_port_offline(fc_regs);
1257 if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1258 FC_PORT_STATUS_RETRY_CNT))
1259 pr_err("%s: wait on port %d to go offline timed out\n",
1260 __func__, port);
1261
1262 set_port_online(fc_regs);
1263 if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1264 FC_PORT_STATUS_RETRY_CNT))
1265 pr_err("%s: wait on port %d to go online timed out\n",
1266 __func__, port);
1267
1268 /* switch back to include this port */
1269 port_sel |= (1ULL << port);
1270 writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1271 cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1272
1273 pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel);
1274}
1275
1276/*
1277 * Asynchronous interrupt information table
1278 */
1279static const struct asyc_intr_info ainfo[] = {
1280 {SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
1281 {SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
1282 {SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
1283 {SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, 0},
1284 {SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
1285 {SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, 0},
1286 {SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
1287 {SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
1288 {SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
1289 {SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
1290 {SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
1291 {SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, 0},
1292 {SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
1293 {SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, 0},
1294 {SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
1295 {SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
1296 {0x0, "", 0, 0} /* terminator */
1297};
1298
1299/**
1300 * find_ainfo() - locates and returns asynchronous interrupt information
1301 * @status: Status code set by AFU on error.
1302 *
1303 * Return: The located information or NULL when the status code is invalid.
1304 */
1305static const struct asyc_intr_info *find_ainfo(u64 status)
1306{
1307 const struct asyc_intr_info *info;
1308
1309 for (info = &ainfo[0]; info->status; info++)
1310 if (info->status == status)
1311 return info;
1312
1313 return NULL;
1314}
1315
1316/**
1317 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
1318 * @afu: AFU associated with the host.
1319 */
1320static void afu_err_intr_init(struct afu *afu)
1321{
1322 int i;
1323 u64 reg;
1324
1325 /* global async interrupts: AFU clears afu_ctrl on context exit
1326 * if async interrupts were sent to that context. This prevents
1327 * the AFU from sending further async interrupts when there is
1328 * nobody to receive them.
1329 */
1331
1332 /* mask all */
1333 writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
1334 /* set LISN# to send and point to master context */
1335 reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);
1336
1337 if (afu->internal_lun)
1338 reg |= 1; /* Bit 63 indicates local lun */
1339 writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
1340 /* clear all */
1341 writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1342 /* unmask bits that are of interest */
1343 /* note: afu can send an interrupt after this step */
1344 writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
1345 /* clear again in case a bit came on after previous clear but before */
1346 /* unmask */
1347 writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1348
1349 /* Clear/Set internal lun bits */
1350 reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
1351 reg &= SISL_FC_INTERNAL_MASK;
1352 if (afu->internal_lun)
1353 reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
1354 writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
1355
1356 /* now clear FC errors */
1357 for (i = 0; i < NUM_FC_PORTS; i++) {
1358 writeq_be(0xFFFFFFFFU,
1359 &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
1360 writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
1361 }
1362
1363 /* sync interrupts for master's IOARRIN write */
1364 /* note that unlike asyncs, there can be no pending sync interrupts */
1365 /* at this time (this is a fresh context and master has not written */
1366 /* IOARRIN yet), so there is nothing to clear. */
1367
1368 /* set LISN#, it is always sent to the context that wrote IOARRIN */
1369 writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
1370 writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
1371}
1372
1373/**
1374 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
1375 * @irq: Interrupt number.
1376 * @data: Private data provided at interrupt registration, the AFU.
1377 *
1378 * Return: Always return IRQ_HANDLED.
1379 */
1380static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
1381{
1382 struct afu *afu = (struct afu *)data;
1383 u64 reg;
1384 u64 reg_unmasked;
1385
1386 reg = readq_be(&afu->host_map->intr_status);
1387 reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
1388
1389 if (reg_unmasked == 0UL) {
1390 pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n",
1391 __func__, (u64)afu, reg);
1392 goto cxlflash_sync_err_irq_exit;
1393 }
1394
1395 pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n",
1396 __func__, (u64)afu, reg);
1397
1398 writeq_be(reg_unmasked, &afu->host_map->intr_clear);
1399
1400cxlflash_sync_err_irq_exit:
1401 pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED);
1402 return IRQ_HANDLED;
1403}
1404
1405/**
1406 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
1407 * @irq: Interrupt number.
1408 * @data: Private data provided at interrupt registration, the AFU.
1409 *
1410 * Return: Always return IRQ_HANDLED.
1411 */
1412static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
1413{
1414 struct afu *afu = (struct afu *)data;
1415 struct afu_cmd *cmd;
1416 bool toggle = afu->toggle;
1417 u64 entry,
1418 *hrrq_start = afu->hrrq_start,
1419 *hrrq_end = afu->hrrq_end,
1420 *hrrq_curr = afu->hrrq_curr;
1421
1422 /* Process however many RRQ entries that are ready */
1423 while (true) {
1424 entry = *hrrq_curr;
1425
1426 if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
1427 break;
1428
1429 cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
1430 cmd_complete(cmd);
1431
1432 /* Advance to next entry or wrap and flip the toggle bit */
1433 if (hrrq_curr < hrrq_end)
1434 hrrq_curr++;
1435 else {
1436 hrrq_curr = hrrq_start;
1437 toggle ^= SISL_RESP_HANDLE_T_BIT;
1438 }
1439 }
1440
1441 afu->hrrq_curr = hrrq_curr;
1442 afu->toggle = toggle;
1443
1444 return IRQ_HANDLED;
1445}
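/*
 * The RRQ walk above is a single-producer ring that needs no head/tail
 * registers: a phase ("toggle") bit inside each entry tells the consumer
 * whether the slot was written on the current lap, and the expected phase
 * flips on every wrap. A standalone sketch with an 8-entry ring of tagged
 * integers (the entry layout here is illustrative):
 */
#include <stdio.h>

#define T_BIT		1UL	/* low bit carries the phase tag */
#define NUM_ENTRIES	8

static unsigned long ring[NUM_ENTRIES];

int main(void)
{
	unsigned long *curr = &ring[0], *end = &ring[NUM_ENTRIES - 1];
	unsigned long toggle = 1, entry;
	int i;

	/* producer: publish three entries tagged with the current phase */
	for (i = 0; i < 3; i++)
		ring[i] = ((unsigned long)(i + 1) << 1) | T_BIT;

	/* consumer: drain until the phase bit stops matching */
	while (((entry = *curr) & T_BIT) == toggle) {
		printf("completed %lu\n", entry >> 1);
		if (curr < end) {
			curr++;
		} else {
			curr = &ring[0];	/* wrap and flip the phase */
			toggle ^= T_BIT;
		}
	}
	return 0;
}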
1446
1447/**
1448 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
1449 * @irq: Interrupt number.
1450 * @data: Private data provided at interrupt registration, the AFU.
1451 *
1452 * Return: Always return IRQ_HANDLED.
1453 */
1454static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
1455{
1456 struct afu *afu = (struct afu *)data;
1457 struct cxlflash_cfg *cfg;
1458 u64 reg_unmasked;
1459 const struct asyc_intr_info *info;
1460 struct sisl_global_map *global = &afu->afu_map->global;
1461 u64 reg;
1462 u8 port;
1463 int i;
1464
1465 cfg = afu->parent;
1466
1467 reg = readq_be(&global->regs.aintr_status);
1468 reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
1469
1470 if (reg_unmasked == 0) {
1471 pr_err("%s: spurious interrupt, aintr_status 0x%016llX\n",
1472 __func__, reg);
1473 goto out;
1474 }
1475
1476 /* it is OK to clear AFU status before FC_ERROR */
1477 writeq_be(reg_unmasked, &global->regs.aintr_clear);
1478
1479 /* check each bit that is on */
1480 for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
1481 info = find_ainfo(1ULL << i);
1482 if ((reg_unmasked & 0x1) == 0 || !info)
1483 continue;
1484
1485 port = info->port;
1486
1487 pr_err("%s: FC Port %d -> %s, fc_status 0x%08llX\n",
1488 __func__, port, info->desc,
1489 readq_be(&global->fc_regs[port][FC_STATUS / 8]));
1490
1491 /*
1492 * do link reset first, some OTHER errors will set FC_ERROR
1493 * again if cleared before or w/o a reset
1494 */
1495 if (info->action & LINK_RESET) {
1496 pr_err("%s: FC Port %d: resetting link\n",
1497 __func__, port);
1498 cfg->lr_state = LINK_RESET_REQUIRED;
1499 cfg->lr_port = port;
1500 schedule_work(&cfg->work_q);
1501 }
1502
1503 if (info->action & CLR_FC_ERROR) {
1504 reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);
1505
1506 /*
1507 * since all errors are unmasked, FC_ERROR and FC_ERRCAP
1508 * should be the same and tracing one is sufficient.
1509 */
1510
1511 pr_err("%s: fc %d: clearing fc_error 0x%08llX\n",
1512 __func__, port, reg);
1513
1514 writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
1515 writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
1516 }
1517 }
1518
1519out:
1520 pr_debug("%s: returning rc=%d, afu=%p\n", __func__, IRQ_HANDLED, afu);
1521 return IRQ_HANDLED;
1522}
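/*
 * The handler above decodes the status word by shifting it right one bit
 * per pass and looking each set bit up in the ainfo table. A standalone
 * sketch of the same table-driven decode (the status bits and action
 * flags below are illustrative, not the SISL definitions):
 */
#include <stdio.h>

#define ACT_LOG		0x1
#define ACT_RESET	0x2

struct intr_info {
	unsigned long status;
	const char *desc;
	unsigned int action;
};

static const struct intr_info table[] = {
	{ 0x1, "link down", ACT_LOG },
	{ 0x2, "crc threshold", ACT_LOG | ACT_RESET },
	{ 0x4, "login failed", ACT_LOG },
	{ 0x0, "", 0 }	/* terminator */
};

static const struct intr_info *find_info(unsigned long status)
{
	const struct intr_info *info;

	for (info = &table[0]; info->status; info++)
		if (info->status == status)
			return info;
	return NULL;
}

int main(void)
{
	unsigned long reg = 0x5;	/* two bits pending */
	const struct intr_info *info;
	int i;

	for (i = 0; reg; i++, reg >>= 1) {
		info = find_info(1UL << i);
		if ((reg & 0x1) == 0 || !info)
			continue;
		printf("%s%s\n", info->desc,
		       (info->action & ACT_RESET) ? " -> reset link" : "");
	}
	return 0;
}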
1523
1524/**
1525 * start_context() - starts the master context
1526 * @cfg: Internal structure associated with the host.
1527 *
1528 * Return: A success or failure value from CXL services.
1529 */
1530static int start_context(struct cxlflash_cfg *cfg)
1531{
1532 int rc = 0;
1533
1534 rc = cxl_start_context(cfg->mcctx,
1535 cfg->afu->work.work_element_descriptor,
1536 NULL);
1537
1538 pr_debug("%s: returning rc=%d\n", __func__, rc);
1539 return rc;
1540}
1541
1542/**
1543 * read_vpd() - obtains the WWPNs from VPD
1544 * @cfg: Internal structure associated with the host.
1545 * @wwpn: Array of size NUM_FC_PORTS to pass back WWPNs.
1546 *
1547 * Return:
1548 * 0 on success
1549 * -ENODEV when VPD or WWPN keywords not found
1550 */
1551static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1552{
1553 struct pci_dev *dev = cfg->parent_dev;
1554 int rc = 0;
1555 int ro_start, ro_size, i, j, k;
1556 ssize_t vpd_size;
1557 char vpd_data[CXLFLASH_VPD_LEN];
1558 char tmp_buf[WWPN_BUF_LEN] = { 0 };
1559 char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };
1560
1561 /* Get the VPD data from the device */
1562 vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
1563 if (unlikely(vpd_size <= 0)) {
1564 pr_err("%s: Unable to read VPD (size = %ld)\n",
1565 __func__, vpd_size);
1566 rc = -ENODEV;
1567 goto out;
1568 }
1569
1570 /* Get the read only section offset */
1571 ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
1572 PCI_VPD_LRDT_RO_DATA);
1573 if (unlikely(ro_start < 0)) {
1574 pr_err("%s: VPD Read-only data not found\n", __func__);
1575 rc = -ENODEV;
1576 goto out;
1577 }
1578
1579 /* Get the read only section size, cap when extends beyond read VPD */
1580 ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
1581 j = ro_size;
1582 i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1583 if (unlikely((i + j) > vpd_size)) {
1584 pr_debug("%s: Might need to read more VPD (%d > %ld)\n",
1585 __func__, (i + j), vpd_size);
1586 ro_size = vpd_size - i;
1587 }
1588
1589 /*
1590 * Find the offset of the WWPN tag within the read only
1591 * VPD data and validate the found field (partials are
1592 * no good to us). Convert the ASCII data to an integer
1593 * value. Note that we must copy to a temporary buffer
1594 * because the conversion service requires that the ASCII
1595 * string be terminated.
1596 */
1597 for (k = 0; k < NUM_FC_PORTS; k++) {
1598 j = ro_size;
1599 i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
1600
1601 i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
1602 if (unlikely(i < 0)) {
1603 pr_err("%s: Port %d WWPN not found in VPD\n",
1604 __func__, k);
1605 rc = -ENODEV;
1606 goto out;
1607 }
1608
1609 j = pci_vpd_info_field_size(&vpd_data[i]);
1610 i += PCI_VPD_INFO_FLD_HDR_SIZE;
1611 if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
1612 pr_err("%s: Port %d WWPN incomplete or VPD corrupt\n",
1613 __func__, k);
1614 rc = -ENODEV;
1615 goto out;
1616 }
1617
1618 memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
1619 rc = kstrtoul(tmp_buf, 16, (ulong *)&wwpn[k]);
1620 if (unlikely(rc)) {
1621 pr_err("%s: Fail to convert port %d WWPN to integer\n",
1622 __func__, k);
1623 rc = -ENODEV;
1624 goto out;
1625 }
1626 }
1627
1628out:
1629 pr_debug("%s: returning rc=%d\n", __func__, rc);
1630 return rc;
1631}
1632
1633/**
1634 * cxlflash_context_reset() - timeout handler for AFU commands
1635 * @cmd: AFU command that timed out.
1636 *
1637 * Sends a reset to the AFU.
1638 */
1639void cxlflash_context_reset(struct afu_cmd *cmd)
1640{
1641 int nretry = 0;
1642 u64 rrin = 0x1;
1643 u64 room = 0;
1644 struct afu *afu = cmd->parent;
1645 ulong lock_flags;
1646
1647 pr_debug("%s: cmd=%p\n", __func__, cmd);
1648
1649 spin_lock_irqsave(&cmd->slock, lock_flags);
1650
1651 /* Already completed? */
1652 if (cmd->sa.host_use_b[0] & B_DONE) {
1653 spin_unlock_irqrestore(&cmd->slock, lock_flags);
1654 return;
1655 }
1656
1657 cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
1658 spin_unlock_irqrestore(&cmd->slock, lock_flags);
1659
1660 /*
1661 * We really want to send this reset at all costs, so spread
1662 * out wait time on successive retries for available room.
1663 */
1664 do {
1665 room = readq_be(&afu->host_map->cmd_room);
1666 atomic64_set(&afu->room, room);
1667 if (room)
1668 goto write_rrin;
1669 udelay(nretry);
1670 } while (nretry++ < MC_ROOM_RETRY_CNT);
1671
1672 pr_err("%s: no cmd_room to send reset\n", __func__);
1673 return;
1674
1675write_rrin:
1676 nretry = 0;
1677 writeq_be(rrin, &afu->host_map->ioarrin);
1678 do {
1679 rrin = readq_be(&afu->host_map->ioarrin);
1680 if (rrin != 0x1)
1681 break;
1682 /* Double the delay each time */
1683 udelay(1 << nretry);
1684 } while (nretry++ < MC_ROOM_RETRY_CNT);
1685}
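/*
 * The write_rrin loop above doubles its delay on every retry by shifting:
 * 1 << nretry yields 1us, 2us, 4us, and so on. A standalone sketch of the
 * same exponential backoff, polling a plain flag in place of the ioarrin
 * register (the retry bound is illustrative):
 */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define DEMO_RETRY_CNT	10

static volatile bool hw_acked;

int main(void)
{
	int nretry = 0;

	hw_acked = true;	/* pretend the device acked immediately */
	do {
		if (hw_acked)
			break;
		usleep(1u << nretry);	/* 1us, 2us, 4us, ... */
	} while (nretry++ < DEMO_RETRY_CNT);

	printf("acked after %d retries\n", nretry);
	return 0;
}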
1686
1687/**
1688 * init_pcr() - initialize the provisioning and control registers
1689 * @cfg: Internal structure associated with the host.
1690 *
1691 * Also sets up fast access to the mapped registers and initializes AFU
1692 * command fields that never change.
1693 */
1694void init_pcr(struct cxlflash_cfg *cfg)
1695{
1696 struct afu *afu = cfg->afu;
1697 struct sisl_ctrl_map *ctrl_map;
1698 int i;
1699
1700 for (i = 0; i < MAX_CONTEXT; i++) {
1701 ctrl_map = &afu->afu_map->ctrls[i].ctrl;
1702 /* disrupt any clients that could be running */
1703 /* e. g. clients that survived a master restart */
1704 writeq_be(0, &ctrl_map->rht_start);
1705 writeq_be(0, &ctrl_map->rht_cnt_id);
1706 writeq_be(0, &ctrl_map->ctx_cap);
1707 }
1708
1709 /* copy frequently used fields into afu */
1710 afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
1711 /* ctx_hndl is 16 bits in CAIA */
1712 afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
1713 afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;
1714
1715 /* Program the Endian Control for the master context */
1716 writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);
1717
1718 /* initialize cmd fields that never change */
1719 for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
1720 afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
1721 afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
1722 afu->cmd[i].rcb.rrq = 0x0;
1723 }
1724}
1725
1726/**
1727 * init_global() - initialize AFU global registers
1728 * @cfg: Internal structure associated with the host.
1729 */
1730int init_global(struct cxlflash_cfg *cfg)
1731{
1732 struct afu *afu = cfg->afu;
1733 u64 wwpn[NUM_FC_PORTS]; /* wwpn of AFU ports */
1734 int i = 0, num_ports = 0;
1735 int rc = 0;
1736 u64 reg;
1737
1738 rc = read_vpd(cfg, &wwpn[0]);
1739 if (rc) {
1740 pr_err("%s: could not read vpd rc=%d\n", __func__, rc);
1741 goto out;
1742 }
1743
1744 pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]);
1745
1746 /* set up RRQ in AFU for master issued cmds */
1747 writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
1748 writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);
1749
1750 /* AFU configuration */
1751 reg = readq_be(&afu->afu_map->global.regs.afu_config);
1752 reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
1753 /* enable all auto retry options and control endianness */
1754 /* leave others at default: */
1755 /* CTX_CAP write protected, mbox_r does not clear on read and */
1756 /* checker on if dual afu */
1757 writeq_be(reg, &afu->afu_map->global.regs.afu_config);
1758
1759 /* global port select: select either port */
1760 if (afu->internal_lun) {
1761 /* only use port 0 */
1762 writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
1763 num_ports = NUM_FC_PORTS - 1;
1764 } else {
1765 writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel);
1766 num_ports = NUM_FC_PORTS;
1767 }
1768
1769 for (i = 0; i < num_ports; i++) {
1770 /* unmask all errors (but they are still masked at AFU) */
1771 writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
1772 /* clear CRC error cnt & set a threshold */
1773 (void)readq_be(&afu->afu_map->global.
1774 fc_regs[i][FC_CNT_CRCERR / 8]);
1775 writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
1776 [FC_CRC_THRESH / 8]);
1777
1778 /* set WWPNs. If already programmed, wwpn[i] is 0 */
1779 if (wwpn[i] != 0 &&
1780 afu_set_wwpn(afu, i,
1781 &afu->afu_map->global.fc_regs[i][0],
1782 wwpn[i])) {
1783 pr_err("%s: failed to set WWPN on port %d\n",
1784 __func__, i);
1785 rc = -EIO;
1786 goto out;
1787 }
1788 /* Programming WWPN back to back causes additional
1789 * offline/online transitions and a PLOGI
1790 */
1791 msleep(100);
1792
1793 }
1794
1795 /* set up master's own CTX_CAP to allow real mode, host translation */
1796 /* tbls, afu cmds and read/write GSCSI cmds. */
1797 /* First, unlock ctx_cap write by reading mbox */
1798 (void)readq_be(&afu->ctrl_map->mbox_r); /* unlock ctx_cap */
1799 writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
1800 SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
1801 SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
1802 &afu->ctrl_map->ctx_cap);
1803 /* init heartbeat */
1804 afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
1805
1806out:
1807 return rc;
1808}
1809
1810/**
1811 * start_afu() - initializes and starts the AFU
1812 * @cfg: Internal structure associated with the host.
1813 */
1814static int start_afu(struct cxlflash_cfg *cfg)
1815{
1816 struct afu *afu = cfg->afu;
1817 struct afu_cmd *cmd;
1818
1819 int i = 0;
1820 int rc = 0;
1821
1822 for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
1823 cmd = &afu->cmd[i];
1824
1825 init_completion(&cmd->cevent);
1826 spin_lock_init(&cmd->slock);
1827 cmd->parent = afu;
1828 }
1829
1830 init_pcr(cfg);
1831
1832 /* initialize RRQ pointers */
1833 afu->hrrq_start = &afu->rrq_entry[0];
1834 afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
1835 afu->hrrq_curr = afu->hrrq_start;
1836 afu->toggle = 1;
1837
1838 rc = init_global(cfg);
1839
1840 pr_debug("%s: returning rc=%d\n", __func__, rc);
1841 return rc;
1842}
1843
1844/**
1845 * init_mc() - create and register as the master context
1846 * @cfg: Internal structure associated with the host.
1847 *
1848 * Return:
1849 * 0 on success
1850 * -ENOMEM when unable to obtain a context from CXL services
1851 * A failure value from CXL services.
1852 */
1853static int init_mc(struct cxlflash_cfg *cfg)
1854{
1855 struct cxl_context *ctx;
1856 struct device *dev = &cfg->dev->dev;
1857 struct afu *afu = cfg->afu;
1858 int rc = 0;
1859 enum undo_level level;
1860
1861 ctx = cxl_get_context(cfg->dev);
1862 if (unlikely(!ctx))
1863 return -ENOMEM;
1864 cfg->mcctx = ctx;
1865
1866 /* Set it up as a master with the CXL */
1867 cxl_set_master(ctx);
1868
1869 /* During initialization reset the AFU to start from a clean slate */
1870 rc = cxl_afu_reset(cfg->mcctx);
1871 if (unlikely(rc)) {
1872 dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
1873 __func__, rc);
1874 level = RELEASE_CONTEXT;
1875 goto out;
1876 }
1877
1878 rc = cxl_allocate_afu_irqs(ctx, 3);
1879 if (unlikely(rc)) {
1880 dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
1881 __func__, rc);
1882 level = RELEASE_CONTEXT;
1883 goto out;
1884 }
1885
1886 rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
1887 "SISL_MSI_SYNC_ERROR");
1888 if (unlikely(rc <= 0)) {
1889 dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n",
1890 __func__);
1891 level = FREE_IRQ;
1892 goto out;
1893 }
1894
1895 rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
1896 "SISL_MSI_RRQ_UPDATED");
1897 if (unlikely(rc <= 0)) {
1898 dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n",
1899 __func__);
1900 level = UNMAP_ONE;
1901 goto out;
1902 }
1903
1904 rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
1905 "SISL_MSI_ASYNC_ERROR");
1906 if (unlikely(rc <= 0)) {
1907 dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n",
1908 __func__);
1909 level = UNMAP_TWO;
1910 goto out;
1911 }
1912
1913 rc = 0;
1914
1915 /* This performs the equivalent of the CXL_IOCTL_START_WORK.
1916 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
1917 * element (pe) that is embedded in the context (ctx).
1918 */
1919 rc = start_context(cfg);
1920 if (unlikely(rc)) {
1921 dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
1922 level = UNMAP_THREE;
1923 goto out;
1924 }
1925ret:
1926 pr_debug("%s: returning rc=%d\n", __func__, rc);
1927 return rc;
1928out:
1929 term_mc(cfg, level);
1930 goto ret;
1931}
1932
1933/**
1934 * init_afu() - setup as master context and start AFU
1935 * @cfg: Internal structure associated with the host.
1936 *
1937 * This routine is a higher level of control for configuring the
1938 * AFU on probe and reset paths.
1939 *
1940 * Return:
1941 * 0 on success
1942 * -ENOMEM when unable to map the AFU MMIO space
1943 * A failure value from internal services.
1944 */
1945static int init_afu(struct cxlflash_cfg *cfg)
1946{
1947 u64 reg;
1948 int rc = 0;
1949 struct afu *afu = cfg->afu;
1950 struct device *dev = &cfg->dev->dev;
1951
1952 cxl_perst_reloads_same_image(cfg->cxl_afu, true);
1953
1954 rc = init_mc(cfg);
1955 if (rc) {
1956 dev_err(dev, "%s: call to init_mc failed, rc=%d!\n",
1957 __func__, rc);
1958 goto err1;
1959 }
1960
1961 /* Map the entire MMIO space of the AFU.
1962 */
1963 afu->afu_map = cxl_psa_map(cfg->mcctx);
1964 if (!afu->afu_map) {
1965 rc = -ENOMEM;
1966 term_mc(cfg, UNDO_START);
1967 dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__);
1968 goto err1;
1969 }
1970
1971 /* don't byte reverse on reading afu_version, else the string form */
1972 /* will be backwards */
1973 reg = afu->afu_map->global.regs.afu_version;
1974 memcpy(afu->version, &reg, 8);
1975 afu->interface_version =
1976 readq_be(&afu->afu_map->global.regs.interface_version);
1977 pr_debug("%s: afu version %s, interface version 0x%llX\n",
1978 __func__, afu->version, afu->interface_version);
1979
1980 rc = start_afu(cfg);
1981 if (rc) {
1982 dev_err(dev, "%s: call to start_afu failed, rc=%d!\n",
1983 __func__, rc);
1984 term_mc(cfg, UNDO_START);
1985 cxl_psa_unmap((void *)afu->afu_map);
1986 afu->afu_map = NULL;
1987 goto err1;
1988 }
1989
1990 afu_err_intr_init(cfg->afu);
1991 atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
1992
1993 /* Restore the LUN mappings */
1994 cxlflash_restore_luntable(cfg);
1995err1:
1996 pr_debug("%s: returning rc=%d\n", __func__, rc);
1997 return rc;
1998}
1999
2000/**
2001 * cxlflash_send_cmd() - sends an AFU command
2002 * @afu: AFU associated with the host.
2003 * @cmd: AFU command to send.
2004 *
2005 * Return:
2006 * 0 on success
2007 * -1 on failure
2008 */
2009int cxlflash_send_cmd(struct afu *afu, struct afu_cmd *cmd)
2010{
2011 struct cxlflash_cfg *cfg = afu->parent;
2012 int nretry = 0;
2013 int rc = 0;
2014 u64 room;
2015 long newval;
2016
2017 /*
2018 * This routine is used by critical users such as AFU sync and to
2019 * send a task management function (TMF). Thus we want to retry a
2020 * bit before returning an error. To avoid the performance penalty
2021 * of MMIO, we spread the update of 'room' over multiple commands.
2022 */
2023retry:
2024 newval = atomic64_dec_if_positive(&afu->room);
2025 if (!newval) {
2026 do {
2027 room = readq_be(&afu->host_map->cmd_room);
2028 atomic64_set(&afu->room, room);
2029 if (room)
2030 goto write_ioarrin;
2031 udelay(nretry);
2032 } while (nretry++ < MC_ROOM_RETRY_CNT);
2033
2034 pr_err("%s: no cmd_room to send 0x%X\n",
2035 __func__, cmd->rcb.cdb[0]);
2036
2037 goto no_room;
2038 } else if (unlikely(newval < 0)) {
2039 /* This should be rare. i.e. Only if two threads race and
2040 * decrement before the MMIO read is done. In this case
2041 * just benefit from the other thread having updated
2042 * afu->room.
2043 */
2044 if (nretry++ < MC_ROOM_RETRY_CNT) {
2045 udelay(nretry);
2046 goto retry;
2047 }
2048
2049 goto no_room;
2050 }
2051
2052write_ioarrin:
2053 writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
2054out:
2055 pr_debug("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
2056 cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
2057 return rc;
2058
2059no_room:
2060 afu->read_room = true;
2061 schedule_work(&cfg->work_q);
2062 rc = SCSI_MLQUEUE_HOST_BUSY;
2063 goto out;
2064}
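/*
 * The room accounting above amortizes an expensive MMIO read over many
 * submissions: a cached atomic credit count is decremented per command
 * and only re-synced from hardware once it is exhausted. A standalone
 * C11 sketch of the same scheme, with a plain variable standing in for
 * the cmd_room register (names and the credit total are illustrative):
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic long room = 4;		/* cached credits */
static volatile long hw_cmd_room;	/* stand-in for the MMIO register */

/* decrement if positive; return the new value, or -1 if nothing to take */
static long dec_if_positive(_Atomic long *v)
{
	long old = atomic_load(v);

	while (old > 0 && !atomic_compare_exchange_weak(v, &old, old - 1))
		;
	return old > 0 ? old - 1 : -1;
}

static int send_one(void)
{
	long newval = dec_if_positive(&room);

	if (newval > 0)
		return 0;	/* credit taken, no MMIO on the fast path */

	/* slow path: re-read the "register" and refresh the cache */
	atomic_store(&room, hw_cmd_room);
	return hw_cmd_room ? 0 : -1;
}

int main(void)
{
	int i;

	for (i = 0; i < 6; i++)
		printf("cmd %d -> %s\n", i, send_one() ? "busy" : "sent");
	return 0;
}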
2065
2066/**
2067 * cxlflash_wait_resp() - polls for a response or timeout to a sent AFU command
2068 * @afu: AFU associated with the host.
2069 * @cmd: AFU command that was sent.
2070 */
2071void cxlflash_wait_resp(struct afu *afu, struct afu_cmd *cmd)
2072{
2073 ulong timeout = cmd->rcb.timeout * 2 * HZ;
2074
2075 timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
2076 if (!timeout)
2077 cxlflash_context_reset(cmd);
2078
2079 if (unlikely(cmd->sa.ioasc != 0))
2080 pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
2081 "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
2082 cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
2083 cmd->sa.rc.fc_rc);
2084}
2085
2086/**
2087 * cxlflash_afu_sync() - builds and sends an AFU sync command
2088 * @afu: AFU associated with the host.
2089 * @ctx_hndl_u: Identifies context requesting sync.
2090 * @res_hndl_u: Identifies resource requesting sync.
2091 * @mode: Type of sync to issue (lightweight, heavyweight, global).
2092 *
2093 * The AFU can only take 1 sync command at a time. This routine enforces this
2094 * limitation by using a mutex to provide exclusive access to the AFU during
2095 * the sync. This design point requires calling threads to not be on interrupt
2096 * context due to the possibility of sleeping during concurrent sync operations.
2097 *
2098 * AFU sync operations are only necessary and allowed when the device is
2099 * operating normally. When not operating normally, sync requests can occur as
2100 * part of cleaning up resources associated with an adapter prior to removal.
2101 * In this scenario, these requests are simply ignored (safe due to the AFU
2102 * going away).
2103 *
2104 * Return:
2105 * 0 on success
2106 * -1 on failure
2107 */
2108int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
2109 res_hndl_t res_hndl_u, u8 mode)
2110{
2111 struct cxlflash_cfg *cfg = afu->parent;
2112 struct afu_cmd *cmd = NULL;
2113 int rc = 0;
2114 int retry_cnt = 0;
2115 static DEFINE_MUTEX(sync_active);
2116
2117 if (cfg->state != STATE_NORMAL) {
2118 pr_debug("%s: Sync not required! (%u)\n", __func__, cfg->state);
2119 return 0;
2120 }
2121
2122 mutex_lock(&sync_active);
2123retry:
2124 cmd = cxlflash_cmd_checkout(afu);
2125 if (unlikely(!cmd)) {
2126 retry_cnt++;
2127 udelay(1000 * retry_cnt);
2128 if (retry_cnt < MC_RETRY_CNT)
2129 goto retry;
2130 pr_err("%s: could not get a free command\n", __func__);
2131 rc = -1;
2132 goto out;
2133 }
2134
2135 pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);
2136
2137 memset(cmd->rcb.cdb, 0, sizeof(cmd->rcb.cdb));
2138
2139 cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
2140 cmd->rcb.port_sel = 0x0; /* NA */
2141 cmd->rcb.lun_id = 0x0; /* NA */
2142 cmd->rcb.data_len = 0x0;
2143 cmd->rcb.data_ea = 0x0;
2144 cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;
2145
2146 cmd->rcb.cdb[0] = 0xC0; /* AFU Sync */
2147 cmd->rcb.cdb[1] = mode;
2148
2149 /* The cdb is aligned, no unaligned accessors required */
2150 *((u16 *)&cmd->rcb.cdb[2]) = swab16(ctx_hndl_u);
2151 *((u32 *)&cmd->rcb.cdb[4]) = swab32(res_hndl_u);
2152
2153 rc = cxlflash_send_cmd(afu, cmd);
2154 if (unlikely(rc))
2155 goto out;
2156
2157 cxlflash_wait_resp(afu, cmd);
2158
2159 /* set on timeout */
2160 if (unlikely((cmd->sa.ioasc != 0) ||
2161 (cmd->sa.host_use_b[0] & B_ERROR)))
2162 rc = -1;
2163out:
2164 mutex_unlock(&sync_active);
2165 if (cmd)
2166 cxlflash_cmd_checkin(cmd);
2167 pr_debug("%s: returning rc=%d\n", __func__, rc);
2168 return rc;
2169}
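/*
 * The design point above, a single static mutex funneling every caller
 * because the AFU has exactly one sync slot, can be sketched standalone
 * with pthreads (the worker count is illustrative):
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sync_active = PTHREAD_MUTEX_INITIALIZER;
static int device_busy;	/* the single hardware slot */

static void *do_sync(void *arg)
{
	long id = (long)arg;

	pthread_mutex_lock(&sync_active);
	device_busy++;	/* never exceeds 1 while the mutex is held */
	printf("sync %ld in flight (busy=%d)\n", id, device_busy);
	device_busy--;
	pthread_mutex_unlock(&sync_active);
	return NULL;
}

int main(void)
{
	pthread_t t[4];
	long i;

	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, do_sync, (void *)i);
	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	return 0;
}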
2170
2171/**
2172 * cxlflash_afu_reset() - resets the AFU
2173 * @cfg: Internal structure associated with the host.
2174 *
2175 * Return:
2176 * 0 on success
2177 * A failure value from internal services.
2178 */
2179int cxlflash_afu_reset(struct cxlflash_cfg *cfg)
2180{
2181 int rc = 0;
2182 /* Stop the context before the reset. Since the context is
2183 * no longer available, restart it after the reset is complete.
2184 */
2185
2186 term_afu(cfg);
2187
2188 rc = init_afu(cfg);
2189
2190 pr_debug("%s: returning rc=%d\n", __func__, rc);
2191 return rc;
2192}
2193
2194/**
2195 * cxlflash_worker_thread() - work thread handler for the AFU
2196 * @work: Work structure contained within cxlflash associated with host.
2197 *
2198 * Handles the following events:
2199 * - Link reset which cannot be performed on interrupt context due to
2200 * blocking up to a few seconds
2201 * - Read AFU command room
2202 */
2203static void cxlflash_worker_thread(struct work_struct *work)
2204{
2205 struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
2206 work_q);
2207 struct afu *afu = cfg->afu;
2208 int port;
2209 ulong lock_flags;
2210
2211 /* Avoid MMIO if the device has failed */
2212
2213 if (cfg->state != STATE_NORMAL)
2214 return;
2215
2216 spin_lock_irqsave(cfg->host->host_lock, lock_flags);
2217
2218 if (cfg->lr_state == LINK_RESET_REQUIRED) {
2219 port = cfg->lr_port;
2220 if (port < 0)
2221 pr_err("%s: invalid port index %d\n", __func__, port);
2222 else {
2223 spin_unlock_irqrestore(cfg->host->host_lock,
2224 lock_flags);
2225
2226 /* The reset can block... */
2227 afu_link_reset(afu, port,
2228 &afu->afu_map->
2229 global.fc_regs[port][0]);
2230 spin_lock_irqsave(cfg->host->host_lock, lock_flags);
2231 }
2232
2233 cfg->lr_state = LINK_RESET_COMPLETE;
2234 }
2235
2236 if (afu->read_room) {
2237 atomic64_set(&afu->room, readq_be(&afu->host_map->cmd_room));
2238 afu->read_room = false;
2239 }
2240
2241 spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
2242}
2243
2244/**
2245 * cxlflash_probe() - PCI entry point to add host
2246 * @pdev: PCI device associated with the host.
2247 * @dev_id: PCI device id associated with device.
2248 *
2249 * Return: 0 on success / non-zero on failure
2250 */
2251static int cxlflash_probe(struct pci_dev *pdev,
2252 const struct pci_device_id *dev_id)
2253{
2254 struct Scsi_Host *host;
2255 struct cxlflash_cfg *cfg = NULL;
2256 struct device *phys_dev;
2257 struct dev_dependent_vals *ddv;
2258 int rc = 0;
2259
2260 dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
2261 __func__, pdev->irq);
2262
2263 ddv = (struct dev_dependent_vals *)dev_id->driver_data;
2264 driver_template.max_sectors = ddv->max_sectors;
2265
2266 host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
2267 if (!host) {
2268 dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n",
2269 __func__);
2270 rc = -ENOMEM;
2271 goto out;
2272 }
2273
2274 host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
2275 host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
2276 host->max_channel = NUM_FC_PORTS - 1;
2277 host->unique_id = host->host_no;
2278 host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
2279
2280 cfg = (struct cxlflash_cfg *)host->hostdata;
2281 cfg->host = host;
2282 rc = alloc_mem(cfg);
2283 if (rc) {
2284 dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n",
2285 __func__);
2286 rc = -ENOMEM;
2287 goto out;
2288 }
2289
2290 cfg->init_state = INIT_STATE_NONE;
2291 cfg->dev = pdev;
2292
2293 /*
2294 * The promoted LUNs move to the top of the LUN table. The rest stay
2295 * on the bottom half. The bottom half grows from the end
2296 * (index = 255), whereas the top half grows from the beginning
2297 * (index = 0).
2298 */
2299 cfg->promote_lun_index = 0;
2300 cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1;
2301 cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;
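/*
 * A standalone sketch of the two-ended table layout described above:
 * promoted LUNs take ascending slots from index 0 while the per-port
 * halves take descending slots from the end, and the table is full when
 * the two cursors cross (one descending cursor and the table size here
 * are illustrative simplifications):
 */
#include <stdio.h>

#define DEMO_TABLE_SIZE	8

static int promote_index;			/* grows up from 0 */
static int last_index = DEMO_TABLE_SIZE - 1;	/* grows down from the end */

static int alloc_slot(int promoted)
{
	if (promote_index > last_index)
		return -1;	/* cursors crossed: table is full */
	return promoted ? promote_index++ : last_index--;
}

int main(void)
{
	printf("promoted -> %d\n", alloc_slot(1));	/* 0 */
	printf("promoted -> %d\n", alloc_slot(1));	/* 1 */
	printf("regular  -> %d\n", alloc_slot(0));	/* 7 */
	printf("regular  -> %d\n", alloc_slot(0));	/* 6 */
	return 0;
}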
2302
2303 cfg->dev_id = (struct pci_device_id *)dev_id;
2304 cfg->mcctx = NULL;
2305
2306 init_waitqueue_head(&cfg->tmf_waitq);
2307 init_waitqueue_head(&cfg->limbo_waitq);
2308
2309 INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
2310 cfg->lr_state = LINK_RESET_INVALID;
2311 cfg->lr_port = -1;
2312 mutex_init(&cfg->ctx_tbl_list_mutex);
2313 mutex_init(&cfg->ctx_recovery_mutex);
2314 INIT_LIST_HEAD(&cfg->ctx_err_recovery);
2315 INIT_LIST_HEAD(&cfg->lluns);
2316
2317 pci_set_drvdata(pdev, cfg);
2318
2319 /* Use the special service provided to look up the physical
2320 * PCI device, since we are called on the probe of the virtual
2321 * PCI host bus (vphb)
2322 */
2323 phys_dev = cxl_get_phys_dev(pdev);
2324 if (!dev_is_pci(phys_dev)) {
2325 pr_err("%s: not a pci dev\n", __func__);
2326 rc = -ENODEV;
2327 goto out_remove;
2328 }
2329 cfg->parent_dev = to_pci_dev(phys_dev);
2330
2331 cfg->cxl_afu = cxl_pci_to_afu(pdev);
2332
2333 rc = init_pci(cfg);
2334 if (rc) {
2335 dev_err(&pdev->dev, "%s: call to init_pci "
2336 "failed rc=%d!\n", __func__, rc);
2337 goto out_remove;
2338 }
2339 cfg->init_state = INIT_STATE_PCI;
2340
2341 rc = init_afu(cfg);
2342 if (rc) {
2343 dev_err(&pdev->dev, "%s: call to init_afu "
2344 "failed rc=%d!\n", __func__, rc);
2345 goto out_remove;
2346 }
2347 cfg->init_state = INIT_STATE_AFU;
2348
2350 rc = init_scsi(cfg);
2351 if (rc) {
2352 dev_err(&pdev->dev, "%s: call to init_scsi "
2353 "failed rc=%d!\n", __func__, rc);
2354 goto out_remove;
2355 }
2356 cfg->init_state = INIT_STATE_SCSI;
2357
2358out:
2359 pr_debug("%s: returning rc=%d\n", __func__, rc);
2360 return rc;
2361
2362out_remove:
2363 cxlflash_remove(pdev);
2364 goto out;
2365}
2366
2367/**
2368 * cxlflash_pci_error_detected() - called when a PCI error is detected
2369 * @pdev: PCI device struct.
2370 * @state: PCI channel state.
2371 *
2372 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
2373 */
2374static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
2375 pci_channel_state_t state)
2376{
2377 int rc = 0;
2378 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2379 struct device *dev = &cfg->dev->dev;
2380
2381 dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
2382
2383 switch (state) {
2384 case pci_channel_io_frozen:
2385 cfg->state = STATE_LIMBO;
2386
2387 /* Turn off legacy I/O */
2388 scsi_block_requests(cfg->host);
2389 rc = cxlflash_mark_contexts_error(cfg);
2390 if (unlikely(rc))
2391 dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
2392 __func__, rc);
2393 term_mc(cfg, UNDO_START);
2394 stop_afu(cfg);
2395
2396 return PCI_ERS_RESULT_NEED_RESET;
2397 case pci_channel_io_perm_failure:
2398 cfg->state = STATE_FAILTERM;
2399 wake_up_all(&cfg->limbo_waitq);
2400 scsi_unblock_requests(cfg->host);
2401 return PCI_ERS_RESULT_DISCONNECT;
2402 default:
2403 break;
2404 }
2405 return PCI_ERS_RESULT_NEED_RESET;
2406}
2407
2408/**
2409 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
2410 * @pdev: PCI device struct.
2411 *
2412 * This routine is called by the pci error recovery code after the PCI
2413 * slot has been reset, just before we should resume normal operations.
2414 *
2415 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
2416 */
2417static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
2418{
2419 int rc = 0;
2420 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2421 struct device *dev = &cfg->dev->dev;
2422
2423 dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
2424
2425 rc = init_afu(cfg);
2426 if (unlikely(rc)) {
2427 dev_err(dev, "%s: EEH recovery failed! (%d)\n", __func__, rc);
2428 return PCI_ERS_RESULT_DISCONNECT;
2429 }
2430
2431 return PCI_ERS_RESULT_RECOVERED;
2432}
2433
2434/**
2435 * cxlflash_pci_resume() - called when normal operation can resume
2436 * @pdev: PCI device struct
2437 */
2438static void cxlflash_pci_resume(struct pci_dev *pdev)
2439{
2440 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
2441 struct device *dev = &cfg->dev->dev;
2442
2443 dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
2444
2445 cfg->state = STATE_NORMAL;
2446 wake_up_all(&cfg->limbo_waitq);
2447 scsi_unblock_requests(cfg->host);
2448}
2449
2450static const struct pci_error_handlers cxlflash_err_handler = {
2451 .error_detected = cxlflash_pci_error_detected,
2452 .slot_reset = cxlflash_pci_slot_reset,
2453 .resume = cxlflash_pci_resume,
2454};
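
/*
 * Editor's note: under EEH/AER the PCI core drives the handlers above
 * in sequence: error_detected() (quiesce and return NEED_RESET for a
 * frozen channel, or DISCONNECT on permanent failure), slot_reset()
 * (re-initialize the AFU), then resume() (restore STATE_NORMAL and
 * unblock I/O).
 */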
2455
2456/*
2457 * PCI device structure
2458 */
2459static struct pci_driver cxlflash_driver = {
2460 .name = CXLFLASH_NAME,
2461 .id_table = cxlflash_pci_table,
2462 .probe = cxlflash_probe,
2463 .remove = cxlflash_remove,
2464 .err_handler = &cxlflash_err_handler,
2465};
2466
2467/**
2468 * init_cxlflash() - module entry point
2469 *
2470 * Return: 0 on success / non-zero on failure
2471 */
2472static int __init init_cxlflash(void)
2473{
2474 pr_info("%s: IBM Power CXL Flash Adapter: %s\n",
2475 __func__, CXLFLASH_DRIVER_DATE);
2476
2477 cxlflash_list_init();
2478
2479 return pci_register_driver(&cxlflash_driver);
2480}
2481
2482/**
2483 * exit_cxlflash() - module exit point
2484 */
2485static void __exit exit_cxlflash(void)
2486{
2487 cxlflash_term_global_luns();
2488 cxlflash_free_errpage();
2489
2490 pci_unregister_driver(&cxlflash_driver);
2491}
2492
2493module_init(init_cxlflash);
2494module_exit(exit_cxlflash);
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
new file mode 100644
index 000000000000..cf0e80938b13
--- /dev/null
+++ b/drivers/scsi/cxlflash/main.h
@@ -0,0 +1,108 @@
1/*
2 * CXL Flash Device Driver
3 *
4 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
5 * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
6 *
7 * Copyright (C) 2015 IBM Corporation
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#ifndef _CXLFLASH_MAIN_H
16#define _CXLFLASH_MAIN_H
17
18#include <linux/list.h>
19#include <linux/types.h>
20#include <scsi/scsi.h>
21#include <scsi/scsi_device.h>
22
23#define CXLFLASH_NAME "cxlflash"
24#define CXLFLASH_ADAPTER_NAME "IBM POWER CXL Flash Adapter"
25#define CXLFLASH_DRIVER_DATE "(August 13, 2015)"
26
27#define PCI_DEVICE_ID_IBM_CORSA 0x04F0
28#define CXLFLASH_SUBS_DEV_ID 0x04F0
29
30/* Since there is only one target, make it 0 */
31#define CXLFLASH_TARGET 0
32#define CXLFLASH_MAX_CDB_LEN 16
33
34/* Really only one target per bus since the Texan is directly attached */
35#define CXLFLASH_MAX_NUM_TARGETS_PER_BUS 1
36#define CXLFLASH_MAX_NUM_LUNS_PER_TARGET 65536
37
38#define CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT (120 * HZ)
39
40#define NUM_FC_PORTS CXLFLASH_NUM_FC_PORTS /* ports per AFU */
41
42/* FC defines */
43#define FC_MTIP_CMDCONFIG 0x010
44#define FC_MTIP_STATUS 0x018
45
46#define FC_PNAME 0x300
47#define FC_CONFIG 0x320
48#define FC_CONFIG2 0x328
49#define FC_STATUS 0x330
50#define FC_ERROR 0x380
51#define FC_ERRCAP 0x388
52#define FC_ERRMSK 0x390
53#define FC_CNT_CRCERR 0x538
54#define FC_CRC_THRESH 0x580
55
56#define FC_MTIP_CMDCONFIG_ONLINE 0x20ULL
57#define FC_MTIP_CMDCONFIG_OFFLINE 0x40ULL
58
59#define FC_MTIP_STATUS_MASK 0x30ULL
60#define FC_MTIP_STATUS_ONLINE 0x20ULL
61#define FC_MTIP_STATUS_OFFLINE 0x10ULL
62
63/* TIMEOUT and RETRY definitions */
64
65/* AFU command timeout values */
66#define MC_AFU_SYNC_TIMEOUT 5 /* 5 secs */
67
68/* AFU command room retry limit */
69#define MC_ROOM_RETRY_CNT 10
70
71/* FC CRC clear periodic timer */
72#define MC_CRC_THRESH 100 /* threshold in 5 mins */
73
74#define FC_PORT_STATUS_RETRY_CNT 100 /* 100 100ms retries = 10 seconds */
75#define FC_PORT_STATUS_RETRY_INTERVAL_US 100000 /* microseconds */
76
77/* VPD defines */
78#define CXLFLASH_VPD_LEN 256
79#define WWPN_LEN 16
80#define WWPN_BUF_LEN (WWPN_LEN + 1)
81
82enum undo_level {
83 RELEASE_CONTEXT = 0,
84 FREE_IRQ,
85 UNMAP_ONE,
86 UNMAP_TWO,
87 UNMAP_THREE,
88 UNDO_START
89};
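
/*
 * Editor's note: a hedged sketch (not the driver's code) of how the
 * undo_level above is meant to be consumed -- teardown runs from the
 * level passed down to RELEASE_CONTEXT, so term_mc(cfg, UNDO_START)
 * (see the EEH path in main.c) unwinds everything.  The placeholder
 * comments only suggest what each level reverses; the real term_mc()
 * in main.c may differ.
 */
static void example_term(enum undo_level level)
{
	switch (level) {
	case UNDO_START:
		/* stop the master context */
		/* fall through */
	case UNMAP_THREE:
		/* unmap the third interrupt */
		/* fall through */
	case UNMAP_TWO:
		/* unmap the second interrupt */
		/* fall through */
	case UNMAP_ONE:
		/* unmap the first interrupt */
		/* fall through */
	case FREE_IRQ:
		/* free the allocated AFU interrupts */
		/* fall through */
	case RELEASE_CONTEXT:
		/* release the CXL context */
		break;
	}
}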
90
91struct dev_dependent_vals {
92 u64 max_sectors;
93};
94
95struct asyc_intr_info {
96 u64 status;
97 char *desc;
98 u8 port;
99 u8 action;
100#define CLR_FC_ERROR 0x01
101#define LINK_RESET 0x02
102};
103
104#ifndef CONFIG_CXL_EEH
105#define cxl_perst_reloads_same_image(_a, _b) do { } while (0)
106#endif
107
108#endif /* _CXLFLASH_MAIN_H */
diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h
new file mode 100644
index 000000000000..63bf394fe78c
--- /dev/null
+++ b/drivers/scsi/cxlflash/sislite.h
@@ -0,0 +1,472 @@
1/*
2 * CXL Flash Device Driver
3 *
4 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
5 * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
6 *
7 * Copyright (C) 2015 IBM Corporation
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#ifndef _SISLITE_H
16#define _SISLITE_H
17
18#include <linux/types.h>
19
20typedef u16 ctx_hndl_t;
21typedef u32 res_hndl_t;
22
23#define SIZE_4K 4096
24#define SIZE_64K 65536
25
26/*
27 * IOARCB: 64 bytes, min 16 byte alignment required, host native endianness
28 * except for SCSI CDB which remains big endian per SCSI standards.
29 */
30struct sisl_ioarcb {
31 u16 ctx_id; /* ctx_hndl_t */
32 u16 req_flags;
33#define SISL_REQ_FLAGS_RES_HNDL 0x8000U /* bit 0 (MSB) */
34#define SISL_REQ_FLAGS_PORT_LUN_ID 0x0000U
35
36#define SISL_REQ_FLAGS_SUP_UNDERRUN 0x4000U /* bit 1 */
37
38#define SISL_REQ_FLAGS_TIMEOUT_SECS 0x0000U /* bits 8,9 */
39#define SISL_REQ_FLAGS_TIMEOUT_MSECS 0x0040U
40#define SISL_REQ_FLAGS_TIMEOUT_USECS 0x0080U
41#define SISL_REQ_FLAGS_TIMEOUT_CYCLES 0x00C0U
42
43#define SISL_REQ_FLAGS_TMF_CMD 0x0004u /* bit 13 */
44
45#define SISL_REQ_FLAGS_AFU_CMD 0x0002U /* bit 14 */
46
47#define SISL_REQ_FLAGS_HOST_WRITE 0x0001U /* bit 15 (LSB) */
48#define SISL_REQ_FLAGS_HOST_READ 0x0000U
49
50 union {
51 u32 res_hndl; /* res_hndl_t */
52 u32 port_sel; /* this is a selection mask:
53 * 0x1 -> port#0 can be selected,
54 * 0x2 -> port#1 can be selected.
55 * Can be bitwise ORed.
56 */
57 };
58 u64 lun_id;
59 u32 data_len; /* 4K for read/write */
60 u32 ioadl_len;
61 union {
62 u64 data_ea; /* min 16 byte aligned */
63 u64 ioadl_ea;
64 };
65 u8 msi; /* LISN to send on RRQ write */
66#define SISL_MSI_CXL_PFAULT 0 /* reserved for CXL page faults */
67#define SISL_MSI_SYNC_ERROR 1 /* recommended for AFU sync error */
68#define SISL_MSI_RRQ_UPDATED 2 /* recommended for IO completion */
69#define SISL_MSI_ASYNC_ERROR 3 /* master only - for AFU async error */
70
71 u8 rrq; /* 0 for a single RRQ */
72 u16 timeout; /* in units specified by req_flags */
73 u32 rsvd1;
74 u8 cdb[16]; /* must be in big endian */
75 struct scsi_cmnd *scp;
76} __packed;
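
/*
 * Editor's note: an illustrative (hypothetical) way to fill a
 * sisl_ioarcb for a single 4K host read routed by port/LUN id using
 * the flag definitions above.  The real command setup lives in main.c
 * and sets additional fields; this is only a sketch of the layout.
 */
static void example_fill_ioarcb(struct sisl_ioarcb *rcb, u16 ctx_id,
				u64 lun_id, void *buf)
{
	memset(rcb, 0, sizeof(*rcb));
	rcb->ctx_id = ctx_id;
	rcb->req_flags = SISL_REQ_FLAGS_PORT_LUN_ID |	/* route by port/LUN */
			 SISL_REQ_FLAGS_SUP_UNDERRUN |
			 SISL_REQ_FLAGS_TIMEOUT_SECS |	/* 0, documents intent */
			 SISL_REQ_FLAGS_HOST_READ;
	rcb->port_sel = 0x1;		/* port 0 only */
	rcb->lun_id = lun_id;
	rcb->data_len = SIZE_4K;
	rcb->data_ea = (u64)buf;	/* min 16 byte aligned */
	rcb->msi = SISL_MSI_RRQ_UPDATED;
	rcb->timeout = 5;		/* in units given by req_flags */
	/* rcb->cdb[] would then carry the big endian SCSI CDB */
}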
77
78struct sisl_rc {
79 u8 flags;
80#define SISL_RC_FLAGS_SENSE_VALID 0x80U
81#define SISL_RC_FLAGS_FCP_RSP_CODE_VALID 0x40U
82#define SISL_RC_FLAGS_OVERRUN 0x20U
83#define SISL_RC_FLAGS_UNDERRUN 0x10U
84
85 u8 afu_rc;
86#define SISL_AFU_RC_RHT_INVALID 0x01U /* user error */
87#define SISL_AFU_RC_RHT_UNALIGNED 0x02U /* should never happen */
88#define SISL_AFU_RC_RHT_OUT_OF_BOUNDS 0x03u /* user error */
89#define SISL_AFU_RC_RHT_DMA_ERR 0x04u /* see afu_extra
90 may retry if afu_retry is off
91 possible on master exit
92 */
93#define SISL_AFU_RC_RHT_RW_PERM 0x05u /* no RW perms, user error */
94#define SISL_AFU_RC_LXT_UNALIGNED 0x12U /* should never happen */
95#define SISL_AFU_RC_LXT_OUT_OF_BOUNDS 0x13u /* user error */
96#define SISL_AFU_RC_LXT_DMA_ERR 0x14u /* see afu_extra
97 may retry if afu_retry is off
98 possible on master exit
99 */
100#define SISL_AFU_RC_LXT_RW_PERM 0x15u /* no RW perms, user error */
101
102#define SISL_AFU_RC_NOT_XLATE_HOST 0x1au /* possible if master exited */
103
104 /* NO_CHANNELS means the FC ports selected by dest_port in
105 * IOARCB or in the LXT entry are down when the AFU tried to select
106 * a FC port. If the port went down on an active IO, it will set
107	 * fc_rc to 0x54 (NOLOGI) or 0x57 (LINKDOWN) instead.
108 */
109#define SISL_AFU_RC_NO_CHANNELS 0x20U /* see afu_extra, may retry */
110#define SISL_AFU_RC_CAP_VIOLATION 0x21U /* either user error or
111 afu reset/master restart
112 */
113#define SISL_AFU_RC_OUT_OF_DATA_BUFS 0x30U /* always retry */
114#define SISL_AFU_RC_DATA_DMA_ERR 0x31U /* see afu_extra
115 may retry if afu_retry is off
116 */
117
118 u8 scsi_rc; /* SCSI status byte, retry as appropriate */
119#define SISL_SCSI_RC_CHECK 0x02U
120#define SISL_SCSI_RC_BUSY 0x08u
121
122 u8 fc_rc; /* retry */
123 /*
124 * We should only see fc_rc=0x57 (LINKDOWN) or 0x54(NOLOGI) for
125 * commands that are in flight when a link goes down or is logged out.
126 * If the link is down or logged out before AFU selects the port, either
127 * it will choose the other port or we will get afu_rc=0x20 (no_channel)
128 * if there is no valid port to use.
129 *
130 * ABORTPEND/ABORTOK/ABORTFAIL/TGTABORT can be retried, typically these
131 * would happen if a frame is dropped and something times out.
132 * NOLOGI or LINKDOWN can be retried if the other port is up.
133 * RESIDERR can be retried as well.
134 *
135 * ABORTFAIL might indicate that lots of frames are getting CRC errors.
136	 * So it may be retried once, and the link reset if it happens again.
137 * The link can also be reset on the CRC error threshold interrupt.
138 */
139#define SISL_FC_RC_ABORTPEND 0x52 /* exchange timeout or abort request */
140#define SISL_FC_RC_WRABORTPEND 0x53 /* due to write XFER_RDY invalid */
141#define SISL_FC_RC_NOLOGI 0x54 /* port not logged in, in-flight cmds */
142#define SISL_FC_RC_NOEXP 0x55 /* FC protocol error or HW bug */
143#define SISL_FC_RC_INUSE 0x56 /* tag already in use, HW bug */
144#define SISL_FC_RC_LINKDOWN 0x57 /* link down, in-flight cmds */
145#define SISL_FC_RC_ABORTOK 0x58 /* pending abort completed w/success */
146#define SISL_FC_RC_ABORTFAIL 0x59 /* pending abort completed w/fail */
147#define SISL_FC_RC_RESID 0x5A /* ioasa underrun/overrun flags set */
148#define SISL_FC_RC_RESIDERR 0x5B /* actual data len does not match SCSI
149					   reported len, possibly due to dropped
150 frames */
151#define SISL_FC_RC_TGTABORT 0x5C /* command aborted by target */
152};
153
154#define SISL_SENSE_DATA_LEN 20 /* Sense data length */
155
156/*
157 * IOASA: 64 bytes & must follow IOARCB, min 16 byte alignment required,
158 * host native endianness
159 */
160struct sisl_ioasa {
161 union {
162 struct sisl_rc rc;
163 u32 ioasc;
164#define SISL_IOASC_GOOD_COMPLETION 0x00000000U
165 };
166 u32 resid;
167 u8 port;
168 u8 afu_extra;
169 /* when afu_rc=0x04, 0x14, 0x31 (_xxx_DMA_ERR):
170	 * afu_extra contains the PSL response code. Useful codes are:
171 */
172#define SISL_AFU_DMA_ERR_PAGE_IN 0x0A /* AFU_retry_on_pagein Action
173 * Enabled N/A
174 * Disabled retry
175 */
176#define SISL_AFU_DMA_ERR_INVALID_EA 0x0B /* this is a hard error
177 * afu_rc Implies
178 * 0x04, 0x14 master exit.
179 * 0x31 user error.
180 */
181 /* when afu rc=0x20 (no channels):
182 * afu_extra bits [4:5]: available portmask, [6:7]: requested portmask.
183 */
184#define SISL_AFU_NO_CLANNELS_AMASK(afu_extra) (((afu_extra) & 0x0C) >> 2)
185#define SISL_AFU_NO_CLANNELS_RMASK(afu_extra) ((afu_extra) & 0x03)
186
187 u8 scsi_extra;
188 u8 fc_extra;
189 u8 sense_data[SISL_SENSE_DATA_LEN];
190
191 /* These fields are defined by the SISlite architecture for the
192	 * host to use as it sees fit for its implementation.
193 */
194 union {
195 u64 host_use[4];
196 u8 host_use_b[32];
197 };
198} __packed;
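
/*
 * Editor's note: a hedged sketch of classifying a completion from the
 * IOASA above.  The anonymous union means ioasc aliases the four
 * sisl_rc bytes, so a good completion is simply all of them zero.  The
 * real disposition logic in main.c is more involved (retries, sense
 * data handling, etc.).
 */
static bool example_cmd_succeeded(struct sisl_ioasa *asa)
{
	if (asa->ioasc == SISL_IOASC_GOOD_COMPLETION)
		return true;

	/* On error, inspect the individual return codes */
	if (asa->rc.afu_rc)	/* AFU-level problem */
		return false;
	if (asa->rc.scsi_rc)	/* SCSI status, e.g. CHECK CONDITION */
		return false;
	if (asa->rc.fc_rc)	/* transport-level problem */
		return false;
	return true;
}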
199
200#define SISL_RESP_HANDLE_T_BIT 0x1ULL /* Toggle bit */
201
202/* MMIO space is required to support only 64-bit access */
203
204/*
205 * This AFU has two mechanisms to deal with endian-ness.
206 * One is a global configuration (in the afu_config) register
207 * below that specifies the endian-ness of the host.
208 * The other is a per context (i.e. application) specification
209 * controlled by the endian_ctrl field here. Since the master
210 * context is one such application the master context's
211 * endian-ness is set to be the same as the host.
212 *
213 * As per the SISlite spec, the MMIO registers are always
214 * big endian.
215 */
216#define SISL_ENDIAN_CTRL_BE 0x8000000000000080ULL
217#define SISL_ENDIAN_CTRL_LE 0x0000000000000000ULL
218
219#ifdef __BIG_ENDIAN
220#define SISL_ENDIAN_CTRL SISL_ENDIAN_CTRL_BE
221#else
222#define SISL_ENDIAN_CTRL SISL_ENDIAN_CTRL_LE
223#endif
224
225/* per context host transport MMIO */
226struct sisl_host_map {
227 __be64 endian_ctrl; /* Per context Endian Control. The AFU will
228 * operate on whatever the context is of the
229 * host application.
230 */
231
232 __be64 intr_status; /* this sends LISN# programmed in ctx_ctrl.
233 * Only recovery in a PERM_ERR is a context
234 * exit since there is no way to tell which
235 * command caused the error.
236 */
237#define SISL_ISTATUS_PERM_ERR_CMDROOM 0x0010ULL /* b59, user error */
238#define SISL_ISTATUS_PERM_ERR_RCB_READ 0x0008ULL /* b60, user error */
239#define SISL_ISTATUS_PERM_ERR_SA_WRITE 0x0004ULL /* b61, user error */
240#define SISL_ISTATUS_PERM_ERR_RRQ_WRITE 0x0002ULL /* b62, user error */
241 /* Page in wait accessing RCB/IOASA/RRQ is reported in b63.
242 * Same error in data/LXT/RHT access is reported via IOASA.
243 */
244#define SISL_ISTATUS_TEMP_ERR_PAGEIN 0x0001ULL /* b63, can be generated
245 * only when AFU auto
246 * retry is disabled.
247 * If user can determine
248 * the command that
249 * caused the error, it
250 * can be retried.
251 */
252#define SISL_ISTATUS_UNMASK (0x001FULL) /* 1 means unmasked */
253#define SISL_ISTATUS_MASK ~(SISL_ISTATUS_UNMASK) /* 1 means masked */
254
255 __be64 intr_clear;
256 __be64 intr_mask;
257 __be64 ioarrin; /* only write what cmd_room permits */
258 __be64 rrq_start; /* start & end are both inclusive */
259 __be64 rrq_end; /* write sequence: start followed by end */
260 __be64 cmd_room;
261	__be64 ctx_ctrl;	/* least significant byte or b56:63 is LISN# */
262 __be64 mbox_w; /* restricted use */
263};
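
/*
 * Editor's note: illustrative command submission against the host map
 * above.  The comments on cmd_room/ioarrin say to write only what
 * cmd_room permits, and MC_ROOM_RETRY_CNT in main.h bounds the
 * polling.  This is a sketch, not the driver's actual queuing code;
 * the udelay() backoff is a placeholder assumption.
 */
static int example_send_cmd(struct sisl_host_map *host_map,
			    struct sisl_ioarcb *rcb)
{
	int retry;

	for (retry = 0; retry < MC_ROOM_RETRY_CNT; retry++) {
		if (readq_be(&host_map->cmd_room)) {
			/* Room available: hand the IOARCB to the AFU */
			writeq_be((u64)rcb, &host_map->ioarrin);
			return 0;
		}
		udelay(1);	/* hypothetical backoff */
	}
	return -EBUSY;
}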
264
265/* per context provisioning & control MMIO */
266struct sisl_ctrl_map {
267 __be64 rht_start;
268 __be64 rht_cnt_id;
269 /* both cnt & ctx_id args must be ULL */
270#define SISL_RHT_CNT_ID(cnt, ctx_id) (((cnt) << 48) | ((ctx_id) << 32))
271
272 __be64 ctx_cap; /* afu_rc below is when the capability is violated */
273#define SISL_CTX_CAP_PROXY_ISSUE 0x8000000000000000ULL /* afu_rc 0x21 */
274#define SISL_CTX_CAP_REAL_MODE 0x4000000000000000ULL /* afu_rc 0x21 */
275#define SISL_CTX_CAP_HOST_XLATE 0x2000000000000000ULL /* afu_rc 0x1a */
276#define SISL_CTX_CAP_PROXY_TARGET 0x1000000000000000ULL /* afu_rc 0x21 */
277#define SISL_CTX_CAP_AFU_CMD 0x0000000000000008ULL /* afu_rc 0x21 */
278#define SISL_CTX_CAP_GSCSI_CMD 0x0000000000000004ULL /* afu_rc 0x21 */
279#define SISL_CTX_CAP_WRITE_CMD 0x0000000000000002ULL /* afu_rc 0x21 */
280#define SISL_CTX_CAP_READ_CMD 0x0000000000000001ULL /* afu_rc 0x21 */
281 __be64 mbox_r;
282};
283
284/* single copy global regs */
285struct sisl_global_regs {
286 __be64 aintr_status;
287 /* In cxlflash, each FC port/link gets a byte of status */
288#define SISL_ASTATUS_FC0_OTHER 0x8000ULL /* b48, other err,
289 FC_ERRCAP[31:20] */
290#define SISL_ASTATUS_FC0_LOGO 0x4000ULL /* b49, target sent FLOGI/PLOGI/LOGO
291 while logged in */
292#define SISL_ASTATUS_FC0_CRC_T 0x2000ULL /* b50, CRC threshold exceeded */
293#define SISL_ASTATUS_FC0_LOGI_R	 0x1000ULL /* b51, login state machine timed out
294 and retrying */
295#define SISL_ASTATUS_FC0_LOGI_F 0x0800ULL /* b52, login failed,
296 FC_ERROR[19:0] */
297#define SISL_ASTATUS_FC0_LOGI_S 0x0400ULL /* b53, login succeeded */
298#define SISL_ASTATUS_FC0_LINK_DN 0x0200ULL /* b54, link online to offline */
299#define SISL_ASTATUS_FC0_LINK_UP 0x0100ULL /* b55, link offline to online */
300
301#define SISL_ASTATUS_FC1_OTHER 0x0080ULL /* b56 */
302#define SISL_ASTATUS_FC1_LOGO 0x0040ULL /* b57 */
303#define SISL_ASTATUS_FC1_CRC_T 0x0020ULL /* b58 */
304#define SISL_ASTATUS_FC1_LOGI_R 0x0010ULL /* b59 */
305#define SISL_ASTATUS_FC1_LOGI_F 0x0008ULL /* b60 */
306#define SISL_ASTATUS_FC1_LOGI_S 0x0004ULL /* b61 */
307#define SISL_ASTATUS_FC1_LINK_DN 0x0002ULL /* b62 */
308#define SISL_ASTATUS_FC1_LINK_UP 0x0001ULL /* b63 */
309
310#define SISL_FC_INTERNAL_UNMASK 0x0000000300000000ULL /* 1 means unmasked */
311#define SISL_FC_INTERNAL_MASK ~(SISL_FC_INTERNAL_UNMASK)
312#define SISL_FC_INTERNAL_SHIFT 32
313
314#define SISL_ASTATUS_UNMASK 0xFFFFULL /* 1 means unmasked */
315#define SISL_ASTATUS_MASK ~(SISL_ASTATUS_UNMASK) /* 1 means masked */
316
317 __be64 aintr_clear;
318 __be64 aintr_mask;
319 __be64 afu_ctrl;
320 __be64 afu_hb;
321 __be64 afu_scratch_pad;
322 __be64 afu_port_sel;
323#define SISL_AFUCONF_AR_IOARCB 0x4000ULL
324#define SISL_AFUCONF_AR_LXT 0x2000ULL
325#define SISL_AFUCONF_AR_RHT 0x1000ULL
326#define SISL_AFUCONF_AR_DATA 0x0800ULL
327#define SISL_AFUCONF_AR_RSRC 0x0400ULL
328#define SISL_AFUCONF_AR_IOASA 0x0200ULL
329#define SISL_AFUCONF_AR_RRQ 0x0100ULL
330/* Aggregate all Auto Retry Bits */
331#define SISL_AFUCONF_AR_ALL (SISL_AFUCONF_AR_IOARCB|SISL_AFUCONF_AR_LXT| \
332 SISL_AFUCONF_AR_RHT|SISL_AFUCONF_AR_DATA| \
333 SISL_AFUCONF_AR_RSRC|SISL_AFUCONF_AR_IOASA| \
334 SISL_AFUCONF_AR_RRQ)
335#ifdef __BIG_ENDIAN
336#define SISL_AFUCONF_ENDIAN 0x0000ULL
337#else
338#define SISL_AFUCONF_ENDIAN 0x0020ULL
339#endif
340#define SISL_AFUCONF_MBOX_CLR_READ 0x0010ULL
341 __be64 afu_config;
342 __be64 rsvd[0xf8];
343 __be64 afu_version;
344 __be64 interface_version;
345};
346
347#define CXLFLASH_NUM_FC_PORTS 2
348#define CXLFLASH_MAX_CONTEXT 512 /* how many contexts per afu */
349#define CXLFLASH_NUM_VLUNS 512
350
351struct sisl_global_map {
352 union {
353 struct sisl_global_regs regs;
354 char page0[SIZE_4K]; /* page 0 */
355 };
356
357 char page1[SIZE_4K]; /* page 1 */
358
359 /* pages 2 & 3 */
360 __be64 fc_regs[CXLFLASH_NUM_FC_PORTS][CXLFLASH_NUM_VLUNS];
361
362 /* pages 4 & 5 (lun tbl) */
363 __be64 fc_port[CXLFLASH_NUM_FC_PORTS][CXLFLASH_NUM_VLUNS];
364
365};
366
367/*
368 * CXL Flash Memory Map
369 *
370 * +-------------------------------+
371 * | 512 * 64 KB User MMIO |
372 * | (per context) |
373 * | User Accessible |
374 * +-------------------------------+
375 * | 512 * 128 B per context |
376 * | Provisioning and Control |
377 * | Trusted Process accessible |
378 * +-------------------------------+
379 * | 64 KB Global |
380 * | Trusted Process accessible |
381 * +-------------------------------+
382 */
383struct cxlflash_afu_map {
384 union {
385 struct sisl_host_map host;
386 char harea[SIZE_64K]; /* 64KB each */
387 } hosts[CXLFLASH_MAX_CONTEXT];
388
389 union {
390 struct sisl_ctrl_map ctrl;
391 char carea[cache_line_size()]; /* 128B each */
392 } ctrls[CXLFLASH_MAX_CONTEXT];
393
394 union {
395 struct sisl_global_map global;
396 char garea[SIZE_64K]; /* 64KB single block */
397 };
398};
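
/*
 * Editor's note: the structs above express the whole problem-state
 * area as plain C layout, so (assuming 128-byte cache lines, per the
 * carea comment) the total size works out to
 * CXLFLASH_MAX_CONTEXT * (SIZE_64K + 128) + SIZE_64K, and context i's
 * host transport MMIO lives at fixed offset i * SIZE_64K.  A hedged
 * accessor, assuming 'map' points at the start of the mapped AFU MMIO:
 */
static struct sisl_host_map *example_host_map(struct cxlflash_afu_map *map,
					      ctx_hndl_t i)
{
	return &map->hosts[i].host;	/* one 64KB area per context */
}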
399
400/*
401 * LXT - LBA Translation Table
402 * LXT control blocks
403 */
404struct sisl_lxt_entry {
405 u64 rlba_base; /* bits 0:47 is base
406 * b48:55 is lun index
407 * b58:59 is write & read perms
408 * (if no perm, afu_rc=0x15)
409 * b60:63 is port_sel mask
410 */
411};
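
/*
 * Editor's note: the bit ranges in the comment above use IBM (MSB=0)
 * numbering, so in conventional LSB=0 terms the layout is bits 63:16
 * RLBA base, 15:8 LUN index, 5:4 perms, 3:0 port_sel.  A hedged
 * packing helper inferred from that reading (illustrative only):
 */
static u64 example_lxt_pack(u64 rlba_base, u8 lun_idx, u8 perm, u8 ports)
{
	return ((rlba_base & ((1ULL << 48) - 1)) << 16) |  /* IBM b0:47   */
	       ((u64)lun_idx << 8) |			   /* IBM b48:55  */
	       ((u64)(perm & 0x3) << 4) |		   /* IBM b58:59  */
	       (ports & 0xF);				   /* IBM b60:63  */
}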
412
413/*
414 * RHT - Resource Handle Table
415 * Per the SISlite spec, RHT entries are to be 16-byte aligned
416 */
417struct sisl_rht_entry {
418 struct sisl_lxt_entry *lxt_start;
419 u32 lxt_cnt;
420 u16 rsvd;
421 u8 fp; /* format & perm nibbles.
422 * (if no perm, afu_rc=0x05)
423 */
424 u8 nmask;
425} __packed __aligned(16);
426
427struct sisl_rht_entry_f1 {
428 u64 lun_id;
429 union {
430 struct {
431 u8 valid;
432 u8 rsvd[5];
433 u8 fp;
434 u8 port_sel;
435 };
436
437 u64 dw;
438 };
439} __packed __aligned(16);
440
441/* make the fp byte */
442#define SISL_RHT_FP(fmt, perm) (((fmt) << 4) | (perm))
443
444/* make the fp byte for a clone from a source fp and clone flags
445 * flags must be only 2 LSB bits.
446 */
447#define SISL_RHT_FP_CLONE(src_fp, cln_flags) ((src_fp) & (0xFC | (cln_flags)))
448
449#define RHT_PERM_READ 0x01U
450#define RHT_PERM_WRITE 0x02U
451#define RHT_PERM_RW (RHT_PERM_READ | RHT_PERM_WRITE)
452
453/* extract the perm bits from a fp */
454#define SISL_RHT_PERM(fp) ((fp) & RHT_PERM_RW)
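
/*
 * Editor's note: worked example of the fp helpers above.
 * SISL_RHT_FP(1U, RHT_PERM_RW) yields 0x13 -- format 1 in the high
 * nibble, read+write in the low nibble -- and SISL_RHT_PERM(0x13)
 * recovers RHT_PERM_RW (0x03).  A clone restricted to read-only would
 * use SISL_RHT_FP_CLONE(0x13, RHT_PERM_READ) == 0x11.
 */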
455
456#define PORT0 0x01U
457#define PORT1 0x02U
458#define BOTH_PORTS (PORT0 | PORT1)
459
460/* AFU Sync Mode byte */
461#define AFU_LW_SYNC 0x0U
462#define AFU_HW_SYNC 0x1U
463#define AFU_GSYNC 0x2U
464
465/* Special Task Management Function CDB */
466#define TMF_LUN_RESET 0x1U
467#define TMF_CLEAR_ACA 0x2U
468
469
470#define SISLITE_MAX_WS_BLOCKS 512
471
472#endif /* _SISLITE_H */
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
new file mode 100644
index 000000000000..f1b62cea75b1
--- /dev/null
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -0,0 +1,2084 @@
1/*
2 * CXL Flash Device Driver
3 *
4 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
5 * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
6 *
7 * Copyright (C) 2015 IBM Corporation
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <linux/delay.h>
16#include <linux/file.h>
17#include <linux/syscalls.h>
18#include <misc/cxl.h>
19#include <asm/unaligned.h>
20
21#include <scsi/scsi.h>
22#include <scsi/scsi_host.h>
23#include <scsi/scsi_cmnd.h>
24#include <scsi/scsi_eh.h>
25#include <uapi/scsi/cxlflash_ioctl.h>
26
27#include "sislite.h"
28#include "common.h"
29#include "vlun.h"
30#include "superpipe.h"
31
32struct cxlflash_global global;
33
34/**
35 * marshal_rele_to_resize() - translate release to resize structure
36	 * @release: Source structure from which to translate/copy.
37 * @resize: Destination structure for the translate/copy.
38 */
39static void marshal_rele_to_resize(struct dk_cxlflash_release *release,
40 struct dk_cxlflash_resize *resize)
41{
42 resize->hdr = release->hdr;
43 resize->context_id = release->context_id;
44 resize->rsrc_handle = release->rsrc_handle;
45}
46
47/**
48 * marshal_det_to_rele() - translate detach to release structure
49	 * @detach: Source structure from which to translate/copy.
50	 * @release: Destination structure for the translate/copy.
51 */
52static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
53 struct dk_cxlflash_release *release)
54{
55 release->hdr = detach->hdr;
56 release->context_id = detach->context_id;
57}
58
59/**
60 * cxlflash_free_errpage() - frees resources associated with global error page
61 */
62void cxlflash_free_errpage(void)
63{
64
65 mutex_lock(&global.mutex);
66 if (global.err_page) {
67 __free_page(global.err_page);
68 global.err_page = NULL;
69 }
70 mutex_unlock(&global.mutex);
71}
72
73/**
74 * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts
75 * @cfg: Internal structure associated with the host.
76 *
77 * When the host needs to go down, all users must be quiesced and their
78 * memory freed. This is accomplished by putting the contexts in error
79 * state which will notify the user and let them 'drive' the tear-down.
80	 * Meanwhile, this routine waits until all user contexts have been removed.
81 */
82void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg)
83{
84 struct device *dev = &cfg->dev->dev;
85 int i, found;
86
87 cxlflash_mark_contexts_error(cfg);
88
89 while (true) {
90 found = false;
91
92 for (i = 0; i < MAX_CONTEXT; i++)
93 if (cfg->ctx_tbl[i]) {
94 found = true;
95 break;
96 }
97
98 if (!found && list_empty(&cfg->ctx_err_recovery))
99 return;
100
101 dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n",
102 __func__);
103 wake_up_all(&cfg->limbo_waitq);
104 ssleep(1);
105 }
106}
107
108/**
109 * find_error_context() - locates a context by cookie on the error recovery list
110 * @cfg: Internal structure associated with the host.
111 * @rctxid: Desired context by id.
112 * @file: Desired context by file.
113 *
114 * Return: Found context on success, NULL on failure
115 */
116static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid,
117 struct file *file)
118{
119 struct ctx_info *ctxi;
120
121 list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list)
122 if ((ctxi->ctxid == rctxid) || (ctxi->file == file))
123 return ctxi;
124
125 return NULL;
126}
127
128/**
129 * get_context() - obtains a validated and locked context reference
130 * @cfg: Internal structure associated with the host.
131 * @rctxid: Desired context (raw, un-decoded format).
132 * @arg: LUN information or file associated with request.
133 * @ctx_ctrl: Control information to 'steer' desired lookup.
134 *
135	 * NOTE: despite the name pid, in Linux, current->pid actually refers
136	 * to the lightweight process id (tid) and can change if the process is
137	 * multi-threaded. The tgid remains constant for the process and only
138	 * changes when the process forks. For all intents and purposes, think
139	 * of tgid as a pid in the traditional sense.
140 *
141 * Return: Validated context on success, NULL on failure
142 */
143struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid,
144 void *arg, enum ctx_ctrl ctx_ctrl)
145{
146 struct device *dev = &cfg->dev->dev;
147 struct ctx_info *ctxi = NULL;
148 struct lun_access *lun_access = NULL;
149 struct file *file = NULL;
150 struct llun_info *lli = arg;
151 u64 ctxid = DECODE_CTXID(rctxid);
152 int rc;
153 pid_t pid = current->tgid, ctxpid = 0;
154
155 if (ctx_ctrl & CTX_CTRL_FILE) {
156 lli = NULL;
157 file = (struct file *)arg;
158 }
159
160 if (ctx_ctrl & CTX_CTRL_CLONE)
161 pid = current->parent->tgid;
162
163 if (likely(ctxid < MAX_CONTEXT)) {
164 while (true) {
165 rc = mutex_lock_interruptible(&cfg->ctx_tbl_list_mutex);
166 if (rc)
167 goto out;
168
169 ctxi = cfg->ctx_tbl[ctxid];
170 if (ctxi)
171 if ((file && (ctxi->file != file)) ||
172 (!file && (ctxi->ctxid != rctxid)))
173 ctxi = NULL;
174
175 if ((ctx_ctrl & CTX_CTRL_ERR) ||
176 (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK)))
177 ctxi = find_error_context(cfg, rctxid, file);
178 if (!ctxi) {
179 mutex_unlock(&cfg->ctx_tbl_list_mutex);
180 goto out;
181 }
182
183 /*
184 * Need to acquire ownership of the context while still
185 * under the table/list lock to serialize with a remove
186 * thread. Use the 'try' to avoid stalling the
187 * table/list lock for a single context.
188 *
189 * Note that the lock order is:
190 *
191 * cfg->ctx_tbl_list_mutex -> ctxi->mutex
192 *
193 * Therefore release ctx_tbl_list_mutex before retrying.
194 */
195 rc = mutex_trylock(&ctxi->mutex);
196 mutex_unlock(&cfg->ctx_tbl_list_mutex);
197 if (rc)
198 break; /* got the context's lock! */
199 }
200
201 if (ctxi->unavail)
202 goto denied;
203
204 ctxpid = ctxi->pid;
205 if (likely(!(ctx_ctrl & CTX_CTRL_NOPID)))
206 if (pid != ctxpid)
207 goto denied;
208
209 if (lli) {
210 list_for_each_entry(lun_access, &ctxi->luns, list)
211 if (lun_access->lli == lli)
212 goto out;
213 goto denied;
214 }
215 }
216
217out:
218 dev_dbg(dev, "%s: rctxid=%016llX ctxinfo=%p ctxpid=%u pid=%u "
219 "ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid,
220 ctx_ctrl);
221
222 return ctxi;
223
224denied:
225 mutex_unlock(&ctxi->mutex);
226 ctxi = NULL;
227 goto out;
228}
229
230/**
231 * put_context() - release a context that was retrieved from get_context()
232 * @ctxi: Context to release.
233 *
234	 * For now, releasing the context equates to unlocking its mutex.
235 */
236void put_context(struct ctx_info *ctxi)
237{
238 mutex_unlock(&ctxi->mutex);
239}
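
/*
 * Editor's note: a hypothetical caller pattern for the pair above, not
 * driver code -- get_context() returns with ctxi->mutex held, so every
 * success path must be matched with a put_context():
 */
static int example_use_context(struct cxlflash_cfg *cfg, u64 rctxid,
			       struct llun_info *lli)
{
	struct ctx_info *ctxi;

	ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
	if (!ctxi)
		return -EINVAL;		/* bad/stale context id */

	/* ... operate on the context while holding its mutex ... */

	put_context(ctxi);		/* drops ctxi->mutex */
	return 0;
}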
240
241/**
242 * afu_attach() - attach a context to the AFU
243 * @cfg: Internal structure associated with the host.
244 * @ctxi: Context to attach.
245 *
246 * Upon setting the context capabilities, they must be confirmed with
247 * a read back operation as the context might have been closed since
248 * the mailbox was unlocked. When this occurs, registration is failed.
249 *
250 * Return: 0 on success, -errno on failure
251 */
252static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
253{
254 struct device *dev = &cfg->dev->dev;
255 struct afu *afu = cfg->afu;
256 struct sisl_ctrl_map *ctrl_map = ctxi->ctrl_map;
257 int rc = 0;
258 u64 val;
259
260 /* Unlock cap and restrict user to read/write cmds in translated mode */
261 readq_be(&ctrl_map->mbox_r);
262 val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD);
263 writeq_be(val, &ctrl_map->ctx_cap);
264 val = readq_be(&ctrl_map->ctx_cap);
265 if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) {
266 dev_err(dev, "%s: ctx may be closed val=%016llX\n",
267 __func__, val);
268 rc = -EAGAIN;
269 goto out;
270 }
271
272 /* Set up MMIO registers pointing to the RHT */
273 writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start);
274 val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(afu->ctx_hndl));
275 writeq_be(val, &ctrl_map->rht_cnt_id);
276out:
277 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
278 return rc;
279}
280
281/**
282 * read_cap16() - issues a SCSI READ_CAP16 command
283 * @sdev: SCSI device associated with LUN.
284 * @lli: LUN destined for capacity request.
285 *
286 * Return: 0 on success, -errno on failure
287 */
288static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
289{
290 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
291 struct device *dev = &cfg->dev->dev;
292 struct glun_info *gli = lli->parent;
293 u8 *cmd_buf = NULL;
294 u8 *scsi_cmd = NULL;
295 u8 *sense_buf = NULL;
296 int rc = 0;
297 int result = 0;
298 int retry_cnt = 0;
299 u32 tout = (MC_DISCOVERY_TIMEOUT * HZ);
300
301retry:
302 cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
303 scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
304 sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
305 if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) {
306 rc = -ENOMEM;
307 goto out;
308 }
309
310 scsi_cmd[0] = SERVICE_ACTION_IN_16; /* read cap(16) */
311 scsi_cmd[1] = SAI_READ_CAPACITY_16; /* service action */
312 put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]);
313
314 dev_dbg(dev, "%s: %ssending cmd(0x%x)\n", __func__,
315 retry_cnt ? "re" : "", scsi_cmd[0]);
316
317 result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
318 CMD_BUFSIZE, sense_buf, tout, 5, 0, NULL);
319
320 if (driver_byte(result) == DRIVER_SENSE) {
321 result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
322 if (result & SAM_STAT_CHECK_CONDITION) {
323 struct scsi_sense_hdr sshdr;
324
325 scsi_normalize_sense(sense_buf, SCSI_SENSE_BUFFERSIZE,
326 &sshdr);
327 switch (sshdr.sense_key) {
328 case NO_SENSE:
329 case RECOVERED_ERROR:
330 /* fall through */
331 case NOT_READY:
332 result &= ~SAM_STAT_CHECK_CONDITION;
333 break;
334 case UNIT_ATTENTION:
335 switch (sshdr.asc) {
336 case 0x29: /* Power on Reset or Device Reset */
337 /* fall through */
338 case 0x2A: /* Device capacity changed */
339 case 0x3F: /* Report LUNs changed */
340 /* Retry the command once more */
341 if (retry_cnt++ < 1) {
342 kfree(cmd_buf);
343 kfree(scsi_cmd);
344 kfree(sense_buf);
345 goto retry;
346 }
347 }
348 break;
349 default:
350 break;
351 }
352 }
353 }
354
355 if (result) {
356 dev_err(dev, "%s: command failed, result=0x%x\n",
357 __func__, result);
358 rc = -EIO;
359 goto out;
360 }
361
362 /*
363 * Read cap was successful, grab values from the buffer;
364 * note that we don't need to worry about unaligned access
365 * as the buffer is allocated on an aligned boundary.
366 */
367 mutex_lock(&gli->mutex);
368 gli->max_lba = be64_to_cpu(*((u64 *)&cmd_buf[0]));
369 gli->blk_len = be32_to_cpu(*((u32 *)&cmd_buf[8]));
370 mutex_unlock(&gli->mutex);
371
372out:
373 kfree(cmd_buf);
374 kfree(scsi_cmd);
375 kfree(sense_buf);
376
377 dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n",
378 __func__, gli->max_lba, gli->blk_len, rc);
379 return rc;
380}
381
382/**
383 * get_rhte() - obtains validated resource handle table entry reference
384 * @ctxi: Context owning the resource handle.
385 * @rhndl: Resource handle associated with entry.
386 * @lli: LUN associated with request.
387 *
388 * Return: Validated RHTE on success, NULL on failure
389 */
390struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl,
391 struct llun_info *lli)
392{
393 struct sisl_rht_entry *rhte = NULL;
394
395 if (unlikely(!ctxi->rht_start)) {
396 pr_debug("%s: Context does not have allocated RHT!\n",
397 __func__);
398 goto out;
399 }
400
401 if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) {
402 pr_debug("%s: Bad resource handle! (%d)\n", __func__, rhndl);
403 goto out;
404 }
405
406 if (unlikely(ctxi->rht_lun[rhndl] != lli)) {
407 pr_debug("%s: Bad resource handle LUN! (%d)\n",
408 __func__, rhndl);
409 goto out;
410 }
411
412 rhte = &ctxi->rht_start[rhndl];
413 if (unlikely(rhte->nmask == 0)) {
414 pr_debug("%s: Unopened resource handle! (%d)\n",
415 __func__, rhndl);
416 rhte = NULL;
417 goto out;
418 }
419
420out:
421 return rhte;
422}
423
424/**
425 * rhte_checkout() - obtains free/empty resource handle table entry
426 * @ctxi: Context owning the resource handle.
427 * @lli: LUN associated with request.
428 *
429 * Return: Free RHTE on success, NULL on failure
430 */
431struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi,
432 struct llun_info *lli)
433{
434 struct sisl_rht_entry *rhte = NULL;
435 int i;
436
437 /* Find a free RHT entry */
438 for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
439 if (ctxi->rht_start[i].nmask == 0) {
440 rhte = &ctxi->rht_start[i];
441 ctxi->rht_out++;
442 break;
443 }
444
445 if (likely(rhte))
446 ctxi->rht_lun[i] = lli;
447
448 pr_debug("%s: returning rhte=%p (%d)\n", __func__, rhte, i);
449 return rhte;
450}
451
452/**
453 * rhte_checkin() - releases a resource handle table entry
454 * @ctxi: Context owning the resource handle.
455 * @rhte: RHTE to release.
456 */
457void rhte_checkin(struct ctx_info *ctxi,
458 struct sisl_rht_entry *rhte)
459{
460 u32 rsrc_handle = rhte - ctxi->rht_start;
461
462 rhte->nmask = 0;
463 rhte->fp = 0;
464 ctxi->rht_out--;
465 ctxi->rht_lun[rsrc_handle] = NULL;
466 ctxi->rht_needs_ws[rsrc_handle] = false;
467}
468
469/**
470	 * rht_format1() - populates a RHTE for format 1
471 * @rhte: RHTE to populate.
472 * @lun_id: LUN ID of LUN associated with RHTE.
473 * @perm: Desired permissions for RHTE.
474	 * @port_sel: Port selection mask.
475 */
476static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm,
477 u32 port_sel)
478{
479 /*
480 * Populate the Format 1 RHT entry for direct access (physical
481 * LUN) using the synchronization sequence defined in the
482 * SISLite specification.
483 */
484 struct sisl_rht_entry_f1 dummy = { 0 };
485 struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;
486
487 memset(rhte_f1, 0, sizeof(*rhte_f1));
488 rhte_f1->fp = SISL_RHT_FP(1U, 0);
489 dma_wmb(); /* Make setting of format bit visible */
490
491 rhte_f1->lun_id = lun_id;
492 dma_wmb(); /* Make setting of LUN id visible */
493
494 /*
495 * Use a dummy RHT Format 1 entry to build the second dword
496 * of the entry that must be populated in a single write when
497 * enabled (valid bit set to TRUE).
498 */
499 dummy.valid = 0x80;
500 dummy.fp = SISL_RHT_FP(1U, perm);
501 dummy.port_sel = port_sel;
502 rhte_f1->dw = dummy.dw;
503
504 dma_wmb(); /* Make remaining RHT entry fields visible */
505}
506
507/**
508 * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode
509 * @gli: LUN to attach.
510 * @mode: Desired mode of the LUN.
511 * @locked: Mutex status on current thread.
512 *
513 * Return: 0 on success, -errno on failure
514 */
515int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked)
516{
517 int rc = 0;
518
519 if (!locked)
520 mutex_lock(&gli->mutex);
521
522 if (gli->mode == MODE_NONE)
523 gli->mode = mode;
524 else if (gli->mode != mode) {
525 pr_debug("%s: LUN operating in mode %d, requested mode %d\n",
526 __func__, gli->mode, mode);
527 rc = -EINVAL;
528 goto out;
529 }
530
531 gli->users++;
532 WARN_ON(gli->users <= 0);
533out:
534 pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n",
535 __func__, rc, gli->mode, gli->users);
536 if (!locked)
537 mutex_unlock(&gli->mutex);
538 return rc;
539}
540
541/**
542 * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode
543 * @gli: LUN to detach.
544 *
545 * When resetting the mode, terminate block allocation resources as they
546 * are no longer required (service is safe to call even when block allocation
547 * resources were not present - such as when transitioning from physical mode).
548 * These resources will be reallocated when needed (subsequent transition to
549 * virtual mode).
550 */
551void cxlflash_lun_detach(struct glun_info *gli)
552{
553 mutex_lock(&gli->mutex);
554 WARN_ON(gli->mode == MODE_NONE);
555 if (--gli->users == 0) {
556 gli->mode = MODE_NONE;
557 cxlflash_ba_terminate(&gli->blka.ba_lun);
558 }
559 pr_debug("%s: gli->users=%u\n", __func__, gli->users);
560 WARN_ON(gli->users < 0);
561 mutex_unlock(&gli->mutex);
562}
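
/*
 * Editor's note: taken together, cxlflash_lun_attach()/_detach()
 * arbitrate the LUN mode.  Any number of users may share a LUN in the
 * same mode (e.g. several MODE_VIRTUAL attaches), but a mismatched
 * attach (say, MODE_PHYSICAL while virtual users exist) fails with
 * -EINVAL; the mode resets to MODE_NONE only when the last user
 * detaches.
 */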
563
564/**
565 * _cxlflash_disk_release() - releases the specified resource entry
566 * @sdev: SCSI device associated with LUN.
567 * @ctxi: Context owning resources.
568 * @release: Release ioctl data structure.
569 *
570 * For LUNs in virtual mode, the virtual LUN associated with the specified
571 * resource handle is resized to 0 prior to releasing the RHTE. Note that the
572 * AFU sync should _not_ be performed when the context is sitting on the error
573 * recovery list. A context on the error recovery list is not known to the AFU
574 * due to reset. When the context is recovered, it will be reattached and made
575 * known again to the AFU.
576 *
577 * Return: 0 on success, -errno on failure
578 */
579int _cxlflash_disk_release(struct scsi_device *sdev,
580 struct ctx_info *ctxi,
581 struct dk_cxlflash_release *release)
582{
583 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
584 struct device *dev = &cfg->dev->dev;
585 struct llun_info *lli = sdev->hostdata;
586 struct glun_info *gli = lli->parent;
587 struct afu *afu = cfg->afu;
588 bool put_ctx = false;
589
590 struct dk_cxlflash_resize size;
591 res_hndl_t rhndl = release->rsrc_handle;
592
593 int rc = 0;
594 u64 ctxid = DECODE_CTXID(release->context_id),
595 rctxid = release->context_id;
596
597 struct sisl_rht_entry *rhte;
598 struct sisl_rht_entry_f1 *rhte_f1;
599
600 dev_dbg(dev, "%s: ctxid=%llu rhndl=0x%llx gli->mode=%u gli->users=%u\n",
601 __func__, ctxid, release->rsrc_handle, gli->mode, gli->users);
602
603 if (!ctxi) {
604 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
605 if (unlikely(!ctxi)) {
606 dev_dbg(dev, "%s: Bad context! (%llu)\n",
607 __func__, ctxid);
608 rc = -EINVAL;
609 goto out;
610 }
611
612 put_ctx = true;
613 }
614
615 rhte = get_rhte(ctxi, rhndl, lli);
616 if (unlikely(!rhte)) {
617 dev_dbg(dev, "%s: Bad resource handle! (%d)\n",
618 __func__, rhndl);
619 rc = -EINVAL;
620 goto out;
621 }
622
623 /*
624	 * For virtual LUNs, resize to 0. This clears the LXT_START
625	 * and LXT_CNT fields in the RHT entry and properly syncs
626	 * with the AFU.
627	 *
628	 * Afterwards, the remaining fields are cleared.
629 */
630 switch (gli->mode) {
631 case MODE_VIRTUAL:
632 marshal_rele_to_resize(release, &size);
633 size.req_size = 0;
634 rc = _cxlflash_vlun_resize(sdev, ctxi, &size);
635 if (rc) {
636 dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc);
637 goto out;
638 }
639
640 break;
641 case MODE_PHYSICAL:
642 /*
643 * Clear the Format 1 RHT entry for direct access
644 * (physical LUN) using the synchronization sequence
645 * defined in the SISLite specification.
646 */
647 rhte_f1 = (struct sisl_rht_entry_f1 *)rhte;
648
649 rhte_f1->valid = 0;
650 dma_wmb(); /* Make revocation of RHT entry visible */
651
652 rhte_f1->lun_id = 0;
653 dma_wmb(); /* Make clearing of LUN id visible */
654
655 rhte_f1->dw = 0;
656 dma_wmb(); /* Make RHT entry bottom-half clearing visible */
657
658 if (!ctxi->err_recovery_active)
659 cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
660 break;
661 default:
662 WARN(1, "Unsupported LUN mode!");
663 goto out;
664 }
665
666 rhte_checkin(ctxi, rhte);
667 cxlflash_lun_detach(gli);
668
669out:
670 if (put_ctx)
671 put_context(ctxi);
672 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
673 return rc;
674}
675
676int cxlflash_disk_release(struct scsi_device *sdev,
677 struct dk_cxlflash_release *release)
678{
679 return _cxlflash_disk_release(sdev, NULL, release);
680}
681
682/**
683 * destroy_context() - releases a context
684 * @cfg: Internal structure associated with the host.
685 * @ctxi: Context to release.
686 *
687 * Note that the rht_lun member of the context was cut from a single
688 * allocation when the context was created and therefore does not need
689 * to be explicitly freed. Also note that we conditionally check for the
690 * existence of the context control map before clearing the RHT registers
691 * and context capabilities because it is possible to destroy a context
692 * while the context is in the error state (previous mapping was removed
693 * [so we don't have to worry about clearing] and context is waiting for
694 * a new mapping).
695 */
696static void destroy_context(struct cxlflash_cfg *cfg,
697 struct ctx_info *ctxi)
698{
699 struct afu *afu = cfg->afu;
700
701 WARN_ON(!list_empty(&ctxi->luns));
702
703 /* Clear RHT registers and drop all capabilities for this context */
704 if (afu->afu_map && ctxi->ctrl_map) {
705 writeq_be(0, &ctxi->ctrl_map->rht_start);
706 writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
707 writeq_be(0, &ctxi->ctrl_map->ctx_cap);
708 }
709
710 /* Free memory associated with context */
711 free_page((ulong)ctxi->rht_start);
712 kfree(ctxi->rht_needs_ws);
713 kfree(ctxi->rht_lun);
714 kfree(ctxi);
715 atomic_dec_if_positive(&cfg->num_user_contexts);
716}
717
718/**
719 * create_context() - allocates and initializes a context
720 * @cfg: Internal structure associated with the host.
721 * @ctx: Previously obtained CXL context reference.
722 * @ctxid: Previously obtained process element associated with CXL context.
723 * @adap_fd: Previously obtained adapter fd associated with CXL context.
724 * @file: Previously obtained file associated with CXL context.
725 * @perms: User-specified permissions.
726 *
727 * The context's mutex is locked when an allocated context is returned.
728 *
729 * Return: Allocated context on success, NULL on failure
730 */
731static struct ctx_info *create_context(struct cxlflash_cfg *cfg,
732 struct cxl_context *ctx, int ctxid,
733 int adap_fd, struct file *file,
734 u32 perms)
735{
736 struct device *dev = &cfg->dev->dev;
737 struct afu *afu = cfg->afu;
738 struct ctx_info *ctxi = NULL;
739 struct llun_info **lli = NULL;
740 bool *ws = NULL;
741 struct sisl_rht_entry *rhte;
742
743 ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL);
744 lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL);
745 ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL);
746 if (unlikely(!ctxi || !lli || !ws)) {
747 dev_err(dev, "%s: Unable to allocate context!\n", __func__);
748 goto err;
749 }
750
751 rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL);
752 if (unlikely(!rhte)) {
753 dev_err(dev, "%s: Unable to allocate RHT!\n", __func__);
754 goto err;
755 }
756
757 ctxi->rht_lun = lli;
758 ctxi->rht_needs_ws = ws;
759 ctxi->rht_start = rhte;
760 ctxi->rht_perms = perms;
761
762 ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
763 ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
764 ctxi->lfd = adap_fd;
765 ctxi->pid = current->tgid; /* tgid = pid */
766 ctxi->ctx = ctx;
767 ctxi->file = file;
768 mutex_init(&ctxi->mutex);
769 INIT_LIST_HEAD(&ctxi->luns);
770 INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
771
772 atomic_inc(&cfg->num_user_contexts);
773 mutex_lock(&ctxi->mutex);
774out:
775 return ctxi;
776
777err:
778 kfree(ws);
779 kfree(lli);
780 kfree(ctxi);
781 ctxi = NULL;
782 goto out;
783}
784
785/**
786 * _cxlflash_disk_detach() - detaches a LUN from a context
787 * @sdev: SCSI device associated with LUN.
788 * @ctxi: Context owning resources.
789 * @detach: Detach ioctl data structure.
790 *
791 * As part of the detach, all per-context resources associated with the LUN
792 * are cleaned up. When detaching the last LUN for a context, the context
793 * itself is cleaned up and released.
794 *
795 * Return: 0 on success, -errno on failure
796 */
797static int _cxlflash_disk_detach(struct scsi_device *sdev,
798 struct ctx_info *ctxi,
799 struct dk_cxlflash_detach *detach)
800{
801 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
802 struct device *dev = &cfg->dev->dev;
803 struct llun_info *lli = sdev->hostdata;
804 struct lun_access *lun_access, *t;
805 struct dk_cxlflash_release rel;
806 bool put_ctx = false;
807
808 int i;
809 int rc = 0;
810 int lfd;
811 u64 ctxid = DECODE_CTXID(detach->context_id),
812 rctxid = detach->context_id;
813
814 dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid);
815
816 if (!ctxi) {
817 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
818 if (unlikely(!ctxi)) {
819 dev_dbg(dev, "%s: Bad context! (%llu)\n",
820 __func__, ctxid);
821 rc = -EINVAL;
822 goto out;
823 }
824
825 put_ctx = true;
826 }
827
828 /* Cleanup outstanding resources tied to this LUN */
829 if (ctxi->rht_out) {
830 marshal_det_to_rele(detach, &rel);
831 for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
832 if (ctxi->rht_lun[i] == lli) {
833 rel.rsrc_handle = i;
834 _cxlflash_disk_release(sdev, ctxi, &rel);
835 }
836
837 /* No need to loop further if we're done */
838 if (ctxi->rht_out == 0)
839 break;
840 }
841 }
842
843 /* Take our LUN out of context, free the node */
844 list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
845 if (lun_access->lli == lli) {
846 list_del(&lun_access->list);
847 kfree(lun_access);
848 lun_access = NULL;
849 break;
850 }
851
852 /* Tear down context following last LUN cleanup */
853 if (list_empty(&ctxi->luns)) {
854 ctxi->unavail = true;
855 mutex_unlock(&ctxi->mutex);
856 mutex_lock(&cfg->ctx_tbl_list_mutex);
857 mutex_lock(&ctxi->mutex);
858
859 /* Might not have been in error list so conditionally remove */
860 if (!list_empty(&ctxi->list))
861 list_del(&ctxi->list);
862 cfg->ctx_tbl[ctxid] = NULL;
863 mutex_unlock(&cfg->ctx_tbl_list_mutex);
864 mutex_unlock(&ctxi->mutex);
865
866 lfd = ctxi->lfd;
867 destroy_context(cfg, ctxi);
868 ctxi = NULL;
869 put_ctx = false;
870
871 /*
872 * As a last step, clean up external resources when not
873 * already on an external cleanup thread, i.e.: close(adap_fd).
874 *
875 * NOTE: this will free up the context from the CXL services,
876 * allowing it to dole out the same context_id on a future
877 * (or even currently in-flight) disk_attach operation.
878 */
879 if (lfd != -1)
880 sys_close(lfd);
881 }
882
883out:
884 if (put_ctx)
885 put_context(ctxi);
886 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
887 return rc;
888}
889
890static int cxlflash_disk_detach(struct scsi_device *sdev,
891 struct dk_cxlflash_detach *detach)
892{
893 return _cxlflash_disk_detach(sdev, NULL, detach);
894}
895
896/**
897 * cxlflash_cxl_release() - release handler for adapter file descriptor
898 * @inode: File-system inode associated with fd.
899 * @file: File installed with adapter file descriptor.
900 *
901 * This routine is the release handler for the fops registered with
902 * the CXL services on an initial attach for a context. It is called
903 * when a close is performed on the adapter file descriptor returned
904 * to the user. Programmatically, the user is not required to perform
905 * the close, as it is handled internally via the detach ioctl when
906 * a context is being removed. Note that nothing prevents the user
907 * from performing a close, but the user should be aware that doing
908 * so is considered catastrophic and subsequent usage of the superpipe
909 * API with previously saved off tokens will fail.
910 *
911 * When initiated from an external close (either by the user or via
912 * a process tear down), the routine derives the context reference
913 * and calls detach for each LUN associated with the context. The
914 * final detach operation will cause the context itself to be freed.
915 * Note that the saved off lfd is reset prior to calling detach to
916 * signify that the final detach should not perform a close.
917 *
918 * When initiated from a detach operation as part of the tear down
919 * of a context, the context is first completely freed and then the
920 * close is performed. This routine will fail to derive the context
921 * reference (due to the context having already been freed) and then
922 * call into the CXL release entry point.
923 *
924 * Thus, with exception to when the CXL process element (context id)
925 * lookup fails (a case that should theoretically never occur), every
926 * call into this routine results in a complete freeing of a context.
927 *
928 * As part of the detach, all per-context resources associated with the LUN
929 * are cleaned up. When detaching the last LUN for a context, the context
930 * itself is cleaned up and released.
931 *
932 * Return: 0 on success
933 */
934static int cxlflash_cxl_release(struct inode *inode, struct file *file)
935{
936 struct cxl_context *ctx = cxl_fops_get_context(file);
937 struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
938 cxl_fops);
939 struct device *dev = &cfg->dev->dev;
940 struct ctx_info *ctxi = NULL;
941 struct dk_cxlflash_detach detach = { { 0 }, 0 };
942 struct lun_access *lun_access, *t;
943 enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
944 int ctxid;
945
946 ctxid = cxl_process_element(ctx);
947 if (unlikely(ctxid < 0)) {
948 dev_err(dev, "%s: Context %p was closed! (%d)\n",
949 __func__, ctx, ctxid);
950 goto out;
951 }
952
953 ctxi = get_context(cfg, ctxid, file, ctrl);
954 if (unlikely(!ctxi)) {
955 ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE);
956 if (!ctxi) {
957 dev_dbg(dev, "%s: Context %d already free!\n",
958 __func__, ctxid);
959 goto out_release;
960 }
961
962 dev_dbg(dev, "%s: Another process owns context %d!\n",
963 __func__, ctxid);
964 put_context(ctxi);
965 goto out;
966 }
967
968 dev_dbg(dev, "%s: close(%d) for context %d\n",
969 __func__, ctxi->lfd, ctxid);
970
971 /* Reset the file descriptor to indicate we're on a close() thread */
972 ctxi->lfd = -1;
973 detach.context_id = ctxi->ctxid;
974 list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
975 _cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
976out_release:
977 cxl_fd_release(inode, file);
978out:
979 dev_dbg(dev, "%s: returning\n", __func__);
980 return 0;
981}
982
983/**
984 * unmap_context() - clears a previously established mapping
985 * @ctxi: Context owning the mapping.
986 *
987 * This routine is used to switch between the error notification page
988 * (dummy page of all 1's) and the real mapping (established by the CXL
989 * fault handler).
990 */
991static void unmap_context(struct ctx_info *ctxi)
992{
993 unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1);
994}
995
996/**
997 * get_err_page() - obtains and allocates the error notification page
998 *
999 * Return: error notification page on success, NULL on failure
1000 */
1001static struct page *get_err_page(void)
1002{
1003 struct page *err_page = global.err_page;
1004
1005 if (unlikely(!err_page)) {
1006 err_page = alloc_page(GFP_KERNEL);
1007 if (unlikely(!err_page)) {
1008 pr_err("%s: Unable to allocate err_page!\n", __func__);
1009 goto out;
1010 }
1011
1012 memset(page_address(err_page), -1, PAGE_SIZE);
1013
1014 /* Serialize update w/ other threads to avoid a leak */
1015 mutex_lock(&global.mutex);
1016 if (likely(!global.err_page))
1017 global.err_page = err_page;
1018 else {
1019 __free_page(err_page);
1020 err_page = global.err_page;
1021 }
1022 mutex_unlock(&global.mutex);
1023 }
1024
1025out:
1026 pr_debug("%s: returning err_page=%p\n", __func__, err_page);
1027 return err_page;
1028}
1029
1030/**
1031 * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor
1032 * @vma: VM area associated with mapping.
1033 * @vmf: VM fault associated with current fault.
1034 *
1035 * To support error notification via MMIO, faults are 'caught' by this routine
1036 * that was inserted before passing back the adapter file descriptor on attach.
1037 * When a fault occurs, this routine evaluates if error recovery is active and
1038 * if so, installs the error page to 'notify' the user about the error state.
1039 * During normal operation, the fault is simply handled by the original fault
1040 * handler that was installed by CXL services as part of initializing the
1041 * adapter file descriptor. The VMA's page protection bits are toggled to
1042 * indicate cached/not-cached depending on the memory backing the fault.
1043 *
1044 * Return: 0 on success, VM_FAULT_SIGBUS on failure
1045 */
1046static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1047{
1048 struct file *file = vma->vm_file;
1049 struct cxl_context *ctx = cxl_fops_get_context(file);
1050 struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
1051 cxl_fops);
1052 struct device *dev = &cfg->dev->dev;
1053 struct ctx_info *ctxi = NULL;
1054 struct page *err_page = NULL;
1055 enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
1056 int rc = 0;
1057 int ctxid;
1058
1059 ctxid = cxl_process_element(ctx);
1060 if (unlikely(ctxid < 0)) {
1061 dev_err(dev, "%s: Context %p was closed! (%d)\n",
1062 __func__, ctx, ctxid);
1063 goto err;
1064 }
1065
1066 ctxi = get_context(cfg, ctxid, file, ctrl);
1067 if (unlikely(!ctxi)) {
1068 dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid);
1069 goto err;
1070 }
1071
1072 dev_dbg(dev, "%s: fault(%d) for context %d\n",
1073 __func__, ctxi->lfd, ctxid);
1074
1075 if (likely(!ctxi->err_recovery_active)) {
1076 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1077 rc = ctxi->cxl_mmap_vmops->fault(vma, vmf);
1078 } else {
1079 dev_dbg(dev, "%s: err recovery active, use err_page!\n",
1080 __func__);
1081
1082 err_page = get_err_page();
1083 if (unlikely(!err_page)) {
1084 dev_err(dev, "%s: Could not obtain error page!\n",
1085 __func__);
1086 rc = VM_FAULT_RETRY;
1087 goto out;
1088 }
1089
1090 get_page(err_page);
1091 vmf->page = err_page;
1092 vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
1093 }
1094
1095out:
1096 if (likely(ctxi))
1097 put_context(ctxi);
1098 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1099 return rc;
1100
1101err:
1102 rc = VM_FAULT_SIGBUS;
1103 goto out;
1104}
1105
1106/*
1107 * Local MMAP vmops to 'catch' faults
1108 */
1109static const struct vm_operations_struct cxlflash_mmap_vmops = {
1110 .fault = cxlflash_mmap_fault,
1111};
1112
1113/**
1114 * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor
1115 * @file: File installed with adapter file descriptor.
1116 * @vma: VM area associated with mapping.
1117 *
1118 * Installs local mmap vmops to 'catch' faults for error notification support.
1119 *
1120 * Return: 0 on success, -errno on failure
1121 */
1122static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
1123{
1124 struct cxl_context *ctx = cxl_fops_get_context(file);
1125 struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg,
1126 cxl_fops);
1127 struct device *dev = &cfg->dev->dev;
1128 struct ctx_info *ctxi = NULL;
1129 enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
1130 int ctxid;
1131 int rc = 0;
1132
1133 ctxid = cxl_process_element(ctx);
1134 if (unlikely(ctxid < 0)) {
1135 dev_err(dev, "%s: Context %p was closed! (%d)\n",
1136 __func__, ctx, ctxid);
1137 rc = -EIO;
1138 goto out;
1139 }
1140
1141 ctxi = get_context(cfg, ctxid, file, ctrl);
1142 if (unlikely(!ctxi)) {
1143 dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid);
1144 rc = -EIO;
1145 goto out;
1146 }
1147
1148 dev_dbg(dev, "%s: mmap(%d) for context %d\n",
1149 __func__, ctxi->lfd, ctxid);
1150
1151 rc = cxl_fd_mmap(file, vma);
1152 if (likely(!rc)) {
1153		/* Insert ourselves in the mmap fault handler path */
1154 ctxi->cxl_mmap_vmops = vma->vm_ops;
1155 vma->vm_ops = &cxlflash_mmap_vmops;
1156 }
1157
1158out:
1159 if (likely(ctxi))
1160 put_context(ctxi);
1161 return rc;
1162}
1163
1164/*
1165 * Local fops for adapter file descriptor
1166 */
1167static const struct file_operations cxlflash_cxl_fops = {
1168 .owner = THIS_MODULE,
1169 .mmap = cxlflash_cxl_mmap,
1170 .release = cxlflash_cxl_release,
1171};
1172
1173/**
1174 * cxlflash_mark_contexts_error() - move contexts to error state and list
1175 * @cfg: Internal structure associated with the host.
1176 *
1177 * A context is only moved over to the error list when there are no outstanding
1178 * references to it. This ensures that a running operation has completed.
1179 *
1180 * Return: 0 on success, -errno on failure
1181 */
1182int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg)
1183{
1184 int i, rc = 0;
1185 struct ctx_info *ctxi = NULL;
1186
1187 mutex_lock(&cfg->ctx_tbl_list_mutex);
1188
1189 for (i = 0; i < MAX_CONTEXT; i++) {
1190 ctxi = cfg->ctx_tbl[i];
1191 if (ctxi) {
1192 mutex_lock(&ctxi->mutex);
1193 cfg->ctx_tbl[i] = NULL;
1194 list_add(&ctxi->list, &cfg->ctx_err_recovery);
1195 ctxi->err_recovery_active = true;
1196 ctxi->ctrl_map = NULL;
1197 unmap_context(ctxi);
1198 mutex_unlock(&ctxi->mutex);
1199 }
1200 }
1201
1202 mutex_unlock(&cfg->ctx_tbl_list_mutex);
1203 return rc;
1204}
1205
1206/*
1207 * Dummy NULL fops
1208 */
1209static const struct file_operations null_fops = {
1210 .owner = THIS_MODULE,
1211};
1212
1213/**
1214 * cxlflash_disk_attach() - attach a LUN to a context
1215 * @sdev: SCSI device associated with LUN.
1216 * @attach: Attach ioctl data structure.
1217 *
1218 * Creates a context and attaches LUN to it. A LUN can only be attached
1219 * one time to a context (subsequent attaches for the same context/LUN pair
1220 * are not supported). Additional LUNs can be attached to a context by
1221 * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header.
1222 *
1223 * Return: 0 on success, -errno on failure
1224 */
1225static int cxlflash_disk_attach(struct scsi_device *sdev,
1226 struct dk_cxlflash_attach *attach)
1227{
1228 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
1229 struct device *dev = &cfg->dev->dev;
1230 struct afu *afu = cfg->afu;
1231 struct llun_info *lli = sdev->hostdata;
1232 struct glun_info *gli = lli->parent;
1233 struct cxl_ioctl_start_work *work;
1234 struct ctx_info *ctxi = NULL;
1235 struct lun_access *lun_access = NULL;
1236 int rc = 0;
1237 u32 perms;
1238 int ctxid = -1;
1239 u64 rctxid = 0UL;
1240 struct file *file;
1241
1242 struct cxl_context *ctx;
1243
1244 int fd = -1;
1245
1246 /* On first attach set fileops */
1247 if (atomic_read(&cfg->num_user_contexts) == 0)
1248 cfg->cxl_fops = cxlflash_cxl_fops;
1249
1250 if (attach->num_interrupts > 4) {
1251 dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n",
1252 __func__, attach->num_interrupts);
1253 rc = -EINVAL;
1254 goto out;
1255 }
1256
1257 if (gli->max_lba == 0) {
1258 dev_dbg(dev, "%s: No capacity info for this LUN (%016llX)\n",
1259 __func__, lli->lun_id[sdev->channel]);
1260 rc = read_cap16(sdev, lli);
1261 if (rc) {
1262 dev_err(dev, "%s: Invalid device! (%d)\n",
1263 __func__, rc);
1264 rc = -ENODEV;
1265 goto out;
1266 }
1267 dev_dbg(dev, "%s: LBA = %016llX\n", __func__, gli->max_lba);
1268 dev_dbg(dev, "%s: BLK_LEN = %08X\n", __func__, gli->blk_len);
1269 }
1270
1271 if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) {
1272 rctxid = attach->context_id;
1273 ctxi = get_context(cfg, rctxid, NULL, 0);
1274 if (!ctxi) {
1275 dev_dbg(dev, "%s: Bad context! (%016llX)\n",
1276 __func__, rctxid);
1277 rc = -EINVAL;
1278 goto out;
1279 }
1280
1281 list_for_each_entry(lun_access, &ctxi->luns, list)
1282 if (lun_access->lli == lli) {
1283 dev_dbg(dev, "%s: Already attached!\n",
1284 __func__);
1285 rc = -EINVAL;
1286 goto out;
1287 }
1288 }
1289
1290 lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL);
1291 if (unlikely(!lun_access)) {
1292 dev_err(dev, "%s: Unable to allocate lun_access!\n", __func__);
1293 rc = -ENOMEM;
1294 goto out;
1295 }
1296
1297 lun_access->lli = lli;
1298 lun_access->sdev = sdev;
1299
1300 /* Non-NULL context indicates reuse */
1301 if (ctxi) {
1302 dev_dbg(dev, "%s: Reusing context for LUN! (%016llX)\n",
1303 __func__, rctxid);
1304 list_add(&lun_access->list, &ctxi->luns);
1305 fd = ctxi->lfd;
1306 goto out_attach;
1307 }
1308
1309 ctx = cxl_dev_context_init(cfg->dev);
1310 if (unlikely(IS_ERR_OR_NULL(ctx))) {
1311 dev_err(dev, "%s: Could not initialize context %p\n",
1312 __func__, ctx);
1313 rc = -ENODEV;
1314 goto err0;
1315 }
1316
1317 ctxid = cxl_process_element(ctx);
1318 if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) {
1319 dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
1320 rc = -EPERM;
1321 goto err1;
1322 }
1323
1324 file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
1325 if (unlikely(fd < 0)) {
1326 rc = -ENODEV;
1327 dev_err(dev, "%s: Could not get file descriptor\n", __func__);
1328 goto err1;
1329 }
1330
1331 /* Translate read/write O_* flags from fcntl.h to AFU permission bits */
1332 perms = SISL_RHT_PERM(attach->hdr.flags + 1);
1333
1334 ctxi = create_context(cfg, ctx, ctxid, fd, file, perms);
1335 if (unlikely(!ctxi)) {
1336 dev_err(dev, "%s: Failed to create context! (%d)\n",
1337 __func__, ctxid);
1338 goto err2;
1339 }
1340
1341 work = &ctxi->work;
1342 work->num_interrupts = attach->num_interrupts;
1343 work->flags = CXL_START_WORK_NUM_IRQS;
1344
1345 rc = cxl_start_work(ctx, work);
1346 if (unlikely(rc)) {
1347 dev_dbg(dev, "%s: Could not start context rc=%d\n",
1348 __func__, rc);
1349 goto err3;
1350 }
1351
1352 rc = afu_attach(cfg, ctxi);
1353 if (unlikely(rc)) {
1354 dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
1355 goto err4;
1356 }
1357
1358 /*
1359 * No error paths after this point. Once the fd is installed it's
1360 * visible to user space and can't be undone safely on this thread.
1361 * There is no need to worry about a deadlock here because no one
1362 * knows about us yet; we can be the only one holding our mutex.
1363 */
1364 list_add(&lun_access->list, &ctxi->luns);
1365 mutex_unlock(&ctxi->mutex);
1366 mutex_lock(&cfg->ctx_tbl_list_mutex);
1367 mutex_lock(&ctxi->mutex);
1368 cfg->ctx_tbl[ctxid] = ctxi;
1369 mutex_unlock(&cfg->ctx_tbl_list_mutex);
1370 fd_install(fd, file);
1371
1372out_attach:
1373 attach->hdr.return_flags = 0;
1374 attach->context_id = ctxi->ctxid;
1375 attach->block_size = gli->blk_len;
1376 attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
1377 attach->last_lba = gli->max_lba;
1378 attach->max_xfer = (sdev->host->max_sectors * 512) / gli->blk_len;
1379
1380out:
1381 attach->adap_fd = fd;
1382
1383 if (ctxi)
1384 put_context(ctxi);
1385
1386 dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n",
1387 __func__, ctxid, fd, attach->block_size, rc, attach->last_lba);
1388 return rc;
1389
1390err4:
1391 cxl_stop_context(ctx);
1392err3:
1393 put_context(ctxi);
1394 destroy_context(cfg, ctxi);
1395 ctxi = NULL;
1396err2:
1397 /*
1398 * Here, we're overriding the fops with a dummy all-NULL fops because
1399 * fput() calls the release fop, which will cause us to mistakenly
1400 * call into the CXL code. Rather than try to add yet more complexity
1401 * to that routine (cxlflash_cxl_release) we should try to fix the
1402 * issue here.
1403 */
1404 file->f_op = &null_fops;
1405 fput(file);
1406 put_unused_fd(fd);
1407 fd = -1;
1408err1:
1409 cxl_release_context(ctx);
1410err0:
1411 kfree(lun_access);
1412 goto out;
1413}
1414
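/*
 * Editor's sketch (illustration only, not part of the driver): a minimal
 * user-space attach against the ioctl above. The /dev/sdX path is
 * hypothetical; the structures come from uapi/scsi/cxlflash_ioctl.h and
 * the usual fcntl/ioctl user-space headers are assumed.
 */
#if 0	/* illustration only */
	struct dk_cxlflash_attach attach = { 0 };
	int fd = open("/dev/sdX", O_RDWR);	/* hypothetical LUN */

	attach.hdr.version = DK_CXLFLASH_VERSION_0;
	attach.num_interrupts = 4;		/* driver cap, per above */
	if (fd >= 0 && !ioctl(fd, DK_CXLFLASH_ATTACH, &attach)) {
		/*
		 * attach.context_id, attach.adap_fd, attach.block_size and
		 * attach.last_lba are now valid; attach.adap_fd backs the
		 * MMIO mapping serviced by cxlflash_cxl_mmap() above.
		 */
	}
#endif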
1415/**
1416 * recover_context() - recovers a context in error
1417 * @cfg: Internal structure associated with the host.
1418 * @ctxi: Context to release.
1419 *
1420 * Reestablishes the state for a context-in-error.
1421 *
1422 * Return: 0 on success, -errno on failure
1423 */
1424static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
1425{
1426 struct device *dev = &cfg->dev->dev;
1427 int rc = 0;
1428 int old_fd, fd = -1;
1429 int ctxid = -1;
1430 struct file *file;
1431 struct cxl_context *ctx;
1432 struct afu *afu = cfg->afu;
1433
1434 ctx = cxl_dev_context_init(cfg->dev);
1435 if (unlikely(IS_ERR_OR_NULL(ctx))) {
1436 dev_err(dev, "%s: Could not initialize context %p\n",
1437 __func__, ctx);
1438 rc = -ENODEV;
1439 goto out;
1440 }
1441
1442 ctxid = cxl_process_element(ctx);
1443 if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) {
1444 dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid);
1445 rc = -EPERM;
1446 goto err1;
1447 }
1448
1449 file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd);
1450 if (unlikely(fd < 0)) {
1451 rc = -ENODEV;
1452 dev_err(dev, "%s: Could not get file descriptor\n", __func__);
1453 goto err1;
1454 }
1455
1456 rc = cxl_start_work(ctx, &ctxi->work);
1457 if (unlikely(rc)) {
1458 dev_dbg(dev, "%s: Could not start context rc=%d\n",
1459 __func__, rc);
1460 goto err2;
1461 }
1462
1463 /* Update with new MMIO area based on updated context id */
1464 ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
1465
1466 rc = afu_attach(cfg, ctxi);
1467 if (rc) {
1468 dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc);
1469 goto err3;
1470 }
1471
1472 /*
1473 * No error paths after this point. Once the fd is installed it's
1474 * visible to user space and can't be undone safely on this thread.
1475 */
1476 old_fd = ctxi->lfd;
1477 ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
1478 ctxi->lfd = fd;
1479 ctxi->ctx = ctx;
1480 ctxi->file = file;
1481
1482 /*
1483 * Put context back in table (note the reinit of the context list);
1484 * we must first drop the context's mutex and then acquire it in
1485 * order with the table/list mutex to avoid a deadlock - safe to do
1486 * here because no one can find us at this moment in time.
1487 */
1488 mutex_unlock(&ctxi->mutex);
1489 mutex_lock(&cfg->ctx_tbl_list_mutex);
1490 mutex_lock(&ctxi->mutex);
1491 list_del_init(&ctxi->list);
1492 cfg->ctx_tbl[ctxid] = ctxi;
1493 mutex_unlock(&cfg->ctx_tbl_list_mutex);
1494 fd_install(fd, file);
1495
1496 /* Release the original adapter fd and associated CXL resources */
1497 sys_close(old_fd);
1498out:
1499 dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
1500 __func__, ctxid, fd, rc);
1501 return rc;
1502
1503err3:
1504 cxl_stop_context(ctx);
1505err2:
1506 fput(file);
1507 put_unused_fd(fd);
1508err1:
1509 cxl_release_context(ctx);
1510 goto out;
1511}
1512
1513/**
1514 * check_state() - checks and responds to the current adapter state
1515 * @cfg: Internal structure associated with the host.
1516 *
1517 * This routine can block and should only be used in process context.
1518 * Note that when waking up from waiting in limbo, the state is unknown
1519 * and must be checked again before proceeding.
1520 *
1521 * Return: 0 on success, -errno on failure
1522 */
1523static int check_state(struct cxlflash_cfg *cfg)
1524{
1525 struct device *dev = &cfg->dev->dev;
1526 int rc = 0;
1527
1528retry:
1529 switch (cfg->state) {
1530 case STATE_LIMBO:
1531 dev_dbg(dev, "%s: Limbo, going to wait...\n", __func__);
1532 rc = wait_event_interruptible(cfg->limbo_waitq,
1533 cfg->state != STATE_LIMBO);
1534 if (unlikely(rc))
1535 break;
1536 goto retry;
1537 case STATE_FAILTERM:
1538 dev_dbg(dev, "%s: Failed/Terminating!\n", __func__);
1539 rc = -ENODEV;
1540 break;
1541 default:
1542 break;
1543 }
1544
1545 return rc;
1546}
1547
1548/**
1549 * cxlflash_afu_recover() - initiates AFU recovery
1550 * @sdev: SCSI device associated with LUN.
1551 * @recover: Recover ioctl data structure.
1552 *
1553 * Only a single recovery is allowed at a time to avoid exhausting CXL
1554 * resources (leading to recovery failure) in the event that we're up
1555 * against the maximum number of contexts limit. For similar reasons,
1556 * a context recovery is retried if there are multiple recoveries taking
1557 * place at the same time and the failure was due to CXL services being
1558 * unable to keep up.
1559 *
1560 * Because a user can detect an error condition before the kernel, it is
1561 * quite possible for this routine to act as the kernel's EEH detection
1562 * source (MMIO read of mbox_r). Because of this, there is a window of
1563 * time where an EEH might have been detected but not yet 'serviced'
1564 * (callback invoked, causing the device to enter limbo state). To avoid
1565 * looping in this routine during that window, a 1 second sleep is in place
1566 * between the time the MMIO failure is detected and the time a wait on the
1567 * limbo wait queue is attempted via check_state().
1568 *
1569 * Return: 0 on success, -errno on failure
1570 */
1571static int cxlflash_afu_recover(struct scsi_device *sdev,
1572 struct dk_cxlflash_recover_afu *recover)
1573{
1574 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
1575 struct device *dev = &cfg->dev->dev;
1576 struct llun_info *lli = sdev->hostdata;
1577 struct afu *afu = cfg->afu;
1578 struct ctx_info *ctxi = NULL;
1579 struct mutex *mutex = &cfg->ctx_recovery_mutex;
1580 u64 ctxid = DECODE_CTXID(recover->context_id),
1581 rctxid = recover->context_id;
1582 long reg;
1583 int lretry = 20; /* up to 2 seconds */
1584 int rc = 0;
1585
1586 atomic_inc(&cfg->recovery_threads);
1587 rc = mutex_lock_interruptible(mutex);
1588 if (rc)
1589 goto out;
1590
1591 dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n",
1592 __func__, recover->reason, rctxid);
1593
1594retry:
1595 /* Ensure that this process is attached to the context */
1596 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
1597 if (unlikely(!ctxi)) {
1598 dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
1599 rc = -EINVAL;
1600 goto out;
1601 }
1602
1603 if (ctxi->err_recovery_active) {
1604retry_recover:
1605 rc = recover_context(cfg, ctxi);
1606 if (unlikely(rc)) {
1607 dev_err(dev, "%s: Recovery failed for context %llu (rc=%d)\n",
1608 __func__, ctxid, rc);
1609 if ((rc == -ENODEV) &&
1610 ((atomic_read(&cfg->recovery_threads) > 1) ||
1611 (lretry--))) {
1612 dev_dbg(dev, "%s: Going to try again!\n",
1613 __func__);
1614 mutex_unlock(mutex);
1615 msleep(100);
1616 rc = mutex_lock_interruptible(mutex);
1617 if (rc)
1618 goto out;
1619 goto retry_recover;
1620 }
1621
1622 goto out;
1623 }
1624
1625 ctxi->err_recovery_active = false;
1626 recover->context_id = ctxi->ctxid;
1627 recover->adap_fd = ctxi->lfd;
1628 recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
1629 recover->hdr.return_flags |=
1630 DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
1631 goto out;
1632 }
1633
1634 /* Test if in error state */
1635 reg = readq_be(&afu->ctrl_map->mbox_r);
1636 if (reg == -1) {
1637 dev_dbg(dev, "%s: MMIO read fail! Wait for recovery...\n",
1638 __func__);
1639 mutex_unlock(&ctxi->mutex);
1640 ctxi = NULL;
1641 ssleep(1);
1642 rc = check_state(cfg);
1643 if (unlikely(rc))
1644 goto out;
1645 goto retry;
1646 }
1647
1648 dev_dbg(dev, "%s: MMIO working, no recovery required!\n", __func__);
1649out:
1650 if (likely(ctxi))
1651 put_context(ctxi);
1652 mutex_unlock(mutex);
1653 atomic_dec_if_positive(&cfg->recovery_threads);
1654 return rc;
1655}
1656
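/*
 * Editor's sketch (illustration only, not part of the driver), continuing
 * the attach example above: once user space observes all-F MMIO data it
 * requests recovery. When DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET comes back
 * in hdr.return_flags, the context was reset and the new context_id and
 * adap_fd must be adopted (including redoing any MMIO mapping).
 */
#if 0	/* illustration only */
	struct dk_cxlflash_recover_afu recover = { 0 };

	recover.hdr.version = DK_CXLFLASH_VERSION_0;
	recover.context_id = attach.context_id;
	if (!ioctl(fd, DK_CXLFLASH_RECOVER_AFU, &recover) &&
	    (recover.hdr.return_flags & DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET)) {
		/* remap MMIO against recover.adap_fd / recover.mmio_size */
	}
#endif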
1657/**
1658 * process_sense() - evaluates and processes sense data
1659 * @sdev: SCSI device associated with LUN.
1660 * @verify: Verify ioctl data structure.
1661 *
1662 * Return: 0 on success, -errno on failure
1663 */
1664static int process_sense(struct scsi_device *sdev,
1665 struct dk_cxlflash_verify *verify)
1666{
1667 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
1668 struct device *dev = &cfg->dev->dev;
1669 struct llun_info *lli = sdev->hostdata;
1670 struct glun_info *gli = lli->parent;
1671 u64 prev_lba = gli->max_lba;
1672 struct scsi_sense_hdr sshdr = { 0 };
1673 int rc = 0;
1674
1675 rc = scsi_normalize_sense((const u8 *)&verify->sense_data,
1676 DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr);
1677 if (!rc) {
1678 dev_err(dev, "%s: Failed to normalize sense data!\n", __func__);
1679 rc = -EINVAL;
1680 goto out;
1681 }
1682
1683 switch (sshdr.sense_key) {
1684 case NO_SENSE:
1685 case RECOVERED_ERROR:
1686 /* fall through */
1687 case NOT_READY:
1688 break;
1689 case UNIT_ATTENTION:
1690 switch (sshdr.asc) {
1691 case 0x29: /* Power on Reset or Device Reset */
1692 /* fall through */
1693 case 0x2A: /* Device settings/capacity changed */
1694 rc = read_cap16(sdev, lli);
1695 if (rc) {
1696 rc = -ENODEV;
1697 break;
1698 }
1699 if (prev_lba != gli->max_lba)
1700 dev_dbg(dev, "%s: Capacity changed old=%lld "
1701 "new=%lld\n", __func__, prev_lba,
1702 gli->max_lba);
1703 break;
1704 case 0x3F: /* Report LUNs changed, Rescan. */
1705 scsi_scan_host(cfg->host);
1706 break;
1707 default:
1708 rc = -EIO;
1709 break;
1710 }
1711 break;
1712 default:
1713 rc = -EIO;
1714 break;
1715 }
1716out:
1717 dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__,
1718 sshdr.sense_key, sshdr.asc, sshdr.ascq, rc);
1719 return rc;
1720}
1721
1722/**
1723 * cxlflash_disk_verify() - verifies a LUN is the same and handle size changes
1724 * @sdev: SCSI device associated with LUN.
1725 * @verify: Verify ioctl data structure.
1726 *
1727 * Return: 0 on success, -errno on failure
1728 */
1729static int cxlflash_disk_verify(struct scsi_device *sdev,
1730 struct dk_cxlflash_verify *verify)
1731{
1732 int rc = 0;
1733 struct ctx_info *ctxi = NULL;
1734 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
1735 struct device *dev = &cfg->dev->dev;
1736 struct llun_info *lli = sdev->hostdata;
1737 struct glun_info *gli = lli->parent;
1738 struct sisl_rht_entry *rhte = NULL;
1739 res_hndl_t rhndl = verify->rsrc_handle;
1740 u64 ctxid = DECODE_CTXID(verify->context_id),
1741 rctxid = verify->context_id;
1742 u64 last_lba = 0;
1743
1744 dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llX, hint=%016llX, "
1745 "flags=%016llX\n", __func__, ctxid, verify->rsrc_handle,
1746 verify->hint, verify->hdr.flags);
1747
1748 ctxi = get_context(cfg, rctxid, lli, 0);
1749 if (unlikely(!ctxi)) {
1750 dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
1751 rc = -EINVAL;
1752 goto out;
1753 }
1754
1755 rhte = get_rhte(ctxi, rhndl, lli);
1756 if (unlikely(!rhte)) {
1757 dev_dbg(dev, "%s: Bad resource handle! (%d)\n",
1758 __func__, rhndl);
1759 rc = -EINVAL;
1760 goto out;
1761 }
1762
1763 /*
1764 * Look at the hint/sense to see if it requires us to redrive
1765 * inquiry (i.e. the Unit attention is due to the WWN changing).
1766 */
1767 if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) {
1768 rc = process_sense(sdev, verify);
1769 if (unlikely(rc)) {
1770 dev_err(dev, "%s: Failed to validate sense data (%d)\n",
1771 __func__, rc);
1772 goto out;
1773 }
1774 }
1775
1776 switch (gli->mode) {
1777 case MODE_PHYSICAL:
1778 last_lba = gli->max_lba;
1779 break;
1780 case MODE_VIRTUAL:
1781 /* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */
1782 last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len);
1783 last_lba /= CXLFLASH_BLOCK_SIZE;
1784 last_lba--;
1785 break;
1786 default:
1787 WARN(1, "Unsupported LUN mode!");
1788 }
1789
1790 verify->last_lba = last_lba;
1791
1792out:
1793 if (likely(ctxi))
1794 put_context(ctxi);
1795 dev_dbg(dev, "%s: returning rc=%d llba=%llX\n",
1796 __func__, rc, verify->last_lba);
1797 return rc;
1798}
1799
1800/**
1801 * decode_ioctl() - translates an encoded ioctl to an easily identifiable string
1802 * @cmd: The ioctl command to decode.
1803 *
1804 * Return: A string identifying the decoded ioctl.
1805 */
1806static char *decode_ioctl(int cmd)
1807{
1808 switch (cmd) {
1809 case DK_CXLFLASH_ATTACH:
1810 return __stringify_1(DK_CXLFLASH_ATTACH);
1811 case DK_CXLFLASH_USER_DIRECT:
1812 return __stringify_1(DK_CXLFLASH_USER_DIRECT);
1813 case DK_CXLFLASH_USER_VIRTUAL:
1814 return __stringify_1(DK_CXLFLASH_USER_VIRTUAL);
1815 case DK_CXLFLASH_VLUN_RESIZE:
1816 return __stringify_1(DK_CXLFLASH_VLUN_RESIZE);
1817 case DK_CXLFLASH_RELEASE:
1818 return __stringify_1(DK_CXLFLASH_RELEASE);
1819 case DK_CXLFLASH_DETACH:
1820 return __stringify_1(DK_CXLFLASH_DETACH);
1821 case DK_CXLFLASH_VERIFY:
1822 return __stringify_1(DK_CXLFLASH_VERIFY);
1823 case DK_CXLFLASH_VLUN_CLONE:
1824 return __stringify_1(DK_CXLFLASH_VLUN_CLONE);
1825 case DK_CXLFLASH_RECOVER_AFU:
1826 return __stringify_1(DK_CXLFLASH_RECOVER_AFU);
1827 case DK_CXLFLASH_MANAGE_LUN:
1828 return __stringify_1(DK_CXLFLASH_MANAGE_LUN);
1829 }
1830
1831 return "UNKNOWN";
1832}
1833
1834/**
1835 * cxlflash_disk_direct_open() - opens a direct (physical) disk
1836 * @sdev: SCSI device associated with LUN.
1837 * @arg: UDirect ioctl data structure.
1838 *
1839 * On successful return, the user is informed of the resource handle
1840 * to be used to identify the direct lun and the size (in blocks) of
1841 * the direct lun in last LBA format.
1842 *
1843 * Return: 0 on success, -errno on failure
1844 */
1845static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
1846{
1847 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
1848 struct device *dev = &cfg->dev->dev;
1849 struct afu *afu = cfg->afu;
1850 struct llun_info *lli = sdev->hostdata;
1851 struct glun_info *gli = lli->parent;
1852
1853 struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;
1854
1855 u64 ctxid = DECODE_CTXID(pphys->context_id),
1856 rctxid = pphys->context_id;
1857 u64 lun_size = 0;
1858 u64 last_lba = 0;
1859 u64 rsrc_handle = -1;
1860 u32 port = CHAN2PORT(sdev->channel);
1861
1862 int rc = 0;
1863
1864 struct ctx_info *ctxi = NULL;
1865 struct sisl_rht_entry *rhte = NULL;
1866
1867 pr_debug("%s: ctxid=%llu ls=0x%llx\n", __func__, ctxid, lun_size);
1868
1869 rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false);
1870 if (unlikely(rc)) {
1871 dev_dbg(dev, "%s: Failed to attach to LUN! (PHYSICAL)\n",
1872 __func__);
1873 goto out;
1874 }
1875
1876 ctxi = get_context(cfg, rctxid, lli, 0);
1877 if (unlikely(!ctxi)) {
1878 dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
1879 rc = -EINVAL;
1880 goto err1;
1881 }
1882
1883 rhte = rhte_checkout(ctxi, lli);
1884 if (unlikely(!rhte)) {
1885 dev_dbg(dev, "%s: too many opens for this context\n", __func__);
1886 rc = -EMFILE; /* too many opens */
1887 goto err1;
1888 }
1889
1890 rsrc_handle = (rhte - ctxi->rht_start);
1891
1892 rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);
1893 cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);
1894
1895 last_lba = gli->max_lba;
1896 pphys->hdr.return_flags = 0;
1897 pphys->last_lba = last_lba;
1898 pphys->rsrc_handle = rsrc_handle;
1899
1900out:
1901 if (likely(ctxi))
1902 put_context(ctxi);
1903 dev_dbg(dev, "%s: returning handle 0x%llx rc=%d llba %lld\n",
1904 __func__, rsrc_handle, rc, last_lba);
1905 return rc;
1906
1907err1:
1908 cxlflash_lun_detach(gli);
1909 goto out;
1910}
1911
1912/**
1913 * ioctl_common() - common IOCTL handler for driver
1914 * @sdev: SCSI device associated with LUN.
1915 * @cmd: IOCTL command.
1916 *
1917 * Handles common fencing operations that are valid for multiple ioctls. Always
1918 * allow through ioctls that are cleanup oriented in nature, even when operating
1919 * in a failed/terminating state.
1920 *
1921 * Return: 0 on success, -errno on failure
1922 */
1923static int ioctl_common(struct scsi_device *sdev, int cmd)
1924{
1925 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
1926 struct device *dev = &cfg->dev->dev;
1927 struct llun_info *lli = sdev->hostdata;
1928 int rc = 0;
1929
1930 if (unlikely(!lli)) {
1931 dev_dbg(dev, "%s: Unknown LUN\n", __func__);
1932 rc = -EINVAL;
1933 goto out;
1934 }
1935
1936 rc = check_state(cfg);
1937 if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) {
1938 switch (cmd) {
1939 case DK_CXLFLASH_VLUN_RESIZE:
1940 case DK_CXLFLASH_RELEASE:
1941 case DK_CXLFLASH_DETACH:
1942 dev_dbg(dev, "%s: Command override! (%d)\n",
1943 __func__, rc);
1944 rc = 0;
1945 break;
1946 }
1947 }
1948out:
1949 return rc;
1950}
1951
1952/**
1953 * cxlflash_ioctl() - IOCTL handler for driver
1954 * @sdev: SCSI device associated with LUN.
1955 * @cmd: IOCTL command.
1956 * @arg: Userspace ioctl data structure.
1957 *
1958 * Return: 0 on success, -errno on failure
1959 */
1960int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
1961{
1962 typedef int (*sioctl) (struct scsi_device *, void *);
1963
1964 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
1965 struct device *dev = &cfg->dev->dev;
1966 struct afu *afu = cfg->afu;
1967 struct dk_cxlflash_hdr *hdr;
1968 char buf[sizeof(union cxlflash_ioctls)];
1969 size_t size = 0;
1970 bool known_ioctl = false;
1971 int idx;
1972 int rc = 0;
1973 struct Scsi_Host *shost = sdev->host;
1974 sioctl do_ioctl = NULL;
1975
1976 static const struct {
1977 size_t size;
1978 sioctl ioctl;
1979 } ioctl_tbl[] = { /* NOTE: order matters here */
1980 {sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach},
1981 {sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open},
1982 {sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release},
1983 {sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach},
1984 {sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify},
1985 {sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover},
1986 {sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun},
1987 {sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open},
1988 {sizeof(struct dk_cxlflash_resize), (sioctl)cxlflash_vlun_resize},
1989 {sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone},
1990 };
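	/*
	 * The table above is indexed by ioctl number relative to
	 * DK_CXLFLASH_ATTACH via the _IOC_NR() math below, which is why
	 * its order must mirror the definitions in cxlflash_ioctl.h.
	 */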
1991
1992 /* Restrict command set to physical support only for internal LUN */
1993 if (afu->internal_lun)
1994 switch (cmd) {
1995 case DK_CXLFLASH_RELEASE:
1996 case DK_CXLFLASH_USER_VIRTUAL:
1997 case DK_CXLFLASH_VLUN_RESIZE:
1998 case DK_CXLFLASH_VLUN_CLONE:
1999 dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n",
2000 __func__, decode_ioctl(cmd), afu->internal_lun);
2001 rc = -EINVAL;
2002 goto cxlflash_ioctl_exit;
2003 }
2004
2005 switch (cmd) {
2006 case DK_CXLFLASH_ATTACH:
2007 case DK_CXLFLASH_USER_DIRECT:
2008 case DK_CXLFLASH_RELEASE:
2009 case DK_CXLFLASH_DETACH:
2010 case DK_CXLFLASH_VERIFY:
2011 case DK_CXLFLASH_RECOVER_AFU:
2012 case DK_CXLFLASH_USER_VIRTUAL:
2013 case DK_CXLFLASH_VLUN_RESIZE:
2014 case DK_CXLFLASH_VLUN_CLONE:
2015 dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n",
2016 __func__, decode_ioctl(cmd), cmd, shost->host_no,
2017 sdev->channel, sdev->id, sdev->lun);
2018 rc = ioctl_common(sdev, cmd);
2019 if (unlikely(rc))
2020 goto cxlflash_ioctl_exit;
2021
2022 /* fall through */
2023
2024 case DK_CXLFLASH_MANAGE_LUN:
2025 known_ioctl = true;
2026 idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH);
2027 size = ioctl_tbl[idx].size;
2028 do_ioctl = ioctl_tbl[idx].ioctl;
2029
2030 if (likely(do_ioctl))
2031 break;
2032
2033 /* fall through */
2034 default:
2035 rc = -EINVAL;
2036 goto cxlflash_ioctl_exit;
2037 }
2038
2039 if (unlikely(copy_from_user(&buf, arg, size))) {
2040 dev_err(dev, "%s: copy_from_user() fail! "
2041 "size=%lu cmd=%d (%s) arg=%p\n",
2042 __func__, size, cmd, decode_ioctl(cmd), arg);
2043 rc = -EFAULT;
2044 goto cxlflash_ioctl_exit;
2045 }
2046
2047 hdr = (struct dk_cxlflash_hdr *)&buf;
2048 if (hdr->version != DK_CXLFLASH_VERSION_0) {
2049 dev_dbg(dev, "%s: Version %u not supported for %s\n",
2050 __func__, hdr->version, decode_ioctl(cmd));
2051 rc = -EINVAL;
2052 goto cxlflash_ioctl_exit;
2053 }
2054
2055 if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) {
2056 dev_dbg(dev, "%s: Reserved/rflags populated!\n", __func__);
2057 rc = -EINVAL;
2058 goto cxlflash_ioctl_exit;
2059 }
2060
2061 rc = do_ioctl(sdev, (void *)&buf);
2062 if (likely(!rc))
2063 if (unlikely(copy_to_user(arg, &buf, size))) {
2064 dev_err(dev, "%s: copy_to_user() fail! "
2065 "size=%lu cmd=%d (%s) arg=%p\n",
2066 __func__, size, cmd, decode_ioctl(cmd), arg);
2067 rc = -EFAULT;
2068 }
2069
2070 /* fall through to exit */
2071
2072cxlflash_ioctl_exit:
2073 if (unlikely(rc && known_ioctl))
2074 dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
2075 "returned rc %d\n", __func__,
2076 decode_ioctl(cmd), cmd, shost->host_no,
2077 sdev->channel, sdev->id, sdev->lun, rc);
2078 else
2079 dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) "
2080 "returned rc %d\n", __func__, decode_ioctl(cmd),
2081 cmd, shost->host_no, sdev->channel, sdev->id,
2082 sdev->lun, rc);
2083 return rc;
2084}
diff --git a/drivers/scsi/cxlflash/superpipe.h b/drivers/scsi/cxlflash/superpipe.h
new file mode 100644
index 000000000000..d7dc88bc64a4
--- /dev/null
+++ b/drivers/scsi/cxlflash/superpipe.h
@@ -0,0 +1,147 @@
1/*
2 * CXL Flash Device Driver
3 *
4 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
5 * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
6 *
7 * Copyright (C) 2015 IBM Corporation
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#ifndef _CXLFLASH_SUPERPIPE_H
16#define _CXLFLASH_SUPERPIPE_H
17
18extern struct cxlflash_global global;
19
20/*
21 * Terminology: use afu (and not adapter) to refer to the HW.
22 * Adapter is the entire slot and includes PSL out of which
23 * only the AFU is visible to user space.
24 */
25
26/* Chunk size parms: note sislite minimum chunk size is
27 * 0x10000 LBAs corresponding to a NMASK of 16.
28 */
29#define MC_CHUNK_SIZE (1 << MC_RHT_NMASK) /* in LBAs */
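/*
 * Worked example, assuming MC_RHT_NMASK is 16 per the note above: a chunk
 * spans 1 << 16 = 0x10000 LBAs, which with 4 KiB blocks is 256 MiB of
 * backing store per allocation unit.
 */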
30
31#define MC_DISCOVERY_TIMEOUT 5 /* 5 secs */
32
33#define CHAN2PORT(_x) ((_x) + 1)
34#define PORT2CHAN(_x) ((_x) - 1)
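/*
 * Example: the SCSI midlayer's 0-based channel maps to the AFU's 1-based
 * FC port and back, i.e. CHAN2PORT(0) == 1 and PORT2CHAN(1) == 0.
 */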
35
36enum lun_mode {
37 MODE_NONE = 0,
38 MODE_VIRTUAL,
39 MODE_PHYSICAL
40};
41
42/* Global (entire driver, spans adapters) lun_info structure */
43struct glun_info {
44 u64 max_lba; /* from read cap(16) */
45 u32 blk_len; /* from read cap(16) */
46 enum lun_mode mode; /* NONE, VIRTUAL, PHYSICAL */
47 int users; /* Number of users w/ references to LUN */
48
49 u8 wwid[16];
50
51 struct mutex mutex;
52
53 struct blka blka;
54 struct list_head list;
55};
56
57/* Local (per-adapter) lun_info structure */
58struct llun_info {
59 u64 lun_id[CXLFLASH_NUM_FC_PORTS]; /* from REPORT_LUNS */
60 u32 lun_index; /* Index in the LUN table */
61 u32 host_no; /* host_no from Scsi_host */
62 u32 port_sel; /* What port to use for this LUN */
63 bool newly_created; /* Whether the LUN was just discovered */
64 bool in_table; /* Whether a LUN table entry was created */
65
66 u8 wwid[16]; /* Keep a duplicate copy here? */
67
68 struct glun_info *parent; /* Pointer to entry in global LUN structure */
69 struct scsi_device *sdev;
70 struct list_head list;
71};
72
73struct lun_access {
74 struct llun_info *lli;
75 struct scsi_device *sdev;
76 struct list_head list;
77};
78
79enum ctx_ctrl {
80 CTX_CTRL_CLONE = (1 << 1),
81 CTX_CTRL_ERR = (1 << 2),
82 CTX_CTRL_ERR_FALLBACK = (1 << 3),
83 CTX_CTRL_NOPID = (1 << 4),
84 CTX_CTRL_FILE = (1 << 5)
85};
86
87#define ENCODE_CTXID(_ctx, _id) (((((u64)_ctx) & 0xFFFFFFFF0) << 28) | _id)
88#define DECODE_CTXID(_val) (_val & 0xFFFFFFFF)
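/*
 * Round-trip example: ENCODE_CTXID() folds bits of the ctx_info pointer
 * into the upper word and keeps the context id in the lower 32 bits, so
 * DECODE_CTXID(ENCODE_CTXID(ctxi, id)) == id for any id within 32 bits.
 */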
89
90struct ctx_info {
91 struct sisl_ctrl_map *ctrl_map; /* initialized at startup */
92 struct sisl_rht_entry *rht_start; /* 1 page (req'd for alignment),
93 alloc/free on attach/detach */
94 u32 rht_out; /* Number of checked out RHT entries */
95 u32 rht_perms; /* User-defined permissions for RHT entries */
96 struct llun_info **rht_lun; /* Mapping of RHT entries to LUNs */
97 bool *rht_needs_ws; /* User-desired write-same function per RHTE */
98
99 struct cxl_ioctl_start_work work;
100 u64 ctxid;
101 int lfd;
102 pid_t pid;
103 bool unavail;
104 bool err_recovery_active;
105 struct mutex mutex; /* Context protection */
106 struct cxl_context *ctx;
107 struct list_head luns; /* LUNs attached to this context */
108 const struct vm_operations_struct *cxl_mmap_vmops;
109 struct file *file;
110 struct list_head list; /* Link contexts in error recovery */
111};
112
113struct cxlflash_global {
114 struct mutex mutex;
115 struct list_head gluns;/* list of glun_info structs */
116 struct page *err_page; /* One page of all 0xF for error notification */
117};
118
119int cxlflash_vlun_resize(struct scsi_device *, struct dk_cxlflash_resize *);
120int _cxlflash_vlun_resize(struct scsi_device *, struct ctx_info *,
121 struct dk_cxlflash_resize *);
122
123int cxlflash_disk_release(struct scsi_device *, struct dk_cxlflash_release *);
124int _cxlflash_disk_release(struct scsi_device *, struct ctx_info *,
125 struct dk_cxlflash_release *);
126
127int cxlflash_disk_clone(struct scsi_device *, struct dk_cxlflash_clone *);
128
129int cxlflash_disk_virtual_open(struct scsi_device *, void *);
130
131int cxlflash_lun_attach(struct glun_info *, enum lun_mode, bool);
132void cxlflash_lun_detach(struct glun_info *);
133
134struct ctx_info *get_context(struct cxlflash_cfg *, u64, void *, enum ctx_ctrl);
135void put_context(struct ctx_info *);
136
137struct sisl_rht_entry *get_rhte(struct ctx_info *, res_hndl_t,
138 struct llun_info *);
139
140struct sisl_rht_entry *rhte_checkout(struct ctx_info *, struct llun_info *);
141void rhte_checkin(struct ctx_info *, struct sisl_rht_entry *);
142
143void cxlflash_ba_terminate(struct ba_lun *);
144
145int cxlflash_manage_lun(struct scsi_device *, struct dk_cxlflash_manage_lun *);
146
147#endif /* ifndef _CXLFLASH_SUPERPIPE_H */
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
new file mode 100644
index 000000000000..6155cb1d4ed3
--- /dev/null
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -0,0 +1,1243 @@
1/*
2 * CXL Flash Device Driver
3 *
4 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
5 * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
6 *
7 * Copyright (C) 2015 IBM Corporation
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <linux/syscalls.h>
16#include <misc/cxl.h>
17#include <asm/unaligned.h>
18#include <asm/bitsperlong.h>
19
20#include <scsi/scsi_cmnd.h>
21#include <scsi/scsi_host.h>
22#include <uapi/scsi/cxlflash_ioctl.h>
23
24#include "sislite.h"
25#include "common.h"
26#include "vlun.h"
27#include "superpipe.h"
28
29/**
30 * marshal_virt_to_resize() - translate uvirtual to resize structure
31 * @virt: Source structure from which to translate/copy.
32 * @resize: Destination structure for the translate/copy.
33 */
34static void marshal_virt_to_resize(struct dk_cxlflash_uvirtual *virt,
35 struct dk_cxlflash_resize *resize)
36{
37 resize->hdr = virt->hdr;
38 resize->context_id = virt->context_id;
39 resize->rsrc_handle = virt->rsrc_handle;
40 resize->req_size = virt->lun_size;
41 resize->last_lba = virt->last_lba;
42}
43
44/**
45 * marshal_clone_to_rele() - translate clone to release structure
46 * @clone: Source structure from which to translate/copy.
47 * @release: Destination structure for the translate/copy.
48 */
49static void marshal_clone_to_rele(struct dk_cxlflash_clone *clone,
50 struct dk_cxlflash_release *release)
51{
52 release->hdr = clone->hdr;
53 release->context_id = clone->context_id_dst;
54}
55
56/**
57 * ba_init() - initializes a block allocator
58 * @ba_lun: Block allocator to initialize.
59 *
60 * Return: 0 on success, -errno on failure
61 */
62static int ba_init(struct ba_lun *ba_lun)
63{
64 struct ba_lun_info *bali = NULL;
65 int lun_size_au = 0, i = 0;
66 int last_word_underflow = 0;
67 u64 *lam;
68
69 pr_debug("%s: Initializing LUN: lun_id = %llX, "
70 "ba_lun->lsize = %lX, ba_lun->au_size = %lX\n",
71 __func__, ba_lun->lun_id, ba_lun->lsize, ba_lun->au_size);
72
73 /* Calculate bit map size */
74 lun_size_au = ba_lun->lsize / ba_lun->au_size;
75 if (lun_size_au == 0) {
76 pr_debug("%s: Requested LUN size of 0!\n", __func__);
77 return -EINVAL;
78 }
79
80 /* Allocate lun information container */
81 bali = kzalloc(sizeof(struct ba_lun_info), GFP_KERNEL);
82 if (unlikely(!bali)) {
83 pr_err("%s: Failed to allocate lun_info for lun_id %llX\n",
84 __func__, ba_lun->lun_id);
85 return -ENOMEM;
86 }
87
88 bali->total_aus = lun_size_au;
89 bali->lun_bmap_size = lun_size_au / BITS_PER_LONG;
90
91 if (lun_size_au % BITS_PER_LONG)
92 bali->lun_bmap_size++;
93
94 /* Allocate bitmap space */
95 bali->lun_alloc_map = kzalloc((bali->lun_bmap_size * sizeof(u64)),
96 GFP_KERNEL);
97 if (unlikely(!bali->lun_alloc_map)) {
98 pr_err("%s: Failed to allocate lun allocation map: "
99 "lun_id = %llX\n", __func__, ba_lun->lun_id);
100 kfree(bali);
101 return -ENOMEM;
102 }
103
104	/* Initialize the free count and mark all allocation units free ('1') */
105 bali->free_aun_cnt = lun_size_au;
106
107 for (i = 0; i < bali->lun_bmap_size; i++)
108 bali->lun_alloc_map[i] = 0xFFFFFFFFFFFFFFFFULL;
109
110	/* If the last word is not fully utilized, mark extra bits as allocated */
111 last_word_underflow = (bali->lun_bmap_size * BITS_PER_LONG);
112 last_word_underflow -= bali->free_aun_cnt;
113 if (last_word_underflow > 0) {
114 lam = &bali->lun_alloc_map[bali->lun_bmap_size - 1];
115 for (i = (HIBIT - last_word_underflow + 1);
116 i < BITS_PER_LONG;
117 i++)
118 clear_bit(i, (ulong *)lam);
119 }
120
121 /* Initialize high elevator index, low/curr already at 0 from kzalloc */
122 bali->free_high_idx = bali->lun_bmap_size;
123
124 /* Allocate clone map */
125 bali->aun_clone_map = kzalloc((bali->total_aus * sizeof(u8)),
126 GFP_KERNEL);
127 if (unlikely(!bali->aun_clone_map)) {
128 pr_err("%s: Failed to allocate clone map: lun_id = %llX\n",
129 __func__, ba_lun->lun_id);
130 kfree(bali->lun_alloc_map);
131 kfree(bali);
132 return -ENOMEM;
133 }
134
135 /* Pass the allocated lun info as a handle to the user */
136 ba_lun->ba_lun_handle = bali;
137
138 pr_debug("%s: Successfully initialized the LUN: "
139 "lun_id = %llX, bitmap size = %X, free_aun_cnt = %llX\n",
140 __func__, ba_lun->lun_id, bali->lun_bmap_size,
141 bali->free_aun_cnt);
142 return 0;
143}
144
145/**
146 * find_free_range() - locates a free bit within the block allocator
147 * @low: First word in block allocator to start search.
148 * @high: Last word in block allocator to search.
149 * @bali: LUN information structure owning the block allocator to search.
150 * @bit_word: Passes back the word in the block allocator owning the free bit.
151 *
152 * Return: The bit position within the passed back word, -1 on failure
153 */
154static int find_free_range(u32 low,
155 u32 high,
156 struct ba_lun_info *bali, int *bit_word)
157{
158 int i;
159 u64 bit_pos = -1;
160 ulong *lam, num_bits;
161
162 for (i = low; i < high; i++)
163 if (bali->lun_alloc_map[i] != 0) {
164 lam = (ulong *)&bali->lun_alloc_map[i];
165 num_bits = (sizeof(*lam) * BITS_PER_BYTE);
166 bit_pos = find_first_bit(lam, num_bits);
167
168 pr_devel("%s: Found free bit %llX in lun "
169 "map entry %llX at bitmap index = %X\n",
170 __func__, bit_pos, bali->lun_alloc_map[i],
171 i);
172
173 *bit_word = i;
174 bali->free_aun_cnt--;
175 clear_bit(bit_pos, lam);
176 break;
177 }
178
179 return bit_pos;
180}
181
182/**
183 * ba_alloc() - allocates a block from the block allocator
184 * @ba_lun: Block allocator from which to allocate a block.
185 *
186 * Return: The allocated block, -1 on failure
187 */
188static u64 ba_alloc(struct ba_lun *ba_lun)
189{
190 u64 bit_pos = -1;
191 int bit_word = 0;
192 struct ba_lun_info *bali = NULL;
193
194 bali = ba_lun->ba_lun_handle;
195
196 pr_debug("%s: Received block allocation request: "
197 "lun_id = %llX, free_aun_cnt = %llX\n",
198 __func__, ba_lun->lun_id, bali->free_aun_cnt);
199
200 if (bali->free_aun_cnt == 0) {
201 pr_debug("%s: No space left on LUN: lun_id = %llX\n",
202 __func__, ba_lun->lun_id);
203 return -1ULL;
204 }
205
206 /* Search to find a free entry, curr->high then low->curr */
207 bit_pos = find_free_range(bali->free_curr_idx,
208 bali->free_high_idx, bali, &bit_word);
209 if (bit_pos == -1) {
210 bit_pos = find_free_range(bali->free_low_idx,
211 bali->free_curr_idx,
212 bali, &bit_word);
213 if (bit_pos == -1) {
214 pr_debug("%s: Could not find an allocation unit on LUN:"
215 " lun_id = %llX\n", __func__, ba_lun->lun_id);
216 return -1ULL;
217 }
218 }
219
220 /* Update the free_curr_idx */
221 if (bit_pos == HIBIT)
222 bali->free_curr_idx = bit_word + 1;
223 else
224 bali->free_curr_idx = bit_word;
225
226 pr_debug("%s: Allocating AU number %llX, on lun_id %llX, "
227 "free_aun_cnt = %llX\n", __func__,
228 ((bit_word * BITS_PER_LONG) + bit_pos), ba_lun->lun_id,
229 bali->free_aun_cnt);
230
231 return (u64) ((bit_word * BITS_PER_LONG) + bit_pos);
232}
233
234/**
235 * validate_alloc() - validates the specified block has been allocated
236 * @bali: LUN information structure owning the block allocator.
237 * @aun: Block to validate.
238 *
239 * Return: 0 on success, -1 on failure
240 */
241static int validate_alloc(struct ba_lun_info *bali, u64 aun)
242{
243 int idx = 0, bit_pos = 0;
244
245 idx = aun / BITS_PER_LONG;
246 bit_pos = aun % BITS_PER_LONG;
247
248 if (test_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]))
249 return -1;
250
251 return 0;
252}
253
254/**
255 * ba_free() - frees a block from the block allocator
256 * @ba_lun: Block allocator from which to free a block.
257 * @to_free: Block to free.
258 *
259 * Return: 0 on success, -1 on failure
260 */
261static int ba_free(struct ba_lun *ba_lun, u64 to_free)
262{
263 int idx = 0, bit_pos = 0;
264 struct ba_lun_info *bali = NULL;
265
266 bali = ba_lun->ba_lun_handle;
267
268 if (validate_alloc(bali, to_free)) {
269 pr_debug("%s: The AUN %llX is not allocated on lun_id %llX\n",
270 __func__, to_free, ba_lun->lun_id);
271 return -1;
272 }
273
274 pr_debug("%s: Received a request to free AU %llX on lun_id %llX, "
275 "free_aun_cnt = %llX\n", __func__, to_free, ba_lun->lun_id,
276 bali->free_aun_cnt);
277
278 if (bali->aun_clone_map[to_free] > 0) {
279 pr_debug("%s: AUN %llX on lun_id %llX has been cloned. Clone "
280 "count = %X\n", __func__, to_free, ba_lun->lun_id,
281 bali->aun_clone_map[to_free]);
282 bali->aun_clone_map[to_free]--;
283 return 0;
284 }
285
286 idx = to_free / BITS_PER_LONG;
287 bit_pos = to_free % BITS_PER_LONG;
288
289 set_bit(bit_pos, (ulong *)&bali->lun_alloc_map[idx]);
290 bali->free_aun_cnt++;
291
292 if (idx < bali->free_low_idx)
293 bali->free_low_idx = idx;
294 else if (idx > bali->free_high_idx)
295 bali->free_high_idx = idx;
296
297 pr_debug("%s: Successfully freed AU at bit_pos %X, bit map index %X on "
298 "lun_id %llX, free_aun_cnt = %llX\n", __func__, bit_pos, idx,
299 ba_lun->lun_id, bali->free_aun_cnt);
300
301 return 0;
302}
303
304/**
305 * ba_clone() - Clone a chunk of the block allocation table
306 * @ba_lun: Block allocator owning the block to clone.
307 * @to_clone: Block to clone.
308 *
309 * Return: 0 on success, -1 on failure
310 */
311static int ba_clone(struct ba_lun *ba_lun, u64 to_clone)
312{
313 struct ba_lun_info *bali = ba_lun->ba_lun_handle;
314
315 if (validate_alloc(bali, to_clone)) {
316 pr_debug("%s: AUN %llX is not allocated on lun_id %llX\n",
317 __func__, to_clone, ba_lun->lun_id);
318 return -1;
319 }
320
321 pr_debug("%s: Received a request to clone AUN %llX on lun_id %llX\n",
322 __func__, to_clone, ba_lun->lun_id);
323
324 if (bali->aun_clone_map[to_clone] == MAX_AUN_CLONE_CNT) {
325 pr_debug("%s: AUN %llX on lun_id %llX hit max clones already\n",
326 __func__, to_clone, ba_lun->lun_id);
327 return -1;
328 }
329
330 bali->aun_clone_map[to_clone]++;
331
332 return 0;
333}
334
335/**
336 * ba_space() - returns the amount of free space left in the block allocator
337 * @ba_lun: Block allocator.
338 *
339 * Return: Amount of free space in block allocator
340 */
341static u64 ba_space(struct ba_lun *ba_lun)
342{
343 struct ba_lun_info *bali = ba_lun->ba_lun_handle;
344
345 return bali->free_aun_cnt;
346}
347
348/**
349 * cxlflash_ba_terminate() - frees resources associated with the block allocator
350 * @ba_lun: Block allocator.
351 *
352 * Safe to call in a partially allocated state.
353 */
354void cxlflash_ba_terminate(struct ba_lun *ba_lun)
355{
356 struct ba_lun_info *bali = ba_lun->ba_lun_handle;
357
358 if (bali) {
359 kfree(bali->aun_clone_map);
360 kfree(bali->lun_alloc_map);
361 kfree(bali);
362 ba_lun->ba_lun_handle = NULL;
363 }
364}
365
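/*
 * Editor's sketch (illustration only, not part of the driver) of the
 * allocator lifecycle used by the virtual LUN code below; the ba_lun
 * field values are hypothetical. Real callers serialize through the
 * owning blka mutex.
 */
#if 0	/* illustration only */
	struct ba_lun ba = {
		.lun_id = 0,
		.lsize = 0x1000000,		/* LUN size in LBAs */
		.au_size = MC_CHUNK_SIZE,	/* LBAs per allocation unit */
	};
	u64 aun;

	if (!ba_init(&ba)) {
		aun = ba_alloc(&ba);		/* -1ULL when exhausted */
		if (aun != -1ULL)
			ba_free(&ba, aun);
		cxlflash_ba_terminate(&ba);	/* safe when partially set up */
	}
#endif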
366/**
367 * init_vlun() - initializes a LUN for virtual use
368 * @lli: Per-adapter LUN information structure; its parent global LUN
369 *      owns the block allocator.
369 *
370 * Return: 0 on success, -errno on failure
371 */
372static int init_vlun(struct llun_info *lli)
373{
374 int rc = 0;
375 struct glun_info *gli = lli->parent;
376 struct blka *blka = &gli->blka;
377
378 memset(blka, 0, sizeof(*blka));
379 mutex_init(&blka->mutex);
380
381 /* LUN IDs are unique per port, save the index instead */
382 blka->ba_lun.lun_id = lli->lun_index;
383 blka->ba_lun.lsize = gli->max_lba + 1;
384 blka->ba_lun.lba_size = gli->blk_len;
385
386 blka->ba_lun.au_size = MC_CHUNK_SIZE;
387 blka->nchunk = blka->ba_lun.lsize / MC_CHUNK_SIZE;
388
389 rc = ba_init(&blka->ba_lun);
390 if (unlikely(rc))
391 pr_debug("%s: cannot init block_alloc, rc=%d\n", __func__, rc);
392
393 pr_debug("%s: returning rc=%d lli=%p\n", __func__, rc, lli);
394 return rc;
395}
396
397/**
398 * write_same16() - sends a SCSI WRITE_SAME(16) command to the specified LUN
399 * @sdev: SCSI device associated with LUN.
400 * @lba: Logical block address to start write same.
401 * @nblks: Number of logical blocks to write same.
402 *
403 * Return: 0 on success, -errno on failure
404 */
405static int write_same16(struct scsi_device *sdev,
406 u64 lba,
407 u32 nblks)
408{
409 u8 *cmd_buf = NULL;
410 u8 *scsi_cmd = NULL;
411 u8 *sense_buf = NULL;
412 int rc = 0;
413 int result = 0;
414 int ws_limit = SISLITE_MAX_WS_BLOCKS;
415 u64 offset = lba;
416 int left = nblks;
417 u32 tout = sdev->request_queue->rq_timeout;
418 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
419 struct device *dev = &cfg->dev->dev;
420
421 cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL);
422 scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL);
423 sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
424 if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) {
425 rc = -ENOMEM;
426 goto out;
427 }
428
429 while (left > 0) {
430
431 scsi_cmd[0] = WRITE_SAME_16;
432 put_unaligned_be64(offset, &scsi_cmd[2]);
433 put_unaligned_be32(ws_limit < left ? ws_limit : left,
434 &scsi_cmd[10]);
435
436 result = scsi_execute(sdev, scsi_cmd, DMA_TO_DEVICE, cmd_buf,
437 CMD_BUFSIZE, sense_buf, tout, 5, 0, NULL);
438 if (result) {
439 dev_err_ratelimited(dev, "%s: command failed for "
440 "offset %lld result=0x%x\n",
441 __func__, offset, result);
442 rc = -EIO;
443 goto out;
444 }
445 left -= ws_limit;
446 offset += ws_limit;
447 }
448
449out:
450 kfree(cmd_buf);
451 kfree(scsi_cmd);
452 kfree(sense_buf);
453 pr_debug("%s: returning rc=%d\n", __func__, rc);
454 return rc;
455}
456
457/**
458 * grow_lxt() - expands the translation table associated with the specified RHTE
459 * @afu: AFU associated with the host.
460 * @sdev: SCSI device associated with LUN.
461 * @ctxid: Context ID of context owning the RHTE.
462 * @rhndl: Resource handle associated with the RHTE.
463 * @rhte: Resource handle entry (RHTE).
464 * @new_size: Number of translation entries associated with RHTE.
465 *
466 * By design, this routine employs a 'best attempt' allocation: when the
467 * block allocator cannot satisfy the full request but still has some
468 * space available, the requested size is truncated to what can be
469 * satisfied. The caller learns of this through the size passed back in
470 * @new_size.
471 *
472 * Return: 0 on success, -errno on failure
473 */
474static int grow_lxt(struct afu *afu,
475 struct scsi_device *sdev,
476 ctx_hndl_t ctxid,
477 res_hndl_t rhndl,
478 struct sisl_rht_entry *rhte,
479 u64 *new_size)
480{
481 struct sisl_lxt_entry *lxt = NULL, *lxt_old = NULL;
482 struct llun_info *lli = sdev->hostdata;
483 struct glun_info *gli = lli->parent;
484 struct blka *blka = &gli->blka;
485 u32 av_size;
486 u32 ngrps, ngrps_old;
487 u64 aun; /* chunk# allocated by block allocator */
488 u64 delta = *new_size - rhte->lxt_cnt;
489 u64 my_new_size;
490 int i, rc = 0;
491
492 /*
493 * Check what is available in the block allocator before re-allocating
494 * LXT array. This is done up front under the mutex which must not be
495 * released until after allocation is complete.
496 */
497 mutex_lock(&blka->mutex);
498 av_size = ba_space(&blka->ba_lun);
499 if (unlikely(av_size <= 0)) {
500 pr_debug("%s: ba_space error: av_size %d\n", __func__, av_size);
501 mutex_unlock(&blka->mutex);
502 rc = -ENOSPC;
503 goto out;
504 }
505
506 if (av_size < delta)
507 delta = av_size;
508
509 lxt_old = rhte->lxt_start;
510 ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt);
511 ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt + delta);
512
513 if (ngrps != ngrps_old) {
514 /* reallocate to fit new size */
515 lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
516 GFP_KERNEL);
517 if (unlikely(!lxt)) {
518 mutex_unlock(&blka->mutex);
519 rc = -ENOMEM;
520 goto out;
521 }
522
523 /* copy over all old entries */
524 memcpy(lxt, lxt_old, (sizeof(*lxt) * rhte->lxt_cnt));
525 } else
526 lxt = lxt_old;
527
528 /* nothing can fail from now on */
529 my_new_size = rhte->lxt_cnt + delta;
530
531 /* add new entries to the end */
532 for (i = rhte->lxt_cnt; i < my_new_size; i++) {
533 /*
534 * Due to the earlier check of available space, ba_alloc
535 * cannot fail here. If it did due to internal error,
536		 * leave an rlba_base of -1u which will likely be an
537		 * invalid LUN (too large).
538 */
539 aun = ba_alloc(&blka->ba_lun);
540 if ((aun == -1ULL) || (aun >= blka->nchunk))
541 pr_debug("%s: ba_alloc error: allocated chunk# %llX, "
542 "max %llX\n", __func__, aun, blka->nchunk - 1);
543
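		/*
		 * LXT entry layout, as encoded below: AU number in the high
		 * bits (<< MC_CHUNK_SHIFT), LUN table index at
		 * LXT_LUNIDX_SHIFT, permissions at LXT_PERM_SHIFT and the
		 * port selection mask in the low-order bits.
		 */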
544 /* select both ports, use r/w perms from RHT */
545 lxt[i].rlba_base = ((aun << MC_CHUNK_SHIFT) |
546 (lli->lun_index << LXT_LUNIDX_SHIFT) |
547 (RHT_PERM_RW << LXT_PERM_SHIFT |
548 lli->port_sel));
549 }
550
551 mutex_unlock(&blka->mutex);
552
553 /*
554 * The following sequence is prescribed in the SISlite spec
555 * for syncing up with the AFU when adding LXT entries.
556 */
557	dma_wmb(); /* Make LXT updates visible */
558
559 rhte->lxt_start = lxt;
560 dma_wmb(); /* Make RHT entry's LXT table update visible */
561
562 rhte->lxt_cnt = my_new_size;
563 dma_wmb(); /* Make RHT entry's LXT table size update visible */
564
565 cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
566
567 /* free old lxt if reallocated */
568 if (lxt != lxt_old)
569 kfree(lxt_old);
570 *new_size = my_new_size;
571out:
572 pr_debug("%s: returning rc=%d\n", __func__, rc);
573 return rc;
574}
575
576/**
577 * shrink_lxt() - reduces translation table associated with the specified RHTE
578 * @afu: AFU associated with the host.
579 * @sdev: SCSI device associated with LUN.
580 * @rhndl: Resource handle associated with the RHTE.
581 * @rhte: Resource handle entry (RHTE).
582 * @ctxi: Context owning resources.
583 * @new_size: Number of translation entries associated with RHTE.
584 *
585 * Return: 0 on success, -errno on failure
586 */
587static int shrink_lxt(struct afu *afu,
588 struct scsi_device *sdev,
589 res_hndl_t rhndl,
590 struct sisl_rht_entry *rhte,
591 struct ctx_info *ctxi,
592 u64 *new_size)
593{
594 struct sisl_lxt_entry *lxt, *lxt_old;
595 struct llun_info *lli = sdev->hostdata;
596 struct glun_info *gli = lli->parent;
597 struct blka *blka = &gli->blka;
598 ctx_hndl_t ctxid = DECODE_CTXID(ctxi->ctxid);
599 bool needs_ws = ctxi->rht_needs_ws[rhndl];
600 bool needs_sync = !ctxi->err_recovery_active;
601 u32 ngrps, ngrps_old;
602 u64 aun; /* chunk# allocated by block allocator */
603 u64 delta = rhte->lxt_cnt - *new_size;
604 u64 my_new_size;
605 int i, rc = 0;
606
607 lxt_old = rhte->lxt_start;
608 ngrps_old = LXT_NUM_GROUPS(rhte->lxt_cnt);
609 ngrps = LXT_NUM_GROUPS(rhte->lxt_cnt - delta);
610
611 if (ngrps != ngrps_old) {
612 /* Reallocate to fit new size unless new size is 0 */
613 if (ngrps) {
614 lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
615 GFP_KERNEL);
616 if (unlikely(!lxt)) {
617 rc = -ENOMEM;
618 goto out;
619 }
620
621 /* Copy over old entries that will remain */
622 memcpy(lxt, lxt_old,
623 (sizeof(*lxt) * (rhte->lxt_cnt - delta)));
624 } else
625 lxt = NULL;
626 } else
627 lxt = lxt_old;
628
629 /* Nothing can fail from now on */
630 my_new_size = rhte->lxt_cnt - delta;
631
632 /*
633 * The following sequence is prescribed in the SISlite spec
634 * for syncing up with the AFU when removing LXT entries.
635 */
636 rhte->lxt_cnt = my_new_size;
637 dma_wmb(); /* Make RHT entry's LXT table size update visible */
638
639 rhte->lxt_start = lxt;
640 dma_wmb(); /* Make RHT entry's LXT table update visible */
641
642 if (needs_sync)
643 cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
644
645 if (needs_ws) {
646 /*
647 * Mark the context as unavailable, so that we can release
648 * the mutex safely.
649 */
650 ctxi->unavail = true;
651 mutex_unlock(&ctxi->mutex);
652 }
653
654 /* Free LBAs allocated to freed chunks */
655 mutex_lock(&blka->mutex);
656 for (i = delta - 1; i >= 0; i--) {
657 /* Mask the higher 48 bits before shifting, even though
658 * it is a noop
659 */
660 aun = (lxt_old[my_new_size + i].rlba_base & SISL_ASTATUS_MASK);
661 aun = (aun >> MC_CHUNK_SHIFT);
662 if (needs_ws)
663 write_same16(sdev, aun, MC_CHUNK_SIZE);
664 ba_free(&blka->ba_lun, aun);
665 }
666 mutex_unlock(&blka->mutex);
667
668 if (needs_ws) {
669 /* Make the context visible again */
670 mutex_lock(&ctxi->mutex);
671 ctxi->unavail = false;
672 }
673
674 /* Free old lxt if reallocated */
675 if (lxt != lxt_old)
676 kfree(lxt_old);
677 *new_size = my_new_size;
678out:
679 pr_debug("%s: returning rc=%d\n", __func__, rc);
680 return rc;
681}
682
683/**
684 * _cxlflash_vlun_resize() - changes the size of a virtual lun
685 * @sdev: SCSI device associated with LUN owning virtual LUN.
686 * @ctxi: Context owning resources.
687 * @resize: Resize ioctl data structure.
688 *
689 * On successful return, the user is informed of the new size (in blocks)
690 * of the virtual lun in last LBA format. When the size of the virtual
691 * lun is zero, the last LBA is reflected as -1. See comment in the
692 * prologue for _cxlflash_disk_release() regarding AFU syncs and contexts
693 * on the error recovery list.
694 *
695 * Return: 0 on success, -errno on failure
696 */
697int _cxlflash_vlun_resize(struct scsi_device *sdev,
698 struct ctx_info *ctxi,
699 struct dk_cxlflash_resize *resize)
700{
701 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
702 struct llun_info *lli = sdev->hostdata;
703 struct glun_info *gli = lli->parent;
704 struct afu *afu = cfg->afu;
705 bool put_ctx = false;
706
707 res_hndl_t rhndl = resize->rsrc_handle;
708 u64 new_size;
709 u64 nsectors;
710 u64 ctxid = DECODE_CTXID(resize->context_id),
711 rctxid = resize->context_id;
712
713 struct sisl_rht_entry *rhte;
714
715 int rc = 0;
716
717 /*
718 * The requested size (req_size) is always assumed to be in 4k blocks,
719 * so we have to convert it here from 4k to chunk size.
720 */
721 nsectors = (resize->req_size * CXLFLASH_BLOCK_SIZE) / gli->blk_len;
722 new_size = DIV_ROUND_UP(nsectors, MC_CHUNK_SIZE);
723
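	/*
	 * Worked example, assuming a 4 KiB-block LUN: req_size = 0x30000
	 * 4 KiB blocks yields nsectors = 0x30000 and, at MC_CHUNK_SIZE of
	 * 0x10000 LBAs per chunk, new_size = 3; requests that are not a
	 * chunk multiple round up via DIV_ROUND_UP().
	 */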
724 pr_debug("%s: ctxid=%llu rhndl=0x%llx, req_size=0x%llx,"
725 "new_size=%llx\n", __func__, ctxid, resize->rsrc_handle,
726 resize->req_size, new_size);
727
728 if (unlikely(gli->mode != MODE_VIRTUAL)) {
729 pr_debug("%s: LUN mode does not support resize! (%d)\n",
730 __func__, gli->mode);
731 rc = -EINVAL;
732 goto out;
733
734 }
735
736 if (!ctxi) {
737 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK);
738 if (unlikely(!ctxi)) {
739 pr_debug("%s: Bad context! (%llu)\n", __func__, ctxid);
740 rc = -EINVAL;
741 goto out;
742 }
743
744 put_ctx = true;
745 }
746
747 rhte = get_rhte(ctxi, rhndl, lli);
748 if (unlikely(!rhte)) {
749 pr_debug("%s: Bad resource handle! (%u)\n", __func__, rhndl);
750 rc = -EINVAL;
751 goto out;
752 }
753
754 if (new_size > rhte->lxt_cnt)
755 rc = grow_lxt(afu, sdev, ctxid, rhndl, rhte, &new_size);
756 else if (new_size < rhte->lxt_cnt)
757 rc = shrink_lxt(afu, sdev, rhndl, rhte, ctxi, &new_size);
758
759 resize->hdr.return_flags = 0;
760 resize->last_lba = (new_size * MC_CHUNK_SIZE * gli->blk_len);
761 resize->last_lba /= CXLFLASH_BLOCK_SIZE;
762 resize->last_lba--;
763
764out:
765 if (put_ctx)
766 put_context(ctxi);
767 pr_debug("%s: resized to %lld returning rc=%d\n",
768 __func__, resize->last_lba, rc);
769 return rc;
770}
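
To make the conversion in _cxlflash_vlun_resize() concrete, here is the same
arithmetic as a standalone program. It assumes a 4K device block size and
that MC_CHUNK_SIZE is 1 << MC_CHUNK_SHIFT (65536 LBAs), consistent with
MC_CHUNK_SHIFT == 16 in vlun.h further down; the input value is illustrative:

#include <stdio.h>
#include <stdint.h>

#define CXLFLASH_BLOCK_SIZE 4096ULL
#define MC_CHUNK_SIZE (1ULL << 16)	/* LBAs per chunk, assumed */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t blk_len = 4096;	/* device block size */
	uint64_t req_size = 0x10001;	/* requested size, in 4K blocks */

	uint64_t nsectors = req_size * CXLFLASH_BLOCK_SIZE / blk_len;
	uint64_t new_size = DIV_ROUND_UP(nsectors, MC_CHUNK_SIZE);
	uint64_t last_lba = new_size * MC_CHUNK_SIZE * blk_len /
			    CXLFLASH_BLOCK_SIZE - 1;

	/* 0x10001 blocks rounds up to 2 chunks -> last_lba 0x1ffff */
	printf("chunks=%llu last_lba=0x%llx\n",
	       (unsigned long long)new_size, (unsigned long long)last_lba);
	return 0;
}
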
771
772int cxlflash_vlun_resize(struct scsi_device *sdev,
773 struct dk_cxlflash_resize *resize)
774{
775 return _cxlflash_vlun_resize(sdev, NULL, resize);
776}
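
For reference, a hypothetical userspace caller of the resize path could look
like the sketch below. It assumes the DK_CXLFLASH_VLUN_RESIZE ioctl and
struct dk_cxlflash_resize from this series' uapi header, and that fd was
opened on the LUN's special file as for the other cxlflash ioctls; treat
these details as assumptions, not a verified API walkthrough:

#include <linux/types.h>
#include <scsi/cxlflash_ioctl.h>
#include <string.h>
#include <sys/ioctl.h>

/* blocks is the requested size in 4K blocks, per req_size above */
static int resize_vlun(int fd, __u64 ctxid, __u64 rhndl, __u64 blocks,
		       __u64 *last_lba)
{
	struct dk_cxlflash_resize rz;

	memset(&rz, 0, sizeof(rz));
	rz.context_id = ctxid;	/* from a prior DK_CXLFLASH_ATTACH */
	rz.rsrc_handle = rhndl;	/* from a prior virtual open */
	rz.req_size = blocks;

	if (ioctl(fd, DK_CXLFLASH_VLUN_RESIZE, &rz))
		return -1;

	*last_lba = rz.last_lba;	/* (__u64)-1 when the size is zero */
	return 0;
}
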
777
778/**
779 * cxlflash_restore_luntable() - Restore LUN table to prior state
780 * @cfg: Internal structure associated with the host.
781 */
782void cxlflash_restore_luntable(struct cxlflash_cfg *cfg)
783{
784 struct llun_info *lli, *temp;
785 u32 chan;
786 u32 lind;
787 struct afu *afu = cfg->afu;
788 struct sisl_global_map *agm = &afu->afu_map->global;
789
790 mutex_lock(&global.mutex);
791
792 list_for_each_entry_safe(lli, temp, &cfg->lluns, list) {
793 if (!lli->in_table)
794 continue;
795
796 lind = lli->lun_index;
797
798 if (lli->port_sel == BOTH_PORTS) {
799 writeq_be(lli->lun_id[0], &agm->fc_port[0][lind]);
800 writeq_be(lli->lun_id[1], &agm->fc_port[1][lind]);
801 pr_debug("%s: Virtual LUN on slot %d id0=%llx, "
802 "id1=%llx\n", __func__, lind,
803 lli->lun_id[0], lli->lun_id[1]);
804 } else {
805 chan = PORT2CHAN(lli->port_sel);
806 writeq_be(lli->lun_id[chan], &agm->fc_port[chan][lind]);
807 pr_debug("%s: Virtual LUN on slot %d chan=%d, "
808 "id=%llx\n", __func__, lind, chan,
809 lli->lun_id[chan]);
810 }
811 }
812
813 mutex_unlock(&global.mutex);
814}
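
cxlflash_restore_luntable() re-drives writeq_be() for every in-table LUN
because the AFU's global map is big-endian MMIO whose contents do not
survive a reset. On a little-endian host a big-endian 64-bit MMIO store
reduces to roughly the following; an illustrative sketch, not the driver's
actual accessor:

#include <stdint.h>

static inline void writeq_be_sketch(uint64_t val, volatile uint64_t *addr)
{
	/* byte-swap to big-endian, then one volatile 64-bit store */
	*addr = __builtin_bswap64(val);
}
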
815
816/**
817 * init_luntable() - write an entry in the LUN table
818 * @cfg: Internal structure associated with the host.
819 * @lli: Per adapter LUN information structure.
820 *
821 * On successful return, a LUN table entry is created: at the top of
822 * the table for LUNs visible on both ports, at the bottom for LUNs
823 * visible on only one port.
824 *
825 * Return: 0 on success, -errno on failure
826 */
827static int init_luntable(struct cxlflash_cfg *cfg, struct llun_info *lli)
828{
829 u32 chan;
830 u32 lind;
831 int rc = 0;
832 struct afu *afu = cfg->afu;
833 struct sisl_global_map *agm = &afu->afu_map->global;
834
835 mutex_lock(&global.mutex);
836
837 if (lli->in_table)
838 goto out;
839
840 if (lli->port_sel == BOTH_PORTS) {
841 /*
842 * If this LUN is visible from both ports, we will put
843 * it in the top half of the LUN table.
844 */
845 if ((cfg->promote_lun_index == cfg->last_lun_index[0]) ||
846 (cfg->promote_lun_index == cfg->last_lun_index[1])) {
847 rc = -ENOSPC;
848 goto out;
849 }
850
851 lind = lli->lun_index = cfg->promote_lun_index;
852 writeq_be(lli->lun_id[0], &agm->fc_port[0][lind]);
853 writeq_be(lli->lun_id[1], &agm->fc_port[1][lind]);
854 cfg->promote_lun_index++;
855 pr_debug("%s: Virtual LUN on slot %d id0=%llx, id1=%llx\n",
856 __func__, lind, lli->lun_id[0], lli->lun_id[1]);
857 } else {
858 /*
859 * If this LUN is visible only from one port, we will put
860 * it in the bottom half of the LUN table.
861 */
862 chan = PORT2CHAN(lli->port_sel);
863 if (cfg->promote_lun_index == cfg->last_lun_index[chan]) {
864 rc = -ENOSPC;
865 goto out;
866 }
867
868 lind = lli->lun_index = cfg->last_lun_index[chan];
869 writeq_be(lli->lun_id[chan], &agm->fc_port[chan][lind]);
870 cfg->last_lun_index[chan]--;
871 pr_debug("%s: Virtual LUN on slot %d chan=%d, id=%llx\n",
872 __func__, lind, chan, lli->lun_id[chan]);
873 }
874
875 lli->in_table = true;
876out:
877 mutex_unlock(&global.mutex);
878 pr_debug("%s: returning rc=%d\n", __func__, rc);
879 return rc;
880}
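
init_luntable() is effectively a two-ended slot allocator: dual-port LUNs
take slots from the top of the table via promote_lun_index++, single-port
LUNs from the bottom via last_lun_index[chan]--, and the table is full when
the cursors meet. A minimal standalone sketch of that policy (NUM_SLOTS and
the struct are illustrative):

#include <errno.h>

#define NUM_SLOTS 256

struct luntable {
	unsigned int promote;	/* next slot for dual-port LUNs */
	unsigned int last[2];	/* next slot per single-port channel */
};

static void luntable_init(struct luntable *t)
{
	t->promote = 0;
	t->last[0] = NUM_SLOTS - 1;
	t->last[1] = NUM_SLOTS - 1;
}

static int alloc_dual(struct luntable *t, unsigned int *slot)
{
	if (t->promote == t->last[0] || t->promote == t->last[1])
		return -ENOSPC;
	*slot = t->promote++;
	return 0;
}

static int alloc_single(struct luntable *t, int chan, unsigned int *slot)
{
	if (t->promote == t->last[chan])
		return -ENOSPC;
	*slot = t->last[chan]--;
	return 0;
}
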
881
882/**
883 * cxlflash_disk_virtual_open() - open a virtual disk of specified size
884 * @sdev: SCSI device associated with LUN owning virtual LUN.
885 * @arg: UVirtual ioctl data structure.
886 *
887 * On successful return, the user is informed of the resource handle
888 * to be used to identify the virtual lun and the size (in blocks) of
889 * the virtual lun in last LBA format. When the size of the virtual lun
890 * is zero, the last LBA is reflected as -1.
891 *
892 * Return: 0 on success, -errno on failure
893 */
894int cxlflash_disk_virtual_open(struct scsi_device *sdev, void *arg)
895{
896 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
897 struct device *dev = &cfg->dev->dev;
898 struct llun_info *lli = sdev->hostdata;
899 struct glun_info *gli = lli->parent;
900
901 struct dk_cxlflash_uvirtual *virt = (struct dk_cxlflash_uvirtual *)arg;
902 struct dk_cxlflash_resize resize;
903
904 u64 ctxid = DECODE_CTXID(virt->context_id),
905 rctxid = virt->context_id;
906 u64 lun_size = virt->lun_size;
907 u64 last_lba = 0;
908 u64 rsrc_handle = -1;
909
910 int rc = 0;
911
912 struct ctx_info *ctxi = NULL;
913 struct sisl_rht_entry *rhte = NULL;
914
915 pr_debug("%s: ctxid=%llu ls=0x%llx\n", __func__, ctxid, lun_size);
916
917 mutex_lock(&gli->mutex);
918 if (gli->mode == MODE_NONE) {
919 /* Setup the LUN table and block allocator on first call */
920 rc = init_luntable(cfg, lli);
921 if (rc) {
922 dev_err(dev, "%s: call to init_luntable failed "
923 "rc=%d!\n", __func__, rc);
924 goto err0;
925 }
926
927 rc = init_vlun(lli);
928 if (rc) {
929 dev_err(dev, "%s: call to init_vlun failed rc=%d!\n",
930 __func__, rc);
931 rc = -ENOMEM;
932 goto err0;
933 }
934 }
935
936 rc = cxlflash_lun_attach(gli, MODE_VIRTUAL, true);
937 if (unlikely(rc)) {
938 dev_err(dev, "%s: Failed to attach to LUN! (VIRTUAL)\n",
939 __func__);
940 goto err0;
941 }
942 mutex_unlock(&gli->mutex);
943
944 ctxi = get_context(cfg, rctxid, lli, 0);
945 if (unlikely(!ctxi)) {
946 dev_err(dev, "%s: Bad context! (%llu)\n", __func__, ctxid);
947 rc = -EINVAL;
948 goto err1;
949 }
950
951 rhte = rhte_checkout(ctxi, lli);
952 if (unlikely(!rhte)) {
953 dev_err(dev, "%s: too many opens for this context\n", __func__);
954 rc = -EMFILE; /* too many opens */
955 goto err1;
956 }
957
958 rsrc_handle = (rhte - ctxi->rht_start);
959
960 /* Populate RHT format 0 */
961 rhte->nmask = MC_RHT_NMASK;
962 rhte->fp = SISL_RHT_FP(0U, ctxi->rht_perms);
963
964 /* Resize even if requested size is 0 */
965 marshal_virt_to_resize(virt, &resize);
966 resize.rsrc_handle = rsrc_handle;
967 rc = _cxlflash_vlun_resize(sdev, ctxi, &resize);
968 if (rc) {
969 dev_err(dev, "%s: resize failed rc %d\n", __func__, rc);
970 goto err2;
971 }
972 last_lba = resize.last_lba;
973
974 if (virt->hdr.flags & DK_CXLFLASH_UVIRTUAL_NEED_WRITE_SAME)
975 ctxi->rht_needs_ws[rsrc_handle] = true;
976
977 virt->hdr.return_flags = 0;
978 virt->last_lba = last_lba;
979 virt->rsrc_handle = rsrc_handle;
980
981out:
982 if (likely(ctxi))
983 put_context(ctxi);
984 pr_debug("%s: returning handle 0x%llx rc=%d llba %lld\n",
985 __func__, rsrc_handle, rc, last_lba);
986 return rc;
987
988err2:
989 rhte_checkin(ctxi, rhte);
990err1:
991 cxlflash_lun_detach(gli);
992 goto out;
993err0:
994 /* Common cleanup for failures prior to a successful LUN attach */
995 cxlflash_ba_terminate(&gli->blka.ba_lun);
996 mutex_unlock(&gli->mutex);
997 goto out;
998}
999
1000/**
1001 * clone_lxt() - copies translation tables from source to destination RHTE
1002 * @afu: AFU associated with the host.
1003 * @blka: Block allocator associated with LUN.
1004 * @ctxid: Context ID of context owning the RHTE.
1005 * @rhndl: Resource handle associated with the RHTE.
1006 * @rhte: Destination resource handle entry (RHTE).
1007 * @rhte_src: Source resource handle entry (RHTE).
1008 *
1009 * Return: 0 on success, -errno on failure
1010 */
1011static int clone_lxt(struct afu *afu,
1012 struct blka *blka,
1013 ctx_hndl_t ctxid,
1014 res_hndl_t rhndl,
1015 struct sisl_rht_entry *rhte,
1016 struct sisl_rht_entry *rhte_src)
1017{
1018 struct sisl_lxt_entry *lxt;
1019 u32 ngrps;
1020 u64 aun; /* chunk# allocated by block allocator */
1021 int i, j;
1022
1023 ngrps = LXT_NUM_GROUPS(rhte_src->lxt_cnt);
1024
1025 if (ngrps) {
1026 /* allocate new LXTs for clone */
1027 lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
1028 GFP_KERNEL);
1029 if (unlikely(!lxt))
1030 return -ENOMEM;
1031
1032 /* copy over */
1033 memcpy(lxt, rhte_src->lxt_start,
1034 (sizeof(*lxt) * rhte_src->lxt_cnt));
1035
1036 /* clone the LBAs in block allocator via ref_cnt */
1037 mutex_lock(&blka->mutex);
1038 for (i = 0; i < rhte_src->lxt_cnt; i++) {
1039 aun = (lxt[i].rlba_base >> MC_CHUNK_SHIFT);
1040 if (ba_clone(&blka->ba_lun, aun) == -1ULL) {
1041 /* free the clones already made */
1042 for (j = 0; j < i; j++) {
1043 aun = (lxt[j].rlba_base >>
1044 MC_CHUNK_SHIFT);
1045 ba_free(&blka->ba_lun, aun);
1046 }
1047
1048 mutex_unlock(&blka->mutex);
1049 kfree(lxt);
1050 return -EIO;
1051 }
1052 }
1053 mutex_unlock(&blka->mutex);
1054 } else {
1055 lxt = NULL;
1056 }
1057
1058 /*
1059 * The following sequence is prescribed in the SISlite spec
1060 * for syncing up with the AFU when adding LXT entries.
1061 */
1062 dma_wmb(); /* Make LXT updates visible */
1063
1064 rhte->lxt_start = lxt;
1065 dma_wmb(); /* Make RHT entry's LXT table update visible */
1066
1067 rhte->lxt_cnt = rhte_src->lxt_cnt;
1068 dma_wmb(); /* Make RHT entry's LXT table size update visible */
1069
1070 cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
1071
1072 pr_debug("%s: returning\n", __func__);
1073 return 0;
1074}
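
The error handling inside clone_lxt() is a clone-with-rollback pattern: each
chunk's reference count is bumped via ba_clone(), and the first failure
drops exactly the references already taken before reporting -EIO. The
pattern in isolation, with a hypothetical per-chunk counter standing in for
the block allocator's aun_clone_map (the cap mirrors MAX_AUN_CLONE_CNT from
vlun.h):

#include <errno.h>
#include <stddef.h>

#define NCHUNKS 1024
#define MAX_CLONES 0xFF

static unsigned char clone_cnt[NCHUNKS];

static int chunk_clone(size_t aun)
{
	if (clone_cnt[aun] == MAX_CLONES)
		return -1;	/* too many clones of this chunk */
	clone_cnt[aun]++;
	return 0;
}

static void chunk_free(size_t aun)
{
	clone_cnt[aun]--;
}

static int clone_all(const size_t *auns, size_t n)
{
	size_t i, j;

	for (i = 0; i < n; i++) {
		if (chunk_clone(auns[i]) < 0) {
			/* free the clones already made */
			for (j = 0; j < i; j++)
				chunk_free(auns[j]);
			return -EIO;
		}
	}
	return 0;
}
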
1075
1076/**
1077 * cxlflash_disk_clone() - clone a context by making a snapshot of another
1078 * @sdev: SCSI device associated with LUN owning virtual LUN.
1079 * @clone: Clone ioctl data structure.
1080 *
1081 * This routine effectively performs cxlflash_disk_open operation for each
1082 * in-use virtual resource in the source context. Note that the destination
1083 * context must be in pristine state and cannot have any resource handles
1084 * open at the time of the clone.
1085 *
1086 * Return: 0 on success, -errno on failure
1087 */
1088int cxlflash_disk_clone(struct scsi_device *sdev,
1089 struct dk_cxlflash_clone *clone)
1090{
1091 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata;
1092 struct llun_info *lli = sdev->hostdata;
1093 struct glun_info *gli = lli->parent;
1094 struct blka *blka = &gli->blka;
1095 struct afu *afu = cfg->afu;
1096 struct dk_cxlflash_release release = { { 0 }, 0 };
1097
1098 struct ctx_info *ctxi_src = NULL,
1099 *ctxi_dst = NULL;
1100 struct lun_access *lun_access_src, *lun_access_dst;
1101 u32 perms;
1102 u64 ctxid_src = DECODE_CTXID(clone->context_id_src),
1103 ctxid_dst = DECODE_CTXID(clone->context_id_dst),
1104 rctxid_src = clone->context_id_src,
1105 rctxid_dst = clone->context_id_dst;
1106 int adap_fd_src = clone->adap_fd_src;
1107 int i, j;
1108 int rc = 0;
1109 bool found;
1110 LIST_HEAD(sidecar);
1111
1112 pr_debug("%s: ctxid_src=%llu ctxid_dst=%llu adap_fd_src=%d\n",
1113 __func__, ctxid_src, ctxid_dst, adap_fd_src);
1114
1115 /* Do not clone yourself */
1116 if (unlikely(rctxid_src == rctxid_dst)) {
1117 rc = -EINVAL;
1118 goto out;
1119 }
1120
1121 if (unlikely(gli->mode != MODE_VIRTUAL)) {
1122 rc = -EINVAL;
1123 pr_debug("%s: Clone not supported on physical LUNs! (%d)\n",
1124 __func__, gli->mode);
1125 goto out;
1126 }
1127
1128 ctxi_src = get_context(cfg, rctxid_src, lli, CTX_CTRL_CLONE);
1129 ctxi_dst = get_context(cfg, rctxid_dst, lli, 0);
1130 if (unlikely(!ctxi_src || !ctxi_dst)) {
1131 pr_debug("%s: Bad context! (%llu,%llu)\n", __func__,
1132 ctxid_src, ctxid_dst);
1133 rc = -EINVAL;
1134 goto out;
1135 }
1136
1137 if (unlikely(adap_fd_src != ctxi_src->lfd)) {
1138 pr_debug("%s: Invalid source adapter fd! (%d)\n",
1139 __func__, adap_fd_src);
1140 rc = -EINVAL;
1141 goto out;
1142 }
1143
1144 /* Verify there is no open resource handle in the destination context */
1145 for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
1146 if (ctxi_dst->rht_start[i].nmask != 0) {
1147 rc = -EINVAL;
1148 goto out;
1149 }
1150
1151 /* Clone LUN access list */
1152 list_for_each_entry(lun_access_src, &ctxi_src->luns, list) {
1153 found = false;
1154 list_for_each_entry(lun_access_dst, &ctxi_dst->luns, list)
1155 if (lun_access_dst->sdev == lun_access_src->sdev) {
1156 found = true;
1157 break;
1158 }
1159
1160 if (!found) {
1161 lun_access_dst = kzalloc(sizeof(*lun_access_dst),
1162 GFP_KERNEL);
1163 if (unlikely(!lun_access_dst)) {
1164 pr_err("%s: Unable to allocate lun_access!\n",
1165 __func__);
1166 rc = -ENOMEM;
1167 goto out;
1168 }
1169
1170 *lun_access_dst = *lun_access_src;
1171 list_add(&lun_access_dst->list, &sidecar);
1172 }
1173 }
1174
1175 if (unlikely(!ctxi_src->rht_out)) {
1176 pr_debug("%s: Nothing to clone!\n", __func__);
1177 goto out_success;
1178 }
1179
1180 /* User specified permission on attach */
1181 perms = ctxi_dst->rht_perms;
1182
1183 /*
1184 * Copy over checked-out RHT (and their associated LXT) entries by
1185 * hand, stopping after we've copied all outstanding entries and
1186 * cleaning up if the clone fails.
1187 *
1188 * Note: This loop is equivalent to performing cxlflash_disk_open and
1189 * cxlflash_vlun_resize. As such, LUN accounting needs to be taken into
1190 * account by attaching after each successful RHT entry clone. In the
1191 * event that a clone failure is experienced, the LUN detach is handled
1192 * via the cleanup performed by _cxlflash_disk_release.
1193 */
1194 for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) {
1195 if (ctxi_src->rht_out == ctxi_dst->rht_out)
1196 break;
1197 if (ctxi_src->rht_start[i].nmask == 0)
1198 continue;
1199
1200 /* Consume a destination RHT entry */
1201 ctxi_dst->rht_out++;
1202 ctxi_dst->rht_start[i].nmask = ctxi_src->rht_start[i].nmask;
1203 ctxi_dst->rht_start[i].fp =
1204 SISL_RHT_FP_CLONE(ctxi_src->rht_start[i].fp, perms);
1205 ctxi_dst->rht_lun[i] = ctxi_src->rht_lun[i];
1206
1207 rc = clone_lxt(afu, blka, ctxid_dst, i,
1208 &ctxi_dst->rht_start[i],
1209 &ctxi_src->rht_start[i]);
1210 if (rc) {
1211 marshal_clone_to_rele(clone, &release);
1212 for (j = 0; j < i; j++) {
1213 release.rsrc_handle = j;
1214 _cxlflash_disk_release(sdev, ctxi_dst,
1215 &release);
1216 }
1217
1218 /* Put back the one we failed on */
1219 rhte_checkin(ctxi_dst, &ctxi_dst->rht_start[i]);
1220 goto err;
1221 }
1222
1223 cxlflash_lun_attach(gli, gli->mode, false);
1224 }
1225
1226out_success:
1227 list_splice(&sidecar, &ctxi_dst->luns);
1228 sys_close(adap_fd_src);
1229
1230 /* fall through */
1231out:
1232 if (ctxi_src)
1233 put_context(ctxi_src);
1234 if (ctxi_dst)
1235 put_context(ctxi_dst);
1236 pr_debug("%s: returning rc=%d\n", __func__, rc);
1237 return rc;
1238
1239err:
1240 list_for_each_entry_safe(lun_access_src, lun_access_dst, &sidecar, list)
1241 kfree(lun_access_src);
1242 goto out;
1243}
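
cxlflash_disk_clone() stages new lun_access entries on a private "sidecar"
list and splices them into the destination context only at out_success, so a
failure anywhere in the loop leaves the destination untouched and only the
sidecar needs freeing. The idiom in isolation, using a plain singly linked
list instead of the kernel's list_head (illustrative):

#include <stdlib.h>

struct node {
	struct node *next;
	/* payload elided */
};

/* Splice the staged 'sidecar' into 'live' on success, free it on failure */
static void commit_or_abort(struct node **live, struct node *sidecar, int ok)
{
	while (sidecar) {
		struct node *n = sidecar;

		sidecar = n->next;
		if (ok) {
			n->next = *live;
			*live = n;
		} else {
			free(n);
		}
	}
}
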
diff --git a/drivers/scsi/cxlflash/vlun.h b/drivers/scsi/cxlflash/vlun.h
new file mode 100644
index 000000000000..8b29a74946e4
--- /dev/null
+++ b/drivers/scsi/cxlflash/vlun.h
@@ -0,0 +1,86 @@
1/*
2 * CXL Flash Device Driver
3 *
4 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
5 * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
6 *
7 * Copyright (C) 2015 IBM Corporation
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#ifndef _CXLFLASH_VLUN_H
16#define _CXLFLASH_VLUN_H
17
18/* RHT - Resource Handle Table */
19#define MC_RHT_NMASK 16 /* in bits */
20#define MC_CHUNK_SHIFT MC_RHT_NMASK /* shift to go from LBA to chunk# */
21
22#define HIBIT (BITS_PER_LONG - 1)
23
24#define MAX_AUN_CLONE_CNT 0xFF
25
26/*
27 * LXT - LBA Translation Table
28 *
29 * +-------+-------+-------+-------+-------+-------+-------+---+---+
30 * | RLBA_BASE |LUN_IDX| P |SEL|
31 * +-------+-------+-------+-------+-------+-------+-------+---+---+
32 *
33 * The LXT Entry contains the physical LBA where the chunk starts (RLBA_BASE).
34 * The AFU ORs the low order bits from the virtual LBA (offset into the chunk)
35 * with RLBA_BASE. The result is the physical LBA to be sent to storage.
36 * The LXT Entry also contains an index to a LUN TBL and a bitmask of which
37 * outgoing (FC) ports can be selected. The port select bit-mask is ANDed
38 * with a global port select bit-mask maintained by the driver.
39 * In addition, it has permission bits that are ANDed with the
40 * RHT permissions to arrive at the final permissions for the chunk.
41 *
42 * LXT tables are allocated dynamically in groups. This is done to avoid
43 * a malloc/free overhead each time the LXT has to grow or shrink.
44 *
45 * Based on the current lxt_cnt (used), it is always possible to know
46 * how many are allocated (used+free). The number of allocated entries is
47 * not stored anywhere.
48 *
49 * The LXT table is re-allocated whenever it needs to cross into another group.
50 */
51#define LXT_GROUP_SIZE 8
52#define LXT_NUM_GROUPS(lxt_cnt) (((lxt_cnt) + 7)/8) /* alloc'ed groups */
53#define LXT_LUNIDX_SHIFT 8 /* LXT entry, shift for LUN index */
54#define LXT_PERM_SHIFT 4 /* LXT entry, shift for permission bits */
55
56struct ba_lun_info {
57 u64 *lun_alloc_map;
58 u32 lun_bmap_size;
59 u32 total_aus;
60 u64 free_aun_cnt;
61
62 /* indices to be used for elevator lookup of free map */
63 u32 free_low_idx;
64 u32 free_curr_idx;
65 u32 free_high_idx;
66
67 u8 *aun_clone_map;
68};
69
70struct ba_lun {
71 u64 lun_id;
72 u64 wwpn;
73 size_t lsize; /* LUN size in number of LBAs */
74 size_t lba_size; /* LBA size in number of bytes */
75 size_t au_size; /* Allocation Unit size in number of LBAs */
76 struct ba_lun_info *ba_lun_handle;
77};
78
79/* Block Allocator */
80struct blka {
81 struct ba_lun ba_lun;
82 u64 nchunk; /* number of chunks */
83 struct mutex mutex;
84};
85
86#endif /* ifndef _CXLFLASH_VLUN_H */
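
As the comment block above notes, only lxt_cnt is stored; the allocated
capacity is always derivable from LXT_NUM_GROUPS(), so grow/shrink must
reallocate exactly when the entry count crosses a group boundary. A small
sketch of that test:

#include <stdbool.h>
#include <stdint.h>

#define LXT_GROUP_SIZE 8
#define LXT_NUM_GROUPS(lxt_cnt) (((lxt_cnt) + 7) / 8)

static bool lxt_needs_realloc(uint32_t old_cnt, uint32_t new_cnt)
{
	return LXT_NUM_GROUPS(old_cnt) != LXT_NUM_GROUPS(new_cnt);
}

/* e.g. 8 -> 9 entries crosses from 1 group to 2: reallocate;
 *      9 -> 12 stays within 2 groups: reuse the allocation */
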
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 1dafeb43333b..40669f8dd0df 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * Disk Array driver for HP Smart Array SAS controllers 2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P. 3 * Copyright 2014-2015 PMC-Sierra, Inc.
4 * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
4 * 5 *
5 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -11,11 +12,7 @@
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more details. 13 * NON INFRINGEMENT. See the GNU General Public License for more details.
13 * 14 *
14 * You should have received a copy of the GNU General Public License 15 * Questions/Comments/Bugfixes to storagedev@pmcs.com
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 *
18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
19 * 16 *
20 */ 17 */
21 18
@@ -132,6 +129,11 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
132 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD}, 129 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
133 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE}, 130 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
134 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580}, 131 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
132 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
133 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
134 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
135 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
136 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
135 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076}, 137 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
136 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087}, 138 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
137 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D}, 139 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
@@ -190,6 +192,11 @@ static struct board_type products[] = {
190 {0x21CD103C, "Smart Array", &SA5_access}, 192 {0x21CD103C, "Smart Array", &SA5_access},
191 {0x21CE103C, "Smart HBA", &SA5_access}, 193 {0x21CE103C, "Smart HBA", &SA5_access},
192 {0x05809005, "SmartHBA-SA", &SA5_access}, 194 {0x05809005, "SmartHBA-SA", &SA5_access},
195 {0x05819005, "SmartHBA-SA 8i", &SA5_access},
196 {0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
197 {0x05839005, "SmartHBA-SA 8e", &SA5_access},
198 {0x05849005, "SmartHBA-SA 16i", &SA5_access},
199 {0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
193 {0x00761590, "HP Storage P1224 Array Controller", &SA5_access}, 200 {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
194 {0x00871590, "HP Storage P1224e Array Controller", &SA5_access}, 201 {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
195 {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access}, 202 {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
@@ -267,6 +274,7 @@ static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
267static void hpsa_command_resubmit_worker(struct work_struct *work); 274static void hpsa_command_resubmit_worker(struct work_struct *work);
268static u32 lockup_detected(struct ctlr_info *h); 275static u32 lockup_detected(struct ctlr_info *h);
269static int detect_controller_lockup(struct ctlr_info *h); 276static int detect_controller_lockup(struct ctlr_info *h);
277static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device);
270 278
271static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) 279static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
272{ 280{
@@ -325,7 +333,7 @@ static int check_for_unit_attention(struct ctlr_info *h,
325 333
326 decode_sense_data(c->err_info->SenseInfo, sense_len, 334 decode_sense_data(c->err_info->SenseInfo, sense_len,
327 &sense_key, &asc, &ascq); 335 &sense_key, &asc, &ascq);
328 if (sense_key != UNIT_ATTENTION || asc == -1) 336 if (sense_key != UNIT_ATTENTION || asc == 0xff)
329 return 0; 337 return 0;
330 338
331 switch (asc) { 339 switch (asc) {
@@ -717,12 +725,107 @@ static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
717 return snprintf(buf, 20, "%d\n", offload_enabled); 725 return snprintf(buf, 20, "%d\n", offload_enabled);
718} 726}
719 727
728#define MAX_PATHS 8
729#define PATH_STRING_LEN 50
730
731static ssize_t path_info_show(struct device *dev,
732 struct device_attribute *attr, char *buf)
733{
734 struct ctlr_info *h;
735 struct scsi_device *sdev;
736 struct hpsa_scsi_dev_t *hdev;
737 unsigned long flags;
738 int i;
739 int output_len = 0;
740 u8 box;
741 u8 bay;
742 u8 path_map_index = 0;
743 char *active;
744 unsigned char phys_connector[2];
745 unsigned char path[MAX_PATHS][PATH_STRING_LEN];
746
747 memset(path, 0, MAX_PATHS * PATH_STRING_LEN);
748 sdev = to_scsi_device(dev);
749 h = sdev_to_hba(sdev);
750 spin_lock_irqsave(&h->devlock, flags);
751 hdev = sdev->hostdata;
752 if (!hdev) {
753 spin_unlock_irqrestore(&h->devlock, flags);
754 return -ENODEV;
755 }
756
757 bay = hdev->bay;
758 for (i = 0; i < MAX_PATHS; i++) {
759 path_map_index = 1<<i;
760 if (i == hdev->active_path_index)
761 active = "Active";
762 else if (hdev->path_map & path_map_index)
763 active = "Inactive";
764 else
765 continue;
766
767 output_len = snprintf(path[i],
768 PATH_STRING_LEN, "[%d:%d:%d:%d] %20.20s ",
769 h->scsi_host->host_no,
770 hdev->bus, hdev->target, hdev->lun,
771 scsi_device_type(hdev->devtype));
772
773 if (is_ext_target(h, hdev) ||
774 (hdev->devtype == TYPE_RAID) ||
775 is_logical_dev_addr_mode(hdev->scsi3addr)) {
776 output_len += snprintf(path[i] + output_len,
777 PATH_STRING_LEN - output_len, "%s\n",
778 active);
779 continue;
780 }
781
782 box = hdev->box[i];
783 memcpy(&phys_connector, &hdev->phys_connector[i],
784 sizeof(phys_connector));
785 if (phys_connector[0] < '0')
786 phys_connector[0] = '0';
787 if (phys_connector[1] < '0')
788 phys_connector[1] = '0';
789 if (hdev->phys_connector[i] > 0)
790 output_len += snprintf(path[i] + output_len,
791 PATH_STRING_LEN - output_len,
792 "PORT: %.2s ",
793 phys_connector);
794 if (hdev->devtype == TYPE_DISK &&
795 hdev->expose_state != HPSA_DO_NOT_EXPOSE) {
796 if (box == 0 || box == 0xFF) {
797 output_len += snprintf(path[i] + output_len,
798 PATH_STRING_LEN - output_len,
799 "BAY: %hhu %s\n",
800 bay, active);
801 } else {
802 output_len += snprintf(path[i] + output_len,
803 PATH_STRING_LEN - output_len,
804 "BOX: %hhu BAY: %hhu %s\n",
805 box, bay, active);
806 }
807 } else if (box != 0 && box != 0xFF) {
808 output_len += snprintf(path[i] + output_len,
809 PATH_STRING_LEN - output_len, "BOX: %hhu %s\n",
810 box, active);
811 } else
812 output_len += snprintf(path[i] + output_len,
813 PATH_STRING_LEN - output_len, "%s\n", active);
814 }
815
816 spin_unlock_irqrestore(&h->devlock, flags);
817 return snprintf(buf, output_len+1, "%s%s%s%s%s%s%s%s",
818 path[0], path[1], path[2], path[3],
819 path[4], path[5], path[6], path[7]);
820}
821
720static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); 822static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
721static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); 823static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
722static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL); 824static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
723static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); 825static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
724static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO, 826static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
725 host_show_hp_ssd_smart_path_enabled, NULL); 827 host_show_hp_ssd_smart_path_enabled, NULL);
828static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
726static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH, 829static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
727 host_show_hp_ssd_smart_path_status, 830 host_show_hp_ssd_smart_path_status,
728 host_store_hp_ssd_smart_path_status); 831 host_store_hp_ssd_smart_path_status);
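
The core of path_info_show() is the per-path classification: path i is
reported "Active" when it equals active_path_index, "Inactive" when its bit
is set in path_map, and omitted otherwise. The decision in isolation, as an
illustrative userspace sketch:

#include <stdio.h>
#include <stdint.h>

#define MAX_PATHS 8

static void show_paths(uint8_t path_map, uint8_t active_index)
{
	int i;

	for (i = 0; i < MAX_PATHS; i++) {
		if (i == active_index)
			printf("path %d: Active\n", i);
		else if (path_map & (1u << i))
			printf("path %d: Inactive\n", i);
		/* bit clear and not active: path absent, skip it */
	}
}
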
@@ -744,6 +847,7 @@ static struct device_attribute *hpsa_sdev_attrs[] = {
744 &dev_attr_lunid, 847 &dev_attr_lunid,
745 &dev_attr_unique_id, 848 &dev_attr_unique_id,
746 &dev_attr_hp_ssd_smart_path_enabled, 849 &dev_attr_hp_ssd_smart_path_enabled,
850 &dev_attr_path_info,
747 &dev_attr_lockup_detected, 851 &dev_attr_lockup_detected,
748 NULL, 852 NULL,
749}; 853};
@@ -1083,17 +1187,19 @@ static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
1083 1187
1084 /* This is a non-zero lun of a multi-lun device. 1188 /* This is a non-zero lun of a multi-lun device.
1085 * Search through our list and find the device which 1189 * Search through our list and find the device which
1086 * has the same 8 byte LUN address, excepting byte 4. 1190 * has the same 8 byte LUN address, excepting byte 4 and 5.
1087 * Assign the same bus and target for this new LUN. 1191 * Assign the same bus and target for this new LUN.
1088 * Use the logical unit number from the firmware. 1192 * Use the logical unit number from the firmware.
1089 */ 1193 */
1090 memcpy(addr1, device->scsi3addr, 8); 1194 memcpy(addr1, device->scsi3addr, 8);
1091 addr1[4] = 0; 1195 addr1[4] = 0;
1196 addr1[5] = 0;
1092 for (i = 0; i < n; i++) { 1197 for (i = 0; i < n; i++) {
1093 sd = h->dev[i]; 1198 sd = h->dev[i];
1094 memcpy(addr2, sd->scsi3addr, 8); 1199 memcpy(addr2, sd->scsi3addr, 8);
1095 addr2[4] = 0; 1200 addr2[4] = 0;
1096 /* differ only in byte 4? */ 1201 addr2[5] = 0;
1202 /* differ only in byte 4 and 5? */
1097 if (memcmp(addr1, addr2, 8) == 0) { 1203 if (memcmp(addr1, addr2, 8) == 0) {
1098 device->bus = sd->bus; 1204 device->bus = sd->bus;
1099 device->target = sd->target; 1205 device->target = sd->target;
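
The widened match above treats two scsi3addrs as LUNs of the same multi-LUN
device when they agree in every byte except 4 and 5. As a standalone
predicate (illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool same_multilun_device(const uint8_t *a, const uint8_t *b)
{
	uint8_t a1[8], a2[8];

	memcpy(a1, a, 8);
	memcpy(a2, b, 8);
	a1[4] = a1[5] = 0;	/* ignore the LUN bytes */
	a2[4] = a2[5] = 0;
	return memcmp(a1, a2, 8) == 0;
}
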
@@ -1286,8 +1392,9 @@ static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1286 return 1; 1392 return 1;
1287 if (dev1->offload_enabled != dev2->offload_enabled) 1393 if (dev1->offload_enabled != dev2->offload_enabled)
1288 return 1; 1394 return 1;
1289 if (dev1->queue_depth != dev2->queue_depth) 1395 if (!is_logical_dev_addr_mode(dev1->scsi3addr))
1290 return 1; 1396 if (dev1->queue_depth != dev2->queue_depth)
1397 return 1;
1291 return 0; 1398 return 0;
1292} 1399}
1293 1400
@@ -1376,17 +1483,23 @@ static void hpsa_show_volume_status(struct ctlr_info *h,
1376 h->scsi_host->host_no, 1483 h->scsi_host->host_no,
1377 sd->bus, sd->target, sd->lun); 1484 sd->bus, sd->target, sd->lun);
1378 break; 1485 break;
1486 case HPSA_LV_NOT_AVAILABLE:
1487 dev_info(&h->pdev->dev,
1488 "C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
1489 h->scsi_host->host_no,
1490 sd->bus, sd->target, sd->lun);
1491 break;
1379 case HPSA_LV_UNDERGOING_RPI: 1492 case HPSA_LV_UNDERGOING_RPI:
1380 dev_info(&h->pdev->dev, 1493 dev_info(&h->pdev->dev,
1381 "C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n", 1494 "C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
1382 h->scsi_host->host_no, 1495 h->scsi_host->host_no,
1383 sd->bus, sd->target, sd->lun); 1496 sd->bus, sd->target, sd->lun);
1384 break; 1497 break;
1385 case HPSA_LV_PENDING_RPI: 1498 case HPSA_LV_PENDING_RPI:
1386 dev_info(&h->pdev->dev, 1499 dev_info(&h->pdev->dev,
1387 "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n", 1500 "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1388 h->scsi_host->host_no, 1501 h->scsi_host->host_no,
1389 sd->bus, sd->target, sd->lun); 1502 sd->bus, sd->target, sd->lun);
1390 break; 1503 break;
1391 case HPSA_LV_ENCRYPTED_NO_KEY: 1504 case HPSA_LV_ENCRYPTED_NO_KEY:
1392 dev_info(&h->pdev->dev, 1505 dev_info(&h->pdev->dev,
@@ -2585,34 +2698,6 @@ out:
2585 return rc; 2698 return rc;
2586} 2699}
2587 2700
2588static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
2589 unsigned char *scsi3addr, unsigned char page,
2590 struct bmic_controller_parameters *buf, size_t bufsize)
2591{
2592 int rc = IO_OK;
2593 struct CommandList *c;
2594 struct ErrorInfo *ei;
2595
2596 c = cmd_alloc(h);
2597 if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
2598 page, scsi3addr, TYPE_CMD)) {
2599 rc = -1;
2600 goto out;
2601 }
2602 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2603 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2604 if (rc)
2605 goto out;
2606 ei = c->err_info;
2607 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2608 hpsa_scsi_interpret_error(h, c);
2609 rc = -1;
2610 }
2611out:
2612 cmd_free(h, c);
2613 return rc;
2614}
2615
2616static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, 2701static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2617 u8 reset_type, int reply_queue) 2702 u8 reset_type, int reply_queue)
2618{ 2703{
@@ -2749,11 +2834,10 @@ static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
2749 lockup_detected(h)); 2834 lockup_detected(h));
2750 2835
2751 if (unlikely(lockup_detected(h))) { 2836 if (unlikely(lockup_detected(h))) {
2752 dev_warn(&h->pdev->dev, 2837 dev_warn(&h->pdev->dev,
2753 "Controller lockup detected during reset wait\n"); 2838 "Controller lockup detected during reset wait\n");
2754 mutex_unlock(&h->reset_mutex); 2839 rc = -ENODEV;
2755 rc = -ENODEV; 2840 }
2756 }
2757 2841
2758 if (unlikely(rc)) 2842 if (unlikely(rc))
2759 atomic_set(&dev->reset_cmds_out, 0); 2843 atomic_set(&dev->reset_cmds_out, 0);
@@ -3186,6 +3270,7 @@ static int hpsa_volume_offline(struct ctlr_info *h,
3186 /* Keep volume offline in certain cases: */ 3270 /* Keep volume offline in certain cases: */
3187 switch (ldstat) { 3271 switch (ldstat) {
3188 case HPSA_LV_UNDERGOING_ERASE: 3272 case HPSA_LV_UNDERGOING_ERASE:
3273 case HPSA_LV_NOT_AVAILABLE:
3189 case HPSA_LV_UNDERGOING_RPI: 3274 case HPSA_LV_UNDERGOING_RPI:
3190 case HPSA_LV_PENDING_RPI: 3275 case HPSA_LV_PENDING_RPI:
3191 case HPSA_LV_ENCRYPTED_NO_KEY: 3276 case HPSA_LV_ENCRYPTED_NO_KEY:
@@ -3562,29 +3647,6 @@ static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
3562 return NULL; 3647 return NULL;
3563} 3648}
3564 3649
3565static int hpsa_hba_mode_enabled(struct ctlr_info *h)
3566{
3567 int rc;
3568 int hba_mode_enabled;
3569 struct bmic_controller_parameters *ctlr_params;
3570 ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
3571 GFP_KERNEL);
3572
3573 if (!ctlr_params)
3574 return -ENOMEM;
3575 rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
3576 sizeof(struct bmic_controller_parameters));
3577 if (rc) {
3578 kfree(ctlr_params);
3579 return rc;
3580 }
3581
3582 hba_mode_enabled =
3583 ((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
3584 kfree(ctlr_params);
3585 return hba_mode_enabled;
3586}
3587
3588/* get physical drive ioaccel handle and queue depth */ 3650/* get physical drive ioaccel handle and queue depth */
3589static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h, 3651static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
3590 struct hpsa_scsi_dev_t *dev, 3652 struct hpsa_scsi_dev_t *dev,
@@ -3615,6 +3677,31 @@ static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
3615 atomic_set(&dev->reset_cmds_out, 0); 3677 atomic_set(&dev->reset_cmds_out, 0);
3616} 3678}
3617 3679
3680static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
3681 u8 *lunaddrbytes,
3682 struct bmic_identify_physical_device *id_phys)
3683{
3684 if (PHYS_IOACCEL(lunaddrbytes)
3685 && this_device->ioaccel_handle)
3686 this_device->hba_ioaccel_enabled = 1;
3687
3688 memcpy(&this_device->active_path_index,
3689 &id_phys->active_path_number,
3690 sizeof(this_device->active_path_index));
3691 memcpy(&this_device->path_map,
3692 &id_phys->redundant_path_present_map,
3693 sizeof(this_device->path_map));
3694 memcpy(&this_device->box,
3695 &id_phys->alternate_paths_phys_box_on_port,
3696 sizeof(this_device->box));
3697 memcpy(&this_device->phys_connector,
3698 &id_phys->alternate_paths_phys_connector,
3699 sizeof(this_device->phys_connector));
3700 memcpy(&this_device->bay,
3701 &id_phys->phys_bay_in_box,
3702 sizeof(this_device->bay));
3703}
3704
3618static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) 3705static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3619{ 3706{
3620 /* the idea here is we could get notified 3707 /* the idea here is we could get notified
@@ -3637,7 +3724,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3637 int ncurrent = 0; 3724 int ncurrent = 0;
3638 int i, n_ext_target_devs, ndevs_to_allocate; 3725 int i, n_ext_target_devs, ndevs_to_allocate;
3639 int raid_ctlr_position; 3726 int raid_ctlr_position;
3640 int rescan_hba_mode;
3641 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS); 3727 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
3642 3728
3643 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL); 3729 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
@@ -3653,17 +3739,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3653 } 3739 }
3654 memset(lunzerobits, 0, sizeof(lunzerobits)); 3740 memset(lunzerobits, 0, sizeof(lunzerobits));
3655 3741
3656 rescan_hba_mode = hpsa_hba_mode_enabled(h);
3657 if (rescan_hba_mode < 0)
3658 goto out;
3659
3660 if (!h->hba_mode_enabled && rescan_hba_mode)
3661 dev_warn(&h->pdev->dev, "HBA mode enabled\n");
3662 else if (h->hba_mode_enabled && !rescan_hba_mode)
3663 dev_warn(&h->pdev->dev, "HBA mode disabled\n");
3664
3665 h->hba_mode_enabled = rescan_hba_mode;
3666
3667 if (hpsa_gather_lun_info(h, physdev_list, &nphysicals, 3742 if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
3668 logdev_list, &nlogicals)) 3743 logdev_list, &nlogicals))
3669 goto out; 3744 goto out;
@@ -3739,9 +3814,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3739 /* do not expose masked devices */ 3814 /* do not expose masked devices */
3740 if (MASKED_DEVICE(lunaddrbytes) && 3815 if (MASKED_DEVICE(lunaddrbytes) &&
3741 i < nphysicals + (raid_ctlr_position == 0)) { 3816 i < nphysicals + (raid_ctlr_position == 0)) {
3742 if (h->hba_mode_enabled)
3743 dev_warn(&h->pdev->dev,
3744 "Masked physical device detected\n");
3745 this_device->expose_state = HPSA_DO_NOT_EXPOSE; 3817 this_device->expose_state = HPSA_DO_NOT_EXPOSE;
3746 } else { 3818 } else {
3747 this_device->expose_state = 3819 this_device->expose_state =
@@ -3761,30 +3833,21 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3761 ncurrent++; 3833 ncurrent++;
3762 break; 3834 break;
3763 case TYPE_DISK: 3835 case TYPE_DISK:
3764 if (i >= nphysicals) { 3836 if (i < nphysicals + (raid_ctlr_position == 0)) {
3765 ncurrent++; 3837 /* The disk is in HBA mode. */
3766 break; 3838 /* Never use RAID mapper in HBA mode. */
3767 }
3768
3769 if (h->hba_mode_enabled)
3770 /* never use raid mapper in HBA mode */
3771 this_device->offload_enabled = 0; 3839 this_device->offload_enabled = 0;
3772 else if (!(h->transMethod & CFGTBL_Trans_io_accel1 || 3840 hpsa_get_ioaccel_drive_info(h, this_device,
3773 h->transMethod & CFGTBL_Trans_io_accel2)) 3841 lunaddrbytes, id_phys);
3774 break; 3842 hpsa_get_path_info(this_device, lunaddrbytes,
3775 3843 id_phys);
3776 hpsa_get_ioaccel_drive_info(h, this_device, 3844 }
3777 lunaddrbytes, id_phys);
3778 atomic_set(&this_device->ioaccel_cmds_out, 0);
3779 ncurrent++; 3845 ncurrent++;
3780 break; 3846 break;
3781 case TYPE_TAPE: 3847 case TYPE_TAPE:
3782 case TYPE_MEDIUM_CHANGER: 3848 case TYPE_MEDIUM_CHANGER:
3783 ncurrent++;
3784 break;
3785 case TYPE_ENCLOSURE: 3849 case TYPE_ENCLOSURE:
3786 if (h->hba_mode_enabled) 3850 ncurrent++;
3787 ncurrent++;
3788 break; 3851 break;
3789 case TYPE_RAID: 3852 case TYPE_RAID:
3790 /* Only present the Smartarray HBA as a RAID controller. 3853 /* Only present the Smartarray HBA as a RAID controller.
@@ -5104,7 +5167,7 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
5104 int rc; 5167 int rc;
5105 struct ctlr_info *h; 5168 struct ctlr_info *h;
5106 struct hpsa_scsi_dev_t *dev; 5169 struct hpsa_scsi_dev_t *dev;
5107 char msg[40]; 5170 char msg[48];
5108 5171
5109 /* find the controller to which the command to be aborted was sent */ 5172 /* find the controller to which the command to be aborted was sent */
5110 h = sdev_to_hba(scsicmd->device); 5173 h = sdev_to_hba(scsicmd->device);
@@ -5122,16 +5185,18 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
5122 5185
5123 /* if controller locked up, we can guarantee command won't complete */ 5186 /* if controller locked up, we can guarantee command won't complete */
5124 if (lockup_detected(h)) { 5187 if (lockup_detected(h)) {
5125 sprintf(msg, "cmd %d RESET FAILED, lockup detected", 5188 snprintf(msg, sizeof(msg),
5126 hpsa_get_cmd_index(scsicmd)); 5189 "cmd %d RESET FAILED, lockup detected",
5190 hpsa_get_cmd_index(scsicmd));
5127 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); 5191 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5128 return FAILED; 5192 return FAILED;
5129 } 5193 }
5130 5194
5131 /* this reset request might be the result of a lockup; check */ 5195 /* this reset request might be the result of a lockup; check */
5132 if (detect_controller_lockup(h)) { 5196 if (detect_controller_lockup(h)) {
5133 sprintf(msg, "cmd %d RESET FAILED, new lockup detected", 5197 snprintf(msg, sizeof(msg),
5134 hpsa_get_cmd_index(scsicmd)); 5198 "cmd %d RESET FAILED, new lockup detected",
5199 hpsa_get_cmd_index(scsicmd));
5135 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); 5200 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5136 return FAILED; 5201 return FAILED;
5137 } 5202 }
@@ -5145,7 +5210,8 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
5145 /* send a reset to the SCSI LUN which the command was sent to */ 5210 /* send a reset to the SCSI LUN which the command was sent to */
5146 rc = hpsa_do_reset(h, dev, dev->scsi3addr, HPSA_RESET_TYPE_LUN, 5211 rc = hpsa_do_reset(h, dev, dev->scsi3addr, HPSA_RESET_TYPE_LUN,
5147 DEFAULT_REPLY_QUEUE); 5212 DEFAULT_REPLY_QUEUE);
5148 sprintf(msg, "reset %s", rc == 0 ? "completed successfully" : "failed"); 5213 snprintf(msg, sizeof(msg), "reset %s",
5214 rc == 0 ? "completed successfully" : "failed");
5149 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); 5215 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5150 return rc == 0 ? SUCCESS : FAILED; 5216 return rc == 0 ? SUCCESS : FAILED;
5151} 5217}
@@ -7989,7 +8055,6 @@ reinit_after_soft_reset:
7989 8055
7990 pci_set_drvdata(pdev, h); 8056 pci_set_drvdata(pdev, h);
7991 h->ndevices = 0; 8057 h->ndevices = 0;
7992 h->hba_mode_enabled = 0;
7993 8058
7994 spin_lock_init(&h->devlock); 8059 spin_lock_init(&h->devlock);
7995 rc = hpsa_put_ctlr_into_performant_mode(h); 8060 rc = hpsa_put_ctlr_into_performant_mode(h);
@@ -8054,7 +8119,7 @@ reinit_after_soft_reset:
8054 rc = hpsa_kdump_soft_reset(h); 8119 rc = hpsa_kdump_soft_reset(h);
8055 if (rc) 8120 if (rc)
8056 /* Neither hard nor soft reset worked, we're hosed. */ 8121 /* Neither hard nor soft reset worked, we're hosed. */
8057 goto clean9; 8122 goto clean7;
8058 8123
8059 dev_info(&h->pdev->dev, "Board READY.\n"); 8124 dev_info(&h->pdev->dev, "Board READY.\n");
8060 dev_info(&h->pdev->dev, 8125 dev_info(&h->pdev->dev,
@@ -8100,8 +8165,6 @@ reinit_after_soft_reset:
8100 h->heartbeat_sample_interval); 8165 h->heartbeat_sample_interval);
8101 return 0; 8166 return 0;
8102 8167
8103clean9: /* wq, sh, perf, sg, cmd, irq, shost, pci, lu, aer/h */
8104 kfree(h->hba_inquiry_data);
8105clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ 8168clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
8106 hpsa_free_performant_mode(h); 8169 hpsa_free_performant_mode(h);
8107 h->access.set_intr_mask(h, HPSA_INTR_OFF); 8170 h->access.set_intr_mask(h, HPSA_INTR_OFF);
@@ -8209,6 +8272,14 @@ static void hpsa_remove_one(struct pci_dev *pdev)
8209 destroy_workqueue(h->rescan_ctlr_wq); 8272 destroy_workqueue(h->rescan_ctlr_wq);
8210 destroy_workqueue(h->resubmit_wq); 8273 destroy_workqueue(h->resubmit_wq);
8211 8274
8275 /*
8276 * Call before disabling interrupts.
8277 * scsi_remove_host can trigger I/O operations especially
8278 * when multipath is enabled. There can be SYNCHRONIZE CACHE
8279 * operations which cannot complete and will hang the system.
8280 */
8281 if (h->scsi_host)
8282 scsi_remove_host(h->scsi_host); /* init_one 8 */
8212 /* includes hpsa_free_irqs - init_one 4 */ 8283 /* includes hpsa_free_irqs - init_one 4 */
8213 /* includes hpsa_disable_interrupt_mode - pci_init 2 */ 8284 /* includes hpsa_disable_interrupt_mode - pci_init 2 */
8214 hpsa_shutdown(pdev); 8285 hpsa_shutdown(pdev);
@@ -8217,8 +8288,6 @@ static void hpsa_remove_one(struct pci_dev *pdev)
8217 8288
8218 kfree(h->hba_inquiry_data); /* init_one 10 */ 8289 kfree(h->hba_inquiry_data); /* init_one 10 */
8219 h->hba_inquiry_data = NULL; /* init_one 10 */ 8290 h->hba_inquiry_data = NULL; /* init_one 10 */
8220 if (h->scsi_host)
8221 scsi_remove_host(h->scsi_host); /* init_one 8 */
8222 hpsa_free_ioaccel2_sg_chain_blocks(h); 8291 hpsa_free_ioaccel2_sg_chain_blocks(h);
8223 hpsa_free_performant_mode(h); /* init_one 7 */ 8292 hpsa_free_performant_mode(h); /* init_one 7 */
8224 hpsa_free_sg_chain_blocks(h); /* init_one 6 */ 8293 hpsa_free_sg_chain_blocks(h); /* init_one 6 */
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 6ee4da6b1153..27debb363529 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -1,6 +1,7 @@
1/* 1/*
2 * Disk Array driver for HP Smart Array SAS controllers 2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P. 3 * Copyright 2014-2015 PMC-Sierra, Inc.
4 * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
4 * 5 *
5 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -11,11 +12,7 @@
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more details. 13 * NON INFRINGEMENT. See the GNU General Public License for more details.
13 * 14 *
14 * You should have received a copy of the GNU General Public License 15 * Questions/Comments/Bugfixes to storagedev@pmcs.com
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 *
18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
19 * 16 *
20 */ 17 */
21#ifndef HPSA_H 18#ifndef HPSA_H
@@ -53,6 +50,11 @@ struct hpsa_scsi_dev_t {
53 * device via "ioaccel" path. 50 * device via "ioaccel" path.
54 */ 51 */
55 u32 ioaccel_handle; 52 u32 ioaccel_handle;
53 u8 active_path_index;
54 u8 path_map;
55 u8 bay;
56 u8 box[8];
57 u16 phys_connector[8];
56 int offload_config; /* I/O accel RAID offload configured */ 58 int offload_config; /* I/O accel RAID offload configured */
57 int offload_enabled; /* I/O accel RAID offload enabled */ 59 int offload_enabled; /* I/O accel RAID offload enabled */
58 int offload_to_be_enabled; 60 int offload_to_be_enabled;
@@ -114,7 +116,6 @@ struct bmic_controller_parameters {
114 u8 automatic_drive_slamming; 116 u8 automatic_drive_slamming;
115 u8 reserved1; 117 u8 reserved1;
116 u8 nvram_flags; 118 u8 nvram_flags;
117#define HBA_MODE_ENABLED_FLAG (1 << 3)
118 u8 cache_nvram_flags; 119 u8 cache_nvram_flags;
119 u8 drive_config_flags; 120 u8 drive_config_flags;
120 u16 reserved2; 121 u16 reserved2;
@@ -153,7 +154,6 @@ struct ctlr_info {
153 unsigned int msi_vector; 154 unsigned int msi_vector;
154 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */ 155 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
155 struct access_method access; 156 struct access_method access;
156 char hba_mode_enabled;
157 157
158 /* queue and queue Info */ 158 /* queue and queue Info */
159 unsigned int Qdepth; 159 unsigned int Qdepth;
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index c601622cc98e..47c756ba8dce 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -1,6 +1,7 @@
1/* 1/*
2 * Disk Array driver for HP Smart Array SAS controllers 2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P. 3 * Copyright 2014-2015 PMC-Sierra, Inc.
4 * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
4 * 5 *
5 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -11,11 +12,7 @@
11 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or 12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
12 * NON INFRINGEMENT. See the GNU General Public License for more details. 13 * NON INFRINGEMENT. See the GNU General Public License for more details.
13 * 14 *
14 * You should have received a copy of the GNU General Public License 15 * Questions/Comments/Bugfixes to storagedev@pmcs.com
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 *
18 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
19 * 16 *
20 */ 17 */
21#ifndef HPSA_CMD_H 18#ifndef HPSA_CMD_H
@@ -167,6 +164,7 @@
167/* Logical volume states */ 164/* Logical volume states */
168#define HPSA_VPD_LV_STATUS_UNSUPPORTED 0xff 165#define HPSA_VPD_LV_STATUS_UNSUPPORTED 0xff
169#define HPSA_LV_OK 0x0 166#define HPSA_LV_OK 0x0
167#define HPSA_LV_NOT_AVAILABLE 0x0b
170#define HPSA_LV_UNDERGOING_ERASE 0x0F 168#define HPSA_LV_UNDERGOING_ERASE 0x0F
171#define HPSA_LV_UNDERGOING_RPI 0x12 169#define HPSA_LV_UNDERGOING_RPI 0x12
172#define HPSA_LV_PENDING_RPI 0x13 170#define HPSA_LV_PENDING_RPI 0x13
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index e995218476ed..a83f705ed8a5 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * HighPoint RR3xxx/4xxx controller driver for Linux 2 * HighPoint RR3xxx/4xxx controller driver for Linux
3 * Copyright (C) 2006-2012 HighPoint Technologies, Inc. All Rights Reserved. 3 * Copyright (C) 2006-2015 HighPoint Technologies, Inc. All Rights Reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -42,7 +42,7 @@ MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");
42 42
43static char driver_name[] = "hptiop"; 43static char driver_name[] = "hptiop";
44static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver"; 44static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
45static const char driver_ver[] = "v1.8"; 45static const char driver_ver[] = "v1.10.0";
46 46
47static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec); 47static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
48static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag, 48static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
@@ -764,9 +764,7 @@ static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
764 scsi_set_resid(scp, 764 scsi_set_resid(scp,
765 scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length)); 765 scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
766 scp->result = SAM_STAT_CHECK_CONDITION; 766 scp->result = SAM_STAT_CHECK_CONDITION;
767 memcpy(scp->sense_buffer, &req->sg_list, 767 memcpy(scp->sense_buffer, &req->sg_list, SCSI_SENSE_BUFFERSIZE);
768 min_t(size_t, SCSI_SENSE_BUFFERSIZE,
769 le32_to_cpu(req->dataxfer_length)));
770 goto skip_resid; 768 goto skip_resid;
771 break; 769 break;
772 770
@@ -1037,8 +1035,9 @@ static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
1037 1035
1038 scp->result = 0; 1036 scp->result = 0;
1039 1037
1040 if (scp->device->channel || scp->device->lun || 1038 if (scp->device->channel ||
1041 scp->device->id > hba->max_devices) { 1039 (scp->device->id > hba->max_devices) ||
1040 ((scp->device->id == (hba->max_devices-1)) && scp->device->lun)) {
1042 scp->result = DID_BAD_TARGET << 16; 1041 scp->result = DID_BAD_TARGET << 16;
1043 free_req(hba, _req); 1042 free_req(hba, _req);
1044 goto cmd_done; 1043 goto cmd_done;
@@ -1168,6 +1167,14 @@ static struct device_attribute *hptiop_attrs[] = {
1168 NULL 1167 NULL
1169}; 1168};
1170 1169
1170static int hptiop_slave_config(struct scsi_device *sdev)
1171{
1172 if (sdev->type == TYPE_TAPE)
1173 blk_queue_max_hw_sectors(sdev->request_queue, 8192);
1174
1175 return 0;
1176}
1177
1171static struct scsi_host_template driver_template = { 1178static struct scsi_host_template driver_template = {
1172 .module = THIS_MODULE, 1179 .module = THIS_MODULE,
1173 .name = driver_name, 1180 .name = driver_name,
@@ -1179,6 +1186,7 @@ static struct scsi_host_template driver_template = {
1179 .use_clustering = ENABLE_CLUSTERING, 1186 .use_clustering = ENABLE_CLUSTERING,
1180 .proc_name = driver_name, 1187 .proc_name = driver_name,
1181 .shost_attrs = hptiop_attrs, 1188 .shost_attrs = hptiop_attrs,
1189 .slave_configure = hptiop_slave_config,
1182 .this_id = -1, 1190 .this_id = -1,
1183 .change_queue_depth = hptiop_adjust_disk_queue_depth, 1191 .change_queue_depth = hptiop_adjust_disk_queue_depth,
1184}; 1192};
@@ -1323,6 +1331,7 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
1323 } 1331 }
1324 1332
1325 hba = (struct hptiop_hba *)host->hostdata; 1333 hba = (struct hptiop_hba *)host->hostdata;
1334 memset(hba, 0, sizeof(struct hptiop_hba));
1326 1335
1327 hba->ops = iop_ops; 1336 hba->ops = iop_ops;
1328 hba->pcidev = pcidev; 1337 hba->pcidev = pcidev;
@@ -1336,7 +1345,7 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
1336 init_waitqueue_head(&hba->reset_wq); 1345 init_waitqueue_head(&hba->reset_wq);
1337 init_waitqueue_head(&hba->ioctl_wq); 1346 init_waitqueue_head(&hba->ioctl_wq);
1338 1347
1339 host->max_lun = 1; 1348 host->max_lun = 128;
1340 host->max_channel = 0; 1349 host->max_channel = 0;
1341 host->io_port = 0; 1350 host->io_port = 0;
1342 host->n_io_port = 0; 1351 host->n_io_port = 0;
@@ -1428,34 +1437,33 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
1428 dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests); 1437 dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);
1429 1438
1430 hba->req_size = req_size; 1439 hba->req_size = req_size;
1431 start_virt = dma_alloc_coherent(&pcidev->dev, 1440 hba->req_list = NULL;
1432 hba->req_size*hba->max_requests + 0x20,
1433 &start_phy, GFP_KERNEL);
1434 1441
1435 if (!start_virt) { 1442 for (i = 0; i < hba->max_requests; i++) {
1436 printk(KERN_ERR "scsi%d: fail to alloc request mem\n", 1443 start_virt = dma_alloc_coherent(&pcidev->dev,
1437 hba->host->host_no); 1444 hba->req_size + 0x20,
1438 goto free_request_irq; 1445 &start_phy, GFP_KERNEL);
1439 } 1446
1447 if (!start_virt) {
1448 printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
1449 hba->host->host_no);
1450 goto free_request_mem;
1451 }
1440 1452
1441 hba->dma_coherent = start_virt; 1453 hba->dma_coherent[i] = start_virt;
1442 hba->dma_coherent_handle = start_phy; 1454 hba->dma_coherent_handle[i] = start_phy;
1443 1455
1444 if ((start_phy & 0x1f) != 0) { 1456 if ((start_phy & 0x1f) != 0) {
1445 offset = ((start_phy + 0x1f) & ~0x1f) - start_phy; 1457 offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
1446 start_phy += offset; 1458 start_phy += offset;
1447 start_virt += offset; 1459 start_virt += offset;
1448 } 1460 }
1449 1461
1450 hba->req_list = NULL;
1451 for (i = 0; i < hba->max_requests; i++) {
1452 hba->reqs[i].next = NULL; 1462 hba->reqs[i].next = NULL;
1453 hba->reqs[i].req_virt = start_virt; 1463 hba->reqs[i].req_virt = start_virt;
1454 hba->reqs[i].req_shifted_phy = start_phy >> 5; 1464 hba->reqs[i].req_shifted_phy = start_phy >> 5;
1455 hba->reqs[i].index = i; 1465 hba->reqs[i].index = i;
1456 free_req(hba, &hba->reqs[i]); 1466 free_req(hba, &hba->reqs[i]);
1457 start_virt = (char *)start_virt + hba->req_size;
1458 start_phy = start_phy + hba->req_size;
1459 } 1467 }
1460 1468
1461 /* Enable Interrupt and start background task */ 1469 /* Enable Interrupt and start background task */
@@ -1474,11 +1482,16 @@ static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
1474 return 0; 1482 return 0;
1475 1483
1476free_request_mem: 1484free_request_mem:
1477 dma_free_coherent(&hba->pcidev->dev, 1485 for (i = 0; i < hba->max_requests; i++) {
1478 hba->req_size * hba->max_requests + 0x20, 1486 if (hba->dma_coherent[i] && hba->dma_coherent_handle[i])
1479 hba->dma_coherent, hba->dma_coherent_handle); 1487 dma_free_coherent(&hba->pcidev->dev,
1488 hba->req_size + 0x20,
1489 hba->dma_coherent[i],
1490 hba->dma_coherent_handle[i]);
1491 else
1492 break;
1493 }
1480 1494
1481free_request_irq:
1482 free_irq(hba->pcidev->irq, hba); 1495 free_irq(hba->pcidev->irq, hba);
1483 1496
1484unmap_pci_bar: 1497unmap_pci_bar:
@@ -1546,6 +1559,7 @@ static void hptiop_remove(struct pci_dev *pcidev)
1546{ 1559{
1547 struct Scsi_Host *host = pci_get_drvdata(pcidev); 1560 struct Scsi_Host *host = pci_get_drvdata(pcidev);
1548 struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata; 1561 struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
1562 u32 i;
1549 1563
1550 dprintk("scsi%d: hptiop_remove\n", hba->host->host_no); 1564 dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);
1551 1565
@@ -1555,10 +1569,15 @@ static void hptiop_remove(struct pci_dev *pcidev)

 	free_irq(hba->pcidev->irq, hba);

-	dma_free_coherent(&hba->pcidev->dev,
-			hba->req_size * hba->max_requests + 0x20,
-			hba->dma_coherent,
-			hba->dma_coherent_handle);
+	for (i = 0; i < hba->max_requests; i++) {
+		if (hba->dma_coherent[i] && hba->dma_coherent_handle[i])
+			dma_free_coherent(&hba->pcidev->dev,
+				hba->req_size + 0x20,
+				hba->dma_coherent[i],
+				hba->dma_coherent_handle[i]);
+		else
+			break;
+	}

 	hba->ops->internal_memfree(hba);

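Both this teardown and the error path in hptiop_probe free the per-request buffers the same way: slots are filled in allocation order, so the first empty slot marks where a failed probe stopped, and the loop can break there. A compact userspace sketch of the pattern, with invented names:

#include <stdlib.h>

#define MAX_REQUESTS 8

int main(void)
{
	void *bufs[MAX_REQUESTS] = { 0 };
	int i;

	/* partial allocation: only the first few slots get memory */
	for (i = 0; i < 3; i++)
		bufs[i] = malloc(64);

	/* teardown mirrors the driver: stop at the first empty slot */
	for (i = 0; i < MAX_REQUESTS; i++) {
		if (!bufs[i])
			break;
		free(bufs[i]);
	}
	return 0;
}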
@@ -1653,6 +1672,14 @@ static struct pci_device_id hptiop_id_table[] = {
 	{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
 	{ PCI_VDEVICE(TTI, 0x4520), (kernel_ulong_t)&hptiop_mvfrey_ops },
 	{ PCI_VDEVICE(TTI, 0x4522), (kernel_ulong_t)&hptiop_mvfrey_ops },
+	{ PCI_VDEVICE(TTI, 0x3610), (kernel_ulong_t)&hptiop_mvfrey_ops },
+	{ PCI_VDEVICE(TTI, 0x3611), (kernel_ulong_t)&hptiop_mvfrey_ops },
+	{ PCI_VDEVICE(TTI, 0x3620), (kernel_ulong_t)&hptiop_mvfrey_ops },
+	{ PCI_VDEVICE(TTI, 0x3622), (kernel_ulong_t)&hptiop_mvfrey_ops },
+	{ PCI_VDEVICE(TTI, 0x3640), (kernel_ulong_t)&hptiop_mvfrey_ops },
+	{ PCI_VDEVICE(TTI, 0x3660), (kernel_ulong_t)&hptiop_mvfrey_ops },
+	{ PCI_VDEVICE(TTI, 0x3680), (kernel_ulong_t)&hptiop_mvfrey_ops },
+	{ PCI_VDEVICE(TTI, 0x3690), (kernel_ulong_t)&hptiop_mvfrey_ops },
 	{},
 };
1658 1685
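The eight new PCI_VDEVICE entries let the existing mvfrey code paths bind to the new boards; the PCI core matches devices against this table at probe time. A minimal sketch of how such a table plugs into a pci_driver, using placeholder demo_* names rather than anything from hptiop:

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id demo_id_table[] = {
	{ PCI_VDEVICE(TTI, 0x3610) },	/* vendor id from pci_ids.h, device id */
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, demo_id_table);	/* lets udev autoload the module */

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pcim_enable_device(pdev);	/* managed enable; freed on unbind */
}

static struct pci_driver demo_driver = {
	.name		= "demo",
	.id_table	= demo_id_table,
	.probe		= demo_probe,
};
module_pci_driver(demo_driver);
MODULE_LICENSE("GPL");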
diff --git a/drivers/scsi/hptiop.h b/drivers/scsi/hptiop.h
index 020619d60b08..4d1c51153b70 100644
--- a/drivers/scsi/hptiop.h
+++ b/drivers/scsi/hptiop.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * HighPoint RR3xxx/4xxx controller driver for Linux 2 * HighPoint RR3xxx/4xxx controller driver for Linux
3 * Copyright (C) 2006-2012 HighPoint Technologies, Inc. All Rights Reserved. 3 * Copyright (C) 2006-2015 HighPoint Technologies, Inc. All Rights Reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -327,8 +327,8 @@ struct hptiop_hba {
327 struct hptiop_request reqs[HPTIOP_MAX_REQUESTS]; 327 struct hptiop_request reqs[HPTIOP_MAX_REQUESTS];
328 328
329 /* used to free allocated dma area */ 329 /* used to free allocated dma area */
330 void *dma_coherent; 330 void *dma_coherent[HPTIOP_MAX_REQUESTS];
331 dma_addr_t dma_coherent_handle; 331 dma_addr_t dma_coherent_handle[HPTIOP_MAX_REQUESTS];
332 332
333 atomic_t reset_count; 333 atomic_t reset_count;
334 atomic_t resetting; 334 atomic_t resetting;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index a9aa38903efe..341191952155 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -1165,7 +1165,8 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res,

 	if (ioa_cfg->sis64) {
 		proto = cfgtew->u.cfgte64->proto;
-		res->res_flags = cfgtew->u.cfgte64->res_flags;
+		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
+		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
 		res->qmodel = IPR_QUEUEING_MODEL64(res);
 		res->type = cfgtew->u.cfgte64->res_type;

@@ -1313,8 +1314,8 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res,
1313 int new_path = 0; 1314 int new_path = 0;
1314 1315
1315 if (res->ioa_cfg->sis64) { 1316 if (res->ioa_cfg->sis64) {
1316 res->flags = cfgtew->u.cfgte64->flags; 1317 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1317 res->res_flags = cfgtew->u.cfgte64->res_flags; 1318 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1318 res->type = cfgtew->u.cfgte64->res_type; 1319 res->type = cfgtew->u.cfgte64->res_type;
1319 1320
1320 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data, 1321 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
@@ -1900,7 +1901,7 @@ static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1900 * Return value: 1901 * Return value:
1901 * none 1902 * none
1902 **/ 1903 **/
1903static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len) 1904static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1904{ 1905{
1905 int i; 1906 int i;
1906 1907
@@ -2270,7 +2271,7 @@ static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2270 ((unsigned long)fabric + be16_to_cpu(fabric->length)); 2271 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2271 } 2272 }
2272 2273
2273 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len); 2274 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2274} 2275}
2275 2276
2276/** 2277/**
@@ -2364,7 +2365,7 @@ static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2364 ((unsigned long)fabric + be16_to_cpu(fabric->length)); 2365 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2365 } 2366 }
2366 2367
2367 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len); 2368 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2368} 2369}
2369 2370
2370/** 2371/**
@@ -4455,7 +4456,7 @@ static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *a
4455 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 4456 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4456 res = (struct ipr_resource_entry *)sdev->hostdata; 4457 res = (struct ipr_resource_entry *)sdev->hostdata;
4457 if (res && ioa_cfg->sis64) 4458 if (res && ioa_cfg->sis64)
4458 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id); 4459 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4459 else if (res) 4460 else if (res)
4460 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn); 4461 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4461 4462
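The ipr changes in this series are all of one kind: fields that arrive big-endian from the adapter are either annotated __be16/__be32 (so sparse can type-check them) or converted with be16_to_cpu()/be64_to_cpu() at the point of use. A short illustrative sketch of the convention, not the actual ipr structures:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_cfg_entry {
	__be16 flags;		/* stored exactly as received from the adapter */
	__be32 res_handle;	/* opaque token, passed back unconverted */
};

/* convert once, at the point where the CPU actually interprets the value */
static u16 demo_native_flags(const struct demo_cfg_entry *e)
{
	return be16_to_cpu(e->flags);
}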
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 6b97ee45c7b4..e4fb17a58649 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -39,8 +39,8 @@
39/* 39/*
40 * Literals 40 * Literals
41 */ 41 */
42#define IPR_DRIVER_VERSION "2.6.1" 42#define IPR_DRIVER_VERSION "2.6.2"
43#define IPR_DRIVER_DATE "(March 12, 2015)" 43#define IPR_DRIVER_DATE "(June 11, 2015)"
44 44
45/* 45/*
46 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding 46 * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -1005,13 +1005,13 @@ struct ipr_hostrcb_type_24_error {
1005struct ipr_hostrcb_type_07_error { 1005struct ipr_hostrcb_type_07_error {
1006 u8 failure_reason[64]; 1006 u8 failure_reason[64];
1007 struct ipr_vpd vpd; 1007 struct ipr_vpd vpd;
1008 u32 data[222]; 1008 __be32 data[222];
1009}__attribute__((packed, aligned (4))); 1009}__attribute__((packed, aligned (4)));
1010 1010
1011struct ipr_hostrcb_type_17_error { 1011struct ipr_hostrcb_type_17_error {
1012 u8 failure_reason[64]; 1012 u8 failure_reason[64];
1013 struct ipr_ext_vpd vpd; 1013 struct ipr_ext_vpd vpd;
1014 u32 data[476]; 1014 __be32 data[476];
1015}__attribute__((packed, aligned (4))); 1015}__attribute__((packed, aligned (4)));
1016 1016
1017struct ipr_hostrcb_config_element { 1017struct ipr_hostrcb_config_element {
@@ -1289,18 +1289,17 @@ struct ipr_resource_entry {
 	(((res)->bus << 24) | ((res)->target << 8) | (res)->lun)

 	u8 ata_class;
-
-	u8 flags;
-	__be16 res_flags;
-
 	u8 type;

+	u16 flags;
+	u16 res_flags;
+
 	u8 qmodel;
 	struct ipr_std_inq_data std_inq_data;

 	__be32 res_handle;
 	__be64 dev_id;
-	__be64 lun_wwn;
+	u64 lun_wwn;
 	struct scsi_lun dev_lun;
 	u8 res_path[8];

diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 2d5909c4685c..5121272f28fd 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -191,7 +191,7 @@ static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
191} 191}
192 192
193/** 193/**
194 * fc_fcp_pkt_destory() - Release hold on a fcp_pkt 194 * fc_fcp_pkt_destroy() - Release hold on a fcp_pkt
195 * @seq: The sequence that the FCP packet is on (required by destructor API) 195 * @seq: The sequence that the FCP packet is on (required by destructor API)
196 * @fsp: The FCP packet to be released 196 * @fsp: The FCP packet to be released
197 * 197 *
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index ce96d5bf8ae7..759cbebed7c7 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -701,7 +701,7 @@ lpfc_work_done(struct lpfc_hba *phba)
701 HA_RXMASK)); 701 HA_RXMASK));
702 } 702 }
703 } 703 }
704 if ((phba->sli_rev == LPFC_SLI_REV4) & 704 if ((phba->sli_rev == LPFC_SLI_REV4) &&
705 (!list_empty(&pring->txq))) 705 (!list_empty(&pring->txq)))
706 lpfc_drain_txq(phba); 706 lpfc_drain_txq(phba);
707 /* 707 /*
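The lpfc fix swaps a bitwise & for the logical && the author clearly meant. Both operands happen to evaluate to 0 or 1 here, so the old code computed the right value by luck, but && states the intent and short-circuits, skipping the list_empty() test once the SLI revision has already ruled the branch out. A runnable illustration:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	int sli_rev = 4;
	bool queue_empty = false;

	/* with &&, the right-hand test is not evaluated when sli_rev != 4 */
	if ((sli_rev == 4) && !queue_empty)
		printf("drain the queue\n");
	return 0;
}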
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index bc7b34c02723..9d05302a3bcd 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -268,8 +268,8 @@ mega_query_adapter(adapter_t *adapter)
268 raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */ 268 raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */
269 269
270 if ((retval = issue_scb_block(adapter, raw_mbox))) 270 if ((retval = issue_scb_block(adapter, raw_mbox)))
271 printk(KERN_WARNING 271 dev_warn(&adapter->dev->dev,
272 "megaraid: Product_info cmd failed with error: %d\n", 272 "Product_info cmd failed with error: %d\n",
273 retval); 273 retval);
274 274
275 pci_unmap_single(adapter->dev, prod_info_dma_handle, 275 pci_unmap_single(adapter->dev, prod_info_dma_handle,
@@ -334,7 +334,7 @@ mega_query_adapter(adapter_t *adapter)
334 adapter->bios_version[4] = 0; 334 adapter->bios_version[4] = 0;
335 } 335 }
336 336
337 printk(KERN_NOTICE "megaraid: [%s:%s] detected %d logical drives.\n", 337 dev_notice(&adapter->dev->dev, "[%s:%s] detected %d logical drives\n",
338 adapter->fw_version, adapter->bios_version, adapter->numldrv); 338 adapter->fw_version, adapter->bios_version, adapter->numldrv);
339 339
340 /* 340 /*
@@ -342,7 +342,7 @@ mega_query_adapter(adapter_t *adapter)
342 */ 342 */
343 adapter->support_ext_cdb = mega_support_ext_cdb(adapter); 343 adapter->support_ext_cdb = mega_support_ext_cdb(adapter);
344 if (adapter->support_ext_cdb) 344 if (adapter->support_ext_cdb)
345 printk(KERN_NOTICE "megaraid: supports extended CDBs.\n"); 345 dev_notice(&adapter->dev->dev, "supports extended CDBs\n");
346 346
347 347
348 return 0; 348 return 0;
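The megaraid.c portion of this merge is a mechanical conversion from bare printk() to the dev_printk() family. dev_warn() and friends prefix each message with the driver and device name automatically, which is why the hand-written "megaraid:" tags disappear. A sketch of the pattern with a hypothetical helper, not code from this driver:

#include <linux/device.h>
#include <linux/pci.h>

static void demo_report(struct pci_dev *pdev, int err)
{
	/* was: printk(KERN_WARNING "megaraid: cmd failed: %d\n", err);
	 * dev_warn() adds "megaraid 0000:xx:yy.z:" style prefixing itself. */
	dev_warn(&pdev->dev, "cmd failed: %d\n", err);
}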
@@ -678,11 +678,11 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)

 	if(!(adapter->flag & (1L << cmd->device->channel))) {

-		printk(KERN_NOTICE
-			"scsi%d: scanning scsi channel %d ",
+		dev_notice(&adapter->dev->dev,
+			"scsi%d: scanning scsi channel %d "
+			"for logical drives\n",
 			adapter->host->host_no,
 			cmd->device->channel);
-		printk("for logical drives.\n");

 		adapter->flag |= (1L << cmd->device->channel);
 	}
@@ -983,11 +983,11 @@ mega_prepare_passthru(adapter_t *adapter, scb_t *scb, Scsi_Cmnd *cmd,
 	case READ_CAPACITY:
 		if(!(adapter->flag & (1L << cmd->device->channel))) {

-			printk(KERN_NOTICE
-				"scsi%d: scanning scsi channel %d [P%d] ",
+			dev_notice(&adapter->dev->dev,
+				"scsi%d: scanning scsi channel %d [P%d] "
+				"for physical devices\n",
 				adapter->host->host_no,
 				cmd->device->channel, channel);
-			printk("for physical devices.\n");

 			adapter->flag |= (1L << cmd->device->channel);
 		}
@@ -1045,11 +1045,11 @@ mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb, Scsi_Cmnd *cmd,
 	case READ_CAPACITY:
 		if(!(adapter->flag & (1L << cmd->device->channel))) {

-			printk(KERN_NOTICE
-				"scsi%d: scanning scsi channel %d [P%d] ",
+			dev_notice(&adapter->dev->dev,
+				"scsi%d: scanning scsi channel %d [P%d] "
+				"for physical devices\n",
 				adapter->host->host_no,
 				cmd->device->channel, channel);
-			printk("for physical devices.\n");

 			adapter->flag |= (1L << cmd->device->channel);
 		}
@@ -1241,7 +1241,7 @@ issue_scb_block(adapter_t *adapter, u_char *raw_mbox)
1241 return mbox->m_in.status; 1241 return mbox->m_in.status;
1242 1242
1243bug_blocked_mailbox: 1243bug_blocked_mailbox:
1244 printk(KERN_WARNING "megaraid: Blocked mailbox......!!\n"); 1244 dev_warn(&adapter->dev->dev, "Blocked mailbox......!!\n");
1245 udelay (1000); 1245 udelay (1000);
1246 return -1; 1246 return -1;
1247} 1247}
@@ -1454,9 +1454,8 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
 	 * Make sure f/w has completed a valid command
 	 */
 	if( !(scb->state & SCB_ISSUED) || scb->cmd == NULL ) {
-		printk(KERN_CRIT
-			"megaraid: invalid command ");
-		printk("Id %d, scb->state:%x, scsi cmd:%p\n",
+		dev_crit(&adapter->dev->dev, "invalid command "
+			"Id %d, scb->state:%x, scsi cmd:%p\n",
 			cmdid, scb->state, scb->cmd);

 		continue;
@@ -1467,8 +1466,8 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
1467 */ 1466 */
1468 if( scb->state & SCB_ABORT ) { 1467 if( scb->state & SCB_ABORT ) {
1469 1468
1470 printk(KERN_WARNING 1469 dev_warn(&adapter->dev->dev,
1471 "megaraid: aborted cmd [%x] complete.\n", 1470 "aborted cmd [%x] complete\n",
1472 scb->idx); 1471 scb->idx);
1473 1472
1474 scb->cmd->result = (DID_ABORT << 16); 1473 scb->cmd->result = (DID_ABORT << 16);
@@ -1486,8 +1485,8 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
1486 */ 1485 */
1487 if( scb->state & SCB_RESET ) { 1486 if( scb->state & SCB_RESET ) {
1488 1487
1489 printk(KERN_WARNING 1488 dev_warn(&adapter->dev->dev,
1490 "megaraid: reset cmd [%x] complete.\n", 1489 "reset cmd [%x] complete\n",
1491 scb->idx); 1490 scb->idx);
1492 1491
1493 scb->cmd->result = (DID_RESET << 16); 1492 scb->cmd->result = (DID_RESET << 16);
@@ -1553,8 +1552,7 @@ mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
 	if( sg_page(sgl) ) {
 		c = *(unsigned char *) sg_virt(&sgl[0]);
 	} else {
-		printk(KERN_WARNING
-			"megaraid: invalid sg.\n");
+		dev_warn(&adapter->dev->dev, "invalid sg\n");
 		c = 0;
 	}

@@ -1902,11 +1900,10 @@ megaraid_reset(struct scsi_cmnd *cmd)
 	mc.opcode = MEGA_RESET_RESERVATIONS;

 	if( mega_internal_command(adapter, &mc, NULL) != 0 ) {
-		printk(KERN_WARNING
-			"megaraid: reservation reset failed.\n");
+		dev_warn(&adapter->dev->dev, "reservation reset failed\n");
 	}
 	else {
-		printk(KERN_INFO "megaraid: reservation reset.\n");
+		dev_info(&adapter->dev->dev, "reservation reset\n");
 	}
 #endif

@@ -1939,7 +1936,7 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
1939 struct list_head *pos, *next; 1936 struct list_head *pos, *next;
1940 scb_t *scb; 1937 scb_t *scb;
1941 1938
1942 printk(KERN_WARNING "megaraid: %s cmd=%x <c=%d t=%d l=%d>\n", 1939 dev_warn(&adapter->dev->dev, "%s cmd=%x <c=%d t=%d l=%d>\n",
1943 (aor == SCB_ABORT)? "ABORTING":"RESET", 1940 (aor == SCB_ABORT)? "ABORTING":"RESET",
1944 cmd->cmnd[0], cmd->device->channel, 1941 cmd->cmnd[0], cmd->device->channel,
1945 cmd->device->id, (u32)cmd->device->lun); 1942 cmd->device->id, (u32)cmd->device->lun);
@@ -1963,8 +1960,8 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
1963 */ 1960 */
1964 if( scb->state & SCB_ISSUED ) { 1961 if( scb->state & SCB_ISSUED ) {
1965 1962
1966 printk(KERN_WARNING 1963 dev_warn(&adapter->dev->dev,
1967 "megaraid: %s[%x], fw owner.\n", 1964 "%s[%x], fw owner\n",
1968 (aor==SCB_ABORT) ? "ABORTING":"RESET", 1965 (aor==SCB_ABORT) ? "ABORTING":"RESET",
1969 scb->idx); 1966 scb->idx);
1970 1967
@@ -1976,8 +1973,8 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor)
1976 * Not yet issued! Remove from the pending 1973 * Not yet issued! Remove from the pending
1977 * list 1974 * list
1978 */ 1975 */
1979 printk(KERN_WARNING 1976 dev_warn(&adapter->dev->dev,
1980 "megaraid: %s-[%x], driver owner.\n", 1977 "%s-[%x], driver owner\n",
1981 (aor==SCB_ABORT) ? "ABORTING":"RESET", 1978 (aor==SCB_ABORT) ? "ABORTING":"RESET",
1982 scb->idx); 1979 scb->idx);
1983 1980
@@ -2197,7 +2194,7 @@ proc_show_rebuild_rate(struct seq_file *m, void *v)
2197 2194
2198 if( mega_adapinq(adapter, dma_handle) != 0 ) { 2195 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2199 seq_puts(m, "Adapter inquiry failed.\n"); 2196 seq_puts(m, "Adapter inquiry failed.\n");
2200 printk(KERN_WARNING "megaraid: inquiry failed.\n"); 2197 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2201 goto free_inquiry; 2198 goto free_inquiry;
2202 } 2199 }
2203 2200
@@ -2241,7 +2238,7 @@ proc_show_battery(struct seq_file *m, void *v)
2241 2238
2242 if( mega_adapinq(adapter, dma_handle) != 0 ) { 2239 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2243 seq_puts(m, "Adapter inquiry failed.\n"); 2240 seq_puts(m, "Adapter inquiry failed.\n");
2244 printk(KERN_WARNING "megaraid: inquiry failed.\n"); 2241 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2245 goto free_inquiry; 2242 goto free_inquiry;
2246 } 2243 }
2247 2244
@@ -2350,7 +2347,7 @@ proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel)
2350 2347
2351 if( mega_adapinq(adapter, dma_handle) != 0 ) { 2348 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2352 seq_puts(m, "Adapter inquiry failed.\n"); 2349 seq_puts(m, "Adapter inquiry failed.\n");
2353 printk(KERN_WARNING "megaraid: inquiry failed.\n"); 2350 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2354 goto free_inquiry; 2351 goto free_inquiry;
2355 } 2352 }
2356 2353
@@ -2525,7 +2522,7 @@ proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end )
2525 2522
2526 if( mega_adapinq(adapter, dma_handle) != 0 ) { 2523 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2527 seq_puts(m, "Adapter inquiry failed.\n"); 2524 seq_puts(m, "Adapter inquiry failed.\n");
2528 printk(KERN_WARNING "megaraid: inquiry failed.\n"); 2525 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2529 goto free_inquiry; 2526 goto free_inquiry;
2530 } 2527 }
2531 2528
@@ -2799,7 +2796,7 @@ mega_create_proc_entry(int index, struct proc_dir_entry *parent)
2799 dir = adapter->controller_proc_dir_entry = 2796 dir = adapter->controller_proc_dir_entry =
2800 proc_mkdir_data(string, 0, parent, adapter); 2797 proc_mkdir_data(string, 0, parent, adapter);
2801 if(!dir) { 2798 if(!dir) {
2802 printk(KERN_WARNING "\nmegaraid: proc_mkdir failed\n"); 2799 dev_warn(&adapter->dev->dev, "proc_mkdir failed\n");
2803 return; 2800 return;
2804 } 2801 }
2805 2802
@@ -2807,7 +2804,7 @@ mega_create_proc_entry(int index, struct proc_dir_entry *parent)
2807 de = proc_create_data(f->name, S_IRUSR, dir, &mega_proc_fops, 2804 de = proc_create_data(f->name, S_IRUSR, dir, &mega_proc_fops,
2808 f->show); 2805 f->show);
2809 if (!de) { 2806 if (!de) {
2810 printk(KERN_WARNING "\nmegaraid: proc_create failed\n"); 2807 dev_warn(&adapter->dev->dev, "proc_create failed\n");
2811 return; 2808 return;
2812 } 2809 }
2813 2810
@@ -2874,9 +2871,9 @@ megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev,
2874 return rval; 2871 return rval;
2875 } 2872 }
2876 2873
2877 printk(KERN_INFO 2874 dev_info(&adapter->dev->dev,
2878 "megaraid: invalid partition on this disk on channel %d\n", 2875 "invalid partition on this disk on channel %d\n",
2879 sdev->channel); 2876 sdev->channel);
2880 2877
2881 /* Default heads (64) & sectors (32) */ 2878 /* Default heads (64) & sectors (32) */
2882 heads = 64; 2879 heads = 64;
@@ -2936,7 +2933,7 @@ mega_init_scb(adapter_t *adapter)
2936 scb->sgl = (mega_sglist *)scb->sgl64; 2933 scb->sgl = (mega_sglist *)scb->sgl64;
2937 2934
2938 if( !scb->sgl ) { 2935 if( !scb->sgl ) {
2939 printk(KERN_WARNING "RAID: Can't allocate sglist.\n"); 2936 dev_warn(&adapter->dev->dev, "RAID: Can't allocate sglist\n");
2940 mega_free_sgl(adapter); 2937 mega_free_sgl(adapter);
2941 return -1; 2938 return -1;
2942 } 2939 }
@@ -2946,7 +2943,7 @@ mega_init_scb(adapter_t *adapter)
2946 &scb->pthru_dma_addr); 2943 &scb->pthru_dma_addr);
2947 2944
2948 if( !scb->pthru ) { 2945 if( !scb->pthru ) {
2949 printk(KERN_WARNING "RAID: Can't allocate passthru.\n"); 2946 dev_warn(&adapter->dev->dev, "RAID: Can't allocate passthru\n");
2950 mega_free_sgl(adapter); 2947 mega_free_sgl(adapter);
2951 return -1; 2948 return -1;
2952 } 2949 }
@@ -2956,8 +2953,8 @@ mega_init_scb(adapter_t *adapter)
2956 &scb->epthru_dma_addr); 2953 &scb->epthru_dma_addr);
2957 2954
2958 if( !scb->epthru ) { 2955 if( !scb->epthru ) {
2959 printk(KERN_WARNING 2956 dev_warn(&adapter->dev->dev,
2960 "Can't allocate extended passthru.\n"); 2957 "Can't allocate extended passthru\n");
2961 mega_free_sgl(adapter); 2958 mega_free_sgl(adapter);
2962 return -1; 2959 return -1;
2963 } 2960 }
@@ -3154,8 +3151,8 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
3154 * Do we support this feature 3151 * Do we support this feature
3155 */ 3152 */
3156 if( !adapter->support_random_del ) { 3153 if( !adapter->support_random_del ) {
3157 printk(KERN_WARNING "megaraid: logdrv "); 3154 dev_warn(&adapter->dev->dev, "logdrv "
3158 printk("delete on non-supporting F/W.\n"); 3155 "delete on non-supporting F/W\n");
3159 3156
3160 return (-EINVAL); 3157 return (-EINVAL);
3161 } 3158 }
@@ -3179,7 +3176,7 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
3179 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU64 || 3176 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU64 ||
3180 uioc.uioc_rmbox[0] == MEGA_MBOXCMD_EXTPTHRU ) { 3177 uioc.uioc_rmbox[0] == MEGA_MBOXCMD_EXTPTHRU ) {
3181 3178
3182 printk(KERN_WARNING "megaraid: rejected passthru.\n"); 3179 dev_warn(&adapter->dev->dev, "rejected passthru\n");
3183 3180
3184 return (-EINVAL); 3181 return (-EINVAL);
3185 } 3182 }
@@ -3683,11 +3680,11 @@ mega_enum_raid_scsi(adapter_t *adapter)
3683 3680
3684 for( i = 0; i < adapter->product_info.nchannels; i++ ) { 3681 for( i = 0; i < adapter->product_info.nchannels; i++ ) {
3685 if( (adapter->mega_ch_class >> i) & 0x01 ) { 3682 if( (adapter->mega_ch_class >> i) & 0x01 ) {
3686 printk(KERN_INFO "megaraid: channel[%d] is raid.\n", 3683 dev_info(&adapter->dev->dev, "channel[%d] is raid\n",
3687 i); 3684 i);
3688 } 3685 }
3689 else { 3686 else {
3690 printk(KERN_INFO "megaraid: channel[%d] is scsi.\n", 3687 dev_info(&adapter->dev->dev, "channel[%d] is scsi\n",
3691 i); 3688 i);
3692 } 3689 }
3693 } 3690 }
@@ -3893,7 +3890,7 @@ mega_do_del_logdrv(adapter_t *adapter, int logdrv)
3893 3890
3894 /* log this event */ 3891 /* log this event */
3895 if(rval) { 3892 if(rval) {
3896 printk(KERN_WARNING "megaraid: Delete LD-%d failed.", logdrv); 3893 dev_warn(&adapter->dev->dev, "Delete LD-%d failed", logdrv);
3897 return rval; 3894 return rval;
3898 } 3895 }
3899 3896
@@ -4161,7 +4158,7 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
4161 * this information. 4158 * this information.
4162 */ 4159 */
4163 if (rval && trace_level) { 4160 if (rval && trace_level) {
4164 printk("megaraid: cmd [%x, %x, %x] status:[%x]\n", 4161 dev_info(&adapter->dev->dev, "cmd [%x, %x, %x] status:[%x]\n",
4165 mc->cmd, mc->opcode, mc->subopcode, rval); 4162 mc->cmd, mc->opcode, mc->subopcode, rval);
4166 } 4163 }
4167 4164
@@ -4244,11 +4241,8 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	subsysvid = pdev->subsystem_vendor;
 	subsysid = pdev->subsystem_device;

-	printk(KERN_NOTICE "megaraid: found 0x%4.04x:0x%4.04x:bus %d:",
-		id->vendor, id->device, pci_bus);
-
-	printk("slot %d:func %d\n",
-		PCI_SLOT(pci_dev_func), PCI_FUNC(pci_dev_func));
+	dev_notice(&pdev->dev, "found 0x%4.04x:0x%4.04x\n",
+		id->vendor, id->device);

 	/* Read the base port and IRQ from PCI */
 	mega_baseport = pci_resource_start(pdev, 0);
@@ -4259,14 +4253,13 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		flag |= BOARD_MEMMAP;

 		if (!request_mem_region(mega_baseport, 128, "megaraid")) {
-			printk(KERN_WARNING "megaraid: mem region busy!\n");
+			dev_warn(&pdev->dev, "mem region busy!\n");
 			goto out_disable_device;
 		}

 		mega_baseport = (unsigned long)ioremap(mega_baseport, 128);
 		if (!mega_baseport) {
-			printk(KERN_WARNING
-				"megaraid: could not map hba memory\n");
+			dev_warn(&pdev->dev, "could not map hba memory\n");
 			goto out_release_region;
 		}
 	} else {
@@ -4285,7 +4278,7 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4285 adapter = (adapter_t *)host->hostdata; 4278 adapter = (adapter_t *)host->hostdata;
4286 memset(adapter, 0, sizeof(adapter_t)); 4279 memset(adapter, 0, sizeof(adapter_t));
4287 4280
4288 printk(KERN_NOTICE 4281 dev_notice(&pdev->dev,
4289 "scsi%d:Found MegaRAID controller at 0x%lx, IRQ:%d\n", 4282 "scsi%d:Found MegaRAID controller at 0x%lx, IRQ:%d\n",
4290 host->host_no, mega_baseport, irq); 4283 host->host_no, mega_baseport, irq);
4291 4284
@@ -4323,21 +4316,20 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4323 adapter->mega_buffer = pci_alloc_consistent(adapter->dev, 4316 adapter->mega_buffer = pci_alloc_consistent(adapter->dev,
4324 MEGA_BUFFER_SIZE, &adapter->buf_dma_handle); 4317 MEGA_BUFFER_SIZE, &adapter->buf_dma_handle);
4325 if (!adapter->mega_buffer) { 4318 if (!adapter->mega_buffer) {
4326 printk(KERN_WARNING "megaraid: out of RAM.\n"); 4319 dev_warn(&pdev->dev, "out of RAM\n");
4327 goto out_host_put; 4320 goto out_host_put;
4328 } 4321 }
4329 4322
4330 adapter->scb_list = kmalloc(sizeof(scb_t) * MAX_COMMANDS, GFP_KERNEL); 4323 adapter->scb_list = kmalloc(sizeof(scb_t) * MAX_COMMANDS, GFP_KERNEL);
4331 if (!adapter->scb_list) { 4324 if (!adapter->scb_list) {
4332 printk(KERN_WARNING "megaraid: out of RAM.\n"); 4325 dev_warn(&pdev->dev, "out of RAM\n");
4333 goto out_free_cmd_buffer; 4326 goto out_free_cmd_buffer;
4334 } 4327 }
4335 4328
4336 if (request_irq(irq, (adapter->flag & BOARD_MEMMAP) ? 4329 if (request_irq(irq, (adapter->flag & BOARD_MEMMAP) ?
4337 megaraid_isr_memmapped : megaraid_isr_iomapped, 4330 megaraid_isr_memmapped : megaraid_isr_iomapped,
4338 IRQF_SHARED, "megaraid", adapter)) { 4331 IRQF_SHARED, "megaraid", adapter)) {
4339 printk(KERN_WARNING 4332 dev_warn(&pdev->dev, "Couldn't register IRQ %d!\n", irq);
4340 "megaraid: Couldn't register IRQ %d!\n", irq);
4341 goto out_free_scb_list; 4333 goto out_free_scb_list;
4342 } 4334 }
4343 4335
@@ -4357,9 +4349,9 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4357 if (!strcmp(adapter->fw_version, "3.00") || 4349 if (!strcmp(adapter->fw_version, "3.00") ||
4358 !strcmp(adapter->fw_version, "3.01")) { 4350 !strcmp(adapter->fw_version, "3.01")) {
4359 4351
4360 printk( KERN_WARNING 4352 dev_warn(&pdev->dev,
4361 "megaraid: Your card is a Dell PERC " 4353 "Your card is a Dell PERC "
4362 "2/SC RAID controller with " 4354 "2/SC RAID controller with "
4363 "firmware\nmegaraid: 3.00 or 3.01. " 4355 "firmware\nmegaraid: 3.00 or 3.01. "
4364 "This driver is known to have " 4356 "This driver is known to have "
4365 "corruption issues\nmegaraid: with " 4357 "corruption issues\nmegaraid: with "
@@ -4390,12 +4382,12 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4390 if (!strcmp(adapter->fw_version, "H01.07") || 4382 if (!strcmp(adapter->fw_version, "H01.07") ||
4391 !strcmp(adapter->fw_version, "H01.08") || 4383 !strcmp(adapter->fw_version, "H01.08") ||
4392 !strcmp(adapter->fw_version, "H01.09") ) { 4384 !strcmp(adapter->fw_version, "H01.09") ) {
4393 printk(KERN_WARNING 4385 dev_warn(&pdev->dev,
4394 "megaraid: Firmware H.01.07, " 4386 "Firmware H.01.07, "
4395 "H.01.08, and H.01.09 on 1M/2M " 4387 "H.01.08, and H.01.09 on 1M/2M "
4396 "controllers\n" 4388 "controllers\n"
4397 "megaraid: do not support 64 bit " 4389 "do not support 64 bit "
4398 "addressing.\nmegaraid: DISABLING " 4390 "addressing.\nDISABLING "
4399 "64 bit support.\n"); 4391 "64 bit support.\n");
4400 adapter->flag &= ~BOARD_64BIT; 4392 adapter->flag &= ~BOARD_64BIT;
4401 } 4393 }
@@ -4503,8 +4495,8 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4503 */ 4495 */
4504 adapter->has_cluster = mega_support_cluster(adapter); 4496 adapter->has_cluster = mega_support_cluster(adapter);
4505 if (adapter->has_cluster) { 4497 if (adapter->has_cluster) {
4506 printk(KERN_NOTICE 4498 dev_notice(&pdev->dev,
4507 "megaraid: Cluster driver, initiator id:%d\n", 4499 "Cluster driver, initiator id:%d\n",
4508 adapter->this_id); 4500 adapter->this_id);
4509 } 4501 }
4510#endif 4502#endif
@@ -4571,7 +4563,7 @@ __megaraid_shutdown(adapter_t *adapter)
4571 issue_scb_block(adapter, raw_mbox); 4563 issue_scb_block(adapter, raw_mbox);
4572 4564
4573 if (atomic_read(&adapter->pend_cmds) > 0) 4565 if (atomic_read(&adapter->pend_cmds) > 0)
4574 printk(KERN_WARNING "megaraid: pending commands!!\n"); 4566 dev_warn(&adapter->dev->dev, "pending commands!!\n");
4575 4567
4576 /* 4568 /*
4577 * Have a deliberate delay to make sure all the caches are 4569 * Have a deliberate delay to make sure all the caches are
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 71b884dae27c..eaa81e552fd2 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -216,7 +216,7 @@ struct megasas_cmd *megasas_get_cmd(struct megasas_instance
216 struct megasas_cmd, list); 216 struct megasas_cmd, list);
217 list_del_init(&cmd->list); 217 list_del_init(&cmd->list);
218 } else { 218 } else {
219 printk(KERN_ERR "megasas: Command pool empty!\n"); 219 dev_err(&instance->pdev->dev, "Command pool empty!\n");
220 } 220 }
221 221
222 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags); 222 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
@@ -273,6 +273,7 @@ static inline void
273megasas_enable_intr_xscale(struct megasas_instance *instance) 273megasas_enable_intr_xscale(struct megasas_instance *instance)
274{ 274{
275 struct megasas_register_set __iomem *regs; 275 struct megasas_register_set __iomem *regs;
276
276 regs = instance->reg_set; 277 regs = instance->reg_set;
277 writel(0, &(regs)->outbound_intr_mask); 278 writel(0, &(regs)->outbound_intr_mask);
278 279
@@ -289,6 +290,7 @@ megasas_disable_intr_xscale(struct megasas_instance *instance)
289{ 290{
290 struct megasas_register_set __iomem *regs; 291 struct megasas_register_set __iomem *regs;
291 u32 mask = 0x1f; 292 u32 mask = 0x1f;
293
292 regs = instance->reg_set; 294 regs = instance->reg_set;
293 writel(mask, &regs->outbound_intr_mask); 295 writel(mask, &regs->outbound_intr_mask);
294 /* Dummy readl to force pci flush */ 296 /* Dummy readl to force pci flush */
@@ -313,6 +315,7 @@ megasas_clear_intr_xscale(struct megasas_register_set __iomem * regs)
313{ 315{
314 u32 status; 316 u32 status;
315 u32 mfiStatus = 0; 317 u32 mfiStatus = 0;
318
316 /* 319 /*
317 * Check if it is our interrupt 320 * Check if it is our interrupt
318 */ 321 */
@@ -348,6 +351,7 @@ megasas_fire_cmd_xscale(struct megasas_instance *instance,
348 struct megasas_register_set __iomem *regs) 351 struct megasas_register_set __iomem *regs)
349{ 352{
350 unsigned long flags; 353 unsigned long flags;
354
351 spin_lock_irqsave(&instance->hba_lock, flags); 355 spin_lock_irqsave(&instance->hba_lock, flags);
352 writel((frame_phys_addr >> 3)|(frame_count), 356 writel((frame_phys_addr >> 3)|(frame_count),
353 &(regs)->inbound_queue_port); 357 &(regs)->inbound_queue_port);
@@ -364,15 +368,16 @@ megasas_adp_reset_xscale(struct megasas_instance *instance,
364{ 368{
365 u32 i; 369 u32 i;
366 u32 pcidata; 370 u32 pcidata;
371
367 writel(MFI_ADP_RESET, &regs->inbound_doorbell); 372 writel(MFI_ADP_RESET, &regs->inbound_doorbell);
368 373
369 for (i = 0; i < 3; i++) 374 for (i = 0; i < 3; i++)
370 msleep(1000); /* sleep for 3 secs */ 375 msleep(1000); /* sleep for 3 secs */
371 pcidata = 0; 376 pcidata = 0;
372 pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata); 377 pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
373 printk(KERN_NOTICE "pcidata = %x\n", pcidata); 378 dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
374 if (pcidata & 0x2) { 379 if (pcidata & 0x2) {
375 printk(KERN_NOTICE "mfi 1068 offset read=%x\n", pcidata); 380 dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
376 pcidata &= ~0x2; 381 pcidata &= ~0x2;
377 pci_write_config_dword(instance->pdev, 382 pci_write_config_dword(instance->pdev,
378 MFI_1068_PCSR_OFFSET, pcidata); 383 MFI_1068_PCSR_OFFSET, pcidata);
@@ -383,9 +388,9 @@ megasas_adp_reset_xscale(struct megasas_instance *instance,
383 pcidata = 0; 388 pcidata = 0;
384 pci_read_config_dword(instance->pdev, 389 pci_read_config_dword(instance->pdev,
385 MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata); 390 MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
386 printk(KERN_NOTICE "1068 offset handshake read=%x\n", pcidata); 391 dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
387 if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) { 392 if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
388 printk(KERN_NOTICE "1068 offset pcidt=%x\n", pcidata); 393 dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
389 pcidata = 0; 394 pcidata = 0;
390 pci_write_config_dword(instance->pdev, 395 pci_write_config_dword(instance->pdev,
391 MFI_1068_FW_HANDSHAKE_OFFSET, pcidata); 396 MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
@@ -402,7 +407,6 @@ static int
402megasas_check_reset_xscale(struct megasas_instance *instance, 407megasas_check_reset_xscale(struct megasas_instance *instance,
403 struct megasas_register_set __iomem *regs) 408 struct megasas_register_set __iomem *regs)
404{ 409{
405
406 if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) && 410 if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
407 (le32_to_cpu(*instance->consumer) == 411 (le32_to_cpu(*instance->consumer) ==
408 MEGASAS_ADPRESET_INPROG_SIGN)) 412 MEGASAS_ADPRESET_INPROG_SIGN))
@@ -433,7 +437,7 @@ static struct megasas_instance_template megasas_instance_template_xscale = {
433 437
434/** 438/**
435* The following functions are defined for ppc (deviceid : 0x60) 439* The following functions are defined for ppc (deviceid : 0x60)
436* controllers 440* controllers
437*/ 441*/
438 442
439/** 443/**
@@ -444,6 +448,7 @@ static inline void
444megasas_enable_intr_ppc(struct megasas_instance *instance) 448megasas_enable_intr_ppc(struct megasas_instance *instance)
445{ 449{
446 struct megasas_register_set __iomem *regs; 450 struct megasas_register_set __iomem *regs;
451
447 regs = instance->reg_set; 452 regs = instance->reg_set;
448 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); 453 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
449 454
@@ -462,6 +467,7 @@ megasas_disable_intr_ppc(struct megasas_instance *instance)
462{ 467{
463 struct megasas_register_set __iomem *regs; 468 struct megasas_register_set __iomem *regs;
464 u32 mask = 0xFFFFFFFF; 469 u32 mask = 0xFFFFFFFF;
470
465 regs = instance->reg_set; 471 regs = instance->reg_set;
466 writel(mask, &regs->outbound_intr_mask); 472 writel(mask, &regs->outbound_intr_mask);
467 /* Dummy readl to force pci flush */ 473 /* Dummy readl to force pci flush */
@@ -522,6 +528,7 @@ megasas_fire_cmd_ppc(struct megasas_instance *instance,
522 struct megasas_register_set __iomem *regs) 528 struct megasas_register_set __iomem *regs)
523{ 529{
524 unsigned long flags; 530 unsigned long flags;
531
525 spin_lock_irqsave(&instance->hba_lock, flags); 532 spin_lock_irqsave(&instance->hba_lock, flags);
526 writel((frame_phys_addr | (frame_count<<1))|1, 533 writel((frame_phys_addr | (frame_count<<1))|1,
527 &(regs)->inbound_queue_port); 534 &(regs)->inbound_queue_port);
@@ -566,6 +573,7 @@ static inline void
566megasas_enable_intr_skinny(struct megasas_instance *instance) 573megasas_enable_intr_skinny(struct megasas_instance *instance)
567{ 574{
568 struct megasas_register_set __iomem *regs; 575 struct megasas_register_set __iomem *regs;
576
569 regs = instance->reg_set; 577 regs = instance->reg_set;
570 writel(0xFFFFFFFF, &(regs)->outbound_intr_mask); 578 writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
571 579
@@ -584,6 +592,7 @@ megasas_disable_intr_skinny(struct megasas_instance *instance)
584{ 592{
585 struct megasas_register_set __iomem *regs; 593 struct megasas_register_set __iomem *regs;
586 u32 mask = 0xFFFFFFFF; 594 u32 mask = 0xFFFFFFFF;
595
587 regs = instance->reg_set; 596 regs = instance->reg_set;
588 writel(mask, &regs->outbound_intr_mask); 597 writel(mask, &regs->outbound_intr_mask);
589 /* Dummy readl to force pci flush */ 598 /* Dummy readl to force pci flush */
@@ -634,8 +643,8 @@ megasas_clear_intr_skinny(struct megasas_register_set __iomem *regs)
634 writel(status, &regs->outbound_intr_status); 643 writel(status, &regs->outbound_intr_status);
635 644
636 /* 645 /*
637 * dummy read to flush PCI 646 * dummy read to flush PCI
638 */ 647 */
639 readl(&regs->outbound_intr_status); 648 readl(&regs->outbound_intr_status);
640 649
641 return mfiStatus; 650 return mfiStatus;
@@ -654,6 +663,7 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance,
654 struct megasas_register_set __iomem *regs) 663 struct megasas_register_set __iomem *regs)
655{ 664{
656 unsigned long flags; 665 unsigned long flags;
666
657 spin_lock_irqsave(&instance->hba_lock, flags); 667 spin_lock_irqsave(&instance->hba_lock, flags);
658 writel(upper_32_bits(frame_phys_addr), 668 writel(upper_32_bits(frame_phys_addr),
659 &(regs)->inbound_high_queue_port); 669 &(regs)->inbound_high_queue_port);
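megasas_fire_cmd_skinny() posts a 64-bit frame address through two 32-bit queue ports, high half first, and holds hba_lock so the pair of writes cannot interleave with another submitter's. The split itself is just a shift and a truncation, as this small sketch shows (upper_32_bits() in the kernel is the same shift):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t frame_phys_addr = 0x0000000123456780ULL;	/* example address */
	uint32_t hi = (uint32_t)(frame_phys_addr >> 32);	/* high doorbell */
	uint32_t lo = (uint32_t)frame_phys_addr;		/* low doorbell */

	printf("high: %#x, low: %#x\n", hi, lo);
	return 0;
}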
@@ -706,6 +716,7 @@ static inline void
706megasas_enable_intr_gen2(struct megasas_instance *instance) 716megasas_enable_intr_gen2(struct megasas_instance *instance)
707{ 717{
708 struct megasas_register_set __iomem *regs; 718 struct megasas_register_set __iomem *regs;
719
709 regs = instance->reg_set; 720 regs = instance->reg_set;
710 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear); 721 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
711 722
@@ -725,6 +736,7 @@ megasas_disable_intr_gen2(struct megasas_instance *instance)
725{ 736{
726 struct megasas_register_set __iomem *regs; 737 struct megasas_register_set __iomem *regs;
727 u32 mask = 0xFFFFFFFF; 738 u32 mask = 0xFFFFFFFF;
739
728 regs = instance->reg_set; 740 regs = instance->reg_set;
729 writel(mask, &regs->outbound_intr_mask); 741 writel(mask, &regs->outbound_intr_mask);
730 /* Dummy readl to force pci flush */ 742 /* Dummy readl to force pci flush */
@@ -750,6 +762,7 @@ megasas_clear_intr_gen2(struct megasas_register_set __iomem *regs)
750{ 762{
751 u32 status; 763 u32 status;
752 u32 mfiStatus = 0; 764 u32 mfiStatus = 0;
765
753 /* 766 /*
754 * Check if it is our interrupt 767 * Check if it is our interrupt
755 */ 768 */
@@ -786,6 +799,7 @@ megasas_fire_cmd_gen2(struct megasas_instance *instance,
786 struct megasas_register_set __iomem *regs) 799 struct megasas_register_set __iomem *regs)
787{ 800{
788 unsigned long flags; 801 unsigned long flags;
802
789 spin_lock_irqsave(&instance->hba_lock, flags); 803 spin_lock_irqsave(&instance->hba_lock, flags);
790 writel((frame_phys_addr | (frame_count<<1))|1, 804 writel((frame_phys_addr | (frame_count<<1))|1,
791 &(regs)->inbound_queue_port); 805 &(regs)->inbound_queue_port);
@@ -800,10 +814,10 @@ static int
800megasas_adp_reset_gen2(struct megasas_instance *instance, 814megasas_adp_reset_gen2(struct megasas_instance *instance,
801 struct megasas_register_set __iomem *reg_set) 815 struct megasas_register_set __iomem *reg_set)
802{ 816{
803 u32 retry = 0 ; 817 u32 retry = 0 ;
804 u32 HostDiag; 818 u32 HostDiag;
805 u32 __iomem *seq_offset = &reg_set->seq_offset; 819 u32 __iomem *seq_offset = &reg_set->seq_offset;
806 u32 __iomem *hostdiag_offset = &reg_set->host_diag; 820 u32 __iomem *hostdiag_offset = &reg_set->host_diag;
807 821
808 if (instance->instancet == &megasas_instance_template_skinny) { 822 if (instance->instancet == &megasas_instance_template_skinny) {
809 seq_offset = &reg_set->fusion_seq_offset; 823 seq_offset = &reg_set->fusion_seq_offset;
@@ -821,10 +835,10 @@ megasas_adp_reset_gen2(struct megasas_instance *instance,
821 835
822 HostDiag = (u32)readl(hostdiag_offset); 836 HostDiag = (u32)readl(hostdiag_offset);
823 837
824 while ( !( HostDiag & DIAG_WRITE_ENABLE) ) { 838 while (!(HostDiag & DIAG_WRITE_ENABLE)) {
825 msleep(100); 839 msleep(100);
826 HostDiag = (u32)readl(hostdiag_offset); 840 HostDiag = (u32)readl(hostdiag_offset);
827 printk(KERN_NOTICE "RESETGEN2: retry=%x, hostdiag=%x\n", 841 dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
828 retry, HostDiag); 842 retry, HostDiag);
829 843
830 if (retry++ >= 100) 844 if (retry++ >= 100)
@@ -832,17 +846,17 @@ megasas_adp_reset_gen2(struct megasas_instance *instance,
832 846
833 } 847 }
834 848
835 printk(KERN_NOTICE "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag); 849 dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
836 850
837 writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset); 851 writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
838 852
839 ssleep(10); 853 ssleep(10);
840 854
841 HostDiag = (u32)readl(hostdiag_offset); 855 HostDiag = (u32)readl(hostdiag_offset);
842 while ( ( HostDiag & DIAG_RESET_ADAPTER) ) { 856 while (HostDiag & DIAG_RESET_ADAPTER) {
843 msleep(100); 857 msleep(100);
844 HostDiag = (u32)readl(hostdiag_offset); 858 HostDiag = (u32)readl(hostdiag_offset);
845 printk(KERN_NOTICE "RESET_GEN2: retry=%x, hostdiag=%x\n", 859 dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
846 retry, HostDiag); 860 retry, HostDiag);
847 861
848 if (retry++ >= 1000) 862 if (retry++ >= 1000)
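megasas_adp_reset_gen2() polls the host diagnostic register until DIAG_WRITE_ENABLE comes up, sleeping between reads and capping the retries. A self-contained sketch of that poll-with-bounded-retry shape, with the register read simulated:

#include <stdio.h>

#define DIAG_WRITE_ENABLE 0x80

/* stand-in for readl(hostdiag_offset): the bit comes up after a few reads */
static unsigned int fake_readl(void)
{
	static unsigned int val;

	return val += 0x20;
}

int main(void)
{
	unsigned int hostdiag, retry = 0;

	hostdiag = fake_readl();
	while (!(hostdiag & DIAG_WRITE_ENABLE)) {
		/* the driver does msleep(100) here; omitted in this sketch */
		hostdiag = fake_readl();
		if (retry++ >= 100) {
			printf("timed out, hostdiag=%#x\n", hostdiag);
			return 1;
		}
	}
	printf("write enable set after %u retries\n", retry);
	return 0;
}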
@@ -904,7 +918,6 @@ int
904megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) 918megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
905{ 919{
906 int seconds; 920 int seconds;
907
908 struct megasas_header *frame_hdr = &cmd->frame->hdr; 921 struct megasas_header *frame_hdr = &cmd->frame->hdr;
909 922
910 frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE; 923 frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE;
@@ -940,6 +953,7 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance,
940 struct megasas_cmd *cmd, int timeout) 953 struct megasas_cmd *cmd, int timeout)
941{ 954{
942 int ret = 0; 955 int ret = 0;
956
943 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; 957 cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
944 958
945 instance->instancet->issue_dcmd(instance, cmd); 959 instance->instancet->issue_dcmd(instance, cmd);
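megasas_issue_blocked_cmd() primes cmd_status_drv with an invalid marker, issues the DCMD, and sleeps until the completion path stores the real status. A kernel-style sketch of that shape, with demo_* placeholders standing in for the megasas types:

#include <linux/completion.h>
#include <linux/errno.h>

struct demo_cmd {
	int status;			/* filled in by the completion path */
	struct completion done;
};

static int demo_issue_blocked(struct demo_cmd *cmd, unsigned long timeout_j)
{
	cmd->status = -1;		/* analogue of MFI_STAT_INVALID_STATUS */
	init_completion(&cmd->done);

	/* ...hand the frame to the firmware here... */

	if (!wait_for_completion_timeout(&cmd->done, timeout_j))
		return -ETIMEDOUT;	/* ISR never called complete() */
	return cmd->status;
}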
@@ -1120,7 +1134,7 @@ static u32 megasas_get_frame_count(struct megasas_instance *instance,
1120 int num_cnt; 1134 int num_cnt;
1121 int sge_bytes; 1135 int sge_bytes;
1122 u32 sge_sz; 1136 u32 sge_sz;
1123 u32 frame_count=0; 1137 u32 frame_count = 0;
1124 1138
1125 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) : 1139 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1126 sizeof(struct megasas_sge32); 1140 sizeof(struct megasas_sge32);
@@ -1151,14 +1165,14 @@ static u32 megasas_get_frame_count(struct megasas_instance *instance,
1151 num_cnt = sge_count - 3; 1165 num_cnt = sge_count - 3;
1152 } 1166 }
1153 1167
1154 if(num_cnt>0){ 1168 if (num_cnt > 0) {
1155 sge_bytes = sge_sz * num_cnt; 1169 sge_bytes = sge_sz * num_cnt;
1156 1170
1157 frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) + 1171 frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
1158 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ; 1172 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
1159 } 1173 }
1160 /* Main frame */ 1174 /* Main frame */
1161 frame_count +=1; 1175 frame_count += 1;
1162 1176
1163 if (frame_count > 7) 1177 if (frame_count > 7)
1164 frame_count = 8; 1178 frame_count = 8;
@@ -1215,9 +1229,9 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1215 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); 1229 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1216 1230
1217 /* 1231 /*
1218 * If the command is for the tape device, set the 1232 * If the command is for the tape device, set the
1219 * pthru timeout to the os layer timeout value. 1233 * pthru timeout to the os layer timeout value.
1220 */ 1234 */
1221 if (scp->device->type == TYPE_TAPE) { 1235 if (scp->device->type == TYPE_TAPE) {
1222 if ((scp->request->timeout / HZ) > 0xFFFF) 1236 if ((scp->request->timeout / HZ) > 0xFFFF)
1223 pthru->timeout = cpu_to_le16(0xFFFF); 1237 pthru->timeout = cpu_to_le16(0xFFFF);
@@ -1241,7 +1255,7 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1241 &pthru->sgl); 1255 &pthru->sgl);
1242 1256
1243 if (pthru->sge_count > instance->max_num_sge) { 1257 if (pthru->sge_count > instance->max_num_sge) {
1244 printk(KERN_ERR "megasas: DCDB two many SGE NUM=%x\n", 1258 dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
1245 pthru->sge_count); 1259 pthru->sge_count);
1246 return 0; 1260 return 0;
1247 } 1261 }
@@ -1382,7 +1396,7 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1382 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl); 1396 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1383 1397
1384 if (ldio->sge_count > instance->max_num_sge) { 1398 if (ldio->sge_count > instance->max_num_sge) {
1385 printk(KERN_ERR "megasas: build_ld_io: sge_count = %x\n", 1399 dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
1386 ldio->sge_count); 1400 ldio->sge_count);
1387 return 0; 1401 return 0;
1388 } 1402 }
@@ -1435,7 +1449,7 @@ inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1435 1449
1436 /** 1450 /**
1437 * megasas_dump_pending_frames - Dumps the frame address of all pending cmds 1451 * megasas_dump_pending_frames - Dumps the frame address of all pending cmds
1438 * in FW 1452 * in FW
1439 * @instance: Adapter soft state 1453 * @instance: Adapter soft state
1440 */ 1454 */
1441static inline void 1455static inline void
@@ -1449,63 +1463,60 @@ megasas_dump_pending_frames(struct megasas_instance *instance)
 	u32 sgcount;
 	u32 max_cmd = instance->max_fw_cmds;

-	printk(KERN_ERR "\nmegasas[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
-	printk(KERN_ERR "megasas[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
+	dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
+	dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
 	if (IS_DMA64)
-		printk(KERN_ERR "\nmegasas[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
+		dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
 	else
-		printk(KERN_ERR "\nmegasas[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
+		dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);

-	printk(KERN_ERR "megasas[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
+	dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
 	for (i = 0; i < max_cmd; i++) {
 		cmd = instance->cmd_list[i];
-		if(!cmd->scmd)
+		if (!cmd->scmd)
 			continue;
-		printk(KERN_ERR "megasas[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
+		dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
 		if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
 			ldio = (struct megasas_io_frame *)cmd->frame;
 			mfi_sgl = &ldio->sgl;
 			sgcount = ldio->sge_count;
-			printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
+			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
 			" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
 			instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
 			le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
 			le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
-		}
-		else {
+		} else {
 			pthru = (struct megasas_pthru_frame *) cmd->frame;
 			mfi_sgl = &pthru->sgl;
 			sgcount = pthru->sge_count;
-			printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
+			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
 			"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
 			instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
 			pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
 			le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
 		}
-		if(megasas_dbg_lvl & MEGASAS_DBG_LVL){
-			for (n = 0; n < sgcount; n++){
+		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
+			for (n = 0; n < sgcount; n++) {
 				if (IS_DMA64)
-					printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%llx ",
+					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
 					le32_to_cpu(mfi_sgl->sge64[n].length),
 					le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
 				else
-					printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",
+					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
 					le32_to_cpu(mfi_sgl->sge32[n].length),
 					le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
 			}
 		}
-		printk(KERN_ERR "\n");
 	} /*for max_cmd*/
-	printk(KERN_ERR "\nmegasas[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
+	dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
 	for (i = 0; i < max_cmd; i++) {

 		cmd = instance->cmd_list[i];

-		if(cmd->sync_cmd == 1){
-			printk(KERN_ERR "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
-		}
+		if (cmd->sync_cmd == 1)
+			dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
 	}
-	printk(KERN_ERR "megasas[%d]: Dumping Done.\n\n",instance->host->host_no);
+	dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
 }
1510 1521
1511u32 1522u32
@@ -1623,7 +1634,7 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1623 } 1634 }
1624 1635
1625 if (instance->instancet->build_and_issue_cmd(instance, scmd)) { 1636 if (instance->instancet->build_and_issue_cmd(instance, scmd)) {
1626 printk(KERN_ERR "megasas: Err returned from build_and_issue_cmd\n"); 1637 dev_err(&instance->pdev->dev, "Err returned from build_and_issue_cmd\n");
1627 return SCSI_MLQUEUE_HOST_BUSY; 1638 return SCSI_MLQUEUE_HOST_BUSY;
1628 } 1639 }
1629 1640
@@ -1651,8 +1662,8 @@ static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1651static int megasas_slave_configure(struct scsi_device *sdev) 1662static int megasas_slave_configure(struct scsi_device *sdev)
1652{ 1663{
1653 /* 1664 /*
1654 * The RAID firmware may require extended timeouts. 1665 * The RAID firmware may require extended timeouts.
1655 */ 1666 */
1656 blk_queue_rq_timeout(sdev->request_queue, 1667 blk_queue_rq_timeout(sdev->request_queue,
1657 MEGASAS_DEFAULT_CMD_TIMEOUT * HZ); 1668 MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
1658 1669
@@ -1661,8 +1672,9 @@ static int megasas_slave_configure(struct scsi_device *sdev)
1661 1672
1662static int megasas_slave_alloc(struct scsi_device *sdev) 1673static int megasas_slave_alloc(struct scsi_device *sdev)
1663{ 1674{
1664 u16 pd_index = 0; 1675 u16 pd_index = 0;
1665 struct megasas_instance *instance ; 1676 struct megasas_instance *instance ;
1677
1666 instance = megasas_lookup_instance(sdev->host->host_no); 1678 instance = megasas_lookup_instance(sdev->host->host_no);
1667 if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) { 1679 if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
1668 /* 1680 /*
@@ -1728,8 +1740,7 @@ void megaraid_sas_kill_hba(struct megasas_instance *instance)
1728 (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) || 1740 (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
1729 (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || 1741 (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
1730 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { 1742 (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
1731 writel(MFI_STOP_ADP, 1743 writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
1732 &instance->reg_set->doorbell);
1733 /* Flush */ 1744 /* Flush */
1734 readl(&instance->reg_set->doorbell); 1745 readl(&instance->reg_set->doorbell);
1735 if (instance->mpio && instance->requestorId) 1746 if (instance->mpio && instance->requestorId)
@@ -1783,7 +1794,7 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
1783 unsigned long flags; 1794 unsigned long flags;
1784 1795
1785 /* If we have already declared adapter dead, do not complete cmds */ 1796 /* If we have already declared adapter dead, do not complete cmds */
1786 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR ) 1797 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
1787 return; 1798 return;
1788 1799
1789 spin_lock_irqsave(&instance->completion_lock, flags); 1800 spin_lock_irqsave(&instance->completion_lock, flags);
@@ -1794,7 +1805,7 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr)
 	while (consumer != producer) {
 		context = le32_to_cpu(instance->reply_queue[consumer]);
 		if (context >= instance->max_fw_cmds) {
-			printk(KERN_ERR "Unexpected context value %x\n",
+			dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
 				context);
 			BUG();
 		}
@@ -1873,8 +1884,8 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
 	cmd = megasas_get_cmd(instance);
 
 	if (!cmd) {
-		printk(KERN_DEBUG "megasas: megasas_get_ld_vf_affiliation_111:"
-		       "Failed to get cmd for scsi%d.\n",
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:"
+			"Failed to get cmd for scsi%d\n",
 			instance->host->host_no);
 		return -ENOMEM;
 	}
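Note that KERN_DEBUG messages become dev_printk(KERN_DEBUG, ...) rather than dev_dbg(): dev_dbg() compiles to nothing unless DEBUG or dynamic debug is enabled, while dev_printk(KERN_DEBUG, ...) always emits at debug level, so the old printk behaviour is preserved. A short contrast of the two (same message, hypothetical surrounding code):

	/* Unconditionally logged at KERN_DEBUG, like the printk it replaces. */
	dev_printk(KERN_DEBUG, &instance->pdev->dev,
		   "Failed to get cmd for scsi%d\n", instance->host->host_no);

	/* Compiled out in non-DEBUG builds, which would silently drop it. */
	dev_dbg(&instance->pdev->dev,
		"Failed to get cmd for scsi%d\n", instance->host->host_no);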
@@ -1882,8 +1893,8 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
 	dcmd = &cmd->frame->dcmd;
 
 	if (!instance->vf_affiliation_111) {
-		printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF "
-		       "affiliation for scsi%d.\n", instance->host->host_no);
+		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
+			"affiliation for scsi%d\n", instance->host->host_no);
 		megasas_return_cmd(instance, cmd);
 		return -ENOMEM;
 	}
@@ -1897,8 +1908,8 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
 					       sizeof(struct MR_LD_VF_AFFILIATION_111),
 					       &new_affiliation_111_h);
 	if (!new_affiliation_111) {
-		printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate "
-		       "memory for new affiliation for scsi%d.\n",
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
+			"memory for new affiliation for scsi%d\n",
 			instance->host->host_no);
 		megasas_return_cmd(instance, cmd);
 		return -ENOMEM;
@@ -1929,14 +1940,14 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
 	dcmd->sgl.sge32[0].length = cpu_to_le32(
 		sizeof(struct MR_LD_VF_AFFILIATION_111));
 
-	printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
+	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
 	       "scsi%d\n", instance->host->host_no);
 
 	megasas_issue_blocked_cmd(instance, cmd, 0);
 
 	if (dcmd->cmd_status) {
-		printk(KERN_WARNING "megasas: SR-IOV: LD/VF affiliation DCMD"
-		       " failed with status 0x%x for scsi%d.\n",
+		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
+			" failed with status 0x%x for scsi%d\n",
 			dcmd->cmd_status, instance->host->host_no);
 		retval = 1; /* Do a scan if we couldn't get affiliation */
 		goto out;
@@ -1947,9 +1958,8 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
 		for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
 			if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
 			    new_affiliation_111->map[ld].policy[thisVf]) {
-				printk(KERN_WARNING "megasas: SR-IOV: "
-				       "Got new LD/VF affiliation "
-				       "for scsi%d.\n",
+				dev_warn(&instance->pdev->dev, "SR-IOV: "
+					"Got new LD/VF affiliation for scsi%d\n",
 					instance->host->host_no);
 				memcpy(instance->vf_affiliation_111,
 				       new_affiliation_111,
@@ -1985,8 +1995,8 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
 	cmd = megasas_get_cmd(instance);
 
 	if (!cmd) {
-		printk(KERN_DEBUG "megasas: megasas_get_ld_vf_affiliation12: "
-		       "Failed to get cmd for scsi%d.\n",
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: "
+			"Failed to get cmd for scsi%d\n",
 			instance->host->host_no);
 		return -ENOMEM;
 	}
@@ -1994,8 +2004,8 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
 	dcmd = &cmd->frame->dcmd;
 
 	if (!instance->vf_affiliation) {
-		printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF "
-		       "affiliation for scsi%d.\n", instance->host->host_no);
+		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
+			"affiliation for scsi%d\n", instance->host->host_no);
 		megasas_return_cmd(instance, cmd);
 		return -ENOMEM;
 	}
@@ -2010,8 +2020,8 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
 					   sizeof(struct MR_LD_VF_AFFILIATION),
 					   &new_affiliation_h);
 	if (!new_affiliation) {
-		printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate "
-		       "memory for new affiliation for scsi%d.\n",
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
+			"memory for new affiliation for scsi%d\n",
 			instance->host->host_no);
 		megasas_return_cmd(instance, cmd);
 		return -ENOMEM;
@@ -2042,14 +2052,14 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
 	dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
 		sizeof(struct MR_LD_VF_AFFILIATION));
 
-	printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
+	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
 	       "scsi%d\n", instance->host->host_no);
 
 	megasas_issue_blocked_cmd(instance, cmd, 0);
 
 	if (dcmd->cmd_status) {
-		printk(KERN_WARNING "megasas: SR-IOV: LD/VF affiliation DCMD"
-		       " failed with status 0x%x for scsi%d.\n",
+		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
+			" failed with status 0x%x for scsi%d\n",
 			dcmd->cmd_status, instance->host->host_no);
 		retval = 1; /* Do a scan if we couldn't get affiliation */
 		goto out;
@@ -2057,8 +2067,8 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
 
 	if (!initial) {
 		if (!new_affiliation->ldCount) {
-			printk(KERN_WARNING "megasas: SR-IOV: Got new LD/VF "
-			       "affiliation for passive path for scsi%d.\n",
+			dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
+				"affiliation for passive path for scsi%d\n",
 				instance->host->host_no);
 			retval = 1;
 			goto out;
@@ -2123,8 +2133,8 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
 	}
 out:
 	if (doscan) {
-		printk(KERN_WARNING "megasas: SR-IOV: Got new LD/VF "
-		       "affiliation for scsi%d.\n", instance->host->host_no);
+		dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
+			"affiliation for scsi%d\n", instance->host->host_no);
 		memcpy(instance->vf_affiliation, new_affiliation,
 		       new_affiliation->size);
 		retval = 1;
@@ -2164,8 +2174,8 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
 	cmd = megasas_get_cmd(instance);
 
 	if (!cmd) {
-		printk(KERN_DEBUG "megasas: megasas_sriov_start_heartbeat: "
-		       "Failed to get cmd for scsi%d.\n",
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
+			"Failed to get cmd for scsi%d\n",
 			instance->host->host_no);
 		return -ENOMEM;
 	}
@@ -2178,9 +2188,9 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
 							 sizeof(struct MR_CTRL_HB_HOST_MEM),
 							 &instance->hb_host_mem_h);
 	if (!instance->hb_host_mem) {
-		printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate"
-		       " memory for heartbeat host memory for "
-		       "scsi%d.\n", instance->host->host_no);
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
+			" memory for heartbeat host memory for scsi%d\n",
+			instance->host->host_no);
 		retval = -ENOMEM;
 		goto out;
 	}
@@ -2200,7 +2210,7 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
 	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->hb_host_mem_h);
 	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
 
-	printk(KERN_WARNING "megasas: SR-IOV: Starting heartbeat for scsi%d\n",
+	dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
 	       instance->host->host_no);
 
 	if (instance->ctrl_context && !instance->mask_interrupts)
@@ -2236,7 +2246,7 @@ void megasas_sriov_heartbeat_handler(unsigned long instance_addr)
 		mod_timer(&instance->sriov_heartbeat_timer,
 			  jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
 	} else {
-		printk(KERN_WARNING "megasas: SR-IOV: Heartbeat never "
+		dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
 		       "completed for scsi%d\n", instance->host->host_no);
 		schedule_work(&instance->work_init);
 	}
@@ -2274,7 +2284,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
 			 &clist_local);
 	spin_unlock_irqrestore(&instance->hba_lock, flags);
 
-	printk(KERN_NOTICE "megasas: HBA reset wait ...\n");
+	dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
 	for (i = 0; i < wait_time; i++) {
 		msleep(1000);
 		spin_lock_irqsave(&instance->hba_lock, flags);
@@ -2285,28 +2295,28 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
 	}
 
 	if (adprecovery != MEGASAS_HBA_OPERATIONAL) {
-		printk(KERN_NOTICE "megasas: reset: Stopping HBA.\n");
+		dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
 		spin_lock_irqsave(&instance->hba_lock, flags);
 		instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
 		spin_unlock_irqrestore(&instance->hba_lock, flags);
 		return FAILED;
 	}
 
 	reset_index = 0;
 	while (!list_empty(&clist_local)) {
 		reset_cmd = list_entry((&clist_local)->next,
 				       struct megasas_cmd, list);
 		list_del_init(&reset_cmd->list);
 		if (reset_cmd->scmd) {
 			reset_cmd->scmd->result = DID_RESET << 16;
-			printk(KERN_NOTICE "%d:%p reset [%02x]\n",
+			dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
 				reset_index, reset_cmd,
 				reset_cmd->scmd->cmnd[0]);
 
 			reset_cmd->scmd->scsi_done(reset_cmd->scmd);
 			megasas_return_cmd(instance, reset_cmd);
 		} else if (reset_cmd->sync_cmd) {
-			printk(KERN_NOTICE "megasas:%p synch cmds"
+			dev_notice(&instance->pdev->dev, "%p synch cmds"
 					"reset queue\n",
 					reset_cmd);
 
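The DID_RESET << 16 assignment in the hunk above uses the SCSI mid-layer's packed result word, in which the host-byte disposition occupies bits 16-23 of scmd->result. A hedged illustration of failing a command back with a host status:

	/* The host byte sits at bits 16-23 of scmd->result, so host-level
	 * dispositions such as DID_RESET are shifted left by 16.
	 */
	scmd->result = DID_RESET << 16;
	scmd->scsi_done(scmd);	/* hand the command back to the mid-layer */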
@@ -2315,7 +2325,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
 					reset_cmd->frame_phys_addr,
 					0, instance->reg_set);
 		} else {
-			printk(KERN_NOTICE "megasas: %p unexpected"
+			dev_notice(&instance->pdev->dev, "%p unexpected"
 					"cmds lst\n",
 					reset_cmd);
 		}
@@ -2326,14 +2336,13 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
 	}
 
 	for (i = 0; i < resetwaittime; i++) {
-
 		int outstanding = atomic_read(&instance->fw_outstanding);
 
 		if (!outstanding)
 			break;
 
 		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
-			printk(KERN_NOTICE "megasas: [%2d]waiting for %d "
+			dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
 			       "commands to complete\n",i,outstanding);
 			/*
 			 * Call cmd completion routine. Cmd to be
@@ -2365,10 +2374,8 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
 		i++;
 	} while (i <= 3);
 
-	if (atomic_read(&instance->fw_outstanding) &&
-					!kill_adapter_flag) {
+	if (atomic_read(&instance->fw_outstanding) && !kill_adapter_flag) {
 		if (instance->disableOnlineCtrlReset == 0) {
-
 			megasas_do_ocr(instance);
 
 			/* wait for 5 secs to let FW finish the pending cmds */
@@ -2384,11 +2391,11 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
 
 	if (atomic_read(&instance->fw_outstanding) ||
 	    (kill_adapter_flag == 2)) {
-		printk(KERN_NOTICE "megaraid_sas: pending cmds after reset\n");
+		dev_notice(&instance->pdev->dev, "pending cmds after reset\n");
 		/*
 		 * Send signal to FW to stop processing any pending cmds.
 		 * The controller will be taken offline by the OS now.
 		 */
 		if ((instance->pdev->device ==
 			PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
 			(instance->pdev->device ==
@@ -2401,12 +2408,12 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance)
 		}
 		megasas_dump_pending_frames(instance);
 		spin_lock_irqsave(&instance->hba_lock, flags);
 		instance->adprecovery = MEGASAS_HW_CRITICAL_ERROR;
 		spin_unlock_irqrestore(&instance->hba_lock, flags);
 		return FAILED;
 	}
 
-	printk(KERN_NOTICE "megaraid_sas: no pending cmds after reset\n");
+	dev_notice(&instance->pdev->dev, "no pending cmds after reset\n");
 
 	return SUCCESS;
 }
@@ -2430,16 +2437,15 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd)
 		 scmd->cmnd[0], scmd->retries);
 
 	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
-		printk(KERN_ERR "megasas: cannot recover from previous reset "
-		       "failures\n");
+		dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
 		return FAILED;
 	}
 
 	ret_val = megasas_wait_for_outstanding(instance);
 	if (ret_val == SUCCESS)
-		printk(KERN_NOTICE "megasas: reset successful \n");
+		dev_notice(&instance->pdev->dev, "reset successful\n");
 	else
-		printk(KERN_ERR "megasas: failed to do reset\n");
+		dev_err(&instance->pdev->dev, "failed to do reset\n");
 
 	return ret_val;
 }
@@ -2481,14 +2487,10 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
  */
 static int megasas_reset_device(struct scsi_cmnd *scmd)
 {
-	int ret;
-
 	/*
 	 * First wait for all commands to complete
 	 */
-	ret = megasas_generic_reset(scmd);
-
-	return ret;
+	return megasas_generic_reset(scmd);
 }
 
 /**
@@ -2498,6 +2500,7 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
 {
 	int ret;
 	struct megasas_instance *instance;
+
 	instance = (struct megasas_instance *)scmd->device->host->hostdata;
 
 	/*
@@ -2516,7 +2519,7 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
 
 /**
  * megasas_bios_param - Returns disk geometry for a disk
  * @sdev: device handle
  * @bdev: block device
  * @capacity: drive capacity
  * @geom: geometry parameters
@@ -2529,6 +2532,7 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
 	int sectors;
 	sector_t cylinders;
 	unsigned long tmp;
+
 	/* Default heads (64) & sectors (32) */
 	heads = 64;
 	sectors = 32;
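With the defaults above, megasas_bios_param derives the cylinder count by dividing the capacity by heads * sectors. A condensed sketch of the arithmetic (the function's fallback to 255 heads / 63 sectors for large disks is omitted here):

	heads = 64;
	sectors = 32;
	tmp = heads * sectors;
	cylinders = capacity;		/* capacity in 512-byte sectors */
	sector_div(cylinders, tmp);	/* cylinders = capacity / (64 * 32) */
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;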
@@ -2575,6 +2579,7 @@ static void
 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
 {
 	unsigned long flags;
+
 	/*
 	 * Don't signal app if it is just an aborted previously registered aen
 	 */
@@ -2595,9 +2600,10 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
 	if ((instance->unload == 0) &&
 	    ((instance->issuepend_done == 1))) {
 		struct megasas_aen_event *ev;
+
 		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
 		if (!ev) {
-			printk(KERN_ERR "megasas_service_aen: out of memory\n");
+			dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
 		} else {
 			ev->instance = instance;
 			instance->ev = ev;
@@ -2654,8 +2660,7 @@ megasas_fw_crash_buffer_show(struct device *cdev,
 
 	buff_addr = (unsigned long) buf;
 
-	if (buff_offset >
-		(instance->fw_crash_buffer_size * dmachunk)) {
+	if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
 		dev_err(&instance->pdev->dev,
 			"Firmware crash dump offset is out of range\n");
 		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
@@ -2667,7 +2672,7 @@ megasas_fw_crash_buffer_show(struct device *cdev,
 
 	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
 		(buff_offset % dmachunk);
 	memcpy(buf, (void *)src_addr, size);
 	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
 
 	return size;
@@ -2727,6 +2732,7 @@ megasas_fw_crash_state_show(struct device *cdev,
 	struct Scsi_Host *shost = class_to_shost(cdev);
 	struct megasas_instance *instance =
 		(struct megasas_instance *) shost->hostdata;
+
 	return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
 }
 
@@ -2811,8 +2817,6 @@ megasas_complete_abort(struct megasas_instance *instance,
 		cmd->cmd_status_drv = 0;
 		wake_up(&instance->abort_cmd_wait_q);
 	}
-
-	return;
 }
 
 /**
@@ -2820,10 +2824,10 @@ megasas_complete_abort(struct megasas_instance *instance,
  * @instance: Adapter soft state
  * @cmd: Command to be completed
  * @alt_status: If non-zero, use this value as status to
  * SCSI mid-layer instead of the value returned
  * by the FW. This should be used if caller wants
  * an alternate status (as in the case of aborted
  * commands)
  */
 void
 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
@@ -2847,10 +2851,10 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
 		   MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
 		   when booting the kdump kernel. Ignore this command to
 		   prevent a kernel panic on shutdown of the kdump kernel. */
-		printk(KERN_WARNING "megaraid_sas: MFI_CMD_INVALID command "
-		       "completed.\n");
-		printk(KERN_WARNING "megaraid_sas: If you have a controller "
-		       "other than PERC5, please upgrade your firmware.\n");
+		dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
+			"completed\n");
+		dev_warn(&instance->pdev->dev, "If you have a controller "
+			"other than PERC5, please upgrade your firmware\n");
 		break;
 	case MFI_CMD_PD_SCSI_IO:
 	case MFI_CMD_LD_SCSI_IO:
@@ -2918,7 +2922,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
 			break;
 
 		default:
-			printk(KERN_DEBUG "megasas: MFI FW status %#x\n",
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
 				hdr->cmd_status);
 			cmd->scmd->result = DID_ERROR << 16;
 			break;
@@ -2944,8 +2948,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
 		if (cmd->frame->hdr.cmd_status != 0) {
 			if (cmd->frame->hdr.cmd_status !=
 			    MFI_STAT_NOT_FOUND)
-				printk(KERN_WARNING "megasas: map sync"
-				       "failed, status = 0x%x.\n",
+				dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n",
 					cmd->frame->hdr.cmd_status);
 			else {
 				megasas_return_cmd(instance, cmd);
@@ -2997,7 +3000,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
 		break;
 
 	default:
-		printk("megasas: Unknown command completed! [0x%X]\n",
+		dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
 		       hdr->cmd);
 		break;
 	}
@@ -3005,7 +3008,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
 
 /**
  * megasas_issue_pending_cmds_again - issue all pending cmds
  * in FW again because of the fw reset
  * @instance: Adapter soft state
  */
 static inline void
@@ -3023,19 +3026,19 @@ megasas_issue_pending_cmds_again(struct megasas_instance *instance)
 	spin_unlock_irqrestore(&instance->hba_lock, flags);
 
 	while (!list_empty(&clist_local)) {
 		cmd = list_entry((&clist_local)->next,
 				 struct megasas_cmd, list);
 		list_del_init(&cmd->list);
 
 		if (cmd->sync_cmd || cmd->scmd) {
-			printk(KERN_NOTICE "megaraid_sas: command %p, %p:%d"
-				"detected to be pending while HBA reset.\n",
+			dev_notice(&instance->pdev->dev, "command %p, %p:%d"
+				"detected to be pending while HBA reset\n",
 					cmd, cmd->scmd, cmd->sync_cmd);
 
 			cmd->retry_for_fw_reset++;
 
 			if (cmd->retry_for_fw_reset == 3) {
-				printk(KERN_NOTICE "megaraid_sas: cmd %p, %p:%d"
+				dev_notice(&instance->pdev->dev, "cmd %p, %p:%d"
 					"was tried multiple times during reset."
 					"Shutting down the HBA\n",
 					cmd, cmd->scmd, cmd->sync_cmd);
@@ -3048,18 +3051,18 @@ megasas_issue_pending_cmds_again(struct megasas_instance *instance)
 
 		if (cmd->sync_cmd == 1) {
 			if (cmd->scmd) {
-				printk(KERN_NOTICE "megaraid_sas: unexpected"
+				dev_notice(&instance->pdev->dev, "unexpected"
 					"cmd attached to internal command!\n");
 			}
-			printk(KERN_NOTICE "megasas: %p synchronous cmd"
+			dev_notice(&instance->pdev->dev, "%p synchronous cmd"
 				"on the internal reset queue,"
 				"issue it again.\n", cmd);
 			cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS;
 			instance->instancet->fire_cmd(instance,
-					cmd->frame_phys_addr ,
+					cmd->frame_phys_addr,
 					0, instance->reg_set);
 		} else if (cmd->scmd) {
-			printk(KERN_NOTICE "megasas: %p scsi cmd [%02x]"
+			dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]"
 				"detected on the internal queue, issue again.\n",
 				cmd, cmd->scmd->cmnd[0]);
 
@@ -3068,22 +3071,22 @@ megasas_issue_pending_cmds_again(struct megasas_instance *instance)
 				cmd->frame_phys_addr,
 				cmd->frame_count-1, instance->reg_set);
 		} else {
-			printk(KERN_NOTICE "megasas: %p unexpected cmd on the"
+			dev_notice(&instance->pdev->dev, "%p unexpected cmd on the"
 				"internal reset defer list while re-issue!!\n",
 				cmd);
 		}
 	}
 
 	if (instance->aen_cmd) {
-		printk(KERN_NOTICE "megaraid_sas: aen_cmd in def process\n");
+		dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
 		megasas_return_cmd(instance, instance->aen_cmd);
 
 		instance->aen_cmd = NULL;
 	}
 
 	/*
 	 * Initiate AEN (Asynchronous Event Notification)
 	 */
 	seq_num = instance->last_seq_num;
 	class_locale.members.reserved = 0;
 	class_locale.members.locale = MR_EVT_LOCALE_ALL;
@@ -3110,17 +3113,17 @@ megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
 	u32 defer_index;
 	unsigned long flags;
 
 	defer_index = 0;
 	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
 	for (i = 0; i < max_cmd; i++) {
 		cmd = instance->cmd_list[i];
 		if (cmd->sync_cmd == 1 || cmd->scmd) {
-			printk(KERN_NOTICE "megasas: moving cmd[%d]:%p:%d:%p"
+			dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p"
 				"on the defer queue as internal\n",
 				defer_index, cmd, cmd->sync_cmd, cmd->scmd);
 
 			if (!list_empty(&cmd->list)) {
-				printk(KERN_NOTICE "megaraid_sas: ERROR while"
+				dev_notice(&instance->pdev->dev, "ERROR while"
 					" moving this cmd:%p, %d %p, it was"
 					"discovered on some list?\n",
 					cmd, cmd->sync_cmd, cmd->scmd);
@@ -3145,13 +3148,13 @@ process_fw_state_change_wq(struct work_struct *work)
 	unsigned long flags;
 
 	if (instance->adprecovery != MEGASAS_ADPRESET_SM_INFAULT) {
-		printk(KERN_NOTICE "megaraid_sas: error, recovery st %x \n",
+		dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
 			instance->adprecovery);
 		return ;
 	}
 
 	if (instance->adprecovery == MEGASAS_ADPRESET_SM_INFAULT) {
-		printk(KERN_NOTICE "megaraid_sas: FW detected to be in fault"
+		dev_notice(&instance->pdev->dev, "FW detected to be in fault"
 			"state, restarting it...\n");
 
 		instance->instancet->disable_intr(instance);
@@ -3159,21 +3162,21 @@ process_fw_state_change_wq(struct work_struct *work)
 
 		atomic_set(&instance->fw_reset_no_pci_access, 1);
 		instance->instancet->adp_reset(instance, instance->reg_set);
-		atomic_set(&instance->fw_reset_no_pci_access, 0 );
+		atomic_set(&instance->fw_reset_no_pci_access, 0);
 
-		printk(KERN_NOTICE "megaraid_sas: FW restarted successfully,"
+		dev_notice(&instance->pdev->dev, "FW restarted successfully,"
 			"initiating next stage...\n");
 
-		printk(KERN_NOTICE "megaraid_sas: HBA recovery state machine,"
+		dev_notice(&instance->pdev->dev, "HBA recovery state machine,"
 			"state 2 starting...\n");
 
-		/*waitting for about 20 second before start the second init*/
+		/* waiting for about 20 second before start the second init */
 		for (wait = 0; wait < 30; wait++) {
 			msleep(1000);
 		}
 
 		if (megasas_transition_to_ready(instance, 1)) {
-			printk(KERN_NOTICE "megaraid_sas:adapter not ready\n");
+			dev_notice(&instance->pdev->dev, "adapter not ready\n");
 
 			atomic_set(&instance->fw_reset_no_pci_access, 1);
 			megaraid_sas_kill_hba(instance);
@@ -3200,15 +3203,14 @@ process_fw_state_change_wq(struct work_struct *work)
 		megasas_issue_pending_cmds_again(instance);
 		instance->issuepend_done = 1;
 	}
-	return ;
 }
 
 /**
  * megasas_deplete_reply_queue - Processes all completed commands
  * @instance: Adapter soft state
  * @alt_status: Alternate status to be returned to
  * SCSI mid-layer instead of the status
  * returned by the FW
  * Note: this must be called with hba lock held
  */
 static int
@@ -3238,13 +3240,13 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
 			instance->reg_set) & MFI_STATE_MASK;
 
 		if (fw_state != MFI_STATE_FAULT) {
-			printk(KERN_NOTICE "megaraid_sas: fw state:%x\n",
+			dev_notice(&instance->pdev->dev, "fw state:%x\n",
 				fw_state);
 		}
 
 		if ((fw_state == MFI_STATE_FAULT) &&
 		    (instance->disableOnlineCtrlReset == 0)) {
-			printk(KERN_NOTICE "megaraid_sas: wait adp restart\n");
+			dev_notice(&instance->pdev->dev, "wait adp restart\n");
 
 			if ((instance->pdev->device ==
 				PCI_DEVICE_ID_LSI_SAS1064R) ||
@@ -3265,14 +3267,14 @@ megasas_deplete_reply_queue(struct megasas_instance *instance,
 			atomic_set(&instance->fw_outstanding, 0);
 			megasas_internal_reset_defer_cmds(instance);
 
-			printk(KERN_NOTICE "megasas: fwState=%x, stage:%d\n",
+			dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
 				fw_state, instance->adprecovery);
 
 			schedule_work(&instance->work_init);
 			return IRQ_HANDLED;
 
 		} else {
-			printk(KERN_NOTICE "megasas: fwstate:%x, dis_OCR=%x\n",
+			dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
 				fw_state, instance->disableOnlineCtrlReset);
 		}
 	}
@@ -3288,13 +3290,13 @@ static irqreturn_t megasas_isr(int irq, void *devp)
 	struct megasas_irq_context *irq_context = devp;
 	struct megasas_instance *instance = irq_context->instance;
 	unsigned long flags;
 	irqreturn_t rc;
 
 	if (atomic_read(&instance->fw_reset_no_pci_access))
 		return IRQ_HANDLED;
 
 	spin_lock_irqsave(&instance->hba_lock, flags);
 	rc = megasas_deplete_reply_queue(instance, DID_OK);
 	spin_unlock_irqrestore(&instance->hba_lock, flags);
 
 	return rc;
@@ -3322,7 +3324,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
 	fw_state = abs_state & MFI_STATE_MASK;
 
 	if (fw_state != MFI_STATE_READY)
-		printk(KERN_INFO "megasas: Waiting for FW to come to ready"
+		dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
 		       " state\n");
 
 	while (fw_state != MFI_STATE_READY) {
@@ -3330,7 +3332,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
 		switch (fw_state) {
 
 		case MFI_STATE_FAULT:
-			printk(KERN_DEBUG "megasas: FW in FAULT state!!\n");
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW in FAULT state!!\n");
 			if (ocr) {
 				max_wait = MEGASAS_RESET_WAIT_TIME;
 				cur_state = MFI_STATE_FAULT;
@@ -3469,7 +3471,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
 			break;
 
 		default:
-			printk(KERN_DEBUG "megasas: Unknown state 0x%x\n",
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
 			       fw_state);
 			return -ENODEV;
 		}
@@ -3491,7 +3493,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
 		 * Return error if fw_state hasn't changed after max_wait
 		 */
 		if (curr_abs_state == abs_state) {
-			printk(KERN_DEBUG "FW state [%d] hasn't changed "
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
 			       "in %d secs\n", fw_state, max_wait);
 			return -ENODEV;
 		}
@@ -3499,7 +3501,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
 		abs_state = curr_abs_state;
 		fw_state = curr_abs_state & MFI_STATE_MASK;
 	}
-	printk(KERN_INFO "megasas: FW now in Ready state\n");
+	dev_info(&instance->pdev->dev, "FW now in Ready state\n");
 
 	return 0;
 }
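megasas_transition_to_ready, touched only in its log statements above, polls the firmware status register, masking the state field with MFI_STATE_MASK and giving up if the raw value does not change within max_wait seconds. A condensed, hedged sketch of that polling loop (the per-state handling is elided):

	abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
	fw_state = abs_state & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		/* wait up to max_wait seconds for any state change */
		for (i = 0; i < max_wait * 1000; i += 20) {
			curr_abs_state = instance->instancet->
				read_fw_status_reg(instance->reg_set);
			if (curr_abs_state != abs_state)
				break;
			msleep(20);
		}
		if (curr_abs_state == abs_state)
			return -ENODEV;	/* "FW state hasn't changed" */
		abs_state = curr_abs_state;
		fw_state = curr_abs_state & MFI_STATE_MASK;
	}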
@@ -3570,9 +3572,8 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
 	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
 		sizeof(struct megasas_sge32);
 
-	if (instance->flag_ieee) {
+	if (instance->flag_ieee)
 		sge_sz = sizeof(struct megasas_sge_skinny);
-	}
 
 	/*
 	 * For MFI controllers.
@@ -3594,7 +3595,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
 					instance->pdev, total_sz, 256, 0);
 
 	if (!instance->frame_dma_pool) {
-		printk(KERN_DEBUG "megasas: failed to setup frame pool\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
 		return -ENOMEM;
 	}
 
@@ -3602,7 +3603,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
 					instance->pdev, 128, 4, 0);
 
 	if (!instance->sense_dma_pool) {
-		printk(KERN_DEBUG "megasas: failed to setup sense pool\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
 
 		pci_pool_destroy(instance->frame_dma_pool);
 		instance->frame_dma_pool = NULL;
@@ -3630,7 +3631,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
 		 * whatever has been allocated
 		 */
 		if (!cmd->frame || !cmd->sense) {
-			printk(KERN_DEBUG "megasas: pci_pool_alloc failed \n");
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n");
 			megasas_teardown_frame_pool(instance);
 			return -ENOMEM;
 		}
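The frame pool touched above follows the classic pci_pool pattern: one DMA pool for MFI frames, one for sense buffers, and one allocation per command. A minimal sketch of that API as it existed in this era (names and sizes are illustrative, not the driver's exact values):

	struct pci_pool *pool;
	dma_addr_t frame_h;
	void *frame;

	pool = pci_pool_create("megasas frame pool", instance->pdev,
			       total_sz, 256 /* alignment */, 0);
	if (!pool)
		return -ENOMEM;

	frame = pci_pool_alloc(pool, GFP_KERNEL, &frame_h);	/* one per cmd */
	if (!frame) {
		pci_pool_destroy(pool);
		return -ENOMEM;
	}

	/* use the frame for DMA, then tear everything down */
	pci_pool_free(pool, frame, frame_h);
	pci_pool_destroy(pool);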
@@ -3656,6 +3657,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
 void megasas_free_cmds(struct megasas_instance *instance)
 {
 	int i;
+
 	/* First free the MFI frame pool */
 	megasas_teardown_frame_pool(instance);
 
@@ -3708,7 +3710,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
 	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL);
 
 	if (!instance->cmd_list) {
-		printk(KERN_DEBUG "megasas: out of memory\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
 		return -ENOMEM;
 	}
 
@@ -3744,7 +3746,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
 	 * Create a frame pool and assign one frame to each cmd
 	 */
 	if (megasas_create_frame_pool(instance)) {
-		printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
 		megasas_free_cmds(instance);
 	}
 
@@ -3773,7 +3775,7 @@ megasas_get_pd_list(struct megasas_instance *instance)
 	cmd = megasas_get_cmd(instance);
 
 	if (!cmd) {
-		printk(KERN_DEBUG "megasas (get_pd_list): Failed to get cmd\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
 		return -ENOMEM;
 	}
 
@@ -3783,7 +3785,7 @@ megasas_get_pd_list(struct megasas_instance *instance)
 			MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST), &ci_h);
 
 	if (!ci) {
-		printk(KERN_DEBUG "Failed to alloc mem for pd_list\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for pd_list\n");
 		megasas_return_cmd(instance, cmd);
 		return -ENOMEM;
 	}
@@ -3811,12 +3813,12 @@ megasas_get_pd_list(struct megasas_instance *instance)
 	ret = megasas_issue_polled(instance, cmd);
 
 	/*
 	 * the following function will get the instance PD LIST.
 	 */
 
 	pd_addr = ci->addr;
 
-	if ( ret == 0 &&
+	if (ret == 0 &&
 	    (le32_to_cpu(ci->count) <
 	     (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) {
 
@@ -3868,7 +3870,7 @@ megasas_get_ld_list(struct megasas_instance *instance)
 	cmd = megasas_get_cmd(instance);
 
 	if (!cmd) {
-		printk(KERN_DEBUG "megasas_get_ld_list: Failed to get cmd\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
 		return -ENOMEM;
 	}
 
@@ -3879,7 +3881,7 @@ megasas_get_ld_list(struct megasas_instance *instance)
 				&ci_h);
 
 	if (!ci) {
-		printk(KERN_DEBUG "Failed to alloc mem in get_ld_list\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem in get_ld_list\n");
 		megasas_return_cmd(instance, cmd);
 		return -ENOMEM;
 	}
@@ -3954,8 +3956,8 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
 	cmd = megasas_get_cmd(instance);
 
 	if (!cmd) {
-		printk(KERN_WARNING
-		       "megasas:(megasas_ld_list_query): Failed to get cmd\n");
+		dev_warn(&instance->pdev->dev,
+			 "megasas_ld_list_query: Failed to get cmd\n");
 		return -ENOMEM;
 	}
 
@@ -3965,8 +3967,8 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
 				  sizeof(struct MR_LD_TARGETID_LIST), &ci_h);
 
 	if (!ci) {
-		printk(KERN_WARNING
-		       "megasas: Failed to alloc mem for ld_list_query\n");
+		dev_warn(&instance->pdev->dev,
+			 "Failed to alloc mem for ld_list_query\n");
 		megasas_return_cmd(instance, cmd);
 		return -ENOMEM;
 	}
@@ -4052,11 +4054,11 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance)
 		instance->supportmax256vd ? "Extended VD(240 VD)firmware" :
 		"Legacy(64 VD) firmware");
 
 	old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
 			(sizeof(struct MR_LD_SPAN_MAP) *
 			(instance->fw_supported_vd_count - 1));
 	new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
 	fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP) +
 			(sizeof(struct MR_LD_SPAN_MAP) *
 			(instance->drv_supported_vd_count - 1));
 
@@ -4067,7 +4069,6 @@ static void megasas_update_ext_vd_details(struct megasas_instance *instance)
 		fusion->current_map_sz = new_map_sz;
 	else
 		fusion->current_map_sz = old_map_sz;
-
 }
 
 /**
@@ -4093,7 +4094,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
 	cmd = megasas_get_cmd(instance);
 
 	if (!cmd) {
-		printk(KERN_DEBUG "megasas: Failed to get a free cmd\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
 		return -ENOMEM;
 	}
 
@@ -4103,7 +4104,7 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
 				  sizeof(struct megasas_ctrl_info), &ci_h);
 
 	if (!ci) {
-		printk(KERN_DEBUG "Failed to alloc mem for ctrl info\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ctrl info\n");
 		megasas_return_cmd(instance, cmd);
 		return -ENOMEM;
 	}
@@ -4214,9 +4215,7 @@ static int
 megasas_issue_init_mfi(struct megasas_instance *instance)
 {
 	__le32 context;
-
 	struct megasas_cmd *cmd;
-
 	struct megasas_init_frame *init_frame;
 	struct megasas_init_queue_info *initq_info;
 	dma_addr_t init_frame_h;
@@ -4269,7 +4268,7 @@ megasas_issue_init_mfi(struct megasas_instance *instance)
 	 */
 
 	if (megasas_issue_polled(instance, cmd)) {
-		printk(KERN_ERR "megasas: Failed to init firmware\n");
+		dev_err(&instance->pdev->dev, "Failed to init firmware\n");
 		megasas_return_cmd(instance, cmd);
 		goto fail_fw_init;
 	}
@@ -4342,7 +4341,7 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
 						  &instance->reply_queue_h);
 
 	if (!instance->reply_queue) {
-		printk(KERN_DEBUG "megasas: Out of DMA mem for reply queue\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
 		goto fail_reply_queue;
 	}
 
@@ -4361,7 +4360,7 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
 			   (instance->instancet->read_fw_status_reg(reg_set) &
 			   0x04000000);
 
-	printk(KERN_NOTICE "megasas_init_mfi: fw_support_ieee=%d",
+	dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
 		instance->fw_support_ieee);
 
 	if (instance->fw_support_ieee)
@@ -4505,7 +4504,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
 	instance->bar = find_first_bit(&bar_list, sizeof(unsigned long));
 	if (pci_request_selected_regions(instance->pdev, instance->bar,
 					 "megasas: LSI")) {
-		printk(KERN_DEBUG "megasas: IO memory region busy!\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
 		return -EBUSY;
 	}
 
@@ -4513,7 +4512,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
 	instance->reg_set = ioremap_nocache(base_addr, 8192);
 
 	if (!instance->reg_set) {
-		printk(KERN_DEBUG "megasas: Failed to map IO mem\n");
+		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
 		goto fail_ioremap;
 	}
 
@@ -4551,7 +4550,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
 				(instance, instance->reg_set);
 			atomic_set(&instance->fw_reset_no_pci_access, 0);
 			dev_info(&instance->pdev->dev,
-				"megasas: FW restarted successfully from %s!\n",
+				"FW restarted successfully from %s!\n",
 				__func__);
 
 			/*waitting for about 30 second before retry*/
@@ -4652,16 +4651,15 @@ static int megasas_init_fw(struct megasas_instance *instance)
 
 	instance->instancet->enable_intr(instance);
 
-	printk(KERN_ERR "megasas: INIT adapter done\n");
+	dev_err(&instance->pdev->dev, "INIT adapter done\n");
 
 	/** for passthrough
 	 * the following function will get the PD LIST.
 	 */
-
-	memset(instance->pd_list, 0 ,
+	memset(instance->pd_list, 0,
 		(MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
 	if (megasas_get_pd_list(instance) < 0) {
-		printk(KERN_ERR "megasas: failed to get PD list\n");
+		dev_err(&instance->pdev->dev, "failed to get PD list\n");
 		goto fail_get_pd_list;
 	}
 
@@ -4686,7 +4684,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
 		le16_to_cpu(ctrl_info->max_strips_per_io);
 	max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
 
-	tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
+	tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
 
 	instance->disableOnlineCtrlReset =
 		ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
@@ -4960,7 +4958,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
 						aen_cmd, 30);
 
 		if (ret_val) {
-			printk(KERN_DEBUG "megasas: Failed to abort "
+			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
 			       "previous AEN command\n");
 			return ret_val;
 		}
@@ -5051,7 +5049,7 @@ static int megasas_start_aen(struct megasas_instance *instance)
 static int megasas_io_attach(struct megasas_instance *instance)
 {
 	struct Scsi_Host *host = instance->host;
 	u32 error;
 
 	/*
 	 * Export parameters required by SCSI mid-layer
@@ -5079,7 +5077,7 @@ static int megasas_io_attach(struct megasas_instance *instance)
5079 (max_sectors <= MEGASAS_MAX_SECTORS)) { 5077 (max_sectors <= MEGASAS_MAX_SECTORS)) {
5080 instance->max_sectors_per_req = max_sectors; 5078 instance->max_sectors_per_req = max_sectors;
5081 } else { 5079 } else {
5082 printk(KERN_INFO "megasas: max_sectors should be > 0" 5080 dev_info(&instance->pdev->dev, "max_sectors should be > 0"
5083 "and <= %d (or < 1MB for GEN2 controller)\n", 5081 "and <= %d (or < 1MB for GEN2 controller)\n",
5084 instance->max_sectors_per_req); 5082 instance->max_sectors_per_req);
5085 } 5083 }
@@ -5126,7 +5124,7 @@ static int
5126megasas_set_dma_mask(struct pci_dev *pdev) 5124megasas_set_dma_mask(struct pci_dev *pdev)
5127{ 5125{
5128 /* 5126 /*
5129 * All our contollers are capable of performing 64-bit DMA 5127 * All our controllers are capable of performing 64-bit DMA
5130 */ 5128 */
5131 if (IS_DMA64) { 5129 if (IS_DMA64) {
5132 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { 5130 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
@@ -5206,13 +5204,13 @@ static int megasas_probe_one(struct pci_dev *pdev,
5206 sizeof(struct megasas_instance)); 5204 sizeof(struct megasas_instance));
5207 5205
5208 if (!host) { 5206 if (!host) {
5209 printk(KERN_DEBUG "megasas: scsi_host_alloc failed\n"); 5207 dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
5210 goto fail_alloc_instance; 5208 goto fail_alloc_instance;
5211 } 5209 }
5212 5210
5213 instance = (struct megasas_instance *)host->hostdata; 5211 instance = (struct megasas_instance *)host->hostdata;
5214 memset(instance, 0, sizeof(*instance)); 5212 memset(instance, 0, sizeof(*instance));
5215 atomic_set( &instance->fw_reset_no_pci_access, 0 ); 5213 atomic_set(&instance->fw_reset_no_pci_access, 0);
5216 instance->pdev = pdev; 5214 instance->pdev = pdev;
5217 5215
5218 switch (instance->pdev->device) { 5216 switch (instance->pdev->device) {
@@ -5226,7 +5224,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
5226 instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL, 5224 instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL,
5227 instance->ctrl_context_pages); 5225 instance->ctrl_context_pages);
5228 if (!instance->ctrl_context) { 5226 if (!instance->ctrl_context) {
5229 printk(KERN_DEBUG "megasas: Failed to allocate " 5227 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate "
5230 "memory for Fusion context info\n"); 5228 "memory for Fusion context info\n");
5231 goto fail_alloc_dma_buf; 5229 goto fail_alloc_dma_buf;
5232 } 5230 }
@@ -5245,7 +5243,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
5245 &instance->consumer_h); 5243 &instance->consumer_h);
5246 5244
5247 if (!instance->producer || !instance->consumer) { 5245 if (!instance->producer || !instance->consumer) {
5248 printk(KERN_DEBUG "megasas: Failed to allocate" 5246 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate"
5249 "memory for producer, consumer\n"); 5247 "memory for producer, consumer\n");
5250 goto fail_alloc_dma_buf; 5248 goto fail_alloc_dma_buf;
5251 } 5249 }
@@ -5276,7 +5274,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
5276 CRASH_DMA_BUF_SIZE, 5274 CRASH_DMA_BUF_SIZE,
5277 &instance->crash_dump_h); 5275 &instance->crash_dump_h);
5278 if (!instance->crash_dump_buf) 5276 if (!instance->crash_dump_buf)
5279 dev_err(&instance->pdev->dev, "Can't allocate Firmware " 5277 dev_err(&pdev->dev, "Can't allocate Firmware "
5280 "crash dump DMA buffer\n"); 5278 "crash dump DMA buffer\n");
5281 5279
5282 megasas_poll_wait_aen = 0; 5280 megasas_poll_wait_aen = 0;
@@ -5292,7 +5290,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
5292 &instance->evt_detail_h); 5290 &instance->evt_detail_h);
5293 5291
5294 if (!instance->evt_detail) { 5292 if (!instance->evt_detail) {
5295 printk(KERN_DEBUG "megasas: Failed to allocate memory for " 5293 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate memory for "
5296 "event detail structure\n"); 5294 "event detail structure\n");
5297 goto fail_alloc_dma_buf; 5295 goto fail_alloc_dma_buf;
5298 } 5296 }
@@ -5356,7 +5354,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
5356 pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111), 5354 pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
5357 &instance->vf_affiliation_111_h); 5355 &instance->vf_affiliation_111_h);
5358 if (!instance->vf_affiliation_111) 5356 if (!instance->vf_affiliation_111)
5359 printk(KERN_WARNING "megasas: Can't allocate " 5357 dev_warn(&pdev->dev, "Can't allocate "
5360 "memory for VF affiliation buffer\n"); 5358 "memory for VF affiliation buffer\n");
5361 } else { 5359 } else {
5362 instance->vf_affiliation = 5360 instance->vf_affiliation =
@@ -5365,7 +5363,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
5365 sizeof(struct MR_LD_VF_AFFILIATION), 5363 sizeof(struct MR_LD_VF_AFFILIATION),
5366 &instance->vf_affiliation_h); 5364 &instance->vf_affiliation_h);
5367 if (!instance->vf_affiliation) 5365 if (!instance->vf_affiliation)
5368 printk(KERN_WARNING "megasas: Can't allocate " 5366 dev_warn(&pdev->dev, "Can't allocate "
5369 "memory for VF affiliation buffer\n"); 5367 "memory for VF affiliation buffer\n");
5370 } 5368 }
5371 } 5369 }
@@ -5399,7 +5397,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
5399 * Initiate AEN (Asynchronous Event Notification) 5397 * Initiate AEN (Asynchronous Event Notification)
5400 */ 5398 */
5401 if (megasas_start_aen(instance)) { 5399 if (megasas_start_aen(instance)) {
5402 printk(KERN_DEBUG "megasas: start aen failed\n"); 5400 dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
5403 goto fail_start_aen; 5401 goto fail_start_aen;
5404 } 5402 }
5405 5403
@@ -5409,8 +5407,8 @@ static int megasas_probe_one(struct pci_dev *pdev,
5409 5407
5410 return 0; 5408 return 0;
5411 5409
5412 fail_start_aen: 5410fail_start_aen:
5413 fail_io_attach: 5411fail_io_attach:
5414 megasas_mgmt_info.count--; 5412 megasas_mgmt_info.count--;
5415 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; 5413 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
5416 megasas_mgmt_info.max_index--; 5414 megasas_mgmt_info.max_index--;
@@ -5428,7 +5426,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
5428 if (instance->msix_vectors) 5426 if (instance->msix_vectors)
5429 pci_disable_msix(instance->pdev); 5427 pci_disable_msix(instance->pdev);
5430fail_init_mfi: 5428fail_init_mfi:
5431 fail_alloc_dma_buf: 5429fail_alloc_dma_buf:
5432 if (instance->evt_detail) 5430 if (instance->evt_detail)
5433 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail), 5431 pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
5434 instance->evt_detail, 5432 instance->evt_detail,
@@ -5442,8 +5440,8 @@ fail_init_mfi:
5442 instance->consumer_h); 5440 instance->consumer_h);
5443 scsi_host_put(host); 5441 scsi_host_put(host);
5444 5442
5445 fail_alloc_instance: 5443fail_alloc_instance:
5446 fail_set_dma_mask: 5444fail_set_dma_mask:
5447 pci_disable_device(pdev); 5445 pci_disable_device(pdev);
5448 5446
5449 return -ENODEV; 5447 return -ENODEV;
@@ -5485,8 +5483,6 @@ static void megasas_flush_cache(struct megasas_instance *instance)
5485 " from %s\n", __func__); 5483 " from %s\n", __func__);
5486 5484
5487 megasas_return_cmd(instance, cmd); 5485 megasas_return_cmd(instance, cmd);
5488
5489 return;
5490} 5486}
5491 5487
5492/** 5488/**
@@ -5532,8 +5528,6 @@ static void megasas_shutdown_controller(struct megasas_instance *instance,
5532 "from %s\n", __func__); 5528 "from %s\n", __func__);
5533 5529
5534 megasas_return_cmd(instance, cmd); 5530 megasas_return_cmd(instance, cmd);
5535
5536 return;
5537} 5531}
5538 5532
5539#ifdef CONFIG_PM 5533#ifdef CONFIG_PM
@@ -5607,7 +5601,7 @@ megasas_resume(struct pci_dev *pdev)
5607 rval = pci_enable_device_mem(pdev); 5601 rval = pci_enable_device_mem(pdev);
5608 5602
5609 if (rval) { 5603 if (rval) {
5610 printk(KERN_ERR "megasas: Enable device failed\n"); 5604 dev_err(&pdev->dev, "Enable device failed\n");
5611 return rval; 5605 return rval;
5612 } 5606 }
5613 5607
@@ -5686,7 +5680,7 @@ megasas_resume(struct pci_dev *pdev)
5686 * Initiate AEN (Asynchronous Event Notification) 5680 * Initiate AEN (Asynchronous Event Notification)
5687 */ 5681 */
5688 if (megasas_start_aen(instance)) 5682 if (megasas_start_aen(instance))
5689 printk(KERN_ERR "megasas: Start AEN failed\n"); 5683 dev_err(&instance->pdev->dev, "Start AEN failed\n");
5690 5684
5691 return 0; 5685 return 0;
5692 5686
@@ -5839,8 +5833,6 @@ static void megasas_detach_one(struct pci_dev *pdev)
5839 scsi_host_put(host); 5833 scsi_host_put(host);
5840 5834
5841 pci_disable_device(pdev); 5835 pci_disable_device(pdev);
5842
5843 return;
5844} 5836}
5845 5837
5846/** 5838/**
@@ -5909,11 +5901,11 @@ static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
5909{ 5901{
5910 unsigned int mask; 5902 unsigned int mask;
5911 unsigned long flags; 5903 unsigned long flags;
5904
5912 poll_wait(file, &megasas_poll_wait, wait); 5905 poll_wait(file, &megasas_poll_wait, wait);
5913 spin_lock_irqsave(&poll_aen_lock, flags); 5906 spin_lock_irqsave(&poll_aen_lock, flags);
5914 if (megasas_poll_wait_aen) 5907 if (megasas_poll_wait_aen)
5915 mask = (POLLIN | POLLRDNORM); 5908 mask = (POLLIN | POLLRDNORM);
5916
5917 else 5909 else
5918 mask = 0; 5910 mask = 0;
5919 megasas_poll_wait_aen = 0; 5911 megasas_poll_wait_aen = 0;
@@ -5927,8 +5919,7 @@ static unsigned int megasas_mgmt_poll(struct file *file, poll_table *wait)
5927 * @cmd: MFI command frame 5919 * @cmd: MFI command frame
5928 */ 5920 */
5929 5921
5930static int megasas_set_crash_dump_params_ioctl( 5922static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
5931 struct megasas_cmd *cmd)
5932{ 5923{
5933 struct megasas_instance *local_instance; 5924 struct megasas_instance *local_instance;
5934 int i, error = 0; 5925 int i, error = 0;
@@ -5982,14 +5973,14 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
5982 memset(kbuff_arr, 0, sizeof(kbuff_arr)); 5973 memset(kbuff_arr, 0, sizeof(kbuff_arr));
5983 5974
5984 if (ioc->sge_count > MAX_IOCTL_SGE) { 5975 if (ioc->sge_count > MAX_IOCTL_SGE) {
5985 printk(KERN_DEBUG "megasas: SGE count [%d] > max limit [%d]\n", 5976 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n",
5986 ioc->sge_count, MAX_IOCTL_SGE); 5977 ioc->sge_count, MAX_IOCTL_SGE);
5987 return -EINVAL; 5978 return -EINVAL;
5988 } 5979 }
5989 5980
5990 cmd = megasas_get_cmd(instance); 5981 cmd = megasas_get_cmd(instance);
5991 if (!cmd) { 5982 if (!cmd) {
5992 printk(KERN_DEBUG "megasas: Failed to get a cmd packet\n"); 5983 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
5993 return -ENOMEM; 5984 return -ENOMEM;
5994 } 5985 }
5995 5986
@@ -6034,8 +6025,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
6034 ioc->sgl[i].iov_len, 6025 ioc->sgl[i].iov_len,
6035 &buf_handle, GFP_KERNEL); 6026 &buf_handle, GFP_KERNEL);
6036 if (!kbuff_arr[i]) { 6027 if (!kbuff_arr[i]) {
6037 printk(KERN_DEBUG "megasas: Failed to alloc " 6028 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
6038 "kernel SGL buffer for IOCTL \n"); 6029 "kernel SGL buffer for IOCTL\n");
6039 error = -ENOMEM; 6030 error = -ENOMEM;
6040 goto out; 6031 goto out;
6041 } 6032 }
@@ -6108,7 +6099,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
6108 6099
6109 if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)), 6100 if (copy_to_user((void __user *)((unsigned long)(*sense_ptr)),
6110 sense, ioc->sense_len)) { 6101 sense, ioc->sense_len)) {
6111 printk(KERN_ERR "megasas: Failed to copy out to user " 6102 dev_err(&instance->pdev->dev, "Failed to copy out to user "
6112 "sense data\n"); 6103 "sense data\n");
6113 error = -EFAULT; 6104 error = -EFAULT;
6114 goto out; 6105 goto out;
@@ -6120,11 +6111,11 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
6120 */ 6111 */
6121 if (copy_to_user(&user_ioc->frame.hdr.cmd_status, 6112 if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
6122 &cmd->frame->hdr.cmd_status, sizeof(u8))) { 6113 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
6123 printk(KERN_DEBUG "megasas: Error copying out cmd_status\n"); 6114 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
6124 error = -EFAULT; 6115 error = -EFAULT;
6125 } 6116 }
6126 6117
6127 out: 6118out:
6128 if (sense) { 6119 if (sense) {
6129 dma_free_coherent(&instance->pdev->dev, ioc->sense_len, 6120 dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
6130 sense, sense_handle); 6121 sense, sense_handle);
@@ -6180,7 +6171,7 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
6180 } 6171 }
6181 6172
6182 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { 6173 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
6183 printk(KERN_ERR "Controller in crit error\n"); 6174 dev_err(&instance->pdev->dev, "Controller in crit error\n");
6184 error = -ENODEV; 6175 error = -ENODEV;
6185 goto out_kfree_ioc; 6176 goto out_kfree_ioc;
6186 } 6177 }
@@ -6205,7 +6196,7 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
6205 spin_unlock_irqrestore(&instance->hba_lock, flags); 6196 spin_unlock_irqrestore(&instance->hba_lock, flags);
6206 6197
6207 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 6198 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
6208 printk(KERN_NOTICE "megasas: waiting" 6199 dev_notice(&instance->pdev->dev, "waiting"
6209 "for controller reset to finish\n"); 6200 "for controller reset to finish\n");
6210 } 6201 }
6211 6202
@@ -6216,7 +6207,7 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
6216 if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) { 6207 if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
6217 spin_unlock_irqrestore(&instance->hba_lock, flags); 6208 spin_unlock_irqrestore(&instance->hba_lock, flags);
6218 6209
6219 printk(KERN_ERR "megaraid_sas: timed out while" 6210 dev_err(&instance->pdev->dev, "timed out while"
6220 "waiting for HBA to recover\n"); 6211 "waiting for HBA to recover\n");
6221 error = -ENODEV; 6212 error = -ENODEV;
6222 goto out_up; 6213 goto out_up;
@@ -6224,10 +6215,10 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
6224 spin_unlock_irqrestore(&instance->hba_lock, flags); 6215 spin_unlock_irqrestore(&instance->hba_lock, flags);
6225 6216
6226 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); 6217 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
6227 out_up: 6218out_up:
6228 up(&instance->ioctl_sem); 6219 up(&instance->ioctl_sem);
6229 6220
6230 out_kfree_ioc: 6221out_kfree_ioc:
6231 kfree(ioc); 6222 kfree(ioc);
6232 return error; 6223 return error;
6233} 6224}
@@ -6275,7 +6266,7 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
6275 spin_unlock_irqrestore(&instance->hba_lock, flags); 6266 spin_unlock_irqrestore(&instance->hba_lock, flags);
6276 6267
6277 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 6268 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
6278 printk(KERN_NOTICE "megasas: waiting for" 6269 dev_notice(&instance->pdev->dev, "waiting for"
6279 "controller reset to finish\n"); 6270 "controller reset to finish\n");
6280 } 6271 }
6281 6272
@@ -6285,8 +6276,8 @@ static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
6285 spin_lock_irqsave(&instance->hba_lock, flags); 6276 spin_lock_irqsave(&instance->hba_lock, flags);
6286 if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) { 6277 if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
6287 spin_unlock_irqrestore(&instance->hba_lock, flags); 6278 spin_unlock_irqrestore(&instance->hba_lock, flags);
6288 printk(KERN_ERR "megaraid_sas: timed out while waiting" 6279 dev_err(&instance->pdev->dev, "timed out while waiting"
6289 "for HBA to recover.\n"); 6280 "for HBA to recover\n");
6290 return -ENODEV; 6281 return -ENODEV;
6291 } 6282 }
6292 spin_unlock_irqrestore(&instance->hba_lock, flags); 6283 spin_unlock_irqrestore(&instance->hba_lock, flags);
@@ -6462,7 +6453,8 @@ static ssize_t
6462megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t count) 6453megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t count)
6463{ 6454{
6464 int retval = count; 6455 int retval = count;
6465 if(sscanf(buf,"%u",&megasas_dbg_lvl)<1){ 6456
6457 if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
6466 printk(KERN_ERR "megasas: could not set dbg_lvl\n"); 6458 printk(KERN_ERR "megasas: could not set dbg_lvl\n");
6467 retval = -EINVAL; 6459 retval = -EINVAL;
6468 } 6460 }
@@ -6502,7 +6494,7 @@ megasas_aen_polling(struct work_struct *work)
6502 if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) 6494 if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL)
6503 break; 6495 break;
6504 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 6496 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
6505 printk(KERN_NOTICE "megasas: %s waiting for " 6497 dev_notice(&instance->pdev->dev, "%s waiting for "
6506 "controller reset to finish for scsi%d\n", 6498 "controller reset to finish for scsi%d\n",
6507 __func__, instance->host->host_no); 6499 __func__, instance->host->host_no);
6508 } 6500 }
@@ -6524,14 +6516,12 @@ megasas_aen_polling(struct work_struct *work)
6524 pd_index = 6516 pd_index =
6525 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; 6517 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
6526 6518
6527 sdev1 = 6519 sdev1 = scsi_device_lookup(host, i, j, 0);
6528 scsi_device_lookup(host, i, j, 0);
6529 6520
6530 if (instance->pd_list[pd_index].driveState 6521 if (instance->pd_list[pd_index].driveState
6531 == MR_PD_STATE_SYSTEM) { 6522 == MR_PD_STATE_SYSTEM) {
6532 if (!sdev1) { 6523 if (!sdev1)
6533 scsi_add_device(host, i, j, 0); 6524 scsi_add_device(host, i, j, 0);
6534 }
6535 6525
6536 if (sdev1) 6526 if (sdev1)
6537 scsi_device_put(sdev1); 6527 scsi_device_put(sdev1);
@@ -6552,14 +6542,12 @@ megasas_aen_polling(struct work_struct *work)
6552 pd_index = 6542 pd_index =
6553 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; 6543 (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
6554 6544
6555 sdev1 = 6545 sdev1 = scsi_device_lookup(host, i, j, 0);
6556 scsi_device_lookup(host, i, j, 0);
6557 6546
6558 if (instance->pd_list[pd_index].driveState 6547 if (instance->pd_list[pd_index].driveState
6559 == MR_PD_STATE_SYSTEM) { 6548 == MR_PD_STATE_SYSTEM) {
6560 if (sdev1) { 6549 if (sdev1)
6561 scsi_device_put(sdev1); 6550 scsi_device_put(sdev1);
6562 }
6563 } else { 6551 } else {
6564 if (sdev1) { 6552 if (sdev1) {
6565 scsi_remove_device(sdev1); 6553 scsi_remove_device(sdev1);
@@ -6644,13 +6632,13 @@ megasas_aen_polling(struct work_struct *work)
6644 break; 6632 break;
6645 } 6633 }
6646 } else { 6634 } else {
6647 printk(KERN_ERR "invalid evt_detail!\n"); 6635 dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
6648 kfree(ev); 6636 kfree(ev);
6649 return; 6637 return;
6650 } 6638 }
6651 6639
6652 if (doscan) { 6640 if (doscan) {
6653 printk(KERN_INFO "megaraid_sas: scanning for scsi%d...\n", 6641 dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
6654 instance->host->host_no); 6642 instance->host->host_no);
6655 if (megasas_get_pd_list(instance) == 0) { 6643 if (megasas_get_pd_list(instance) == 0) {
6656 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { 6644 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
@@ -6705,7 +6693,7 @@ megasas_aen_polling(struct work_struct *work)
6705 } 6693 }
6706 } 6694 }
6707 6695
6708 if ( instance->aen_cmd != NULL ) { 6696 if (instance->aen_cmd != NULL) {
6709 kfree(ev); 6697 kfree(ev);
6710 return; 6698 return;
6711 } 6699 }
@@ -6722,7 +6710,7 @@ megasas_aen_polling(struct work_struct *work)
6722 mutex_unlock(&instance->aen_mutex); 6710 mutex_unlock(&instance->aen_mutex);
6723 6711
6724 if (error) 6712 if (error)
6725 printk(KERN_ERR "register aen failed error %x\n", error); 6713 dev_err(&instance->pdev->dev, "register aen failed error %x\n", error);
6726 6714
6727 kfree(ev); 6715 kfree(ev);
6728} 6716}
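The hunks above all apply one mechanical conversion, so a short sketch may help readers skimming the diff: printk() with a hand-rolled "megasas: " prefix becomes dev_printk()/dev_err() and friends, which derive the prefix from the struct device and keep every message attributable to a specific PCI function on multi-adapter systems. The my_instance type and my_fail_path() function below are hypothetical stand-ins, not driver code.

#include <linux/device.h>
#include <linux/pci.h>
#include <linux/printk.h>

struct my_instance {
        struct pci_dev *pdev;
};

static void my_fail_path(struct my_instance *instance)
{
        /* before: logs "megasas: Failed to map IO mem" */
        printk(KERN_DEBUG "megasas: Failed to map IO mem\n");

        /* after: logs e.g. "megaraid_sas 0000:03:00.0: Failed to map IO mem" */
        dev_printk(KERN_DEBUG, &instance->pdev->dev,
                   "Failed to map IO mem\n");
}

The same substitution covers dev_err(), dev_warn(), dev_notice() and dev_info() for the corresponding KERN_* levels, which is all the remaining hunks in this file do.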
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 46a0f8f4f677..f0837cc3b163 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -221,7 +221,7 @@ static void megasas_teardown_frame_pool_fusion(
221 struct megasas_cmd_fusion *cmd; 221 struct megasas_cmd_fusion *cmd;
222 222
223 if (!fusion->sg_dma_pool || !fusion->sense_dma_pool) { 223 if (!fusion->sg_dma_pool || !fusion->sense_dma_pool) {
224 printk(KERN_ERR "megasas: dma pool is null. SG Pool %p, " 224 dev_err(&instance->pdev->dev, "dma pool is null. SG Pool %p, "
225 "sense pool : %p\n", fusion->sg_dma_pool, 225 "sense pool : %p\n", fusion->sg_dma_pool,
226 fusion->sense_dma_pool); 226 fusion->sense_dma_pool);
227 return; 227 return;
@@ -332,8 +332,7 @@ static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
332 total_sz_chain_frame, 4, 332 total_sz_chain_frame, 4,
333 0); 333 0);
334 if (!fusion->sg_dma_pool) { 334 if (!fusion->sg_dma_pool) {
335 printk(KERN_DEBUG "megasas: failed to setup request pool " 335 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup request pool fusion\n");
336 "fusion\n");
337 return -ENOMEM; 336 return -ENOMEM;
338 } 337 }
339 fusion->sense_dma_pool = pci_pool_create("megasas sense pool fusion", 338 fusion->sense_dma_pool = pci_pool_create("megasas sense pool fusion",
@@ -341,8 +340,7 @@ static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
341 SCSI_SENSE_BUFFERSIZE, 64, 0); 340 SCSI_SENSE_BUFFERSIZE, 64, 0);
342 341
343 if (!fusion->sense_dma_pool) { 342 if (!fusion->sense_dma_pool) {
344 printk(KERN_DEBUG "megasas: failed to setup sense pool " 343 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool fusion\n");
345 "fusion\n");
346 pci_pool_destroy(fusion->sg_dma_pool); 344 pci_pool_destroy(fusion->sg_dma_pool);
347 fusion->sg_dma_pool = NULL; 345 fusion->sg_dma_pool = NULL;
348 return -ENOMEM; 346 return -ENOMEM;
@@ -366,7 +364,7 @@ static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
366 * whatever has been allocated 364 * whatever has been allocated
367 */ 365 */
368 if (!cmd->sg_frame || !cmd->sense) { 366 if (!cmd->sg_frame || !cmd->sense) {
369 printk(KERN_DEBUG "megasas: pci_pool_alloc failed\n"); 367 dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n");
370 megasas_teardown_frame_pool_fusion(instance); 368 megasas_teardown_frame_pool_fusion(instance);
371 return -ENOMEM; 369 return -ENOMEM;
372 } 370 }
@@ -412,7 +410,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
412 &fusion->req_frames_desc_phys, GFP_KERNEL); 410 &fusion->req_frames_desc_phys, GFP_KERNEL);
413 411
414 if (!fusion->req_frames_desc) { 412 if (!fusion->req_frames_desc) {
415 printk(KERN_ERR "megasas; Could not allocate memory for " 413 dev_err(&instance->pdev->dev, "Could not allocate memory for "
416 "request_frames\n"); 414 "request_frames\n");
417 goto fail_req_desc; 415 goto fail_req_desc;
418 } 416 }
@@ -423,7 +421,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
423 fusion->reply_alloc_sz * count, 16, 0); 421 fusion->reply_alloc_sz * count, 16, 0);
424 422
425 if (!fusion->reply_frames_desc_pool) { 423 if (!fusion->reply_frames_desc_pool) {
426 printk(KERN_ERR "megasas; Could not allocate memory for " 424 dev_err(&instance->pdev->dev, "Could not allocate memory for "
427 "reply_frame pool\n"); 425 "reply_frame pool\n");
428 goto fail_reply_desc; 426 goto fail_reply_desc;
429 } 427 }
@@ -432,7 +430,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
432 pci_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL, 430 pci_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL,
433 &fusion->reply_frames_desc_phys); 431 &fusion->reply_frames_desc_phys);
434 if (!fusion->reply_frames_desc) { 432 if (!fusion->reply_frames_desc) {
435 printk(KERN_ERR "megasas; Could not allocate memory for " 433 dev_err(&instance->pdev->dev, "Could not allocate memory for "
436 "reply_frame pool\n"); 434 "reply_frame pool\n");
437 pci_pool_destroy(fusion->reply_frames_desc_pool); 435 pci_pool_destroy(fusion->reply_frames_desc_pool);
438 goto fail_reply_desc; 436 goto fail_reply_desc;
@@ -449,7 +447,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
449 fusion->io_frames_alloc_sz, 16, 0); 447 fusion->io_frames_alloc_sz, 16, 0);
450 448
451 if (!fusion->io_request_frames_pool) { 449 if (!fusion->io_request_frames_pool) {
452 printk(KERN_ERR "megasas: Could not allocate memory for " 450 dev_err(&instance->pdev->dev, "Could not allocate memory for "
453 "io_request_frame pool\n"); 451 "io_request_frame pool\n");
454 goto fail_io_frames; 452 goto fail_io_frames;
455 } 453 }
@@ -458,7 +456,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
458 pci_pool_alloc(fusion->io_request_frames_pool, GFP_KERNEL, 456 pci_pool_alloc(fusion->io_request_frames_pool, GFP_KERNEL,
459 &fusion->io_request_frames_phys); 457 &fusion->io_request_frames_phys);
460 if (!fusion->io_request_frames) { 458 if (!fusion->io_request_frames) {
461 printk(KERN_ERR "megasas: Could not allocate memory for " 459 dev_err(&instance->pdev->dev, "Could not allocate memory for "
462 "io_request_frames frames\n"); 460 "io_request_frames frames\n");
463 pci_pool_destroy(fusion->io_request_frames_pool); 461 pci_pool_destroy(fusion->io_request_frames_pool);
464 goto fail_io_frames; 462 goto fail_io_frames;
@@ -473,7 +471,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
473 * max_cmd, GFP_KERNEL); 471 * max_cmd, GFP_KERNEL);
474 472
475 if (!fusion->cmd_list) { 473 if (!fusion->cmd_list) {
476 printk(KERN_DEBUG "megasas: out of memory. Could not alloc " 474 dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory. Could not alloc "
477 "memory for cmd_list_fusion\n"); 475 "memory for cmd_list_fusion\n");
478 goto fail_cmd_list; 476 goto fail_cmd_list;
479 } 477 }
@@ -483,7 +481,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
483 fusion->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd_fusion), 481 fusion->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd_fusion),
484 GFP_KERNEL); 482 GFP_KERNEL);
485 if (!fusion->cmd_list[i]) { 483 if (!fusion->cmd_list[i]) {
486 printk(KERN_ERR "Could not alloc cmd list fusion\n"); 484 dev_err(&instance->pdev->dev, "Could not alloc cmd list fusion\n");
487 485
488 for (j = 0; j < i; j++) 486 for (j = 0; j < i; j++)
489 kfree(fusion->cmd_list[j]); 487 kfree(fusion->cmd_list[j]);
@@ -527,7 +525,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
527 * Create a frame pool and assign one frame to each cmd 525 * Create a frame pool and assign one frame to each cmd
528 */ 526 */
529 if (megasas_create_frame_pool_fusion(instance)) { 527 if (megasas_create_frame_pool_fusion(instance)) {
530 printk(KERN_DEBUG "megasas: Error creating frame DMA pool\n"); 528 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
531 megasas_free_cmds_fusion(instance); 529 megasas_free_cmds_fusion(instance);
532 goto fail_req_desc; 530 goto fail_req_desc;
533 } 531 }
@@ -613,7 +611,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
613 cmd = megasas_get_cmd(instance); 611 cmd = megasas_get_cmd(instance);
614 612
615 if (!cmd) { 613 if (!cmd) {
616 printk(KERN_ERR "Could not allocate cmd for INIT Frame\n"); 614 dev_err(&instance->pdev->dev, "Could not allocate cmd for INIT Frame\n");
617 ret = 1; 615 ret = 1;
618 goto fail_get_cmd; 616 goto fail_get_cmd;
619 } 617 }
@@ -624,7 +622,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
624 &ioc_init_handle, GFP_KERNEL); 622 &ioc_init_handle, GFP_KERNEL);
625 623
626 if (!IOCInitMessage) { 624 if (!IOCInitMessage) {
627 printk(KERN_ERR "Could not allocate memory for " 625 dev_err(&instance->pdev->dev, "Could not allocate memory for "
628 "IOCInitMessage\n"); 626 "IOCInitMessage\n");
629 ret = 1; 627 ret = 1;
630 goto fail_fw_init; 628 goto fail_fw_init;
@@ -714,7 +712,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
714 ret = 1; 712 ret = 1;
715 goto fail_fw_init; 713 goto fail_fw_init;
716 } 714 }
717 printk(KERN_ERR "megasas:IOC Init cmd success\n"); 715 dev_err(&instance->pdev->dev, "Init cmd success\n");
718 716
719 ret = 0; 717 ret = 0;
720 718
@@ -757,7 +755,7 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
757 cmd = megasas_get_cmd(instance); 755 cmd = megasas_get_cmd(instance);
758 756
759 if (!cmd) { 757 if (!cmd) {
760 printk(KERN_DEBUG "megasas: Failed to get cmd for map info.\n"); 758 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for map info\n");
761 return -ENOMEM; 759 return -ENOMEM;
762 } 760 }
763 761
@@ -776,7 +774,7 @@ megasas_get_ld_map_info(struct megasas_instance *instance)
776 ci_h = fusion->ld_map_phys[(instance->map_id & 1)]; 774 ci_h = fusion->ld_map_phys[(instance->map_id & 1)];
777 775
778 if (!ci) { 776 if (!ci) {
779 printk(KERN_DEBUG "Failed to alloc mem for ld_map_info\n"); 777 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ld_map_info\n");
780 megasas_return_cmd(instance, cmd); 778 megasas_return_cmd(instance, cmd);
781 return -ENOMEM; 779 return -ENOMEM;
782 } 780 }
@@ -851,8 +849,7 @@ megasas_sync_map_info(struct megasas_instance *instance)
851 cmd = megasas_get_cmd(instance); 849 cmd = megasas_get_cmd(instance);
852 850
853 if (!cmd) { 851 if (!cmd) {
854 printk(KERN_DEBUG "megasas: Failed to get cmd for sync" 852 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for sync info\n");
855 "info.\n");
856 return -ENOMEM; 853 return -ENOMEM;
857 } 854 }
858 855
@@ -1097,7 +1094,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance)
1097 &fusion->ld_map_phys[i], 1094 &fusion->ld_map_phys[i],
1098 GFP_KERNEL); 1095 GFP_KERNEL);
1099 if (!fusion->ld_map[i]) { 1096 if (!fusion->ld_map[i]) {
1100 printk(KERN_ERR "megasas: Could not allocate memory " 1097 dev_err(&instance->pdev->dev, "Could not allocate memory "
1101 "for map info\n"); 1098 "for map info\n");
1102 goto fail_map_info; 1099 goto fail_map_info;
1103 } 1100 }
@@ -1162,7 +1159,7 @@ map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status)
1162 cmd->scmd->result = DID_IMM_RETRY << 16; 1159 cmd->scmd->result = DID_IMM_RETRY << 16;
1163 break; 1160 break;
1164 default: 1161 default:
1165 printk(KERN_DEBUG "megasas: FW status %#x\n", status); 1162 dev_printk(KERN_DEBUG, &cmd->instance->pdev->dev, "FW status %#x\n", status);
1166 cmd->scmd->result = DID_ERROR << 16; 1163 cmd->scmd->result = DID_ERROR << 16;
1167 break; 1164 break;
1168 } 1165 }
@@ -1851,7 +1848,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
1851 &io_request->SGL, cmd); 1848 &io_request->SGL, cmd);
1852 1849
1853 if (sge_count > instance->max_num_sge) { 1850 if (sge_count > instance->max_num_sge) {
1854 printk(KERN_ERR "megasas: Error. sge_count (0x%x) exceeds " 1851 dev_err(&instance->pdev->dev, "Error. sge_count (0x%x) exceeds "
1855 "max (0x%x) allowed\n", sge_count, 1852 "max (0x%x) allowed\n", sge_count,
1856 instance->max_num_sge); 1853 instance->max_num_sge);
1857 return 1; 1854 return 1;
@@ -1885,7 +1882,7 @@ megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
1885 struct fusion_context *fusion; 1882 struct fusion_context *fusion;
1886 1883
1887 if (index >= instance->max_fw_cmds) { 1884 if (index >= instance->max_fw_cmds) {
1888 printk(KERN_ERR "megasas: Invalid SMID (0x%x)request for " 1885 dev_err(&instance->pdev->dev, "Invalid SMID (0x%x)request for "
1889 "descriptor for scsi%d\n", index, 1886 "descriptor for scsi%d\n", index,
1890 instance->host->host_no); 1887 instance->host->host_no);
1891 return NULL; 1888 return NULL;
@@ -1927,7 +1924,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
1927 1924
1928 if (megasas_build_io_fusion(instance, scmd, cmd)) { 1925 if (megasas_build_io_fusion(instance, scmd, cmd)) {
1929 megasas_return_cmd_fusion(instance, cmd); 1926 megasas_return_cmd_fusion(instance, cmd);
1930 printk(KERN_ERR "megasas: Error building command.\n"); 1927 dev_err(&instance->pdev->dev, "Error building command\n");
1931 cmd->request_desc = NULL; 1928 cmd->request_desc = NULL;
1932 return 1; 1929 return 1;
1933 } 1930 }
@@ -1937,7 +1934,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
1937 1934
1938 if (cmd->io_request->ChainOffset != 0 && 1935 if (cmd->io_request->ChainOffset != 0 &&
1939 cmd->io_request->ChainOffset != 0xF) 1936 cmd->io_request->ChainOffset != 0xF)
1940 printk(KERN_ERR "megasas: The chain offset value is not " 1937 dev_err(&instance->pdev->dev, "The chain offset value is not "
1941 "correct : %x\n", cmd->io_request->ChainOffset); 1938 "correct : %x\n", cmd->io_request->ChainOffset);
1942 1939
1943 /* 1940 /*
@@ -2025,7 +2022,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
2025 if (reply_descript_type == 2022 if (reply_descript_type ==
2026 MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) { 2023 MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
2027 if (megasas_dbg_lvl == 5) 2024 if (megasas_dbg_lvl == 5)
2028 printk(KERN_ERR "\nmegasas: FAST Path " 2025 dev_err(&instance->pdev->dev, "\nFAST Path "
2029 "IO Success\n"); 2026 "IO Success\n");
2030 } 2027 }
2031 /* Fall thru and complete IO */ 2028 /* Fall thru and complete IO */
@@ -2186,7 +2183,7 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp)
2186 else if (fw_state == MFI_STATE_FAULT) 2183 else if (fw_state == MFI_STATE_FAULT)
2187 schedule_work(&instance->work_init); 2184 schedule_work(&instance->work_init);
2188 } else if (fw_state == MFI_STATE_FAULT) { 2185 } else if (fw_state == MFI_STATE_FAULT) {
2189 printk(KERN_WARNING "megaraid_sas: Iop2SysDoorbellInt" 2186 dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt"
2190 "for scsi%d\n", instance->host->host_no); 2187 "for scsi%d\n", instance->host->host_no);
2191 schedule_work(&instance->work_init); 2188 schedule_work(&instance->work_init);
2192 } 2189 }
@@ -2269,7 +2266,7 @@ build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
2269 u16 index; 2266 u16 index;
2270 2267
2271 if (build_mpt_mfi_pass_thru(instance, cmd)) { 2268 if (build_mpt_mfi_pass_thru(instance, cmd)) {
2272 printk(KERN_ERR "Couldn't build MFI pass thru cmd\n"); 2269 dev_err(&instance->pdev->dev, "Couldn't build MFI pass thru cmd\n");
2273 return NULL; 2270 return NULL;
2274 } 2271 }
2275 2272
@@ -2303,7 +2300,7 @@ megasas_issue_dcmd_fusion(struct megasas_instance *instance,
2303 2300
2304 req_desc = build_mpt_cmd(instance, cmd); 2301 req_desc = build_mpt_cmd(instance, cmd);
2305 if (!req_desc) { 2302 if (!req_desc) {
2306 printk(KERN_ERR "Couldn't issue MFI pass thru cmd\n"); 2303 dev_err(&instance->pdev->dev, "Couldn't issue MFI pass thru cmd\n");
2307 return; 2304 return;
2308 } 2305 }
2309 megasas_fire_cmd_fusion(instance, req_desc); 2306 megasas_fire_cmd_fusion(instance, req_desc);
@@ -2413,7 +2410,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
2413 fw_state = instance->instancet->read_fw_status_reg( 2410 fw_state = instance->instancet->read_fw_status_reg(
2414 instance->reg_set) & MFI_STATE_MASK; 2411 instance->reg_set) & MFI_STATE_MASK;
2415 if (fw_state == MFI_STATE_FAULT) { 2412 if (fw_state == MFI_STATE_FAULT) {
2416 printk(KERN_WARNING "megasas: Found FW in FAULT state," 2413 dev_warn(&instance->pdev->dev, "Found FW in FAULT state,"
2417 " will reset adapter scsi%d.\n", 2414 " will reset adapter scsi%d.\n",
2418 instance->host->host_no); 2415 instance->host->host_no);
2419 retval = 1; 2416 retval = 1;
@@ -2436,7 +2433,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
2436 hb_seconds_missed++; 2433 hb_seconds_missed++;
2437 if (hb_seconds_missed == 2434 if (hb_seconds_missed ==
2438 (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) { 2435 (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) {
2439 printk(KERN_WARNING "megasas: SR-IOV:" 2436 dev_warn(&instance->pdev->dev, "SR-IOV:"
2440 " Heartbeat never completed " 2437 " Heartbeat never completed "
2441 " while polling during I/O " 2438 " while polling during I/O "
2442 " timeout handling for " 2439 " timeout handling for "
@@ -2454,7 +2451,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
2454 goto out; 2451 goto out;
2455 2452
2456 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) { 2453 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2457 printk(KERN_NOTICE "megasas: [%2d]waiting for %d " 2454 dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
2458 "commands to complete for scsi%d\n", i, 2455 "commands to complete for scsi%d\n", i,
2459 outstanding, instance->host->host_no); 2456 outstanding, instance->host->host_no);
2460 megasas_complete_cmd_dpc_fusion( 2457 megasas_complete_cmd_dpc_fusion(
@@ -2464,7 +2461,7 @@ int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
2464 } 2461 }
2465 2462
2466 if (atomic_read(&instance->fw_outstanding)) { 2463 if (atomic_read(&instance->fw_outstanding)) {
2467 printk("megaraid_sas: pending commands remain after waiting, " 2464 dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
2468 "will reset adapter scsi%d.\n", 2465 "will reset adapter scsi%d.\n",
2469 instance->host->host_no); 2466 instance->host->host_no);
2470 retval = 1; 2467 retval = 1;
@@ -2564,7 +2561,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2564 mutex_lock(&instance->reset_mutex); 2561 mutex_lock(&instance->reset_mutex);
2565 2562
2566 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { 2563 if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
2567 printk(KERN_WARNING "megaraid_sas: Hardware critical error, " 2564 dev_warn(&instance->pdev->dev, "Hardware critical error, "
2568 "returning FAILED for scsi%d.\n", 2565 "returning FAILED for scsi%d.\n",
2569 instance->host->host_no); 2566 instance->host->host_no);
2570 mutex_unlock(&instance->reset_mutex); 2567 mutex_unlock(&instance->reset_mutex);
@@ -2618,7 +2615,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2618 if (megasas_wait_for_outstanding_fusion(instance, iotimeout, 2615 if (megasas_wait_for_outstanding_fusion(instance, iotimeout,
2619 &convert)) { 2616 &convert)) {
2620 instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT; 2617 instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
2621 printk(KERN_WARNING "megaraid_sas: resetting fusion " 2618 dev_warn(&instance->pdev->dev, "resetting fusion "
2622 "adapter scsi%d.\n", instance->host->host_no); 2619 "adapter scsi%d.\n", instance->host->host_no);
2623 if (convert) 2620 if (convert)
2624 iotimeout = 0; 2621 iotimeout = 0;
@@ -2645,7 +2642,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2645 if (instance->disableOnlineCtrlReset || 2642 if (instance->disableOnlineCtrlReset ||
2646 (abs_state == MFI_STATE_FAULT && !reset_adapter)) { 2643 (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
2647 /* Reset not supported, kill adapter */ 2644 /* Reset not supported, kill adapter */
2648 printk(KERN_WARNING "megaraid_sas: Reset not supported" 2645 dev_warn(&instance->pdev->dev, "Reset not supported"
2649 ", killing adapter scsi%d.\n", 2646 ", killing adapter scsi%d.\n",
2650 instance->host->host_no); 2647 instance->host->host_no);
2651 megaraid_sas_kill_hba(instance); 2648 megaraid_sas_kill_hba(instance);
@@ -2663,7 +2660,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2663 instance->hb_host_mem->HB.driverCounter)) { 2660 instance->hb_host_mem->HB.driverCounter)) {
2664 instance->hb_host_mem->HB.driverCounter = 2661 instance->hb_host_mem->HB.driverCounter =
2665 instance->hb_host_mem->HB.fwCounter; 2662 instance->hb_host_mem->HB.fwCounter;
2666 printk(KERN_WARNING "megasas: SR-IOV:" 2663 dev_warn(&instance->pdev->dev, "SR-IOV:"
2667 "Late FW heartbeat update for " 2664 "Late FW heartbeat update for "
2668 "scsi%d.\n", 2665 "scsi%d.\n",
2669 instance->host->host_no); 2666 instance->host->host_no);
@@ -2679,8 +2676,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2679 abs_state = status_reg & 2676 abs_state = status_reg &
2680 MFI_STATE_MASK; 2677 MFI_STATE_MASK;
2681 if (abs_state == MFI_STATE_READY) { 2678 if (abs_state == MFI_STATE_READY) {
2682 printk(KERN_WARNING "megasas" 2679 dev_warn(&instance->pdev->dev,
2683 ": SR-IOV: FW was found" 2680 "SR-IOV: FW was found"
2684 "to be in ready state " 2681 "to be in ready state "
2685 "for scsi%d.\n", 2682 "for scsi%d.\n",
2686 instance->host->host_no); 2683 instance->host->host_no);
@@ -2689,7 +2686,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2689 msleep(20); 2686 msleep(20);
2690 } 2687 }
2691 if (abs_state != MFI_STATE_READY) { 2688 if (abs_state != MFI_STATE_READY) {
2692 printk(KERN_WARNING "megasas: SR-IOV: " 2689 dev_warn(&instance->pdev->dev, "SR-IOV: "
2693 "FW not in ready state after %d" 2690 "FW not in ready state after %d"
2694 " seconds for scsi%d, status_reg = " 2691 " seconds for scsi%d, status_reg = "
2695 "0x%x.\n", 2692 "0x%x.\n",
@@ -2731,7 +2728,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2731 host_diag = 2728 host_diag =
2732 readl(&instance->reg_set->fusion_host_diag); 2729 readl(&instance->reg_set->fusion_host_diag);
2733 if (retry++ == 100) { 2730 if (retry++ == 100) {
2734 printk(KERN_WARNING "megaraid_sas: " 2731 dev_warn(&instance->pdev->dev,
2735 "Host diag unlock failed! " 2732 "Host diag unlock failed! "
2736 "for scsi%d\n", 2733 "for scsi%d\n",
2737 instance->host->host_no); 2734 instance->host->host_no);
@@ -2754,7 +2751,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2754 host_diag = 2751 host_diag =
2755 readl(&instance->reg_set->fusion_host_diag); 2752 readl(&instance->reg_set->fusion_host_diag);
2756 if (retry++ == 1000) { 2753 if (retry++ == 1000) {
2757 printk(KERN_WARNING "megaraid_sas: " 2754 dev_warn(&instance->pdev->dev,
2758 "Diag reset adapter never " 2755 "Diag reset adapter never "
2759 "cleared for scsi%d!\n", 2756 "cleared for scsi%d!\n",
2760 instance->host->host_no); 2757 instance->host->host_no);
@@ -2777,7 +2774,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2777 instance->reg_set) & MFI_STATE_MASK; 2774 instance->reg_set) & MFI_STATE_MASK;
2778 } 2775 }
2779 if (abs_state <= MFI_STATE_FW_INIT) { 2776 if (abs_state <= MFI_STATE_FW_INIT) {
2780 printk(KERN_WARNING "megaraid_sas: firmware " 2777 dev_warn(&instance->pdev->dev, "firmware "
2781 "state < MFI_STATE_FW_INIT, state = " 2778 "state < MFI_STATE_FW_INIT, state = "
2782 "0x%x for scsi%d\n", abs_state, 2779 "0x%x for scsi%d\n", abs_state,
2783 instance->host->host_no); 2780 instance->host->host_no);
@@ -2786,7 +2783,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2786 2783
2787 /* Wait for FW to become ready */ 2784 /* Wait for FW to become ready */
2788 if (megasas_transition_to_ready(instance, 1)) { 2785 if (megasas_transition_to_ready(instance, 1)) {
2789 printk(KERN_WARNING "megaraid_sas: Failed to " 2786 dev_warn(&instance->pdev->dev, "Failed to "
2790 "transition controller to ready " 2787 "transition controller to ready "
2791 "for scsi%d.\n", 2788 "for scsi%d.\n",
2792 instance->host->host_no); 2789 instance->host->host_no);
@@ -2795,7 +2792,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2795 2792
2796 megasas_reset_reply_desc(instance); 2793 megasas_reset_reply_desc(instance);
2797 if (megasas_ioc_init_fusion(instance)) { 2794 if (megasas_ioc_init_fusion(instance)) {
2798 printk(KERN_WARNING "megaraid_sas: " 2795 dev_warn(&instance->pdev->dev,
2799 "megasas_ioc_init_fusion() failed!" 2796 "megasas_ioc_init_fusion() failed!"
2800 " for scsi%d\n", 2797 " for scsi%d\n",
2801 instance->host->host_no); 2798 instance->host->host_no);
@@ -2836,7 +2833,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2836 } 2833 }
2837 2834
2838 /* Adapter reset completed successfully */ 2835 /* Adapter reset completed successfully */
2839 printk(KERN_WARNING "megaraid_sas: Reset " 2836 dev_warn(&instance->pdev->dev, "Reset "
2840 "successful for scsi%d.\n", 2837 "successful for scsi%d.\n",
2841 instance->host->host_no); 2838 instance->host->host_no);
2842 2839
@@ -2852,7 +2849,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
2852 goto out; 2849 goto out;
2853 } 2850 }
2854 /* Reset failed, kill the adapter */ 2851 /* Reset failed, kill the adapter */
2855 printk(KERN_WARNING "megaraid_sas: Reset failed, killing " 2852 dev_warn(&instance->pdev->dev, "Reset failed, killing "
2856 "adapter scsi%d.\n", instance->host->host_no); 2853 "adapter scsi%d.\n", instance->host->host_no);
2857 megaraid_sas_kill_hba(instance); 2854 megaraid_sas_kill_hba(instance);
2858 instance->skip_heartbeat_timer_del = 1; 2855 instance->skip_heartbeat_timer_del = 1;
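One pitfall worth flagging in a few of the converted messages above (e.g. "waiting" "for controller reset to finish" and "Iop2SysDoorbellInt" "for scsi%d"): adjacent C string literals concatenate with no implicit whitespace, so a message split across lines must carry its own space at the split point. A minimal sketch, with my_dev as a hypothetical struct device pointer:

#include <linux/device.h>

static void my_log_example(struct device *my_dev)
{
        /* literals fuse: this logs "waitingfor controller reset to finish" */
        dev_notice(my_dev, "waiting"
                   "for controller reset to finish\n");

        /* explicit space at the split point keeps the words apart */
        dev_notice(my_dev, "waiting "
                   "for controller reset to finish\n");
}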
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 11248de92b3b..6dec7cff316f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -1557,7 +1557,8 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1557 goto out_fail; 1557 goto out_fail;
1558 } 1558 }
1559 1559
1560 for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) { 1560 for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
1561 (!memap_sz || !pio_sz); i++) {
1561 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { 1562 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1562 if (pio_sz) 1563 if (pio_sz)
1563 continue; 1564 continue;
@@ -1572,16 +1573,17 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1572 chip_phys = (u64)ioc->chip_phys; 1573 chip_phys = (u64)ioc->chip_phys;
1573 memap_sz = pci_resource_len(pdev, i); 1574 memap_sz = pci_resource_len(pdev, i);
1574 ioc->chip = ioremap(ioc->chip_phys, memap_sz); 1575 ioc->chip = ioremap(ioc->chip_phys, memap_sz);
1575 if (ioc->chip == NULL) {
1576 printk(MPT2SAS_ERR_FMT "unable to map "
1577 "adapter memory!\n", ioc->name);
1578 r = -EINVAL;
1579 goto out_fail;
1580 }
1581 } 1576 }
1582 } 1577 }
1583 } 1578 }
1584 1579
1580 if (ioc->chip == NULL) {
1581 printk(MPT2SAS_ERR_FMT "unable to map adapter memory! "
1582 "or resource not found\n", ioc->name);
1583 r = -EINVAL;
1584 goto out_fail;
1585 }
1586
1585 _base_mask_interrupts(ioc); 1587 _base_mask_interrupts(ioc);
1586 1588
1587 r = _base_get_ioc_facts(ioc, CAN_SLEEP); 1589 r = _base_get_ioc_facts(ioc, CAN_SLEEP);
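The restructured loop above is easier to see outside the diff: iteration now stops as soon as one IO BAR and one memory BAR have been sized, and the NULL check on ioc->chip moves after the loop, so "no memory BAR present at all" fails the same way as a failed ioremap(). A condensed sketch under the assumption of a simplified adapter struct (my_adapter and my_map_resources() are hypothetical):

#include <linux/pci.h>
#include <linux/io.h>

struct my_adapter {
        void __iomem *chip;
        resource_size_t chip_phys;
};

static int my_map_resources(struct my_adapter *ioc, struct pci_dev *pdev)
{
        resource_size_t memap_sz = 0, pio_sz = 0;
        int i;

        for (i = 0; i < DEVICE_COUNT_RESOURCE &&
                    (!memap_sz || !pio_sz); i++) {
                if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
                        if (pio_sz)
                                continue;
                        pio_sz = pci_resource_len(pdev, i);
                } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
                        if (memap_sz)
                                continue;
                        ioc->chip_phys = pci_resource_start(pdev, i);
                        memap_sz = pci_resource_len(pdev, i);
                        ioc->chip = ioremap(ioc->chip_phys, memap_sz);
                }
        }

        /* catches both a failed ioremap() and a missing memory BAR */
        if (ioc->chip == NULL) {
                dev_err(&pdev->dev, "unable to map adapter memory\n");
                return -EINVAL;
        }
        return 0;
}

The mpt3sas hunk below is the same fix carried over to the 3.0 driver fork.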
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 14a781b6b88d..43f87e904b98 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -1843,7 +1843,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
1843 goto out_fail; 1843 goto out_fail;
1844 } 1844 }
1845 1845
1846 for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) { 1846 for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
1847 (!memap_sz || !pio_sz); i++) {
1847 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { 1848 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1848 if (pio_sz) 1849 if (pio_sz)
1849 continue; 1850 continue;
@@ -1856,15 +1857,16 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
1856 chip_phys = (u64)ioc->chip_phys; 1857 chip_phys = (u64)ioc->chip_phys;
1857 memap_sz = pci_resource_len(pdev, i); 1858 memap_sz = pci_resource_len(pdev, i);
1858 ioc->chip = ioremap(ioc->chip_phys, memap_sz); 1859 ioc->chip = ioremap(ioc->chip_phys, memap_sz);
1859 if (ioc->chip == NULL) {
1860 pr_err(MPT3SAS_FMT "unable to map adapter memory!\n",
1861 ioc->name);
1862 r = -EINVAL;
1863 goto out_fail;
1864 }
1865 } 1860 }
1866 } 1861 }
1867 1862
1863 if (ioc->chip == NULL) {
1864 pr_err(MPT3SAS_FMT "unable to map adapter memory! "
1865 " or resource not found\n", ioc->name);
1866 r = -EINVAL;
1867 goto out_fail;
1868 }
1869
1868 _base_mask_interrupts(ioc); 1870 _base_mask_interrupts(ioc);
1869 1871
1870 r = _base_get_ioc_facts(ioc, CAN_SLEEP); 1872 r = _base_get_ioc_facts(ioc, CAN_SLEEP);
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index d40d734aa53a..f466a6aa8830 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -338,8 +338,11 @@ int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
338 338
339 res_start = pci_resource_start(pdev, bar); 339 res_start = pci_resource_start(pdev, bar);
340 res_len = pci_resource_len(pdev, bar); 340 res_len = pci_resource_len(pdev, bar);
341 if (!res_start || !res_len) 341 if (!res_start || !res_len) {
342 iounmap(mvi->regs_ex);
343 mvi->regs_ex = NULL;
342 goto err_out; 344 goto err_out;
345 }
343 346
344 res_flag = pci_resource_flags(pdev, bar); 347 res_flag = pci_resource_flags(pdev, bar);
345 if (res_flag & IORESOURCE_CACHEABLE) 348 if (res_flag & IORESOURCE_CACHEABLE)
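The mvsas hunk above fixes a resource leak on an early error path: the extended-register BAR is mapped first, so if the main BAR turns out to be absent, that first mapping must be undone and the pointer cleared before bailing out, or a later teardown could unmap it twice. A minimal sketch of the shape of the fix (my_info and my_ioremap() are hypothetical; pci_iomap() stands in for the driver's ioremap calls):

#include <linux/pci.h>
#include <linux/io.h>

struct my_info {
        void __iomem *regs_ex;  /* extended registers, mapped first */
        void __iomem *regs;     /* main registers */
};

static int my_ioremap(struct my_info *mvi, struct pci_dev *pdev,
                      int bar, int bar_ex)
{
        mvi->regs_ex = pci_iomap(pdev, bar_ex, 0);
        if (!mvi->regs_ex)
                return -ENOMEM;

        if (!pci_resource_start(pdev, bar) || !pci_resource_len(pdev, bar)) {
                /* undo the first mapping before the early exit */
                pci_iounmap(pdev, mvi->regs_ex);
                mvi->regs_ex = NULL;
                return -ENOMEM;
        }

        mvi->regs = pci_iomap(pdev, bar, 0);
        if (!mvi->regs) {
                pci_iounmap(pdev, mvi->regs_ex);
                mvi->regs_ex = NULL;
                return -ENOMEM;
        }
        return 0;
}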
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
index 74a4bb9af07b..f14ec6e042b9 100644
--- a/drivers/scsi/pm8001/pm8001_defs.h
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -49,13 +49,15 @@ enum chip_flavors {
49 chip_8019, 49 chip_8019,
50 chip_8074, 50 chip_8074,
51 chip_8076, 51 chip_8076,
52 chip_8077 52 chip_8077,
53 chip_8006,
53}; 54};
54 55
55enum phy_speed { 56enum phy_speed {
56 PHY_SPEED_15 = 0x01, 57 PHY_SPEED_15 = 0x01,
57 PHY_SPEED_30 = 0x02, 58 PHY_SPEED_30 = 0x02,
58 PHY_SPEED_60 = 0x04, 59 PHY_SPEED_60 = 0x04,
60 PHY_SPEED_120 = 0x08,
59}; 61};
60 62
61enum data_direction { 63enum data_direction {
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 96dcc097a463..39306b1e704c 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3263,6 +3263,10 @@ void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate)
3263 struct sas_phy *sas_phy = phy->sas_phy.phy; 3263 struct sas_phy *sas_phy = phy->sas_phy.phy;
3264 3264
3265 switch (link_rate) { 3265 switch (link_rate) {
3266 case PHY_SPEED_120:
3267 phy->sas_phy.linkrate = SAS_LINK_RATE_12_0_GBPS;
3268 phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_12_0_GBPS;
3269 break;
3266 case PHY_SPEED_60: 3270 case PHY_SPEED_60:
3267 phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS; 3271 phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS;
3268 phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS; 3272 phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS;
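With PHY_SPEED_120 in place, the link-rate translation above gains a 12Gb/s leg for the new chip_8006 parts. A self-contained sketch of the mapping, with illustrative MY_* names standing in for the driver's enums (the real code also mirrors the value into the sysfs-visible negotiated_linkrate):

enum my_phy_speed {
        MY_PHY_SPEED_15  = 0x01,        /* one-hot values, as in pm8001_defs.h */
        MY_PHY_SPEED_30  = 0x02,
        MY_PHY_SPEED_60  = 0x04,
        MY_PHY_SPEED_120 = 0x08,        /* new: 12Gb/s */
};

enum my_linkrate {
        MY_RATE_1_5, MY_RATE_3_0, MY_RATE_6_0, MY_RATE_12_0, MY_RATE_UNKNOWN
};

static enum my_linkrate my_get_lrate(enum my_phy_speed speed)
{
        switch (speed) {
        case MY_PHY_SPEED_120: return MY_RATE_12_0;
        case MY_PHY_SPEED_60:  return MY_RATE_6_0;
        case MY_PHY_SPEED_30:  return MY_RATE_3_0;
        case MY_PHY_SPEED_15:  return MY_RATE_1_5;
        }
        return MY_RATE_UNKNOWN;
}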
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index a132f2664d2f..5c0356fb6310 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -57,6 +57,7 @@ static const struct pm8001_chip_info pm8001_chips[] = {
57 [chip_8074] = {0, 8, &pm8001_80xx_dispatch,}, 57 [chip_8074] = {0, 8, &pm8001_80xx_dispatch,},
58 [chip_8076] = {0, 16, &pm8001_80xx_dispatch,}, 58 [chip_8076] = {0, 16, &pm8001_80xx_dispatch,},
59 [chip_8077] = {0, 16, &pm8001_80xx_dispatch,}, 59 [chip_8077] = {0, 16, &pm8001_80xx_dispatch,},
60 [chip_8006] = {0, 16, &pm8001_80xx_dispatch,},
60}; 61};
61static int pm8001_id; 62static int pm8001_id;
62 63
@@ -1107,6 +1108,8 @@ err_out_enable:
1107 */ 1108 */
1108static struct pci_device_id pm8001_pci_table[] = { 1109static struct pci_device_id pm8001_pci_table[] = {
1109 { PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 }, 1110 { PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 },
1111 { PCI_VDEVICE(PMC_Sierra, 0x8006), chip_8006 },
1112 { PCI_VDEVICE(ADAPTEC2, 0x8006), chip_8006 },
1110 { PCI_VDEVICE(ATTO, 0x0042), chip_8001 }, 1113 { PCI_VDEVICE(ATTO, 0x0042), chip_8001 },
1111 /* Support for SPC/SPCv/SPCve controllers */ 1114 /* Support for SPC/SPCv/SPCve controllers */
1112 { PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 }, 1115 { PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 },
@@ -1217,7 +1220,7 @@ MODULE_AUTHOR("Anand Kumar Santhanam <AnandKumar.Santhanam@pmcs.com>");
1217MODULE_AUTHOR("Sangeetha Gnanasekaran <Sangeetha.Gnanasekaran@pmcs.com>"); 1220MODULE_AUTHOR("Sangeetha Gnanasekaran <Sangeetha.Gnanasekaran@pmcs.com>");
1218MODULE_AUTHOR("Nikith Ganigarakoppal <Nikith.Ganigarakoppal@pmcs.com>"); 1221MODULE_AUTHOR("Nikith Ganigarakoppal <Nikith.Ganigarakoppal@pmcs.com>");
1219MODULE_DESCRIPTION( 1222MODULE_DESCRIPTION(
1220 "PMC-Sierra PM8001/8081/8088/8089/8074/8076/8077 " 1223 "PMC-Sierra PM8001/8006/8081/8088/8089/8074/8076/8077 "
1221 "SAS/SATA controller driver"); 1224 "SAS/SATA controller driver");
1222MODULE_VERSION(DRV_VERSION); 1225MODULE_VERSION(DRV_VERSION);
1223MODULE_LICENSE("GPL"); 1226MODULE_LICENSE("GPL");
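The pm8001_init.c hunks wire the new ASIC into the driver in two places: a chip-info slot and pci_device_id entries whose third field lands in id->driver_data at probe time and selects the per-chip dispatch table. A minimal sketch of that pattern (my_* names are hypothetical; the vendor macros are the real ones from pci_ids.h):

#include <linux/module.h>
#include <linux/pci.h>

enum my_chip_flavors { my_chip_8001, my_chip_8006 };

static const struct pci_device_id my_pci_table[] = {
        { PCI_VDEVICE(PMC_Sierra, 0x8001), my_chip_8001 },
        { PCI_VDEVICE(PMC_Sierra, 0x8006), my_chip_8006 },
        { PCI_VDEVICE(ADAPTEC2, 0x8006), my_chip_8006 },
        { }     /* terminating entry */
};
MODULE_DEVICE_TABLE(pci, my_pci_table);

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        /* driver_data carries the chip flavor chosen in the table */
        dev_info(&pdev->dev, "chip flavor %lu\n", id->driver_data);
        return 0;
}

static struct pci_driver my_driver = {
        .name     = "pm80xx_sketch",
        .id_table = my_pci_table,
        .probe    = my_probe,
};
module_pci_driver(my_driver);
MODULE_LICENSE("GPL");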
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index b93f289b42b3..949198c01ced 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -790,6 +790,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
790 ccb->device = pm8001_dev; 790 ccb->device = pm8001_dev;
791 ccb->ccb_tag = ccb_tag; 791 ccb->ccb_tag = ccb_tag;
792 ccb->task = task; 792 ccb->task = task;
793 ccb->n_elem = 0;
793 794
794 res = PM8001_CHIP_DISP->task_abort(pm8001_ha, 795 res = PM8001_CHIP_DISP->task_abort(pm8001_ha,
795 pm8001_dev, flag, task_tag, ccb_tag); 796 pm8001_dev, flag, task_tag, ccb_tag);
@@ -975,19 +976,27 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
975 phy = sas_get_local_phy(dev); 976 phy = sas_get_local_phy(dev);
976 977
977 if (dev_is_sata(dev)) { 978 if (dev_is_sata(dev)) {
978 DECLARE_COMPLETION_ONSTACK(completion_setstate);
979 if (scsi_is_sas_phy_local(phy)) { 979 if (scsi_is_sas_phy_local(phy)) {
980 rc = 0; 980 rc = 0;
981 goto out; 981 goto out;
982 } 982 }
983 rc = sas_phy_reset(phy, 1); 983 rc = sas_phy_reset(phy, 1);
984 if (rc) {
985 PM8001_EH_DBG(pm8001_ha,
986 pm8001_printk("phy reset failed for device %x\n"
987 "with rc %d\n", pm8001_dev->device_id, rc));
988 rc = TMF_RESP_FUNC_FAILED;
989 goto out;
990 }
984 msleep(2000); 991 msleep(2000);
985 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , 992 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
986 dev, 1, 0); 993 dev, 1, 0);
987 pm8001_dev->setds_completion = &completion_setstate; 994 if (rc) {
988 rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha, 995 PM8001_EH_DBG(pm8001_ha,
989 pm8001_dev, 0x01); 996 pm8001_printk("task abort failed %x\n"
990 wait_for_completion(&completion_setstate); 997 "with rc %d\n", pm8001_dev->device_id, rc));
998 rc = TMF_RESP_FUNC_FAILED;
999 }
991 } else { 1000 } else {
992 rc = sas_phy_reset(phy, 1); 1001 rc = sas_phy_reset(phy, 1);
993 msleep(2000); 1002 msleep(2000);
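The pm8001_sas.c hunk tightens the SATA branch of the I_T nexus reset: each step's return code is now checked and surfaced as TMF_RESP_FUNC_FAILED, instead of pressing on to an unconditional set-device-state request. Sketched under simplifying assumptions (my_abort_outstanding() is a hypothetical stand-in for the internal task abort):

#include <linux/delay.h>
#include <scsi/libsas.h>

static int my_abort_outstanding(void)
{
        return 0;       /* hypothetical: abort in-flight I/O for the device */
}

static int my_sata_nexus_reset(struct sas_phy *phy)
{
        int rc;

        rc = sas_phy_reset(phy, 1);     /* hard reset */
        if (rc)
                return TMF_RESP_FUNC_FAILED;

        msleep(2000);                   /* let the link settle */

        rc = my_abort_outstanding();
        if (rc)
                return TMF_RESP_FUNC_FAILED;

        return TMF_RESP_FUNC_COMPLETE;
}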
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 8dd8b7840f04..e2e97db38ae8 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -58,7 +58,7 @@
58#include "pm8001_defs.h" 58#include "pm8001_defs.h"
59 59
60#define DRV_NAME "pm80xx" 60#define DRV_NAME "pm80xx"
61#define DRV_VERSION "0.1.37" 61#define DRV_VERSION "0.1.38"
62#define PM8001_FAIL_LOGGING 0x01 /* Error message logging */ 62#define PM8001_FAIL_LOGGING 0x01 /* Error message logging */
63#define PM8001_INIT_LOGGING 0x02 /* driver init logging */ 63#define PM8001_INIT_LOGGING 0x02 /* driver init logging */
64#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */ 64#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */
@@ -241,7 +241,7 @@ struct pm8001_chip_info {
241struct pm8001_port { 241struct pm8001_port {
242 struct asd_sas_port sas_port; 242 struct asd_sas_port sas_port;
243 u8 port_attached; 243 u8 port_attached;
244 u8 wide_port_phymap; 244 u16 wide_port_phymap;
245 u8 port_state; 245 u8 port_state;
246 struct list_head list; 246 struct list_head list;
247}; 247};
@@ -569,6 +569,14 @@ struct pm8001_fw_image_header {
569#define NCQ_READ_LOG_FLAG 0x80000000 569#define NCQ_READ_LOG_FLAG 0x80000000
570#define NCQ_ABORT_ALL_FLAG 0x40000000 570#define NCQ_ABORT_ALL_FLAG 0x40000000
571#define NCQ_2ND_RLE_FLAG 0x20000000 571#define NCQ_2ND_RLE_FLAG 0x20000000
572
573/* Device states */
574#define DS_OPERATIONAL 0x01
575#define DS_PORT_IN_RESET 0x02
576#define DS_IN_RECOVERY 0x03
577#define DS_IN_ERROR 0x04
578#define DS_NON_OPERATIONAL 0x07
579
572/** 580/**
573 * brief param structure for firmware flash update. 581 * brief param structure for firmware flash update.
574 */ 582 */
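Two small changes in pm8001_sas.h deserve a note: wide_port_phymap widens from u8 to u16, presumably because it is a per-port bitmap with one bit per member phy and the 16-phy chips (chip_8076/8077 and the new chip_8006) do not fit in 8 bits; the DS_* defines name the firmware device states used elsewhere in the series. A sketch of the bitmap point (names illustrative):

#include <linux/types.h>

struct my_port {
        u16 wide_port_phymap;   /* bit N set => phy N is in this wide port */
};

static void my_phy_joined(struct my_port *port, unsigned int phy_id)
{
        /* phy_id up to 15 now fits; with u8 the bits for phys 8..15 were lost */
        port->wide_port_phymap |= 1u << phy_id;
}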
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 05cce463ab01..0e1628f2018e 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -309,6 +309,9 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
 		pm8001_mr32(address, MAIN_INT_VECTOR_TABLE_OFFSET);
 	pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset =
 		pm8001_mr32(address, MAIN_SAS_PHY_ATTR_TABLE_OFFSET);
+	/* read port recover and reset timeout */
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer =
+		pm8001_mr32(address, MAIN_PORT_RECOVERY_TIMER);
 }
 
 /**
@@ -585,6 +588,12 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
 		pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
 	pm8001_mw32(address, MAIN_INT_REASSERTION_DELAY,
 		pm8001_ha->main_cfg_tbl.pm80xx_tbl.interrupt_reassertion_delay);
+
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer &= 0xffff0000;
+	pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer |=
+		PORT_RECOVERY_TIMEOUT;
+	pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER,
+		pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer);
 }
 
 /**
@@ -843,6 +852,7 @@ pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
 	int rc;
 	u32 tag;
 	u32 opc = OPC_INB_SET_CONTROLLER_CONFIG;
+	u32 page_code;
 
 	memset(&payload, 0, sizeof(struct set_ctrl_cfg_req));
 	rc = pm8001_tag_alloc(pm8001_ha, &tag);
@@ -851,8 +861,14 @@ pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha)
 
 	circularQ = &pm8001_ha->inbnd_q_tbl[0];
 	payload.tag = cpu_to_le32(tag);
+
+	if (IS_SPCV_12G(pm8001_ha->pdev))
+		page_code = THERMAL_PAGE_CODE_7H;
+	else
+		page_code = THERMAL_PAGE_CODE_8H;
+
 	payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) |
-		(THERMAL_ENABLE << 8) | THERMAL_OP_CODE;
+		(THERMAL_ENABLE << 8) | page_code;
 	payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
 
 	rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0);
@@ -1593,6 +1609,13 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
 		ts->stat = SAS_OPEN_REJECT;
 		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
 		break;
+	case IO_XFER_ERROR_INVALID_SSP_RSP_FRAME:
+		PM8001_IO_DBG(pm8001_ha,
+			pm8001_printk("IO_XFER_ERROR_INVALID_SSP_RSP_FRAME\n"));
+		ts->resp = SAS_TASK_COMPLETE;
+		ts->stat = SAS_OPEN_REJECT;
+		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
+		break;
 	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
 		PM8001_IO_DBG(pm8001_ha,
 			pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
@@ -2829,6 +2852,32 @@ static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
 static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
 	u32 phyId, u32 phy_op);
 
+static void hw_event_port_recover(struct pm8001_hba_info *pm8001_ha,
+	void *piomb)
+{
+	struct hw_event_resp *pPayload = (struct hw_event_resp *)(piomb + 4);
+	u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate);
+	u8 phy_id = (u8)((phyid_npip_portstate & 0xFF0000) >> 16);
+	u32 lr_status_evt_portid =
+		le32_to_cpu(pPayload->lr_status_evt_portid);
+	u8 deviceType = pPayload->sas_identify.dev_type;
+	u8 link_rate = (u8)((lr_status_evt_portid & 0xF0000000) >> 28);
+	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+	u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF);
+	struct pm8001_port *port = &pm8001_ha->port[port_id];
+
+	if (deviceType == SAS_END_DEVICE) {
+		pm80xx_chip_phy_ctl_req(pm8001_ha, phy_id,
+			PHY_NOTIFY_ENABLE_SPINUP);
+	}
+
+	port->wide_port_phymap |= (1U << phy_id);
+	pm8001_get_lrate_mode(phy, link_rate);
+	phy->sas_phy.oob_mode = SAS_OOB_MODE;
+	phy->phy_state = PHY_STATE_LINK_UP_SPCV;
+	phy->phy_attached = 1;
+}
+
 /**
  * hw_event_sas_phy_up -FW tells me a SAS phy up event.
  * @pm8001_ha: our hba card information
@@ -2856,6 +2905,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
 	unsigned long flags;
 	u8 deviceType = pPayload->sas_identify.dev_type;
 	port->port_state = portstate;
+	port->wide_port_phymap |= (1U << phy_id);
 	phy->phy_state = PHY_STATE_LINK_UP_SPCV;
 	PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
 		"portid:%d; phyid:%d; linkrate:%d; "
@@ -2981,7 +3031,6 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
 	struct pm8001_port *port = &pm8001_ha->port[port_id];
 	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
 	port->port_state = portstate;
-	phy->phy_type = 0;
 	phy->identify.device_type = 0;
 	phy->phy_attached = 0;
 	memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
@@ -2993,9 +3042,13 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		pm8001_printk(" PortInvalid portID %d\n", port_id));
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk(" Last phy Down and port invalid\n"));
-		port->port_attached = 0;
-		pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
-			port_id, phy_id, 0, 0);
+		if (phy->phy_type & PORT_TYPE_SATA) {
+			phy->phy_type = 0;
+			port->port_attached = 0;
+			pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+				port_id, phy_id, 0, 0);
+		}
+		sas_phy_disconnected(&phy->sas_phy);
 		break;
 	case PORT_IN_RESET:
 		PM8001_MSG_DBG(pm8001_ha,
@@ -3003,22 +3056,26 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		break;
 	case PORT_NOT_ESTABLISHED:
 		PM8001_MSG_DBG(pm8001_ha,
-			pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
+			pm8001_printk(" Phy Down and PORT_NOT_ESTABLISHED\n"));
 		port->port_attached = 0;
 		break;
 	case PORT_LOSTCOMM:
 		PM8001_MSG_DBG(pm8001_ha,
-			pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
+			pm8001_printk(" Phy Down and PORT_LOSTCOMM\n"));
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk(" Last phy Down and port invalid\n"));
-		port->port_attached = 0;
-		pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
-			port_id, phy_id, 0, 0);
+		if (phy->phy_type & PORT_TYPE_SATA) {
+			port->port_attached = 0;
+			phy->phy_type = 0;
+			pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+				port_id, phy_id, 0, 0);
+		}
+		sas_phy_disconnected(&phy->sas_phy);
 		break;
 	default:
 		port->port_attached = 0;
 		PM8001_MSG_DBG(pm8001_ha,
-			pm8001_printk(" phy Down and(default) = 0x%x\n",
+			pm8001_printk(" Phy Down and(default) = 0x%x\n",
 			portstate));
 		break;
 
@@ -3084,7 +3141,7 @@ static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
  */
 static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
-	unsigned long flags;
+	unsigned long flags, i;
 	struct hw_event_resp *pPayload =
 		(struct hw_event_resp *)(piomb + 4);
 	u32 lr_status_evt_portid =
@@ -3097,9 +3154,9 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		(u16)((lr_status_evt_portid & 0x00FFFF00) >> 8);
 	u8 status =
 		(u8)((lr_status_evt_portid & 0x0F000000) >> 24);
-
 	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
 	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
+	struct pm8001_port *port = &pm8001_ha->port[port_id];
 	struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id];
 	PM8001_MSG_DBG(pm8001_ha,
 		pm8001_printk("portid:%d phyid:%d event:0x%x status:0x%x\n",
@@ -3125,7 +3182,9 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
 	case HW_EVENT_PHY_DOWN:
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk("HW_EVENT_PHY_DOWN\n"));
-		sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+		if (phy->phy_type & PORT_TYPE_SATA)
+			sas_ha->notify_phy_event(&phy->sas_phy,
+				PHYE_LOSS_OF_SIGNAL);
 		phy->phy_attached = 0;
 		phy->phy_state = 0;
 		hw_event_phy_down(pm8001_ha, piomb);
@@ -3169,9 +3228,6 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
 			pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n"));
 		pm80xx_hw_event_ack_req(pm8001_ha, 0,
 			HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
-		sas_phy_disconnected(sas_phy);
-		phy->phy_attached = 0;
-		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
 		break;
 	case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
 		PM8001_MSG_DBG(pm8001_ha,
@@ -3179,9 +3235,6 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		pm80xx_hw_event_ack_req(pm8001_ha, 0,
 			HW_EVENT_LINK_ERR_DISPARITY_ERROR,
 			port_id, phy_id, 0, 0);
-		sas_phy_disconnected(sas_phy);
-		phy->phy_attached = 0;
-		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
 		break;
 	case HW_EVENT_LINK_ERR_CODE_VIOLATION:
 		PM8001_MSG_DBG(pm8001_ha,
@@ -3189,9 +3242,6 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		pm80xx_hw_event_ack_req(pm8001_ha, 0,
 			HW_EVENT_LINK_ERR_CODE_VIOLATION,
 			port_id, phy_id, 0, 0);
-		sas_phy_disconnected(sas_phy);
-		phy->phy_attached = 0;
-		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
 		break;
 	case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
 		PM8001_MSG_DBG(pm8001_ha, pm8001_printk(
@@ -3199,9 +3249,6 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		pm80xx_hw_event_ack_req(pm8001_ha, 0,
 			HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH,
 			port_id, phy_id, 0, 0);
-		sas_phy_disconnected(sas_phy);
-		phy->phy_attached = 0;
-		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
 		break;
 	case HW_EVENT_MALFUNCTION:
 		PM8001_MSG_DBG(pm8001_ha,
@@ -3257,13 +3304,19 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
 		pm80xx_hw_event_ack_req(pm8001_ha, 0,
 			HW_EVENT_PORT_RECOVERY_TIMER_TMO,
 			port_id, phy_id, 0, 0);
-		sas_phy_disconnected(sas_phy);
-		phy->phy_attached = 0;
-		sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+		for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
+			if (port->wide_port_phymap & (1 << i)) {
+				phy = &pm8001_ha->phy[i];
+				sas_ha->notify_phy_event(&phy->sas_phy,
+					PHYE_LOSS_OF_SIGNAL);
+				port->wide_port_phymap &= ~(1 << i);
+			}
+		}
 		break;
 	case HW_EVENT_PORT_RECOVER:
 		PM8001_MSG_DBG(pm8001_ha,
 			pm8001_printk("HW_EVENT_PORT_RECOVER\n"));
+		hw_event_port_recover(pm8001_ha, piomb);
 		break;
 	case HW_EVENT_PORT_RESET_COMPLETE:
 		PM8001_MSG_DBG(pm8001_ha,
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index 9970a385795d..7a443bad6163 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -177,7 +177,8 @@
 /* Thermal related */
 #define	THERMAL_ENABLE			0x1
 #define	THERMAL_LOG_ENABLE		0x1
-#define THERMAL_OP_CODE			0x6
+#define THERMAL_PAGE_CODE_7H		0x6
+#define THERMAL_PAGE_CODE_8H		0x7
 #define LTEMPHIL			 70
 #define RTEMPHIL			100
 
@@ -1174,7 +1175,7 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
 #define IO_XFER_ERROR_INTERNAL_CRC_ERROR	0x54
 #define MPI_IO_RQE_BUSY_FULL			0x55
 #define IO_XFER_ERR_EOB_DATA_OVERRUN		0x56
-#define IO_XFR_ERROR_INVALID_SSP_RSP_FRAME	0x57
+#define IO_XFER_ERROR_INVALID_SSP_RSP_FRAME	0x57
 #define IO_OPEN_CNX_ERROR_OPEN_PREEMPTED	0x58
 
 #define MPI_ERR_IO_RESOURCE_UNAVAILABLE		0x1004
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 437254e1c4de..6b942d9e5b74 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -884,7 +884,6 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
 	    struct device, kobj)));
 	struct qla_hw_data *ha = vha->hw;
 	int rval;
-	uint16_t actual_size;
 
 	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
 		return 0;
@@ -901,7 +900,6 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
 	}
 
 do_read:
-	actual_size = 0;
 	memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
 
 	rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
@@ -1079,8 +1077,7 @@ qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
     char *buf)
 {
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
-	return scnprintf(buf, PAGE_SIZE, "%s\n",
-	    vha->hw->model_desc ? vha->hw->model_desc : "");
+	return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_desc);
 }
 
 static ssize_t
@@ -1348,7 +1345,8 @@ qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	struct qla_hw_data *ha = vha->hw;
 
-	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
+	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) &&
+	    !IS_QLA27XX(ha))
 		return scnprintf(buf, PAGE_SIZE, "\n");
 
 	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
@@ -1537,6 +1535,20 @@ qla2x00_allow_cna_fw_dump_store(struct device *dev,
 	return strlen(buf);
 }
 
+static ssize_t
+qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct qla_hw_data *ha = vha->hw;
+
+	if (!IS_QLA27XX(ha))
+		return scnprintf(buf, PAGE_SIZE, "\n");
+
+	return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
+	    ha->pep_version[0], ha->pep_version[1], ha->pep_version[2]);
+}
+
 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -1581,6 +1593,7 @@ static DEVICE_ATTR(fw_dump_size, S_IRUGO, qla2x00_fw_dump_size_show, NULL);
 static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
 		   qla2x00_allow_cna_fw_dump_show,
 		   qla2x00_allow_cna_fw_dump_store);
+static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
 
 struct device_attribute *qla2x00_host_attrs[] = {
 	&dev_attr_driver_version,
@@ -1614,6 +1627,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
 	&dev_attr_diag_megabytes,
 	&dev_attr_fw_dump_size,
 	&dev_attr_allow_cna_fw_dump,
+	&dev_attr_pep_version,
 	NULL,
 };
 
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 2e2bb6f45ce6..c26acde797f0 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -405,7 +405,7 @@ done:
 	return rval;
 }
 
-inline uint16_t
+static inline uint16_t
 qla24xx_calc_ct_iocbs(uint16_t dsds)
 {
 	uint16_t iocbs;
@@ -1733,7 +1733,6 @@ qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
 	struct Scsi_Host *host = bsg_job->shost;
 	scsi_qla_host_t *vha = shost_priv(host);
 	struct qla_hw_data *ha = vha->hw;
-	uint16_t thread_id;
 	uint32_t rval = EXT_STATUS_OK;
 	uint16_t req_sg_cnt = 0;
 	uint16_t rsp_sg_cnt = 0;
@@ -1790,8 +1789,6 @@ qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
 		goto done;
 	}
 
-	thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
-
 	mutex_lock(&ha->selflogin_lock);
 	if (vha->self_login_loop_id == 0) {
 		/* Initialize all required fields of fcport */
@@ -2174,7 +2171,6 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
 {
 	int ret = -EINVAL;
 	struct fc_rport *rport;
-	fc_port_t *fcport = NULL;
 	struct Scsi_Host *host;
 	scsi_qla_host_t *vha;
 
@@ -2183,7 +2179,6 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
 
 	if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
 		rport = bsg_job->rport;
-		fcport = *(fc_port_t **) rport->dd_data;
 		host = rport_to_shost(rport);
 		vha = shost_priv(host);
 	} else {
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 8b011aef12bd..34dc9a35670b 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -19,14 +19,14 @@
  * | Device Discovery             | 0x2016     | 0x2020-0x2022, |
  * |                              |            | 0x2011-0x2012, |
  * |                              |            | 0x2099-0x20a4  |
- * | Queue Command and IO tracing | 0x3059     | 0x300b         |
+ * | Queue Command and IO tracing | 0x3075     | 0x300b         |
  * |                              |            | 0x3027-0x3028  |
  * |                              |            | 0x303d-0x3041  |
  * |                              |            | 0x302d,0x3033  |
  * |                              |            | 0x3036,0x3038  |
  * |                              |            | 0x303a         |
  * | DPC Thread                   | 0x4023     | 0x4002,0x4013  |
- * | Async Events                 | 0x5087     | 0x502b-0x502f  |
+ * | Async Events                 | 0x508a     | 0x502b-0x502f  |
  * |                              |            | 0x5047         |
 * |                              |            | 0x5084,0x5075  |
 * |                              |            | 0x503d,0x5044  |
@@ -117,7 +117,7 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
 {
 	int rval;
 	uint32_t cnt, stat, timer, dwords, idx;
-	uint16_t mb0, mb1;
+	uint16_t mb0;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 	dma_addr_t dump_dma = ha->gid_list_dma;
 	uint32_t *dump = (uint32_t *)ha->gid_list;
@@ -161,7 +161,7 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
 		    &ha->mbx_cmd_flags);
 
 		mb0 = RD_REG_WORD(&reg->mailbox0);
-		mb1 = RD_REG_WORD(&reg->mailbox1);
+		RD_REG_WORD(&reg->mailbox1);
 
 		WRT_REG_DWORD(&reg->hccr,
 		    HCCRX_CLR_RISC_INT);
@@ -486,7 +486,7 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
 		return ptr;
 
 	*last_chain = &fcec->type;
-	fcec->type = __constant_htonl(DUMP_CHAIN_FCE);
+	fcec->type = htonl(DUMP_CHAIN_FCE);
 	fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
 	    fce_calc_size(ha->fce_bufs));
 	fcec->size = htonl(fce_calc_size(ha->fce_bufs));
@@ -527,7 +527,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
 		/* aqp = ha->atio_q_map[que]; */
 		q = ptr;
 		*last_chain = &q->type;
-		q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
+		q->type = htonl(DUMP_CHAIN_QUEUE);
 		q->chain_size = htonl(
 		    sizeof(struct qla2xxx_mqueue_chain) +
 		    sizeof(struct qla2xxx_mqueue_header) +
@@ -536,7 +536,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
 
 		/* Add header. */
 		qh = ptr;
-		qh->queue = __constant_htonl(TYPE_ATIO_QUEUE);
+		qh->queue = htonl(TYPE_ATIO_QUEUE);
 		qh->number = htonl(que);
 		qh->size = htonl(aqp->length * sizeof(request_t));
 		ptr += sizeof(struct qla2xxx_mqueue_header);
@@ -571,7 +571,7 @@ qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
 		/* Add chain. */
 		q = ptr;
 		*last_chain = &q->type;
-		q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
+		q->type = htonl(DUMP_CHAIN_QUEUE);
 		q->chain_size = htonl(
 		    sizeof(struct qla2xxx_mqueue_chain) +
 		    sizeof(struct qla2xxx_mqueue_header) +
@@ -580,7 +580,7 @@ qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
 
 		/* Add header. */
 		qh = ptr;
-		qh->queue = __constant_htonl(TYPE_REQUEST_QUEUE);
+		qh->queue = htonl(TYPE_REQUEST_QUEUE);
 		qh->number = htonl(que);
 		qh->size = htonl(req->length * sizeof(request_t));
 		ptr += sizeof(struct qla2xxx_mqueue_header);
@@ -599,7 +599,7 @@ qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
 		/* Add chain. */
 		q = ptr;
 		*last_chain = &q->type;
-		q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
+		q->type = htonl(DUMP_CHAIN_QUEUE);
 		q->chain_size = htonl(
 		    sizeof(struct qla2xxx_mqueue_chain) +
 		    sizeof(struct qla2xxx_mqueue_header) +
@@ -608,7 +608,7 @@ qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
 
 		/* Add header. */
 		qh = ptr;
-		qh->queue = __constant_htonl(TYPE_RESPONSE_QUEUE);
+		qh->queue = htonl(TYPE_RESPONSE_QUEUE);
 		qh->number = htonl(que);
 		qh->size = htonl(rsp->length * sizeof(response_t));
 		ptr += sizeof(struct qla2xxx_mqueue_header);
@@ -627,15 +627,15 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
 	uint32_t cnt, que_idx;
 	uint8_t que_cnt;
 	struct qla2xxx_mq_chain *mq = ptr;
-	device_reg_t __iomem *reg;
+	device_reg_t *reg;
 
 	if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
 		return ptr;
 
 	mq = ptr;
 	*last_chain = &mq->type;
-	mq->type = __constant_htonl(DUMP_CHAIN_MQ);
-	mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain));
+	mq->type = htonl(DUMP_CHAIN_MQ);
+	mq->chain_size = htonl(sizeof(struct qla2xxx_mq_chain));
 
 	que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
 		ha->max_req_queues : ha->max_rsp_queues;
@@ -695,8 +695,10 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
 	flags = 0;
 
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
 
 	if (!ha->fw_dump) {
 		ql_log(ql_log_warn, vha, 0xd002,
@@ -832,8 +834,12 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 	qla2xxx_dump_post_process(base_vha, rval);
 
 qla2300_fw_dump_failed:
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#else
+	;
+#endif
 }
 
 /**
@@ -859,8 +865,10 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 	mb0 = mb2 = 0;
 	flags = 0;
 
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
 
 	if (!ha->fw_dump) {
 		ql_log(ql_log_warn, vha, 0xd004,
@@ -1030,8 +1038,12 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 	qla2xxx_dump_post_process(base_vha, rval);
 
 qla2100_fw_dump_failed:
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#else
+	;
+#endif
 }
 
 void
@@ -1039,7 +1051,6 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 {
 	int rval;
 	uint32_t cnt;
-	uint32_t risc_address;
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 	uint32_t __iomem *dmp_reg;
@@ -1047,7 +1058,6 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 	uint16_t __iomem *mbx_reg;
 	unsigned long flags;
 	struct qla24xx_fw_dump *fw;
-	uint32_t	ext_mem_cnt;
 	void		*nxt;
 	void		*nxt_chain;
 	uint32_t	*last_chain = NULL;
@@ -1056,12 +1066,13 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 	if (IS_P3P_TYPE(ha))
 		return;
 
-	risc_address = ext_mem_cnt = 0;
 	flags = 0;
 	ha->fw_dump_cap_flags = 0;
 
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
 
 	if (!ha->fw_dump) {
 		ql_log(ql_log_warn, vha, 0xd006,
@@ -1274,8 +1285,8 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 	nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
 	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
 	if (last_chain) {
-		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
-		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
+		*last_chain |= htonl(DUMP_CHAIN_LAST);
 	}
 
 	/* Adjust valid length. */
@@ -1285,8 +1296,12 @@ qla24xx_fw_dump_failed_0:
 	qla2xxx_dump_post_process(base_vha, rval);
 
 qla24xx_fw_dump_failed:
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#else
+	;
+#endif
 }
 
 void
@@ -1294,7 +1309,6 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 {
 	int rval;
 	uint32_t cnt;
-	uint32_t risc_address;
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 	uint32_t __iomem *dmp_reg;
@@ -1302,17 +1316,17 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 	uint16_t __iomem *mbx_reg;
 	unsigned long flags;
 	struct qla25xx_fw_dump *fw;
-	uint32_t	ext_mem_cnt;
 	void		*nxt, *nxt_chain;
 	uint32_t	*last_chain = NULL;
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
-	risc_address = ext_mem_cnt = 0;
 	flags = 0;
 	ha->fw_dump_cap_flags = 0;
 
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
 
 	if (!ha->fw_dump) {
 		ql_log(ql_log_warn, vha, 0xd008,
@@ -1329,7 +1343,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 	}
 	fw = &ha->fw_dump->isp.isp25;
 	qla2xxx_prep_dump(ha, ha->fw_dump);
-	ha->fw_dump->version = __constant_htonl(2);
+	ha->fw_dump->version = htonl(2);
 
 	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
 
@@ -1593,8 +1607,8 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
 	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
 	if (last_chain) {
-		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
-		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
+		*last_chain |= htonl(DUMP_CHAIN_LAST);
 	}
 
 	/* Adjust valid length. */
@@ -1604,8 +1618,12 @@ qla25xx_fw_dump_failed_0:
 	qla2xxx_dump_post_process(base_vha, rval);
 
 qla25xx_fw_dump_failed:
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#else
+	;
+#endif
 }
 
 void
@@ -1613,7 +1631,6 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 {
 	int rval;
 	uint32_t cnt;
-	uint32_t risc_address;
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 	uint32_t __iomem *dmp_reg;
@@ -1621,17 +1638,17 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 	uint16_t __iomem *mbx_reg;
 	unsigned long flags;
 	struct qla81xx_fw_dump *fw;
-	uint32_t	ext_mem_cnt;
 	void		*nxt, *nxt_chain;
 	uint32_t	*last_chain = NULL;
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
-	risc_address = ext_mem_cnt = 0;
 	flags = 0;
 	ha->fw_dump_cap_flags = 0;
 
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
 
 	if (!ha->fw_dump) {
 		ql_log(ql_log_warn, vha, 0xd00a,
@@ -1914,8 +1931,8 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
 	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
 	if (last_chain) {
-		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
-		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
+		*last_chain |= htonl(DUMP_CHAIN_LAST);
 	}
 
 	/* Adjust valid length. */
@@ -1925,16 +1942,19 @@ qla81xx_fw_dump_failed_0:
 	qla2xxx_dump_post_process(base_vha, rval);
 
 qla81xx_fw_dump_failed:
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#else
+	;
+#endif
 }
 
 void
 qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 {
 	int rval;
-	uint32_t cnt, reg_data;
-	uint32_t risc_address;
+	uint32_t cnt;
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 	uint32_t __iomem *dmp_reg;
@@ -1942,17 +1962,17 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 	uint16_t __iomem *mbx_reg;
 	unsigned long flags;
 	struct qla83xx_fw_dump *fw;
-	uint32_t	ext_mem_cnt;
 	void		*nxt, *nxt_chain;
 	uint32_t	*last_chain = NULL;
 	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
 
-	risc_address = ext_mem_cnt = 0;
 	flags = 0;
 	ha->fw_dump_cap_flags = 0;
 
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
 
 	if (!ha->fw_dump) {
 		ql_log(ql_log_warn, vha, 0xd00c,
@@ -1979,16 +1999,16 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
 	dmp_reg = &reg->iobase_window;
-	reg_data = RD_REG_DWORD(dmp_reg);
+	RD_REG_DWORD(dmp_reg);
 	WRT_REG_DWORD(dmp_reg, 0);
 
 	dmp_reg = &reg->unused_4_1[0];
-	reg_data = RD_REG_DWORD(dmp_reg);
+	RD_REG_DWORD(dmp_reg);
 	WRT_REG_DWORD(dmp_reg, 0);
 
 	WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
 	dmp_reg = &reg->unused_4_1[2];
-	reg_data = RD_REG_DWORD(dmp_reg);
+	RD_REG_DWORD(dmp_reg);
 	WRT_REG_DWORD(dmp_reg, 0);
 
 	/* select PCR and disable ecc checking and correction */
@@ -2420,8 +2440,8 @@ copy_queue:
 	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
 	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
 	if (last_chain) {
-		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
-		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+		ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
+		*last_chain |= htonl(DUMP_CHAIN_LAST);
 	}
 
 	/* Adjust valid length. */
@@ -2431,8 +2451,12 @@ qla83xx_fw_dump_failed_0:
 	qla2xxx_dump_post_process(base_vha, rval);
 
 qla83xx_fw_dump_failed:
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#else
+	;
+#endif
 }
 
 /****************************************************************************/
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 9ad819edcd67..388d79088b59 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -3061,6 +3061,7 @@ struct qla_hw_data {
 #define PCI_DEVICE_ID_QLOGIC_ISP2031	0x2031
 #define PCI_DEVICE_ID_QLOGIC_ISP2071	0x2071
 #define PCI_DEVICE_ID_QLOGIC_ISP2271	0x2271
+#define PCI_DEVICE_ID_QLOGIC_ISP2261	0x2261
 
 	uint32_t	device_type;
 #define DT_ISP2100	BIT_0
@@ -3084,7 +3085,8 @@ struct qla_hw_data {
 #define DT_ISP8044	BIT_18
 #define DT_ISP2071	BIT_19
 #define DT_ISP2271	BIT_20
-#define DT_ISP_LAST	(DT_ISP2271 << 1)
+#define DT_ISP2261	BIT_21
+#define DT_ISP_LAST	(DT_ISP2261 << 1)
 
 #define DT_T10_PI	BIT_25
 #define DT_IIDMA	BIT_26
@@ -3116,6 +3118,7 @@ struct qla_hw_data {
 #define IS_QLAFX00(ha)	(DT_MASK(ha) & DT_ISPFX00)
 #define IS_QLA2071(ha)	(DT_MASK(ha) & DT_ISP2071)
 #define IS_QLA2271(ha)	(DT_MASK(ha) & DT_ISP2271)
+#define IS_QLA2261(ha)	(DT_MASK(ha) & DT_ISP2261)
 
 #define IS_QLA23XX(ha)	(IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
 			IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -3124,7 +3127,7 @@ struct qla_hw_data {
 #define IS_QLA25XX(ha)	(IS_QLA2532(ha))
 #define IS_QLA83XX(ha)	(IS_QLA2031(ha) || IS_QLA8031(ha))
 #define IS_QLA84XX(ha)	(IS_QLA8432(ha))
-#define IS_QLA27XX(ha)	(IS_QLA2071(ha) || IS_QLA2271(ha))
+#define IS_QLA27XX(ha)	(IS_QLA2071(ha) || IS_QLA2271(ha) || IS_QLA2261(ha))
 #define IS_QLA24XX_TYPE(ha)	(IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
 			IS_QLA84XX(ha))
 #define IS_CNA_CAPABLE(ha)	(IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
@@ -3166,6 +3169,7 @@ struct qla_hw_data {
 #define IS_TGT_MODE_CAPABLE(ha)	(ha->tgt.atio_q_length)
 #define IS_SHADOW_REG_CAPABLE(ha)	(IS_QLA27XX(ha))
 #define IS_DPORT_CAPABLE(ha)	(IS_QLA83XX(ha) || IS_QLA27XX(ha))
+#define IS_FAWWN_CAPABLE(ha)	(IS_QLA83XX(ha) || IS_QLA27XX(ha))
 
 	/* HBA serial number */
 	uint8_t		serial0;
@@ -3288,6 +3292,7 @@ struct qla_hw_data {
 	uint8_t		mpi_version[3];
 	uint32_t	mpi_capabilities;
 	uint8_t		phy_version[3];
+	uint8_t		pep_version[3];
 
 	/* Firmware dump template */
 	void		*fw_dump_template;
@@ -3420,9 +3425,9 @@ struct qla_hw_data {
 	mempool_t	*ctx_mempool;
 #define FCP_CMND_DMA_POOL_SIZE 512
 
-	unsigned long	nx_pcibase;	/* Base I/O address */
-	uint8_t		*nxdb_rd_ptr;	/* Doorbell read pointer */
-	unsigned long	nxdb_wr_ptr;	/* Door bell write pointer */
+	void __iomem	*nx_pcibase;	/* Base I/O address */
+	void __iomem	*nxdb_rd_ptr;	/* Doorbell read pointer */
+	void __iomem	*nxdb_wr_ptr;	/* Door bell write pointer */
 
 	uint32_t	crb_win;
 	uint32_t	curr_window;
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index dccc4dcc39c8..94e8a8592f69 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -35,10 +35,10 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
35 ms_pkt->entry_type = MS_IOCB_TYPE; 35 ms_pkt->entry_type = MS_IOCB_TYPE;
36 ms_pkt->entry_count = 1; 36 ms_pkt->entry_count = 1;
37 SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER); 37 SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER);
38 ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG); 38 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
39 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 39 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
40 ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); 40 ms_pkt->cmd_dsd_count = cpu_to_le16(1);
41 ms_pkt->total_dsd_count = __constant_cpu_to_le16(2); 41 ms_pkt->total_dsd_count = cpu_to_le16(2);
42 ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size); 42 ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
43 ms_pkt->req_bytecount = cpu_to_le32(req_size); 43 ms_pkt->req_bytecount = cpu_to_le32(req_size);
44 44
@@ -74,10 +74,10 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
74 74
75 ct_pkt->entry_type = CT_IOCB_TYPE; 75 ct_pkt->entry_type = CT_IOCB_TYPE;
76 ct_pkt->entry_count = 1; 76 ct_pkt->entry_count = 1;
77 ct_pkt->nport_handle = __constant_cpu_to_le16(NPH_SNS); 77 ct_pkt->nport_handle = cpu_to_le16(NPH_SNS);
78 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 78 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
79 ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); 79 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
80 ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); 80 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
81 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); 81 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
82 ct_pkt->cmd_byte_count = cpu_to_le32(req_size); 82 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
83 83
@@ -142,7 +142,7 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
142 case CS_DATA_UNDERRUN: 142 case CS_DATA_UNDERRUN:
143 case CS_DATA_OVERRUN: /* Overrun? */ 143 case CS_DATA_OVERRUN: /* Overrun? */
144 if (ct_rsp->header.response != 144 if (ct_rsp->header.response !=
145 __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) { 145 cpu_to_be16(CT_ACCEPT_RESPONSE)) {
146 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077, 146 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
147 "%s failed rejected request on port_id: %02x%02x%02x Compeltion status 0x%x, response 0x%x\n", 147 "%s failed rejected request on port_id: %02x%02x%02x Compeltion status 0x%x, response 0x%x\n",
148 routine, vha->d_id.b.domain, 148 routine, vha->d_id.b.domain,
@@ -1153,10 +1153,10 @@ qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1153 ms_pkt->entry_type = MS_IOCB_TYPE; 1153 ms_pkt->entry_type = MS_IOCB_TYPE;
1154 ms_pkt->entry_count = 1; 1154 ms_pkt->entry_count = 1;
1155 SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id); 1155 SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
1156 ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG); 1156 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
1157 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 1157 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1158 ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); 1158 ms_pkt->cmd_dsd_count = cpu_to_le16(1);
1159 ms_pkt->total_dsd_count = __constant_cpu_to_le16(2); 1159 ms_pkt->total_dsd_count = cpu_to_le16(2);
1160 ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size); 1160 ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
1161 ms_pkt->req_bytecount = cpu_to_le32(req_size); 1161 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1162 1162
@@ -1193,8 +1193,8 @@ qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1193 ct_pkt->entry_count = 1; 1193 ct_pkt->entry_count = 1;
1194 ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id); 1194 ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
1195 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 1195 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1196 ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); 1196 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
1197 ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); 1197 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
1198 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); 1198 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
1199 ct_pkt->cmd_byte_count = cpu_to_le32(req_size); 1199 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1200 1200
@@ -1281,19 +1281,19 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1281 1281
1282 /* Prepare FDMI command arguments -- attribute block, attributes. */ 1282 /* Prepare FDMI command arguments -- attribute block, attributes. */
1283 memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE); 1283 memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE);
1284 ct_req->req.rhba.entry_count = __constant_cpu_to_be32(1); 1284 ct_req->req.rhba.entry_count = cpu_to_be32(1);
1285 memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE); 1285 memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE);
1286 size = 2 * WWN_SIZE + 4 + 4; 1286 size = 2 * WWN_SIZE + 4 + 4;
1287 1287
1288 /* Attributes */ 1288 /* Attributes */
1289 ct_req->req.rhba.attrs.count = 1289 ct_req->req.rhba.attrs.count =
1290 __constant_cpu_to_be32(FDMI_HBA_ATTR_COUNT); 1290 cpu_to_be32(FDMI_HBA_ATTR_COUNT);
1291 entries = ct_req->req.rhba.hba_identifier; 1291 entries = ct_req->req.rhba.hba_identifier;
1292 1292
1293 /* Nodename. */ 1293 /* Nodename. */
1294 eiter = entries + size; 1294 eiter = entries + size;
1295 eiter->type = __constant_cpu_to_be16(FDMI_HBA_NODE_NAME); 1295 eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
1296 eiter->len = __constant_cpu_to_be16(4 + WWN_SIZE); 1296 eiter->len = cpu_to_be16(4 + WWN_SIZE);
1297 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE); 1297 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1298 size += 4 + WWN_SIZE; 1298 size += 4 + WWN_SIZE;
1299 1299
@@ -1302,7 +1302,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1302 1302
1303 /* Manufacturer. */ 1303 /* Manufacturer. */
1304 eiter = entries + size; 1304 eiter = entries + size;
1305 eiter->type = __constant_cpu_to_be16(FDMI_HBA_MANUFACTURER); 1305 eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
1306 alen = strlen(QLA2XXX_MANUFACTURER); 1306 alen = strlen(QLA2XXX_MANUFACTURER);
1307 snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer), 1307 snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
1308 "%s", "QLogic Corporation"); 1308 "%s", "QLogic Corporation");
@@ -1315,7 +1315,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1315 1315
1316 /* Serial number. */ 1316 /* Serial number. */
1317 eiter = entries + size; 1317 eiter = entries + size;
1318 eiter->type = __constant_cpu_to_be16(FDMI_HBA_SERIAL_NUMBER); 1318 eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
1319 if (IS_FWI2_CAPABLE(ha)) 1319 if (IS_FWI2_CAPABLE(ha))
1320 qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num, 1320 qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
1321 sizeof(eiter->a.serial_num)); 1321 sizeof(eiter->a.serial_num));
@@ -1335,7 +1335,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1335 1335
1336 /* Model name. */ 1336 /* Model name. */
1337 eiter = entries + size; 1337 eiter = entries + size;
1338 eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL); 1338 eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
1339 snprintf(eiter->a.model, sizeof(eiter->a.model), 1339 snprintf(eiter->a.model, sizeof(eiter->a.model),
1340 "%s", ha->model_number); 1340 "%s", ha->model_number);
1341 alen = strlen(eiter->a.model); 1341 alen = strlen(eiter->a.model);
@@ -1348,7 +1348,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1348 1348
1349 /* Model description. */ 1349 /* Model description. */
1350 eiter = entries + size; 1350 eiter = entries + size;
1351 eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION); 1351 eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
1352 snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc), 1352 snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
1353 "%s", ha->model_desc); 1353 "%s", ha->model_desc);
1354 alen = strlen(eiter->a.model_desc); 1354 alen = strlen(eiter->a.model_desc);
@@ -1361,7 +1361,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1361 1361
1362 /* Hardware version. */ 1362 /* Hardware version. */
1363 eiter = entries + size; 1363 eiter = entries + size;
1364 eiter->type = __constant_cpu_to_be16(FDMI_HBA_HARDWARE_VERSION); 1364 eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
1365 if (!IS_FWI2_CAPABLE(ha)) { 1365 if (!IS_FWI2_CAPABLE(ha)) {
1366 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version), 1366 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
1367 "HW:%s", ha->adapter_id); 1367 "HW:%s", ha->adapter_id);
@@ -1385,7 +1385,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1385 1385
1386 /* Driver version. */ 1386 /* Driver version. */
1387 eiter = entries + size; 1387 eiter = entries + size;
1388 eiter->type = __constant_cpu_to_be16(FDMI_HBA_DRIVER_VERSION); 1388 eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
1389 snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version), 1389 snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
1390 "%s", qla2x00_version_str); 1390 "%s", qla2x00_version_str);
1391 alen = strlen(eiter->a.driver_version); 1391 alen = strlen(eiter->a.driver_version);
@@ -1398,7 +1398,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1398 1398
1399 /* Option ROM version. */ 1399 /* Option ROM version. */
1400 eiter = entries + size; 1400 eiter = entries + size;
1401 eiter->type = __constant_cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION); 1401 eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
1402 snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version), 1402 snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
1403 "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]); 1403 "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
1404 alen = strlen(eiter->a.orom_version); 1404 alen = strlen(eiter->a.orom_version);
@@ -1411,7 +1411,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1411 1411
1412 /* Firmware version */ 1412 /* Firmware version */
1413 eiter = entries + size; 1413 eiter = entries + size;
1414 eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION); 1414 eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
1415 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version, 1415 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
1416 sizeof(eiter->a.fw_version)); 1416 sizeof(eiter->a.fw_version));
1417 alen = strlen(eiter->a.fw_version); 1417 alen = strlen(eiter->a.fw_version);
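
Each hunk above builds one FDMI attribute the same way: store a big-endian
type code, copy in the value, pad the value out to a 4-byte boundary, and
advance the running size by the header plus the padded length. Reduced to a
self-contained sketch (struct layout, buffer size, and the helper name are
illustrative, not the driver's definitions):

#include <arpa/inet.h>          /* htons() */
#include <stdint.h>
#include <string.h>

struct fdmi_attr {
	uint16_t type;          /* big-endian on the wire */
	uint16_t len;           /* big-endian: 4-byte header + padded value */
	char value[64];
};

size_t fdmi_add_string(struct fdmi_attr *a, uint16_t type, const char *s)
{
	size_t alen = strlen(s);        /* caller ensures s fits a->value */

	memset(a->value, 0, sizeof(a->value));
	memcpy(a->value, s, alen);
	alen += 4 - (alen & 3);         /* pad (and NUL) to a 4-byte multiple */
	a->type = htons(type);
	a->len = htons((uint16_t)(4 + alen));
	return 4 + alen;                /* how far "size" advances */
}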
@@ -2484,8 +2484,8 @@ qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *vha, uint32_t req_size,
2484 ct_pkt->entry_count = 1; 2484 ct_pkt->entry_count = 1;
2485 ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id); 2485 ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
2486 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); 2486 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2487 ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); 2487 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
2488 ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); 2488 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
2489 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); 2489 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
2490 ct_pkt->cmd_byte_count = cpu_to_le32(req_size); 2490 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
2491 2491
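
The __constant_cpu_to_*() churn through this file (and the rest of the
series) is mechanical: the generic byte-order macros already reduce to
compile-time constants when handed constant input, so the __constant_
variants add nothing. A userspace sketch of why (SWAB16 stands in for the
kernel's __swab16; this is not the kernel's actual implementation):

#include <stdint.h>
#include <stdio.h>

#define SWAB16(x) ((uint16_t)((((uint16_t)(x) & 0x00ffU) << 8) | \
                              (((uint16_t)(x) & 0xff00U) >> 8)))

/* On a little-endian host, cpu_to_be16() is just a byte swap. */
#define CPU_TO_BE16_SKETCH(x) SWAB16(x)

int main(void)
{
	/* Constant input: legal in a static initializer, folded at
	 * compile time -- no separate __constant_ macro required. */
	static const uint16_t tag = CPU_TO_BE16_SKETCH(0x1234);

	printf("0x%04x\n", tag);        /* 0x3412 */
	return 0;
}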
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 11f2f3279eab..16a1935cc9c1 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1132,7 +1132,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
1132 unsigned long flags = 0; 1132 unsigned long flags = 0;
1133 struct qla_hw_data *ha = vha->hw; 1133 struct qla_hw_data *ha = vha->hw;
1134 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 1134 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1135 uint32_t cnt, d2; 1135 uint32_t cnt;
1136 uint16_t wd; 1136 uint16_t wd;
1137 static int abts_cnt; /* ISP abort retry counts */ 1137 static int abts_cnt; /* ISP abort retry counts */
1138 int rval = QLA_SUCCESS; 1138 int rval = QLA_SUCCESS;
@@ -1164,7 +1164,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
1164 udelay(100); 1164 udelay(100);
1165 1165
1166 /* Wait for firmware to complete NVRAM accesses. */ 1166 /* Wait for firmware to complete NVRAM accesses. */
1167 d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0); 1167 RD_REG_WORD(&reg->mailbox0);
1168 for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 && 1168 for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
1169 rval == QLA_SUCCESS; cnt--) { 1169 rval == QLA_SUCCESS; cnt--) {
1170 barrier(); 1170 barrier();
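
Note what stays behind: only the dead store to d2 is deleted, while the
RD_REG_WORD() itself is kept, because a read through an __iomem pointer has
side effects (it flushes posted writes and orders bus traffic) and must not
disappear with the variable. A volatile-based userspace analogue:

#include <stdint.h>

static volatile uint32_t fake_reg;      /* stand-in for an __iomem register */

void flush_read(void)
{
	(void)fake_reg;  /* load is still emitted; the value is discarded */
}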
@@ -1183,7 +1183,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
1183 RD_REG_DWORD(&reg->mailbox0)); 1183 RD_REG_DWORD(&reg->mailbox0));
1184 1184
1185 /* Wait for soft-reset to complete. */ 1185 /* Wait for soft-reset to complete. */
1186 d2 = RD_REG_DWORD(&reg->ctrl_status); 1186 RD_REG_DWORD(&reg->ctrl_status);
1187 for (cnt = 0; cnt < 6000000; cnt++) { 1187 for (cnt = 0; cnt < 6000000; cnt++) {
1188 barrier(); 1188 barrier();
1189 if ((RD_REG_DWORD(&reg->ctrl_status) & 1189 if ((RD_REG_DWORD(&reg->ctrl_status) &
@@ -1226,7 +1226,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
1226 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET); 1226 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
1227 RD_REG_DWORD(&reg->hccr); 1227 RD_REG_DWORD(&reg->hccr);
1228 1228
1229 d2 = (uint32_t) RD_REG_WORD(&reg->mailbox0); 1229 RD_REG_WORD(&reg->mailbox0);
1230 for (cnt = 6000000; RD_REG_WORD(&reg->mailbox0) != 0 && 1230 for (cnt = 6000000; RD_REG_WORD(&reg->mailbox0) != 0 &&
1231 rval == QLA_SUCCESS; cnt--) { 1231 rval == QLA_SUCCESS; cnt--) {
1232 barrier(); 1232 barrier();
@@ -1277,16 +1277,19 @@ qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
1277static void 1277static void
1278qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha) 1278qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
1279{ 1279{
1280 struct qla_hw_data *ha = vha->hw;
1281 uint32_t wd32 = 0; 1280 uint32_t wd32 = 0;
1282 uint delta_msec = 100; 1281 uint delta_msec = 100;
1283 uint elapsed_msec = 0; 1282 uint elapsed_msec = 0;
1284 uint timeout_msec; 1283 uint timeout_msec;
1285 ulong n; 1284 ulong n;
1286 1285
1287 if (!IS_QLA25XX(ha) && !IS_QLA2031(ha)) 1286 if (vha->hw->pdev->subsystem_device != 0x0175 &&
1287 vha->hw->pdev->subsystem_device != 0x0240)
1288 return; 1288 return;
1289 1289
1290 WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE);
1291 udelay(100);
1292
1290attempt: 1293attempt:
1291 timeout_msec = TIMEOUT_SEMAPHORE; 1294 timeout_msec = TIMEOUT_SEMAPHORE;
1292 n = timeout_msec / delta_msec; 1295 n = timeout_msec / delta_msec;
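
The retry machinery around the attempt: label is the usual bounded-polling
shape -- try, sleep delta_msec, give up once timeout_msec is spent. As a
generic sketch (function names are illustrative; sleep_ms stands in for
msleep()):

#include <stdbool.h>

bool poll_with_timeout(bool (*done)(void), void (*sleep_ms)(unsigned int),
		       unsigned int timeout_msec, unsigned int delta_msec)
{
	unsigned long n = timeout_msec / delta_msec;

	while (n--) {
		if (done())
			return true;
		sleep_ms(delta_msec);
	}
	return false;
}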
@@ -1690,7 +1693,7 @@ allocate:
1690 ha->fw_dump->signature[1] = 'L'; 1693 ha->fw_dump->signature[1] = 'L';
1691 ha->fw_dump->signature[2] = 'G'; 1694 ha->fw_dump->signature[2] = 'G';
1692 ha->fw_dump->signature[3] = 'C'; 1695 ha->fw_dump->signature[3] = 'C';
1693 ha->fw_dump->version = __constant_htonl(1); 1696 ha->fw_dump->version = htonl(1);
1694 1697
1695 ha->fw_dump->fixed_size = htonl(fixed_size); 1698 ha->fw_dump->fixed_size = htonl(fixed_size);
1696 ha->fw_dump->mem_size = htonl(mem_size); 1699 ha->fw_dump->mem_size = htonl(mem_size);
@@ -2070,8 +2073,8 @@ qla2x00_config_rings(struct scsi_qla_host *vha)
2070 struct rsp_que *rsp = ha->rsp_q_map[0]; 2073 struct rsp_que *rsp = ha->rsp_q_map[0];
2071 2074
2072 /* Setup ring parameters in initialization control block. */ 2075 /* Setup ring parameters in initialization control block. */
2073 ha->init_cb->request_q_outpointer = __constant_cpu_to_le16(0); 2076 ha->init_cb->request_q_outpointer = cpu_to_le16(0);
2074 ha->init_cb->response_q_inpointer = __constant_cpu_to_le16(0); 2077 ha->init_cb->response_q_inpointer = cpu_to_le16(0);
2075 ha->init_cb->request_q_length = cpu_to_le16(req->length); 2078 ha->init_cb->request_q_length = cpu_to_le16(req->length);
2076 ha->init_cb->response_q_length = cpu_to_le16(rsp->length); 2079 ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
2077 ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma)); 2080 ha->init_cb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
@@ -2090,7 +2093,7 @@ void
2090qla24xx_config_rings(struct scsi_qla_host *vha) 2093qla24xx_config_rings(struct scsi_qla_host *vha)
2091{ 2094{
2092 struct qla_hw_data *ha = vha->hw; 2095 struct qla_hw_data *ha = vha->hw;
2093 device_reg_t __iomem *reg = ISP_QUE_REG(ha, 0); 2096 device_reg_t *reg = ISP_QUE_REG(ha, 0);
2094 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp; 2097 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
2095 struct qla_msix_entry *msix; 2098 struct qla_msix_entry *msix;
2096 struct init_cb_24xx *icb; 2099 struct init_cb_24xx *icb;
@@ -2100,8 +2103,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
2100 2103
2101 /* Setup ring parameters in initialization control block. */ 2104 /* Setup ring parameters in initialization control block. */
2102 icb = (struct init_cb_24xx *)ha->init_cb; 2105 icb = (struct init_cb_24xx *)ha->init_cb;
2103 icb->request_q_outpointer = __constant_cpu_to_le16(0); 2106 icb->request_q_outpointer = cpu_to_le16(0);
2104 icb->response_q_inpointer = __constant_cpu_to_le16(0); 2107 icb->response_q_inpointer = cpu_to_le16(0);
2105 icb->request_q_length = cpu_to_le16(req->length); 2108 icb->request_q_length = cpu_to_le16(req->length);
2106 icb->response_q_length = cpu_to_le16(rsp->length); 2109 icb->response_q_length = cpu_to_le16(rsp->length);
2107 icb->request_q_address[0] = cpu_to_le32(LSD(req->dma)); 2110 icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
@@ -2110,18 +2113,17 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
2110 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma)); 2113 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
2111 2114
2112 /* Setup ATIO queue dma pointers for target mode */ 2115 /* Setup ATIO queue dma pointers for target mode */
2113 icb->atio_q_inpointer = __constant_cpu_to_le16(0); 2116 icb->atio_q_inpointer = cpu_to_le16(0);
2114 icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length); 2117 icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
2115 icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma)); 2118 icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
2116 icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma)); 2119 icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
2117 2120
2118 if (IS_SHADOW_REG_CAPABLE(ha)) 2121 if (IS_SHADOW_REG_CAPABLE(ha))
2119 icb->firmware_options_2 |= 2122 icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);
2120 __constant_cpu_to_le32(BIT_30|BIT_29);
2121 2123
2122 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 2124 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
2123 icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS); 2125 icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
2124 icb->rid = __constant_cpu_to_le16(rid); 2126 icb->rid = cpu_to_le16(rid);
2125 if (ha->flags.msix_enabled) { 2127 if (ha->flags.msix_enabled) {
2126 msix = &ha->msix_entries[1]; 2128 msix = &ha->msix_entries[1];
2127 ql_dbg(ql_dbg_init, vha, 0x00fd, 2129 ql_dbg(ql_dbg_init, vha, 0x00fd,
@@ -2131,26 +2133,22 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
2131 } 2133 }
2132 /* Use alternate PCI bus number */ 2134 /* Use alternate PCI bus number */
2133 if (MSB(rid)) 2135 if (MSB(rid))
2134 icb->firmware_options_2 |= 2136 icb->firmware_options_2 |= cpu_to_le32(BIT_19);
2135 __constant_cpu_to_le32(BIT_19);
2136 /* Use alternate PCI devfn */ 2137 /* Use alternate PCI devfn */
2137 if (LSB(rid)) 2138 if (LSB(rid))
2138 icb->firmware_options_2 |= 2139 icb->firmware_options_2 |= cpu_to_le32(BIT_18);
2139 __constant_cpu_to_le32(BIT_18);
2140 2140
2141 /* Use Disable MSIX Handshake mode for capable adapters */ 2141 /* Use Disable MSIX Handshake mode for capable adapters */
2142 if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) && 2142 if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
2143 (ha->flags.msix_enabled)) { 2143 (ha->flags.msix_enabled)) {
2144 icb->firmware_options_2 &= 2144 icb->firmware_options_2 &= cpu_to_le32(~BIT_22);
2145 __constant_cpu_to_le32(~BIT_22);
2146 ha->flags.disable_msix_handshake = 1; 2145 ha->flags.disable_msix_handshake = 1;
2147 ql_dbg(ql_dbg_init, vha, 0x00fe, 2146 ql_dbg(ql_dbg_init, vha, 0x00fe,
2148 "MSIX Handshake Disable Mode turned on.\n"); 2147 "MSIX Handshake Disable Mode turned on.\n");
2149 } else { 2148 } else {
2150 icb->firmware_options_2 |= 2149 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
2151 __constant_cpu_to_le32(BIT_22);
2152 } 2150 }
2153 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_23); 2151 icb->firmware_options_2 |= cpu_to_le32(BIT_23);
2154 2152
2155 WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0); 2153 WRT_REG_DWORD(&reg->isp25mq.req_q_in, 0);
2156 WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0); 2154 WRT_REG_DWORD(&reg->isp25mq.req_q_out, 0);
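
The firmware_options_2 rewrites lean on byte-swapping distributing over
bitwise AND/OR: swapping the mask once with cpu_to_le32() is equivalent to
converting the field to CPU order, masking, and converting back, so the
field can stay in wire order the whole time. A self-checking sketch of the
identity:

#include <assert.h>
#include <stdint.h>

static uint32_t swab32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
	       ((x << 8) & 0x00ff0000u) | (x << 24);
}

int main(void)
{
	uint32_t opts = 0xdeadbeefu, bit22 = 1u << 22;

	/* clearing a bit in swapped order == swapping after the clear */
	assert((swab32(opts) & swab32(~bit22)) == swab32(opts & ~bit22));
	return 0;
}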
@@ -2248,7 +2246,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
2248 } 2246 }
2249 2247
2250 if (IS_FWI2_CAPABLE(ha)) { 2248 if (IS_FWI2_CAPABLE(ha)) {
2251 mid_init_cb->options = __constant_cpu_to_le16(BIT_1); 2249 mid_init_cb->options = cpu_to_le16(BIT_1);
2252 mid_init_cb->init_cb.execution_throttle = 2250 mid_init_cb->init_cb.execution_throttle =
2253 cpu_to_le16(ha->fw_xcb_count); 2251 cpu_to_le16(ha->fw_xcb_count);
2254 /* D-Port Status */ 2252 /* D-Port Status */
@@ -2677,8 +2675,8 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2677 nv->frame_payload_size = 1024; 2675 nv->frame_payload_size = 1024;
2678 } 2676 }
2679 2677
2680 nv->max_iocb_allocation = __constant_cpu_to_le16(256); 2678 nv->max_iocb_allocation = cpu_to_le16(256);
2681 nv->execution_throttle = __constant_cpu_to_le16(16); 2679 nv->execution_throttle = cpu_to_le16(16);
2682 nv->retry_count = 8; 2680 nv->retry_count = 8;
2683 nv->retry_delay = 1; 2681 nv->retry_delay = 1;
2684 2682
@@ -2696,7 +2694,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2696 nv->host_p[1] = BIT_2; 2694 nv->host_p[1] = BIT_2;
2697 nv->reset_delay = 5; 2695 nv->reset_delay = 5;
2698 nv->port_down_retry_count = 8; 2696 nv->port_down_retry_count = 8;
2699 nv->max_luns_per_target = __constant_cpu_to_le16(8); 2697 nv->max_luns_per_target = cpu_to_le16(8);
2700 nv->link_down_timeout = 60; 2698 nv->link_down_timeout = 60;
2701 2699
2702 rval = 1; 2700 rval = 1;
@@ -2824,7 +2822,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2824 memcpy(vha->node_name, icb->node_name, WWN_SIZE); 2822 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
2825 memcpy(vha->port_name, icb->port_name, WWN_SIZE); 2823 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
2826 2824
2827 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF); 2825 icb->execution_throttle = cpu_to_le16(0xFFFF);
2828 2826
2829 ha->retry_count = nv->retry_count; 2827 ha->retry_count = nv->retry_count;
2830 2828
@@ -2876,10 +2874,10 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2876 if (ql2xloginretrycount) 2874 if (ql2xloginretrycount)
2877 ha->login_retry_count = ql2xloginretrycount; 2875 ha->login_retry_count = ql2xloginretrycount;
2878 2876
2879 icb->lun_enables = __constant_cpu_to_le16(0); 2877 icb->lun_enables = cpu_to_le16(0);
2880 icb->command_resource_count = 0; 2878 icb->command_resource_count = 0;
2881 icb->immediate_notify_resource_count = 0; 2879 icb->immediate_notify_resource_count = 0;
2882 icb->timeout = __constant_cpu_to_le16(0); 2880 icb->timeout = cpu_to_le16(0);
2883 2881
2884 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 2882 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
2885 /* Enable RIO */ 2883 /* Enable RIO */
@@ -3958,12 +3956,10 @@ qla2x00_fabric_dev_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3958 uint16_t *next_loopid) 3956 uint16_t *next_loopid)
3959{ 3957{
3960 int rval; 3958 int rval;
3961 int retry;
3962 uint8_t opts; 3959 uint8_t opts;
3963 struct qla_hw_data *ha = vha->hw; 3960 struct qla_hw_data *ha = vha->hw;
3964 3961
3965 rval = QLA_SUCCESS; 3962 rval = QLA_SUCCESS;
3966 retry = 0;
3967 3963
3968 if (IS_ALOGIO_CAPABLE(ha)) { 3964 if (IS_ALOGIO_CAPABLE(ha)) {
3969 if (fcport->flags & FCF_ASYNC_SENT) 3965 if (fcport->flags & FCF_ASYNC_SENT)
@@ -5117,7 +5113,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
5117 /* Bad NVRAM data, set defaults parameters. */ 5113 /* Bad NVRAM data, set defaults parameters. */
5118 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' 5114 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
5119 || nv->id[3] != ' ' || 5115 || nv->id[3] != ' ' ||
5120 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { 5116 nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
5121 /* Reset NVRAM data. */ 5117 /* Reset NVRAM data. */
5122 ql_log(ql_log_warn, vha, 0x006b, 5118 ql_log(ql_log_warn, vha, 0x006b,
5123 "Inconsistent NVRAM detected: checksum=0x%x id=%c " 5119 "Inconsistent NVRAM detected: checksum=0x%x id=%c "
@@ -5130,12 +5126,12 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
5130 * Set default initialization control block. 5126 * Set default initialization control block.
5131 */ 5127 */
5132 memset(nv, 0, ha->nvram_size); 5128 memset(nv, 0, ha->nvram_size);
5133 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION); 5129 nv->nvram_version = cpu_to_le16(ICB_VERSION);
5134 nv->version = __constant_cpu_to_le16(ICB_VERSION); 5130 nv->version = cpu_to_le16(ICB_VERSION);
5135 nv->frame_payload_size = 2048; 5131 nv->frame_payload_size = 2048;
5136 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); 5132 nv->execution_throttle = cpu_to_le16(0xFFFF);
5137 nv->exchange_count = __constant_cpu_to_le16(0); 5133 nv->exchange_count = cpu_to_le16(0);
5138 nv->hard_address = __constant_cpu_to_le16(124); 5134 nv->hard_address = cpu_to_le16(124);
5139 nv->port_name[0] = 0x21; 5135 nv->port_name[0] = 0x21;
5140 nv->port_name[1] = 0x00 + ha->port_no + 1; 5136 nv->port_name[1] = 0x00 + ha->port_no + 1;
5141 nv->port_name[2] = 0x00; 5137 nv->port_name[2] = 0x00;
@@ -5153,29 +5149,29 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
5153 nv->node_name[6] = 0x55; 5149 nv->node_name[6] = 0x55;
5154 nv->node_name[7] = 0x86; 5150 nv->node_name[7] = 0x86;
5155 qla24xx_nvram_wwn_from_ofw(vha, nv); 5151 qla24xx_nvram_wwn_from_ofw(vha, nv);
5156 nv->login_retry_count = __constant_cpu_to_le16(8); 5152 nv->login_retry_count = cpu_to_le16(8);
5157 nv->interrupt_delay_timer = __constant_cpu_to_le16(0); 5153 nv->interrupt_delay_timer = cpu_to_le16(0);
5158 nv->login_timeout = __constant_cpu_to_le16(0); 5154 nv->login_timeout = cpu_to_le16(0);
5159 nv->firmware_options_1 = 5155 nv->firmware_options_1 =
5160 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); 5156 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
5161 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4); 5157 nv->firmware_options_2 = cpu_to_le32(2 << 4);
5162 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12); 5158 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
5163 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13); 5159 nv->firmware_options_3 = cpu_to_le32(2 << 13);
5164 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10); 5160 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
5165 nv->efi_parameters = __constant_cpu_to_le32(0); 5161 nv->efi_parameters = cpu_to_le32(0);
5166 nv->reset_delay = 5; 5162 nv->reset_delay = 5;
5167 nv->max_luns_per_target = __constant_cpu_to_le16(128); 5163 nv->max_luns_per_target = cpu_to_le16(128);
5168 nv->port_down_retry_count = __constant_cpu_to_le16(30); 5164 nv->port_down_retry_count = cpu_to_le16(30);
5169 nv->link_down_timeout = __constant_cpu_to_le16(30); 5165 nv->link_down_timeout = cpu_to_le16(30);
5170 5166
5171 rval = 1; 5167 rval = 1;
5172 } 5168 }
5173 5169
5174 if (!qla_ini_mode_enabled(vha)) { 5170 if (!qla_ini_mode_enabled(vha)) {
5175 /* Don't enable full login after initial LIP */ 5171 /* Don't enable full login after initial LIP */
5176 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13); 5172 nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
5177 /* Don't enable LIP full login for initiator */ 5173 /* Don't enable LIP full login for initiator */
5178 nv->host_p &= __constant_cpu_to_le32(~BIT_10); 5174 nv->host_p &= cpu_to_le32(~BIT_10);
5179 } 5175 }
5180 5176
5181 qlt_24xx_config_nvram_stage1(vha, nv); 5177 qlt_24xx_config_nvram_stage1(vha, nv);
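
The enclosing function recovers rather than fails: a bad checksum or
signature resets the NVRAM image to the defaults above and carries on. The
chksum input is typically a simple sum over the stored image that must come
out to zero; an illustrative routine (not the exact qla2xxx one, which sums
larger words):

#include <stddef.h>
#include <stdint.h>

uint8_t nvram_chksum(const uint8_t *p, size_t len)
{
	uint8_t sum = 0;

	while (len--)
		sum += *p++;
	return sum;     /* nonzero => image is corrupt, load defaults */
}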
@@ -5209,14 +5205,14 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
5209 5205
5210 qlt_24xx_config_nvram_stage2(vha, icb); 5206 qlt_24xx_config_nvram_stage2(vha, icb);
5211 5207
5212 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) { 5208 if (nv->host_p & cpu_to_le32(BIT_15)) {
5213 /* Use alternate WWN? */ 5209 /* Use alternate WWN? */
5214 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); 5210 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
5215 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); 5211 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
5216 } 5212 }
5217 5213
5218 /* Prepare nodename */ 5214 /* Prepare nodename */
5219 if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) { 5215 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
5220 /* 5216 /*
5221 * Firmware will apply the following mask if the nodename was 5217 * Firmware will apply the following mask if the nodename was
5222 * not provided. 5218 * not provided.
@@ -5248,7 +5244,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
5248 memcpy(vha->node_name, icb->node_name, WWN_SIZE); 5244 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
5249 memcpy(vha->port_name, icb->port_name, WWN_SIZE); 5245 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
5250 5246
5251 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF); 5247 icb->execution_throttle = cpu_to_le16(0xFFFF);
5252 5248
5253 ha->retry_count = le16_to_cpu(nv->login_retry_count); 5249 ha->retry_count = le16_to_cpu(nv->login_retry_count);
5254 5250
@@ -5256,7 +5252,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
5256 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout) 5252 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
5257 nv->login_timeout = cpu_to_le16(ql2xlogintimeout); 5253 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
5258 if (le16_to_cpu(nv->login_timeout) < 4) 5254 if (le16_to_cpu(nv->login_timeout) < 4)
5259 nv->login_timeout = __constant_cpu_to_le16(4); 5255 nv->login_timeout = cpu_to_le16(4);
5260 ha->login_timeout = le16_to_cpu(nv->login_timeout); 5256 ha->login_timeout = le16_to_cpu(nv->login_timeout);
5261 icb->login_timeout = nv->login_timeout; 5257 icb->login_timeout = nv->login_timeout;
5262 5258
@@ -5307,7 +5303,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
5307 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? 5303 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
5308 le16_to_cpu(icb->interrupt_delay_timer): 2; 5304 le16_to_cpu(icb->interrupt_delay_timer): 2;
5309 } 5305 }
5310 icb->firmware_options_2 &= __constant_cpu_to_le32( 5306 icb->firmware_options_2 &= cpu_to_le32(
5311 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); 5307 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
5312 vha->flags.process_response_queue = 0; 5308 vha->flags.process_response_queue = 0;
5313 if (ha->zio_mode != QLA_ZIO_DISABLED) { 5309 if (ha->zio_mode != QLA_ZIO_DISABLED) {
@@ -6063,7 +6059,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
6063 /* Bad NVRAM data, set defaults parameters. */ 6059 /* Bad NVRAM data, set defaults parameters. */
6064 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' 6060 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
6065 || nv->id[3] != ' ' || 6061 || nv->id[3] != ' ' ||
6066 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { 6062 nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
6067 /* Reset NVRAM data. */ 6063 /* Reset NVRAM data. */
6068 ql_log(ql_log_info, vha, 0x0073, 6064 ql_log(ql_log_info, vha, 0x0073,
6069 "Inconsistent NVRAM detected: checksum=0x%x id=%c " 6065 "Inconsistent NVRAM detected: checksum=0x%x id=%c "
@@ -6077,11 +6073,11 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
6077 * Set default initialization control block. 6073 * Set default initialization control block.
6078 */ 6074 */
6079 memset(nv, 0, ha->nvram_size); 6075 memset(nv, 0, ha->nvram_size);
6080 nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION); 6076 nv->nvram_version = cpu_to_le16(ICB_VERSION);
6081 nv->version = __constant_cpu_to_le16(ICB_VERSION); 6077 nv->version = cpu_to_le16(ICB_VERSION);
6082 nv->frame_payload_size = 2048; 6078 nv->frame_payload_size = 2048;
6083 nv->execution_throttle = __constant_cpu_to_le16(0xFFFF); 6079 nv->execution_throttle = cpu_to_le16(0xFFFF);
6084 nv->exchange_count = __constant_cpu_to_le16(0); 6080 nv->exchange_count = cpu_to_le16(0);
6085 nv->port_name[0] = 0x21; 6081 nv->port_name[0] = 0x21;
6086 nv->port_name[1] = 0x00 + ha->port_no + 1; 6082 nv->port_name[1] = 0x00 + ha->port_no + 1;
6087 nv->port_name[2] = 0x00; 6083 nv->port_name[2] = 0x00;
@@ -6098,20 +6094,20 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
6098 nv->node_name[5] = 0x1c; 6094 nv->node_name[5] = 0x1c;
6099 nv->node_name[6] = 0x55; 6095 nv->node_name[6] = 0x55;
6100 nv->node_name[7] = 0x86; 6096 nv->node_name[7] = 0x86;
6101 nv->login_retry_count = __constant_cpu_to_le16(8); 6097 nv->login_retry_count = cpu_to_le16(8);
6102 nv->interrupt_delay_timer = __constant_cpu_to_le16(0); 6098 nv->interrupt_delay_timer = cpu_to_le16(0);
6103 nv->login_timeout = __constant_cpu_to_le16(0); 6099 nv->login_timeout = cpu_to_le16(0);
6104 nv->firmware_options_1 = 6100 nv->firmware_options_1 =
6105 __constant_cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1); 6101 cpu_to_le32(BIT_14|BIT_13|BIT_2|BIT_1);
6106 nv->firmware_options_2 = __constant_cpu_to_le32(2 << 4); 6102 nv->firmware_options_2 = cpu_to_le32(2 << 4);
6107 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12); 6103 nv->firmware_options_2 |= cpu_to_le32(BIT_12);
6108 nv->firmware_options_3 = __constant_cpu_to_le32(2 << 13); 6104 nv->firmware_options_3 = cpu_to_le32(2 << 13);
6109 nv->host_p = __constant_cpu_to_le32(BIT_11|BIT_10); 6105 nv->host_p = cpu_to_le32(BIT_11|BIT_10);
6110 nv->efi_parameters = __constant_cpu_to_le32(0); 6106 nv->efi_parameters = cpu_to_le32(0);
6111 nv->reset_delay = 5; 6107 nv->reset_delay = 5;
6112 nv->max_luns_per_target = __constant_cpu_to_le16(128); 6108 nv->max_luns_per_target = cpu_to_le16(128);
6113 nv->port_down_retry_count = __constant_cpu_to_le16(30); 6109 nv->port_down_retry_count = cpu_to_le16(30);
6114 nv->link_down_timeout = __constant_cpu_to_le16(180); 6110 nv->link_down_timeout = cpu_to_le16(180);
6115 nv->enode_mac[0] = 0x00; 6111 nv->enode_mac[0] = 0x00;
6116 nv->enode_mac[1] = 0xC0; 6112 nv->enode_mac[1] = 0xC0;
6117 nv->enode_mac[2] = 0xDD; 6113 nv->enode_mac[2] = 0xDD;
@@ -6170,13 +6166,13 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
6170 qlt_81xx_config_nvram_stage2(vha, icb); 6166 qlt_81xx_config_nvram_stage2(vha, icb);
6171 6167
6172 /* Use alternate WWN? */ 6168 /* Use alternate WWN? */
6173 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) { 6169 if (nv->host_p & cpu_to_le32(BIT_15)) {
6174 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); 6170 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
6175 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); 6171 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
6176 } 6172 }
6177 6173
6178 /* Prepare nodename */ 6174 /* Prepare nodename */
6179 if ((icb->firmware_options_1 & __constant_cpu_to_le32(BIT_14)) == 0) { 6175 if ((icb->firmware_options_1 & cpu_to_le32(BIT_14)) == 0) {
6180 /* 6176 /*
6181 * Firmware will apply the following mask if the nodename was 6177 * Firmware will apply the following mask if the nodename was
6182 * not provided. 6178 * not provided.
@@ -6205,7 +6201,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
6205 memcpy(vha->node_name, icb->node_name, WWN_SIZE); 6201 memcpy(vha->node_name, icb->node_name, WWN_SIZE);
6206 memcpy(vha->port_name, icb->port_name, WWN_SIZE); 6202 memcpy(vha->port_name, icb->port_name, WWN_SIZE);
6207 6203
6208 icb->execution_throttle = __constant_cpu_to_le16(0xFFFF); 6204 icb->execution_throttle = cpu_to_le16(0xFFFF);
6209 6205
6210 ha->retry_count = le16_to_cpu(nv->login_retry_count); 6206 ha->retry_count = le16_to_cpu(nv->login_retry_count);
6211 6207
@@ -6213,7 +6209,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
6213 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout) 6209 if (le16_to_cpu(nv->login_timeout) < ql2xlogintimeout)
6214 nv->login_timeout = cpu_to_le16(ql2xlogintimeout); 6210 nv->login_timeout = cpu_to_le16(ql2xlogintimeout);
6215 if (le16_to_cpu(nv->login_timeout) < 4) 6211 if (le16_to_cpu(nv->login_timeout) < 4)
6216 nv->login_timeout = __constant_cpu_to_le16(4); 6212 nv->login_timeout = cpu_to_le16(4);
6217 ha->login_timeout = le16_to_cpu(nv->login_timeout); 6213 ha->login_timeout = le16_to_cpu(nv->login_timeout);
6218 icb->login_timeout = nv->login_timeout; 6214 icb->login_timeout = nv->login_timeout;
6219 6215
@@ -6259,7 +6255,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
6259 6255
6260 /* if not running MSI-X we need handshaking on interrupts */ 6256 /* if not running MSI-X we need handshaking on interrupts */
6261 if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha))) 6257 if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
6262 icb->firmware_options_2 |= __constant_cpu_to_le32(BIT_22); 6258 icb->firmware_options_2 |= cpu_to_le32(BIT_22);
6263 6259
6264 /* Enable ZIO. */ 6260 /* Enable ZIO. */
6265 if (!vha->flags.init_done) { 6261 if (!vha->flags.init_done) {
@@ -6268,7 +6264,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
6268 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ? 6264 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
6269 le16_to_cpu(icb->interrupt_delay_timer): 2; 6265 le16_to_cpu(icb->interrupt_delay_timer): 2;
6270 } 6266 }
6271 icb->firmware_options_2 &= __constant_cpu_to_le32( 6267 icb->firmware_options_2 &= cpu_to_le32(
6272 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0)); 6268 ~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
6273 vha->flags.process_response_queue = 0; 6269 vha->flags.process_response_queue = 0;
6274 if (ha->zio_mode != QLA_ZIO_DISABLED) { 6270 if (ha->zio_mode != QLA_ZIO_DISABLED) {
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 6f02b26a35cf..c49df34e9b35 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -108,8 +108,7 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
108 cont_pkt = (cont_entry_t *)req->ring_ptr; 108 cont_pkt = (cont_entry_t *)req->ring_ptr;
109 109
110 /* Load packet defaults. */ 110 /* Load packet defaults. */
111 *((uint32_t *)(&cont_pkt->entry_type)) = 111 *((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);
112 __constant_cpu_to_le32(CONTINUE_TYPE);
113 112
114 return (cont_pkt); 113 return (cont_pkt);
115} 114}
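
The *((uint32_t *)&...->entry_type) store is a driver idiom worth calling
out: entry_type, entry_count, sys_define and entry_status are four
consecutive bytes, so one 32-bit little-endian store initializes the whole
header. A compressed analogue (the layout shown is illustrative; the sketch
reuses the driver's own type-punning cast):

#include <stdint.h>
#include <stdio.h>

struct iocb_hdr {
	uint8_t entry_type;
	uint8_t entry_count;
	uint8_t sys_define;
	uint8_t entry_status;
};

int main(void)
{
	struct iocb_hdr hdr;

	*(uint32_t *)&hdr = 0x0a;       /* on LE, 0x0a lands in entry_type */
	printf("type=%#x count=%u\n", hdr.entry_type, hdr.entry_count);
	return 0;
}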
@@ -138,8 +137,8 @@ qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
138 137
139 /* Load packet defaults. */ 138 /* Load packet defaults. */
140 *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ? 139 *((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
141 __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) : 140 cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
142 __constant_cpu_to_le32(CONTINUE_A64_TYPE); 141 cpu_to_le32(CONTINUE_A64_TYPE);
143 142
144 return (cont_pkt); 143 return (cont_pkt);
145} 144}
@@ -204,11 +203,11 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
204 203
205 /* Update entry type to indicate Command Type 2 IOCB */ 204 /* Update entry type to indicate Command Type 2 IOCB */
206 *((uint32_t *)(&cmd_pkt->entry_type)) = 205 *((uint32_t *)(&cmd_pkt->entry_type)) =
207 __constant_cpu_to_le32(COMMAND_TYPE); 206 cpu_to_le32(COMMAND_TYPE);
208 207
209 /* No data transfer */ 208 /* No data transfer */
210 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { 209 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
211 cmd_pkt->byte_count = __constant_cpu_to_le32(0); 210 cmd_pkt->byte_count = cpu_to_le32(0);
212 return; 211 return;
213 } 212 }
214 213
@@ -261,12 +260,11 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
261 cmd = GET_CMD_SP(sp); 260 cmd = GET_CMD_SP(sp);
262 261
263 /* Update entry type to indicate Command Type 3 IOCB */ 262 /* Update entry type to indicate Command Type 3 IOCB */
264 *((uint32_t *)(&cmd_pkt->entry_type)) = 263 *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);
265 __constant_cpu_to_le32(COMMAND_A64_TYPE);
266 264
267 /* No data transfer */ 265 /* No data transfer */
268 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { 266 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
269 cmd_pkt->byte_count = __constant_cpu_to_le32(0); 267 cmd_pkt->byte_count = cpu_to_le32(0);
270 return; 268 return;
271 } 269 }
272 270
@@ -310,7 +308,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
310int 308int
311qla2x00_start_scsi(srb_t *sp) 309qla2x00_start_scsi(srb_t *sp)
312{ 310{
313 int ret, nseg; 311 int nseg;
314 unsigned long flags; 312 unsigned long flags;
315 scsi_qla_host_t *vha; 313 scsi_qla_host_t *vha;
316 struct scsi_cmnd *cmd; 314 struct scsi_cmnd *cmd;
@@ -327,7 +325,6 @@ qla2x00_start_scsi(srb_t *sp)
327 struct rsp_que *rsp; 325 struct rsp_que *rsp;
328 326
329 /* Setup device pointers. */ 327 /* Setup device pointers. */
330 ret = 0;
331 vha = sp->fcport->vha; 328 vha = sp->fcport->vha;
332 ha = vha->hw; 329 ha = vha->hw;
333 reg = &ha->iobase->isp; 330 reg = &ha->iobase->isp;
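
The ret deletions in this file all silence gcc's -Wunused-but-set-variable
(surfaced by make W=1): the variable was assigned exactly once and never
read afterwards. Minimal reproducer of the warning:

int example(void)
{
	int ret;        /* warning: variable 'ret' set but not used */

	ret = 0;
	return 42;
}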
@@ -403,7 +400,7 @@ qla2x00_start_scsi(srb_t *sp)
403 /* Set target ID and LUN number*/ 400 /* Set target ID and LUN number*/
404 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id); 401 SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
405 cmd_pkt->lun = cpu_to_le16(cmd->device->lun); 402 cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
406 cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG); 403 cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);
407 404
408 /* Load SCSI command packet. */ 405 /* Load SCSI command packet. */
409 memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len); 406 memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
@@ -454,7 +451,7 @@ void
454qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req) 451qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
455{ 452{
456 struct qla_hw_data *ha = vha->hw; 453 struct qla_hw_data *ha = vha->hw;
457 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); 454 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
458 455
459 if (IS_P3P_TYPE(ha)) { 456 if (IS_P3P_TYPE(ha)) {
460 qla82xx_start_iocbs(vha); 457 qla82xx_start_iocbs(vha);
@@ -597,12 +594,11 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
597 cmd = GET_CMD_SP(sp); 594 cmd = GET_CMD_SP(sp);
598 595
599 /* Update entry type to indicate Command Type 3 IOCB */ 596 /* Update entry type to indicate Command Type 3 IOCB */
600 *((uint32_t *)(&cmd_pkt->entry_type)) = 597 *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);
601 __constant_cpu_to_le32(COMMAND_TYPE_6);
602 598
603 /* No data transfer */ 599 /* No data transfer */
604 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { 600 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
605 cmd_pkt->byte_count = __constant_cpu_to_le32(0); 601 cmd_pkt->byte_count = cpu_to_le32(0);
606 return 0; 602 return 0;
607 } 603 }
608 604
@@ -611,13 +607,11 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
611 607
612 /* Set transfer direction */ 608 /* Set transfer direction */
613 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 609 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
614 cmd_pkt->control_flags = 610 cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
615 __constant_cpu_to_le16(CF_WRITE_DATA);
616 vha->qla_stats.output_bytes += scsi_bufflen(cmd); 611 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
617 vha->qla_stats.output_requests++; 612 vha->qla_stats.output_requests++;
618 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 613 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
619 cmd_pkt->control_flags = 614 cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
620 __constant_cpu_to_le16(CF_READ_DATA);
621 vha->qla_stats.input_bytes += scsi_bufflen(cmd); 615 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
622 vha->qla_stats.input_requests++; 616 vha->qla_stats.input_requests++;
623 } 617 }
@@ -680,7 +674,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
680 * 674 *
681 * Returns the number of dsd lists needed to store @dsds. 675 * Returns the number of dsd lists needed to store @dsds.
682 */ 676 */
683inline uint16_t 677static inline uint16_t
684qla24xx_calc_dsd_lists(uint16_t dsds) 678qla24xx_calc_dsd_lists(uint16_t dsds)
685{ 679{
686 uint16_t dsd_lists = 0; 680 uint16_t dsd_lists = 0;
@@ -700,7 +694,7 @@ qla24xx_calc_dsd_lists(uint16_t dsds)
700 * @cmd_pkt: Command type 3 IOCB 694 * @cmd_pkt: Command type 3 IOCB
701 * @tot_dsds: Total number of segments to transfer 695 * @tot_dsds: Total number of segments to transfer
702 */ 696 */
703inline void 697static inline void
704qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt, 698qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
705 uint16_t tot_dsds) 699 uint16_t tot_dsds)
706{ 700{
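
Making these two helpers static gives them internal linkage: they stop
being kernel-wide symbols, cannot collide with identically named functions
elsewhere, and the compiler is free to inline or drop them. Rough shape of
the first one, as a sketch (DSDS_PER_LIST is an assumed capacity, not a
verified driver constant):

#include <stdint.h>

#define DSDS_PER_LIST 37        /* assumed entries per dsd list */

static inline uint16_t calc_dsd_lists(uint16_t dsds)
{
	return (dsds + DSDS_PER_LIST - 1) / DSDS_PER_LIST;      /* ceil */
}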
@@ -710,32 +704,27 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
710 struct scsi_cmnd *cmd; 704 struct scsi_cmnd *cmd;
711 struct scatterlist *sg; 705 struct scatterlist *sg;
712 int i; 706 int i;
713 struct req_que *req;
714 707
715 cmd = GET_CMD_SP(sp); 708 cmd = GET_CMD_SP(sp);
716 709
717 /* Update entry type to indicate Command Type 3 IOCB */ 710 /* Update entry type to indicate Command Type 3 IOCB */
718 *((uint32_t *)(&cmd_pkt->entry_type)) = 711 *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);
719 __constant_cpu_to_le32(COMMAND_TYPE_7);
720 712
721 /* No data transfer */ 713 /* No data transfer */
722 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) { 714 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
723 cmd_pkt->byte_count = __constant_cpu_to_le32(0); 715 cmd_pkt->byte_count = cpu_to_le32(0);
724 return; 716 return;
725 } 717 }
726 718
727 vha = sp->fcport->vha; 719 vha = sp->fcport->vha;
728 req = vha->req;
729 720
730 /* Set transfer direction */ 721 /* Set transfer direction */
731 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 722 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
732 cmd_pkt->task_mgmt_flags = 723 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
733 __constant_cpu_to_le16(TMF_WRITE_DATA);
734 vha->qla_stats.output_bytes += scsi_bufflen(cmd); 724 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
735 vha->qla_stats.output_requests++; 725 vha->qla_stats.output_requests++;
736 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 726 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
737 cmd_pkt->task_mgmt_flags = 727 cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
738 __constant_cpu_to_le16(TMF_READ_DATA);
739 vha->qla_stats.input_bytes += scsi_bufflen(cmd); 728 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
740 vha->qla_stats.input_requests++; 729 vha->qla_stats.input_requests++;
741 } 730 }
@@ -809,7 +798,7 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
809 * match LBA in CDB + N 798 * match LBA in CDB + N
810 */ 799 */
811 case SCSI_PROT_DIF_TYPE2: 800 case SCSI_PROT_DIF_TYPE2:
812 pkt->app_tag = __constant_cpu_to_le16(0); 801 pkt->app_tag = cpu_to_le16(0);
813 pkt->app_tag_mask[0] = 0x0; 802 pkt->app_tag_mask[0] = 0x0;
814 pkt->app_tag_mask[1] = 0x0; 803 pkt->app_tag_mask[1] = 0x0;
815 804
@@ -840,7 +829,7 @@ qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
840 case SCSI_PROT_DIF_TYPE1: 829 case SCSI_PROT_DIF_TYPE1:
841 pkt->ref_tag = cpu_to_le32((uint32_t) 830 pkt->ref_tag = cpu_to_le32((uint32_t)
842 (0xffffffff & scsi_get_lba(cmd))); 831 (0xffffffff & scsi_get_lba(cmd)));
843 pkt->app_tag = __constant_cpu_to_le16(0); 832 pkt->app_tag = cpu_to_le16(0);
844 pkt->app_tag_mask[0] = 0x0; 833 pkt->app_tag_mask[0] = 0x0;
845 pkt->app_tag_mask[1] = 0x0; 834 pkt->app_tag_mask[1] = 0x0;
846 835
@@ -933,11 +922,9 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
933 dma_addr_t sle_dma; 922 dma_addr_t sle_dma;
934 uint32_t sle_dma_len, tot_prot_dma_len = 0; 923 uint32_t sle_dma_len, tot_prot_dma_len = 0;
935 struct scsi_cmnd *cmd; 924 struct scsi_cmnd *cmd;
936 struct scsi_qla_host *vha;
937 925
938 memset(&sgx, 0, sizeof(struct qla2_sgx)); 926 memset(&sgx, 0, sizeof(struct qla2_sgx));
939 if (sp) { 927 if (sp) {
940 vha = sp->fcport->vha;
941 cmd = GET_CMD_SP(sp); 928 cmd = GET_CMD_SP(sp);
942 prot_int = cmd->device->sector_size; 929 prot_int = cmd->device->sector_size;
943 930
@@ -947,7 +934,6 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
947 934
948 sg_prot = scsi_prot_sglist(cmd); 935 sg_prot = scsi_prot_sglist(cmd);
949 } else if (tc) { 936 } else if (tc) {
950 vha = tc->vha;
951 prot_int = tc->blk_sz; 937 prot_int = tc->blk_sz;
952 sgx.tot_bytes = tc->bufflen; 938 sgx.tot_bytes = tc->bufflen;
953 sgx.cur_sg = tc->sg; 939 sgx.cur_sg = tc->sg;
@@ -1047,15 +1033,12 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1047 int i; 1033 int i;
1048 uint16_t used_dsds = tot_dsds; 1034 uint16_t used_dsds = tot_dsds;
1049 struct scsi_cmnd *cmd; 1035 struct scsi_cmnd *cmd;
1050 struct scsi_qla_host *vha;
1051 1036
1052 if (sp) { 1037 if (sp) {
1053 cmd = GET_CMD_SP(sp); 1038 cmd = GET_CMD_SP(sp);
1054 sgl = scsi_sglist(cmd); 1039 sgl = scsi_sglist(cmd);
1055 vha = sp->fcport->vha;
1056 } else if (tc) { 1040 } else if (tc) {
1057 sgl = tc->sg; 1041 sgl = tc->sg;
1058 vha = tc->vha;
1059 } else { 1042 } else {
1060 BUG(); 1043 BUG();
1061 return 1; 1044 return 1;
@@ -1231,7 +1214,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1231 uint32_t *cur_dsd, *fcp_dl; 1214 uint32_t *cur_dsd, *fcp_dl;
1232 scsi_qla_host_t *vha; 1215 scsi_qla_host_t *vha;
1233 struct scsi_cmnd *cmd; 1216 struct scsi_cmnd *cmd;
1234 int sgc;
1235 uint32_t total_bytes = 0; 1217 uint32_t total_bytes = 0;
1236 uint32_t data_bytes; 1218 uint32_t data_bytes;
1237 uint32_t dif_bytes; 1219 uint32_t dif_bytes;
@@ -1247,10 +1229,8 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1247 1229
1248 cmd = GET_CMD_SP(sp); 1230 cmd = GET_CMD_SP(sp);
1249 1231
1250 sgc = 0;
1251 /* Update entry type to indicate Command Type CRC_2 IOCB */ 1232 /* Update entry type to indicate Command Type CRC_2 IOCB */
1252 *((uint32_t *)(&cmd_pkt->entry_type)) = 1233 *((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);
1253 __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
1254 1234
1255 vha = sp->fcport->vha; 1235 vha = sp->fcport->vha;
1256 ha = vha->hw; 1236 ha = vha->hw;
@@ -1258,7 +1238,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1258 /* No data transfer */ 1238 /* No data transfer */
1259 data_bytes = scsi_bufflen(cmd); 1239 data_bytes = scsi_bufflen(cmd);
1260 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) { 1240 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1261 cmd_pkt->byte_count = __constant_cpu_to_le32(0); 1241 cmd_pkt->byte_count = cpu_to_le32(0);
1262 return QLA_SUCCESS; 1242 return QLA_SUCCESS;
1263 } 1243 }
1264 1244
@@ -1267,10 +1247,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1267 /* Set transfer direction */ 1247 /* Set transfer direction */
1268 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 1248 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1269 cmd_pkt->control_flags = 1249 cmd_pkt->control_flags =
1270 __constant_cpu_to_le16(CF_WRITE_DATA); 1250 cpu_to_le16(CF_WRITE_DATA);
1271 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 1251 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1272 cmd_pkt->control_flags = 1252 cmd_pkt->control_flags =
1273 __constant_cpu_to_le16(CF_READ_DATA); 1253 cpu_to_le16(CF_READ_DATA);
1274 } 1254 }
1275 1255
1276 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) || 1256 if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
@@ -1392,7 +1372,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1392 crc_ctx_pkt->blk_size = cpu_to_le16(blk_size); 1372 crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1393 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts); 1373 crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1394 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); 1374 crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1395 crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0); 1375 crc_ctx_pkt->guard_seed = cpu_to_le16(0);
1396 /* Fibre channel byte count */ 1376 /* Fibre channel byte count */
1397 cmd_pkt->byte_count = cpu_to_le32(total_bytes); 1377 cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1398 fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 + 1378 fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
@@ -1400,13 +1380,12 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1400 *fcp_dl = htonl(total_bytes); 1380 *fcp_dl = htonl(total_bytes);
1401 1381
1402 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) { 1382 if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1403 cmd_pkt->byte_count = __constant_cpu_to_le32(0); 1383 cmd_pkt->byte_count = cpu_to_le32(0);
1404 return QLA_SUCCESS; 1384 return QLA_SUCCESS;
1405 } 1385 }
1406 /* Walks data segments */ 1386 /* Walks data segments */
1407 1387
1408 cmd_pkt->control_flags |= 1388 cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1409 __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1410 1389
1411 if (!bundling && tot_prot_dsds) { 1390 if (!bundling && tot_prot_dsds) {
1412 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp, 1391 if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
@@ -1418,8 +1397,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1418 1397
1419 if (bundling && tot_prot_dsds) { 1398 if (bundling && tot_prot_dsds) {
1420 /* Walks dif segments */ 1399 /* Walks dif segments */
1421 cmd_pkt->control_flags |= 1400 cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1422 __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1423 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; 1401 cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
1424 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd, 1402 if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1425 tot_prot_dsds, NULL)) 1403 tot_prot_dsds, NULL))
@@ -1442,7 +1420,7 @@ crc_queuing_error:
1442int 1420int
1443qla24xx_start_scsi(srb_t *sp) 1421qla24xx_start_scsi(srb_t *sp)
1444{ 1422{
1445 int ret, nseg; 1423 int nseg;
1446 unsigned long flags; 1424 unsigned long flags;
1447 uint32_t *clr_ptr; 1425 uint32_t *clr_ptr;
1448 uint32_t index; 1426 uint32_t index;
@@ -1458,8 +1436,6 @@ qla24xx_start_scsi(srb_t *sp)
1458 struct qla_hw_data *ha = vha->hw; 1436 struct qla_hw_data *ha = vha->hw;
1459 1437
1460 /* Setup device pointers. */ 1438 /* Setup device pointers. */
1461 ret = 0;
1462
1463 qla25xx_set_que(sp, &rsp); 1439 qla25xx_set_que(sp, &rsp);
1464 req = vha->req; 1440 req = vha->req;
1465 1441
@@ -1753,7 +1729,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
1753 cmd_pkt->entry_count = (uint8_t)req_cnt; 1729 cmd_pkt->entry_count = (uint8_t)req_cnt;
1754 /* Specify response queue number where completion should happen */ 1730 /* Specify response queue number where completion should happen */
1755 cmd_pkt->entry_status = (uint8_t) rsp->id; 1731 cmd_pkt->entry_status = (uint8_t) rsp->id;
1756 cmd_pkt->timeout = __constant_cpu_to_le16(0); 1732 cmd_pkt->timeout = cpu_to_le16(0);
1757 wmb(); 1733 wmb();
1758 1734
1759 /* Adjust ring index. */ 1735 /* Adjust ring index. */
@@ -1819,7 +1795,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1819{ 1795{
1820 struct qla_hw_data *ha = vha->hw; 1796 struct qla_hw_data *ha = vha->hw;
1821 struct req_que *req = ha->req_q_map[0]; 1797 struct req_que *req = ha->req_q_map[0];
1822 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); 1798 device_reg_t *reg = ISP_QUE_REG(ha, req->id);
1823 uint32_t index, handle; 1799 uint32_t index, handle;
1824 request_t *pkt; 1800 request_t *pkt;
1825 uint16_t cnt, req_cnt; 1801 uint16_t cnt, req_cnt;
@@ -2044,10 +2020,10 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2044 els_iocb->entry_status = 0; 2020 els_iocb->entry_status = 0;
2045 els_iocb->handle = sp->handle; 2021 els_iocb->handle = sp->handle;
2046 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2022 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2047 els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt); 2023 els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2048 els_iocb->vp_index = sp->fcport->vha->vp_idx; 2024 els_iocb->vp_index = sp->fcport->vha->vp_idx;
2049 els_iocb->sof_type = EST_SOFI3; 2025 els_iocb->sof_type = EST_SOFI3;
2050 els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt); 2026 els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2051 2027
2052 els_iocb->opcode = 2028 els_iocb->opcode =
2053 sp->type == SRB_ELS_CMD_RPT ? 2029 sp->type == SRB_ELS_CMD_RPT ?
@@ -2091,7 +2067,6 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2091 struct qla_hw_data *ha = vha->hw; 2067 struct qla_hw_data *ha = vha->hw;
2092 struct fc_bsg_job *bsg_job = sp->u.bsg_job; 2068 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2093 int loop_iterartion = 0; 2069 int loop_iterartion = 0;
2094 int cont_iocb_prsnt = 0;
2095 int entry_count = 1; 2070 int entry_count = 1;
2096 2071
2097 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t)); 2072 memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
@@ -2099,13 +2074,13 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2099 ct_iocb->entry_status = 0; 2074 ct_iocb->entry_status = 0;
2100 ct_iocb->handle1 = sp->handle; 2075 ct_iocb->handle1 = sp->handle;
2101 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id); 2076 SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2102 ct_iocb->status = __constant_cpu_to_le16(0); 2077 ct_iocb->status = cpu_to_le16(0);
2103 ct_iocb->control_flags = __constant_cpu_to_le16(0); 2078 ct_iocb->control_flags = cpu_to_le16(0);
2104 ct_iocb->timeout = 0; 2079 ct_iocb->timeout = 0;
2105 ct_iocb->cmd_dsd_count = 2080 ct_iocb->cmd_dsd_count =
2106 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt); 2081 cpu_to_le16(bsg_job->request_payload.sg_cnt);
2107 ct_iocb->total_dsd_count = 2082 ct_iocb->total_dsd_count =
2108 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1); 2083 cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2109 ct_iocb->req_bytecount = 2084 ct_iocb->req_bytecount =
2110 cpu_to_le32(bsg_job->request_payload.payload_len); 2085 cpu_to_le32(bsg_job->request_payload.payload_len);
2111 ct_iocb->rsp_bytecount = 2086 ct_iocb->rsp_bytecount =
@@ -2142,7 +2117,6 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2142 vha->hw->req_q_map[0]); 2117 vha->hw->req_q_map[0]);
2143 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; 2118 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2144 avail_dsds = 5; 2119 avail_dsds = 5;
2145 cont_iocb_prsnt = 1;
2146 entry_count++; 2120 entry_count++;
2147 } 2121 }
2148 2122
@@ -2170,7 +2144,6 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2170 struct qla_hw_data *ha = vha->hw; 2144 struct qla_hw_data *ha = vha->hw;
2171 struct fc_bsg_job *bsg_job = sp->u.bsg_job; 2145 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2172 int loop_iterartion = 0; 2146 int loop_iterartion = 0;
2173 int cont_iocb_prsnt = 0;
2174 int entry_count = 1; 2147 int entry_count = 1;
2175 2148
2176 ct_iocb->entry_type = CT_IOCB_TYPE; 2149 ct_iocb->entry_type = CT_IOCB_TYPE;
@@ -2180,13 +2153,13 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2180 2153
2181 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2154 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2182 ct_iocb->vp_index = sp->fcport->vha->vp_idx; 2155 ct_iocb->vp_index = sp->fcport->vha->vp_idx;
2183 ct_iocb->comp_status = __constant_cpu_to_le16(0); 2156 ct_iocb->comp_status = cpu_to_le16(0);
2184 2157
2185 ct_iocb->cmd_dsd_count = 2158 ct_iocb->cmd_dsd_count =
2186 __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt); 2159 cpu_to_le16(bsg_job->request_payload.sg_cnt);
2187 ct_iocb->timeout = 0; 2160 ct_iocb->timeout = 0;
2188 ct_iocb->rsp_dsd_count = 2161 ct_iocb->rsp_dsd_count =
2189 __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt); 2162 cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2190 ct_iocb->rsp_byte_count = 2163 ct_iocb->rsp_byte_count =
2191 cpu_to_le32(bsg_job->reply_payload.payload_len); 2164 cpu_to_le32(bsg_job->reply_payload.payload_len);
2192 ct_iocb->cmd_byte_count = 2165 ct_iocb->cmd_byte_count =
@@ -2217,7 +2190,6 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2217 ha->req_q_map[0]); 2190 ha->req_q_map[0]);
2218 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address; 2191 cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2219 avail_dsds = 5; 2192 avail_dsds = 5;
2220 cont_iocb_prsnt = 1;
2221 entry_count++; 2193 entry_count++;
2222 } 2194 }
2223 2195
@@ -2240,7 +2212,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2240int 2212int
2241qla82xx_start_scsi(srb_t *sp) 2213qla82xx_start_scsi(srb_t *sp)
2242{ 2214{
2243 int ret, nseg; 2215 int nseg;
2244 unsigned long flags; 2216 unsigned long flags;
2245 struct scsi_cmnd *cmd; 2217 struct scsi_cmnd *cmd;
2246 uint32_t *clr_ptr; 2218 uint32_t *clr_ptr;
@@ -2260,7 +2232,6 @@ qla82xx_start_scsi(srb_t *sp)
2260 struct rsp_que *rsp = NULL; 2232 struct rsp_que *rsp = NULL;
2261 2233
2262 /* Setup device pointers. */ 2234 /* Setup device pointers. */
2263 ret = 0;
2264 reg = &ha->iobase->isp82; 2235 reg = &ha->iobase->isp82;
2265 cmd = GET_CMD_SP(sp); 2236 cmd = GET_CMD_SP(sp);
2266 req = vha->req; 2237 req = vha->req;
@@ -2539,16 +2510,12 @@ sufficient_dsds:
2539 /* write, read and verify logic */ 2510 /* write, read and verify logic */
2540 dbval = dbval | (req->id << 8) | (req->ring_index << 16); 2511 dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2541 if (ql2xdbwr) 2512 if (ql2xdbwr)
2542 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval); 2513 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
2543 else { 2514 else {
2544 WRT_REG_DWORD( 2515 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
2545 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2546 dbval);
2547 wmb(); 2516 wmb();
2548 while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) { 2517 while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
2549 WRT_REG_DWORD( 2518 WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
2550 (unsigned long __iomem *)ha->nxdb_wr_ptr,
2551 dbval);
2552 wmb(); 2519 wmb();
2553 } 2520 }
2554 } 2521 }
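
The (uintptr_t __force) cast is aimed at sparse rather than the compiler:
nxdb_wr_ptr carries the __iomem address-space tag, and converting it to a
plain integer for qla82xx_wr_32() only passes the checker when the escape
is marked deliberate with __force. Roughly how the annotations expand:

#ifdef __CHECKER__              /* defined while sparse is running */
# define __iomem __attribute__((noderef, address_space(2)))
# define __force __attribute__((force))
#else
# define __iomem
# define __force
#endif

unsigned long iomem_to_raw(void __iomem *p)
{
	return (unsigned long __force)p;        /* deliberate escape */
}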
@@ -2682,7 +2649,7 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
2682 2649
2683 /*Update entry type to indicate bidir command */ 2650 /*Update entry type to indicate bidir command */
2684 *((uint32_t *)(&cmd_pkt->entry_type)) = 2651 *((uint32_t *)(&cmd_pkt->entry_type)) =
2685 __constant_cpu_to_le32(COMMAND_BIDIRECTIONAL); 2652 cpu_to_le32(COMMAND_BIDIRECTIONAL);
2686 2653
2687 /* Set the transfer direction, in this set both flags 2654 /* Set the transfer direction, in this set both flags
2688 * Also set the BD_WRAP_BACK flag, firmware will take care 2655 * Also set the BD_WRAP_BACK flag, firmware will take care
@@ -2690,8 +2657,7 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
2690 */ 2657 */
2691 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); 2658 cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2692 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); 2659 cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2693 cmd_pkt->control_flags = 2660 cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
2694 __constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
2695 BD_WRAP_BACK); 2661 BD_WRAP_BACK);
2696 2662
2697 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len; 2663 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 5559d5e75bbf..ccf6a7f99024 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -116,7 +116,7 @@ bool
116qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg) 116qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
117{ 117{
118 /* Check for PCI disconnection */ 118 /* Check for PCI disconnection */
119 if (reg == 0xffffffff) { 119 if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
120 if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) && 120 if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
121 !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) && 121 !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
122 !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) { 122 !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
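
A surprise-removed PCI function answers MMIO reads with all ones, but so
does a device that AER/EEH has already taken offline; the added
pci_channel_offline() test keeps the driver from declaring a second
disconnect while recovery is in flight. The predicate's shape, with the
PCI-core call stubbed out:

#include <stdbool.h>
#include <stdint.h>

static bool channel_offline;    /* stand-in for pci_channel_offline(pdev) */

bool reg32_indicates_disconnect(uint32_t reg)
{
	return reg == 0xffffffffu && !channel_offline;
}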
@@ -560,6 +560,17 @@ qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
560 return ret; 560 return ret;
561} 561}
562 562
563static inline fc_port_t *
564qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
565{
566 fc_port_t *fcport;
567
568 list_for_each_entry(fcport, &vha->vp_fcports, list)
569 if (fcport->loop_id == loop_id)
570 return fcport;
571 return NULL;
572}
573
563/** 574/**
 564 * qla2x00_async_event() - Process asynchronous events. 575 * qla2x00_async_event() - Process asynchronous events.
565 * @ha: SCSI driver HA context 576 * @ha: SCSI driver HA context
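
The new qla2x00_find_fcport_by_loopid() is a plain linear search over
vha->vp_fcports, consumed by the port-logout handling added further down.
A userspace analogue with the kernel list machinery reduced to a next
pointer:

#include <stddef.h>
#include <stdint.h>

struct fcport {
	uint16_t loop_id;
	struct fcport *next;
};

struct fcport *find_fcport_by_loopid(struct fcport *head, uint16_t loop_id)
{
	for (struct fcport *p = head; p; p = p->next)
		if (p->loop_id == loop_id)
			return p;
	return NULL;
}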
@@ -575,7 +586,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
 	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
-	uint32_t rscn_entry, host_pid, tmp_pid;
+	uint32_t rscn_entry, host_pid;
 	unsigned long flags;
 	fc_port_t *fcport = NULL;
 
@@ -897,11 +908,29 @@ skip_rio:
 		    (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
 			break;
 
-		/* Global event -- port logout or port unavailable. */
-		if (mb[1] == 0xffff && mb[2] == 0x7) {
+		if (mb[2] == 0x7) {
 			ql_dbg(ql_dbg_async, vha, 0x5010,
-			    "Port unavailable %04x %04x %04x.\n",
+			    "Port %s %04x %04x %04x.\n",
+			    mb[1] == 0xffff ? "unavailable" : "logout",
 			    mb[1], mb[2], mb[3]);
+
+			if (mb[1] == 0xffff)
+				goto global_port_update;
+
+			/* Port logout */
+			fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]);
+			if (!fcport)
+				break;
+			if (atomic_read(&fcport->state) != FCS_ONLINE)
+				break;
+			ql_dbg(ql_dbg_async, vha, 0x508a,
+			    "Marking port lost loopid=%04x portid=%06x.\n",
+			    fcport->loop_id, fcport->d_id.b24);
+			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
+			break;
+
+global_port_update:
+			/* Port unavailable. */
 			ql_log(ql_log_warn, vha, 0x505e,
 			    "Link is offline.\n");
 
@@ -998,7 +1027,6 @@ skip_rio:
 		list_for_each_entry(fcport, &vha->vp_fcports, list) {
 			if (atomic_read(&fcport->state) != FCS_ONLINE)
 				continue;
-			tmp_pid = fcport->d_id.b24;
 			if (fcport->d_id.b24 == rscn_entry) {
 				qla2x00_mark_device_lost(vha, fcport, 0, 0);
 				break;
@@ -1565,7 +1593,7 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
1565 "Async-%s error - hdl=%x entry-status(%x).\n", 1593 "Async-%s error - hdl=%x entry-status(%x).\n",
1566 type, sp->handle, sts->entry_status); 1594 type, sp->handle, sts->entry_status);
1567 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 1595 iocb->u.tmf.data = QLA_FUNCTION_FAILED;
1568 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1596 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
1569 ql_log(ql_log_warn, fcport->vha, 0x5039, 1597 ql_log(ql_log_warn, fcport->vha, 0x5039,
1570 "Async-%s error - hdl=%x completion status(%x).\n", 1598 "Async-%s error - hdl=%x completion status(%x).\n",
1571 type, sp->handle, sts->comp_status); 1599 type, sp->handle, sts->comp_status);
@@ -2045,14 +2073,18 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 	}
 
 	/* Validate handle. */
-	if (handle < req->num_outstanding_cmds)
+	if (handle < req->num_outstanding_cmds) {
 		sp = req->outstanding_cmds[handle];
-	else
-		sp = NULL;
-
-	if (sp == NULL) {
+		if (!sp) {
+			ql_dbg(ql_dbg_io, vha, 0x3075,
+			    "%s(%ld): Already returned command for status handle (0x%x).\n",
+			    __func__, vha->host_no, sts->handle);
+			return;
+		}
+	} else {
 		ql_dbg(ql_dbg_io, vha, 0x3017,
-		    "Invalid status handle (0x%x).\n", sts->handle);
+		    "Invalid status handle, out of range (0x%x).\n",
+		    sts->handle);
 
 		if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
 			if (IS_P3P_TYPE(ha))
@@ -2339,12 +2371,12 @@ out:
 		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
 		    "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
 		    "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
-		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
+		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
 		    comp_status, scsi_status, res, vha->host_no,
 		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
 		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
 		    cp->cmnd, scsi_bufflen(cp), rsp_info_len,
-		    resid_len, fw_resid_len);
+		    resid_len, fw_resid_len, sp, cp);
 
 	if (rsp->status_srb == NULL)
 		sp->done(ha, sp, res);
@@ -2441,13 +2473,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
 	}
 fatal:
 	ql_log(ql_log_warn, vha, 0x5030,
-	    "Error entry - invalid handle/queue.\n");
-
-	if (IS_P3P_TYPE(ha))
-		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
-	else
-		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
-	qla2xxx_wake_dpc(vha);
+	    "Error entry - invalid handle/queue (%04x).\n", que);
 }
 
 /**
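
The qla_isr.c status-entry change above splits handle validation into two distinct failure modes: an in-range handle whose outstanding-commands slot is NULL (the command was already returned, so the late status entry is simply dropped) versus an out-of-range handle (firmware/driver disagreement, which still triggers ISP recovery). A rough standalone sketch of that two-level lookup; the names here are illustrative, not the driver's:

#include <stddef.h>

struct cmd;

enum lookup_result { CMD_FOUND, CMD_ALREADY_RETURNED, CMD_BAD_HANDLE };

/* Out-of-range handles are treated as corruption; an in-range NULL
 * slot just means the command completed earlier and the stale status
 * can be ignored.
 */
static enum lookup_result
lookup_outstanding(struct cmd **slots, size_t nslots, size_t handle,
		   struct cmd **out)
{
	*out = NULL;
	if (handle >= nslots)
		return CMD_BAD_HANDLE;
	*out = slots[handle];
	return *out ? CMD_FOUND : CMD_ALREADY_RETURNED;
}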
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index b2f713ad9034..cb11e04be568 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -555,7 +555,9 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
 	if (IS_FWI2_CAPABLE(ha))
 		mcp->in_mb |= MBX_17|MBX_16|MBX_15;
 	if (IS_QLA27XX(ha))
-		mcp->in_mb |= MBX_21|MBX_20|MBX_19|MBX_18;
+		mcp->in_mb |= MBX_23 | MBX_22 | MBX_21 | MBX_20 | MBX_19 |
+		    MBX_18 | MBX_14 | MBX_13 | MBX_11 | MBX_10 | MBX_9 | MBX_8;
+
 	mcp->flags = 0;
 	mcp->tov = MBX_TOV_SECONDS;
 	rval = qla2x00_mailbox_command(vha, mcp);
@@ -571,6 +573,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
 		ha->fw_memory_size = 0x1FFFF;		/* Defaults to 128KB. */
 	else
 		ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
+
 	if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
 		ha->mpi_version[0] = mcp->mb[10] & 0xff;
 		ha->mpi_version[1] = mcp->mb[11] >> 8;
@@ -580,6 +583,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
 		ha->phy_version[1] = mcp->mb[9] >> 8;
 		ha->phy_version[2] = mcp->mb[9] & 0xff;
 	}
+
 	if (IS_FWI2_CAPABLE(ha)) {
 		ha->fw_attributes_h = mcp->mb[15];
 		ha->fw_attributes_ext[0] = mcp->mb[16];
@@ -591,7 +595,14 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
591 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n", 595 "%s: Ext_FwAttributes Upper: 0x%x, Lower: 0x%x.\n",
592 __func__, mcp->mb[17], mcp->mb[16]); 596 __func__, mcp->mb[17], mcp->mb[16]);
593 } 597 }
598
594 if (IS_QLA27XX(ha)) { 599 if (IS_QLA27XX(ha)) {
600 ha->mpi_version[0] = mcp->mb[10] & 0xff;
601 ha->mpi_version[1] = mcp->mb[11] >> 8;
602 ha->mpi_version[2] = mcp->mb[11] & 0xff;
603 ha->pep_version[0] = mcp->mb[13] & 0xff;
604 ha->pep_version[1] = mcp->mb[14] >> 8;
605 ha->pep_version[2] = mcp->mb[14] & 0xff;
595 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18]; 606 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
596 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20]; 607 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
597 } 608 }
@@ -1135,20 +1146,22 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
 			vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
 		}
 		/* If FA-WWN supported */
-		if (mcp->mb[7] & BIT_14) {
-			vha->port_name[0] = MSB(mcp->mb[16]);
-			vha->port_name[1] = LSB(mcp->mb[16]);
-			vha->port_name[2] = MSB(mcp->mb[17]);
-			vha->port_name[3] = LSB(mcp->mb[17]);
-			vha->port_name[4] = MSB(mcp->mb[18]);
-			vha->port_name[5] = LSB(mcp->mb[18]);
-			vha->port_name[6] = MSB(mcp->mb[19]);
-			vha->port_name[7] = LSB(mcp->mb[19]);
-			fc_host_port_name(vha->host) =
-			    wwn_to_u64(vha->port_name);
-			ql_dbg(ql_dbg_mbx, vha, 0x10ca,
-			    "FA-WWN acquired %016llx\n",
-			    wwn_to_u64(vha->port_name));
+		if (IS_FAWWN_CAPABLE(vha->hw)) {
+			if (mcp->mb[7] & BIT_14) {
+				vha->port_name[0] = MSB(mcp->mb[16]);
+				vha->port_name[1] = LSB(mcp->mb[16]);
+				vha->port_name[2] = MSB(mcp->mb[17]);
+				vha->port_name[3] = LSB(mcp->mb[17]);
+				vha->port_name[4] = MSB(mcp->mb[18]);
+				vha->port_name[5] = LSB(mcp->mb[18]);
+				vha->port_name[6] = MSB(mcp->mb[19]);
+				vha->port_name[7] = LSB(mcp->mb[19]);
+				fc_host_port_name(vha->host) =
+				    wwn_to_u64(vha->port_name);
+				ql_dbg(ql_dbg_mbx, vha, 0x10ca,
+				    "FA-WWN acquired %016llx\n",
+				    wwn_to_u64(vha->port_name));
+			}
 		}
 	}
 
@@ -1239,7 +1252,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1239 "Entered %s.\n", __func__); 1252 "Entered %s.\n", __func__);
1240 1253
1241 if (IS_P3P_TYPE(ha) && ql2xdbwr) 1254 if (IS_P3P_TYPE(ha) && ql2xdbwr)
1242 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, 1255 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1243 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16))); 1256 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1244 1257
1245 if (ha->flags.npiv_supported) 1258 if (ha->flags.npiv_supported)
@@ -1865,7 +1878,6 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
 	uint32_t iop[2];
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req;
-	struct rsp_que *rsp;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
 	    "Entered %s.\n", __func__);
@@ -1874,7 +1886,6 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
 		req = ha->req_q_map[0];
 	else
 		req = vha->req;
-	rsp = req->rsp;
 
 	lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
 	if (lg == NULL) {
@@ -1888,11 +1899,11 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
 	lg->entry_count = 1;
 	lg->handle = MAKE_HANDLE(req->id, lg->handle);
 	lg->nport_handle = cpu_to_le16(loop_id);
-	lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI);
+	lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
 	if (opt & BIT_0)
-		lg->control_flags |= __constant_cpu_to_le16(LCF_COND_PLOGI);
+		lg->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
 	if (opt & BIT_1)
-		lg->control_flags |= __constant_cpu_to_le16(LCF_SKIP_PRLI);
+		lg->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
 	lg->port_id[0] = al_pa;
 	lg->port_id[1] = area;
 	lg->port_id[2] = domain;
@@ -1907,7 +1918,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1907 "Failed to complete IOCB -- error status (%x).\n", 1918 "Failed to complete IOCB -- error status (%x).\n",
1908 lg->entry_status); 1919 lg->entry_status);
1909 rval = QLA_FUNCTION_FAILED; 1920 rval = QLA_FUNCTION_FAILED;
1910 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1921 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
1911 iop[0] = le32_to_cpu(lg->io_parameter[0]); 1922 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1912 iop[1] = le32_to_cpu(lg->io_parameter[1]); 1923 iop[1] = le32_to_cpu(lg->io_parameter[1]);
1913 1924
@@ -1961,7 +1972,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
 			mb[10] |= BIT_0;	/* Class 2. */
 		if (lg->io_parameter[9] || lg->io_parameter[10])
 			mb[10] |= BIT_1;	/* Class 3. */
-		if (lg->io_parameter[0] & __constant_cpu_to_le32(BIT_7))
+		if (lg->io_parameter[0] & cpu_to_le32(BIT_7))
 			mb[10] |= BIT_7;	/* Confirmed Completion
 						 * Allowed
 						 */
@@ -2142,7 +2153,6 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
 	dma_addr_t lg_dma;
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req;
-	struct rsp_que *rsp;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
 	    "Entered %s.\n", __func__);
@@ -2159,13 +2169,12 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
 		req = ha->req_q_map[0];
 	else
 		req = vha->req;
-	rsp = req->rsp;
 	lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
 	lg->entry_count = 1;
 	lg->handle = MAKE_HANDLE(req->id, lg->handle);
 	lg->nport_handle = cpu_to_le16(loop_id);
 	lg->control_flags =
-	    __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
+	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO|
 	    LCF_FREE_NPORT);
 	lg->port_id[0] = al_pa;
 	lg->port_id[1] = area;
@@ -2181,7 +2190,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2181 "Failed to complete IOCB -- error status (%x).\n", 2190 "Failed to complete IOCB -- error status (%x).\n",
2182 lg->entry_status); 2191 lg->entry_status);
2183 rval = QLA_FUNCTION_FAILED; 2192 rval = QLA_FUNCTION_FAILED;
2184 } else if (lg->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 2193 } else if (lg->comp_status != cpu_to_le16(CS_COMPLETE)) {
2185 ql_dbg(ql_dbg_mbx, vha, 0x1071, 2194 ql_dbg(ql_dbg_mbx, vha, 0x1071,
2186 "Failed to complete IOCB -- completion status (%x) " 2195 "Failed to complete IOCB -- completion status (%x) "
2187 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status), 2196 "ioparam=%x/%x.\n", le16_to_cpu(lg->comp_status),
@@ -2673,7 +2682,7 @@ qla24xx_abort_command(srb_t *sp)
2673 "Failed to complete IOCB -- error status (%x).\n", 2682 "Failed to complete IOCB -- error status (%x).\n",
2674 abt->entry_status); 2683 abt->entry_status);
2675 rval = QLA_FUNCTION_FAILED; 2684 rval = QLA_FUNCTION_FAILED;
2676 } else if (abt->nport_handle != __constant_cpu_to_le16(0)) { 2685 } else if (abt->nport_handle != cpu_to_le16(0)) {
2677 ql_dbg(ql_dbg_mbx, vha, 0x1090, 2686 ql_dbg(ql_dbg_mbx, vha, 0x1090,
2678 "Failed to complete IOCB -- completion status (%x).\n", 2687 "Failed to complete IOCB -- completion status (%x).\n",
2679 le16_to_cpu(abt->nport_handle)); 2688 le16_to_cpu(abt->nport_handle));
@@ -2756,8 +2765,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2756 "Failed to complete IOCB -- error status (%x).\n", 2765 "Failed to complete IOCB -- error status (%x).\n",
2757 sts->entry_status); 2766 sts->entry_status);
2758 rval = QLA_FUNCTION_FAILED; 2767 rval = QLA_FUNCTION_FAILED;
2759 } else if (sts->comp_status != 2768 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
2760 __constant_cpu_to_le16(CS_COMPLETE)) {
2761 ql_dbg(ql_dbg_mbx, vha, 0x1096, 2769 ql_dbg(ql_dbg_mbx, vha, 0x1096,
2762 "Failed to complete IOCB -- completion status (%x).\n", 2770 "Failed to complete IOCB -- completion status (%x).\n",
2763 le16_to_cpu(sts->comp_status)); 2771 le16_to_cpu(sts->comp_status));
@@ -2853,7 +2861,8 @@ qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_QLA2031(vha->hw) && !IS_QLA27XX(vha->hw))
+	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
+	    !IS_QLA27XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
@@ -2891,7 +2900,8 @@ qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
 	mbx_cmd_t mc;
 	mbx_cmd_t *mcp = &mc;
 
-	if (!IS_QLA2031(vha->hw) && !IS_QLA27XX(vha->hw))
+	if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
+	    !IS_QLA27XX(vha->hw))
 		return QLA_FUNCTION_FAILED;
 
 	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
@@ -3483,7 +3493,7 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
3483 "Failed to complete IOCB -- error status (%x).\n", 3493 "Failed to complete IOCB -- error status (%x).\n",
3484 vpmod->comp_status); 3494 vpmod->comp_status);
3485 rval = QLA_FUNCTION_FAILED; 3495 rval = QLA_FUNCTION_FAILED;
3486 } else if (vpmod->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 3496 } else if (vpmod->comp_status != cpu_to_le16(CS_COMPLETE)) {
3487 ql_dbg(ql_dbg_mbx, vha, 0x10bf, 3497 ql_dbg(ql_dbg_mbx, vha, 0x10bf,
3488 "Failed to complete IOCB -- completion status (%x).\n", 3498 "Failed to complete IOCB -- completion status (%x).\n",
3489 le16_to_cpu(vpmod->comp_status)); 3499 le16_to_cpu(vpmod->comp_status));
@@ -3542,7 +3552,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
 	vce->entry_type = VP_CTRL_IOCB_TYPE;
 	vce->entry_count = 1;
 	vce->command = cpu_to_le16(cmd);
-	vce->vp_count = __constant_cpu_to_le16(1);
+	vce->vp_count = cpu_to_le16(1);
 
 	/* index map in firmware starts with 1; decrement index
 	 * this is ok as we never use index 0
@@ -3562,7 +3572,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
3562 "Failed to complete IOCB -- error status (%x).\n", 3572 "Failed to complete IOCB -- error status (%x).\n",
3563 vce->entry_status); 3573 vce->entry_status);
3564 rval = QLA_FUNCTION_FAILED; 3574 rval = QLA_FUNCTION_FAILED;
3565 } else if (vce->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 3575 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) {
3566 ql_dbg(ql_dbg_mbx, vha, 0x10c5, 3576 ql_dbg(ql_dbg_mbx, vha, 0x10c5,
3567 "Failed to complet IOCB -- completion status (%x).\n", 3577 "Failed to complet IOCB -- completion status (%x).\n",
3568 le16_to_cpu(vce->comp_status)); 3578 le16_to_cpu(vce->comp_status));
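
The qla_mbx.c hunks above widen the set of mailbox registers read back on ISP27xx and unpack MPI and PEP firmware versions from them. Each 16-bit mailbox word packs two bytes, extracted with a shift for the high byte and a mask for the low one. A tiny illustration with made-up register values (not real hardware output), mirroring the shape of the unpacking in qla2x00_get_fw_version():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical mailbox words: the low byte of mb[10] plus both
	 * bytes of mb[11] form a three-part version number.
	 */
	uint16_t mb10 = 0x0002, mb11 = 0x0a05;
	uint8_t v0 = mb10 & 0xff;	/* 2  */
	uint8_t v1 = mb11 >> 8;		/* 10 */
	uint8_t v2 = mb11 & 0xff;	/* 5  */

	printf("MPI version %u.%u.%u\n", v0, v1, v2);
	return 0;
}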
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index cc94192511cf..c5dd594f6c31 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -371,7 +371,6 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
 void
 qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
 {
-	int ret;
 	struct qla_hw_data *ha = vha->hw;
 	scsi_qla_host_t *vp;
 	unsigned long flags = 0;
@@ -392,7 +391,7 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
 			atomic_inc(&vp->vref_count);
 			spin_unlock_irqrestore(&ha->vport_slock, flags);
 
-			ret = qla2x00_do_dpc_vp(vp);
+			qla2x00_do_dpc_vp(vp);
 
 			spin_lock_irqsave(&ha->vport_slock, flags);
 			atomic_dec(&vp->vref_count);
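
Several hunks in this series (the tmp_pid, rsp, ret, ox_id, sec_mask, data_size, poll, mask and rval removals) follow one pattern: a variable is assigned but never read, which gcc flags with -Wunused-but-set-variable, so the variable goes away while the call whose result it captured stays. A sketch of the before/after shape:

static int do_work(void)
{
	return 0;
}

/* before:
 *	int ret;
 *	ret = do_work();	-- 'ret' set but never read
 * after: call the function for its side effects only.
 */
static void after(void)
{
	do_work();
}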
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 6d190b4b82a0..b5029e543b91 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -862,7 +862,7 @@ qlafx00_config_queues(struct scsi_qla_host *vha)
 	dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2);
 
 	req->length = ha->req_que_len;
-	req->ring = (void *)ha->iobase + ha->req_que_off;
+	req->ring = (void __force *)ha->iobase + ha->req_que_off;
 	req->dma = bar2_hdl + ha->req_que_off;
 	if ((!req->ring) || (req->length == 0)) {
 		ql_log_pci(ql_log_info, ha->pdev, 0x012f,
@@ -877,7 +877,7 @@ qlafx00_config_queues(struct scsi_qla_host *vha)
 	    ha->req_que_off, (u64)req->dma);
 
 	rsp->length = ha->rsp_que_len;
-	rsp->ring = (void *)ha->iobase + ha->rsp_que_off;
+	rsp->ring = (void __force *)ha->iobase + ha->rsp_que_off;
 	rsp->dma = bar2_hdl + ha->rsp_que_off;
 	if ((!rsp->ring) || (rsp->length == 0)) {
 		ql_log_pci(ql_log_info, ha->pdev, 0x0131,
@@ -1317,10 +1317,10 @@ int
 qlafx00_configure_devices(scsi_qla_host_t *vha)
 {
 	int rval;
-	unsigned long flags, save_flags;
+	unsigned long flags;
 	rval = QLA_SUCCESS;
 
-	save_flags = flags = vha->dpc_flags;
+	flags = vha->dpc_flags;
 
 	ql_dbg(ql_dbg_disc, vha, 0x2090,
 	    "Configure devices -- dpc flags =0x%lx\n", flags);
@@ -1425,7 +1425,7 @@ qlafx00_init_response_q_entries(struct rsp_que *rsp)
 	pkt = rsp->ring_ptr;
 	for (cnt = 0; cnt < rsp->length; cnt++) {
 		pkt->signature = RESPONSE_PROCESSED;
-		WRT_REG_DWORD((void __iomem *)&pkt->signature,
+		WRT_REG_DWORD((void __force __iomem *)&pkt->signature,
 		    RESPONSE_PROCESSED);
 		pkt++;
 	}
@@ -2279,7 +2279,6 @@ qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 	struct sts_entry_fx00 *sts;
 	__le16 comp_status;
 	__le16 scsi_status;
-	uint16_t ox_id;
 	__le16 lscsi_status;
 	int32_t resid;
 	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
@@ -2344,7 +2343,6 @@ qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
 
 	fcport = sp->fcport;
 
-	ox_id = 0;
 	sense_len = par_sense_len = rsp_info_len = resid_len =
 	    fw_resid_len = 0;
 	if (scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID))
@@ -2528,12 +2526,12 @@ check_scsi_status:
 		ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
 		    "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
 		    "tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x "
-		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x sense_len=0x%x, "
+		    "rsp_info=%p resid=0x%x fw_resid=0x%x sense_len=0x%x, "
 		    "par_sense_len=0x%x, rsp_info_len=0x%x\n",
 		    comp_status, scsi_status, res, vha->host_no,
 		    cp->device->id, cp->device->lun, fcport->tgt_id,
 		    lscsi_status, cp->cmnd, scsi_bufflen(cp),
-		    rsp_info_len, resid_len, fw_resid_len, sense_len,
+		    rsp_info, resid_len, fw_resid_len, sense_len,
 		    par_sense_len, rsp_info_len);
 
 	if (rsp->status_srb == NULL)
@@ -3009,7 +3007,7 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
 
 	/* No data transfer */
 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
-		lcmd_pkt->byte_count = __constant_cpu_to_le32(0);
+		lcmd_pkt->byte_count = cpu_to_le32(0);
 		return;
 	}
 
@@ -3071,7 +3069,7 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
 int
 qlafx00_start_scsi(srb_t *sp)
 {
-	int ret, nseg;
+	int nseg;
 	unsigned long flags;
 	uint32_t index;
 	uint32_t handle;
@@ -3088,8 +3086,6 @@ qlafx00_start_scsi(srb_t *sp)
 	struct scsi_lun llun;
 
 	/* Setup device pointers. */
-	ret = 0;
-
 	rsp = ha->rsp_q_map[0];
 	req = vha->req;
 
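
The qla_mr.c changes add __force to casts that step across sparse's __iomem address space on purpose. These annotations are checker-only: in a normal compile they expand to nothing, so code generation is unchanged. A self-contained sketch of how such annotations are typically defined (the kernel's real definitions live in its compiler headers and may differ in detail):

#ifdef __CHECKER__
# define __iomem	__attribute__((noderef, address_space(2)))
# define __force	__attribute__((force))
#else
# define __iomem
# define __force
#endif

/* Deliberate escape from the __iomem address space: without __force,
 * sparse would warn about the cast.
 */
static void *example(void __iomem *regs)
{
	return (void __force *)regs;
}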
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 1620b0ec977b..eb0cc5475c45 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -347,32 +347,31 @@ char *qdev_state(uint32_t dev_state)
 }
 
 /*
- * In: 'off' is offset from CRB space in 128M pci map
- * Out: 'off' is 2M pci map addr
+ * In: 'off_in' is offset from CRB space in 128M pci map
+ * Out: 'off_out' is 2M pci map addr
  * side effect: lock crb window
  */
 static void
-qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
+qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong off_in,
+	void __iomem **off_out)
 {
 	u32 win_read;
 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 
-	ha->crb_win = CRB_HI(*off);
-	writel(ha->crb_win,
-	    (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+	ha->crb_win = CRB_HI(off_in);
+	writel(ha->crb_win, CRB_WINDOW_2M + ha->nx_pcibase);
 
 	/* Read back value to make sure write has gone through before trying
 	 * to use it.
 	 */
-	win_read = RD_REG_DWORD((void __iomem *)
-	    (CRB_WINDOW_2M + ha->nx_pcibase));
+	win_read = RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase);
 	if (win_read != ha->crb_win) {
 		ql_dbg(ql_dbg_p3p, vha, 0xb000,
 		    "%s: Written crbwin (0x%x) "
 		    "!= Read crbwin (0x%x), off=0x%lx.\n",
-		    __func__, ha->crb_win, win_read, *off);
+		    __func__, ha->crb_win, win_read, off_in);
 	}
-	*off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
+	*off_out = (off_in & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
 }
 
 static inline unsigned long
@@ -417,29 +416,30 @@ qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
 }
 
 static int
-qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off)
+qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in,
+	void __iomem **off_out)
 {
 	struct crb_128M_2M_sub_block_map *m;
 
-	if (*off >= QLA82XX_CRB_MAX)
+	if (off_in >= QLA82XX_CRB_MAX)
 		return -1;
 
-	if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
-		*off = (*off - QLA82XX_PCI_CAMQM) +
+	if (off_in >= QLA82XX_PCI_CAMQM && off_in < QLA82XX_PCI_CAMQM_2M_END) {
+		*off_out = (off_in - QLA82XX_PCI_CAMQM) +
 		    QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
 		return 0;
 	}
 
-	if (*off < QLA82XX_PCI_CRBSPACE)
+	if (off_in < QLA82XX_PCI_CRBSPACE)
 		return -1;
 
-	*off -= QLA82XX_PCI_CRBSPACE;
+	*off_out = (void __iomem *)(off_in - QLA82XX_PCI_CRBSPACE);
 
 	/* Try direct map */
-	m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];
+	m = &crb_128M_2M_map[CRB_BLK(off_in)].sub_block[CRB_SUBBLK(off_in)];
 
-	if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
-		*off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
+	if (m->valid && (m->start_128M <= off_in) && (m->end_128M > off_in)) {
+		*off_out = off_in + m->start_2M - m->start_128M + ha->nx_pcibase;
 		return 0;
 	}
 	/* Not in direct map, use crb window */
@@ -465,51 +465,61 @@ static int qla82xx_crb_win_lock(struct qla_hw_data *ha)
 }
 
 int
-qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data)
+qla82xx_wr_32(struct qla_hw_data *ha, ulong off_in, u32 data)
 {
+	void __iomem *off;
 	unsigned long flags = 0;
 	int rv;
 
-	rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
+	rv = qla82xx_pci_get_crb_addr_2M(ha, off_in, &off);
 
 	BUG_ON(rv == -1);
 
 	if (rv == 1) {
+#ifndef __CHECKER__
 		write_lock_irqsave(&ha->hw_lock, flags);
+#endif
 		qla82xx_crb_win_lock(ha);
-		qla82xx_pci_set_crbwindow_2M(ha, &off);
+		qla82xx_pci_set_crbwindow_2M(ha, off_in, &off);
 	}
 
 	writel(data, (void __iomem *)off);
 
 	if (rv == 1) {
 		qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
+#ifndef __CHECKER__
 		write_unlock_irqrestore(&ha->hw_lock, flags);
+#endif
 	}
 	return 0;
 }
 
 int
-qla82xx_rd_32(struct qla_hw_data *ha, ulong off)
+qla82xx_rd_32(struct qla_hw_data *ha, ulong off_in)
 {
+	void __iomem *off;
 	unsigned long flags = 0;
 	int rv;
 	u32 data;
 
-	rv = qla82xx_pci_get_crb_addr_2M(ha, &off);
+	rv = qla82xx_pci_get_crb_addr_2M(ha, off_in, &off);
 
 	BUG_ON(rv == -1);
 
 	if (rv == 1) {
+#ifndef __CHECKER__
 		write_lock_irqsave(&ha->hw_lock, flags);
+#endif
 		qla82xx_crb_win_lock(ha);
-		qla82xx_pci_set_crbwindow_2M(ha, &off);
+		qla82xx_pci_set_crbwindow_2M(ha, off_in, &off);
 	}
-	data = RD_REG_DWORD((void __iomem *)off);
+	data = RD_REG_DWORD(off);
 
 	if (rv == 1) {
 		qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
+#ifndef __CHECKER__
 		write_unlock_irqrestore(&ha->hw_lock, flags);
+#endif
 	}
 	return data;
 }
@@ -547,9 +557,6 @@ void qla82xx_idc_unlock(struct qla_hw_data *ha)
 	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
 }
 
-/* PCI Windowing for DDR regions. */
-#define QLA82XX_ADDR_IN_RANGE(addr, low, high) \
-	(((addr) <= (high)) && ((addr) >= (low)))
 /*
  * check memory access boundary.
  * used by test agent. support ddr access only for now
@@ -558,9 +565,9 @@ static unsigned long
 qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
 	unsigned long long addr, int size)
 {
-	if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
+	if (!addr_in_range(addr, QLA82XX_ADDR_DDR_NET,
 	    QLA82XX_ADDR_DDR_NET_MAX) ||
-	    !QLA82XX_ADDR_IN_RANGE(addr + size - 1, QLA82XX_ADDR_DDR_NET,
+	    !addr_in_range(addr + size - 1, QLA82XX_ADDR_DDR_NET,
 	    QLA82XX_ADDR_DDR_NET_MAX) ||
 	    ((size != 1) && (size != 2) && (size != 4) && (size != 8)))
 		return 0;
@@ -577,7 +584,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
 	u32 win_read;
 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 
-	if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
+	if (addr_in_range(addr, QLA82XX_ADDR_DDR_NET,
 	    QLA82XX_ADDR_DDR_NET_MAX)) {
 		/* DDR network side */
 		window = MN_WIN(addr);
@@ -592,7 +599,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
 			    __func__, window, win_read);
 		}
 		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
-	} else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
+	} else if (addr_in_range(addr, QLA82XX_ADDR_OCM0,
 	    QLA82XX_ADDR_OCM0_MAX)) {
 		unsigned int temp1;
 		if ((addr & 0x00ff800) == 0xff800) {
@@ -615,7 +622,7 @@ qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
 		}
 		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;
 
-	} else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET,
+	} else if (addr_in_range(addr, QLA82XX_ADDR_QDR_NET,
 	    QLA82XX_P3_ADDR_QDR_NET_MAX)) {
 		/* QDR network side */
 		window = MS_WIN(addr);
@@ -656,16 +663,16 @@ static int qla82xx_pci_is_same_window(struct qla_hw_data *ha,
 		qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;
 
 	/* DDR network side */
-	if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET,
+	if (addr_in_range(addr, QLA82XX_ADDR_DDR_NET,
 	    QLA82XX_ADDR_DDR_NET_MAX))
 		BUG();
-	else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0,
+	else if (addr_in_range(addr, QLA82XX_ADDR_OCM0,
 	    QLA82XX_ADDR_OCM0_MAX))
 		return 1;
-	else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1,
+	else if (addr_in_range(addr, QLA82XX_ADDR_OCM1,
 	    QLA82XX_ADDR_OCM1_MAX))
 		return 1;
-	else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
+	else if (addr_in_range(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) {
 		/* QDR network side */
 		window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f;
 		if (ha->qdr_sn_window == window)
@@ -922,20 +929,18 @@ qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
 {
 	uint32_t off_value, rval = 0;
 
-	WRT_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase),
-	    (off & 0xFFFF0000));
+	WRT_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase, off & 0xFFFF0000);
 
 	/* Read back value to make sure write has gone through */
-	RD_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+	RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase);
 	off_value = (off & 0x0000FFFF);
 
 	if (flag)
-		WRT_REG_DWORD((void __iomem *)
-		    (off_value + CRB_INDIRECT_2M + ha->nx_pcibase),
-		    data);
+		WRT_REG_DWORD(off_value + CRB_INDIRECT_2M + ha->nx_pcibase,
+		    data);
 	else
-		rval = RD_REG_DWORD((void __iomem *)
-		    (off_value + CRB_INDIRECT_2M + ha->nx_pcibase));
+		rval = RD_REG_DWORD(off_value + CRB_INDIRECT_2M +
+		    ha->nx_pcibase);
 
 	return rval;
 }
@@ -1663,8 +1668,7 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
 	}
 
 	len = pci_resource_len(ha->pdev, 0);
-	ha->nx_pcibase =
-	    (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len);
+	ha->nx_pcibase = ioremap(pci_resource_start(ha->pdev, 0), len);
 	if (!ha->nx_pcibase) {
 		ql_log_pci(ql_log_fatal, ha->pdev, 0x000e,
 		    "Cannot remap pcibase MMIO, aborting.\n");
@@ -1673,17 +1677,13 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
 
 	/* Mapping of IO base pointer */
 	if (IS_QLA8044(ha)) {
-		ha->iobase =
-		    (device_reg_t *)((uint8_t *)ha->nx_pcibase);
+		ha->iobase = ha->nx_pcibase;
 	} else if (IS_QLA82XX(ha)) {
-		ha->iobase =
-		    (device_reg_t *)((uint8_t *)ha->nx_pcibase +
-			0xbc000 + (ha->pdev->devfn << 11));
+		ha->iobase = ha->nx_pcibase + 0xbc000 + (ha->pdev->devfn << 11);
 	}
 
 	if (!ql2xdbwr) {
-		ha->nxdb_wr_ptr =
-		    (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) +
+		ha->nxdb_wr_ptr = ioremap((pci_resource_start(ha->pdev, 4) +
 		    (ha->pdev->devfn << 12)), 4);
 		if (!ha->nxdb_wr_ptr) {
 			ql_log_pci(ql_log_fatal, ha->pdev, 0x000f,
@@ -1694,10 +1694,10 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
 		/* Mapping of IO base pointer,
 		 * door bell read and write pointer
 		 */
-		ha->nxdb_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
+		ha->nxdb_rd_ptr = ha->nx_pcibase + (512 * 1024) +
 		    (ha->pdev->devfn * 8);
 	} else {
-		ha->nxdb_wr_ptr = (ha->pdev->devfn == 6 ?
+		ha->nxdb_wr_ptr = (void __iomem *)(ha->pdev->devfn == 6 ?
 		    QLA82XX_CAMRAM_DB1 :
 		    QLA82XX_CAMRAM_DB2);
 	}
@@ -1707,12 +1707,12 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
 	ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006,
 	    "nx_pci_base=%p iobase=%p "
 	    "max_req_queues=%d msix_count=%d.\n",
-	    (void *)ha->nx_pcibase, ha->iobase,
+	    ha->nx_pcibase, ha->iobase,
 	    ha->max_req_queues, ha->msix_count);
 	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010,
 	    "nx_pci_base=%p iobase=%p "
 	    "max_req_queues=%d msix_count=%d.\n",
-	    (void *)ha->nx_pcibase, ha->iobase,
+	    ha->nx_pcibase, ha->iobase,
 	    ha->max_req_queues, ha->msix_count);
 	return 0;
 
@@ -1740,8 +1740,8 @@ qla82xx_pci_config(scsi_qla_host_t *vha)
 	ret = pci_set_mwi(ha->pdev);
 	ha->chip_revision = ha->pdev->revision;
 	ql_dbg(ql_dbg_init, vha, 0x0043,
-	    "Chip revision:%d.\n",
-	    ha->chip_revision);
+	    "Chip revision:%d; pci_set_mwi() returned %d.\n",
+	    ha->chip_revision, ret);
 	return 0;
 }
 
@@ -1768,8 +1768,8 @@ void qla82xx_config_rings(struct scsi_qla_host *vha)
 
 	/* Setup ring parameters in initialization control block. */
 	icb = (struct init_cb_81xx *)ha->init_cb;
-	icb->request_q_outpointer = __constant_cpu_to_le16(0);
-	icb->response_q_inpointer = __constant_cpu_to_le16(0);
+	icb->request_q_outpointer = cpu_to_le16(0);
+	icb->response_q_inpointer = cpu_to_le16(0);
 	icb->request_q_length = cpu_to_le16(req->length);
 	icb->response_q_length = cpu_to_le16(rsp->length);
 	icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
@@ -1777,9 +1777,9 @@ void qla82xx_config_rings(struct scsi_qla_host *vha)
 	icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
 	icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
 
-	WRT_REG_DWORD((unsigned long __iomem *)&reg->req_q_out[0], 0);
-	WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_in[0], 0);
-	WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_out[0], 0);
+	WRT_REG_DWORD(&reg->req_q_out[0], 0);
+	WRT_REG_DWORD(&reg->rsp_q_in[0], 0);
+	WRT_REG_DWORD(&reg->rsp_q_out[0], 0);
 }
 
 static int
@@ -2298,7 +2298,7 @@ void qla82xx_init_flags(struct qla_hw_data *ha)
 	ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
 }
 
-inline void
+static inline void
 qla82xx_set_idc_version(scsi_qla_host_t *vha)
 {
 	int idc_ver;
@@ -2481,14 +2481,12 @@ try_blob_fw:
 		ql_log(ql_log_info, vha, 0x00a5,
 		    "Firmware loaded successfully from binary blob.\n");
 		return QLA_SUCCESS;
-	} else {
-		ql_log(ql_log_fatal, vha, 0x00a6,
-		    "Firmware load failed for binary blob.\n");
-		blob->fw = NULL;
-		blob = NULL;
-		goto fw_load_failed;
 	}
-	return QLA_SUCCESS;
+
+	ql_log(ql_log_fatal, vha, 0x00a6,
+	    "Firmware load failed for binary blob.\n");
+	blob->fw = NULL;
+	blob = NULL;
 
 fw_load_failed:
 	return QLA_FUNCTION_FAILED;
@@ -2549,7 +2547,7 @@ qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
2549 "Do ROM fast read failed.\n"); 2547 "Do ROM fast read failed.\n");
2550 goto done_read; 2548 goto done_read;
2551 } 2549 }
2552 dwptr[i] = __constant_cpu_to_le32(val); 2550 dwptr[i] = cpu_to_le32(val);
2553 } 2551 }
2554done_read: 2552done_read:
2555 return dwptr; 2553 return dwptr;
@@ -2671,7 +2669,7 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
 {
 	int ret;
 	uint32_t liter;
-	uint32_t sec_mask, rest_addr;
+	uint32_t rest_addr;
 	dma_addr_t optrom_dma;
 	void *optrom = NULL;
 	int page_mode = 0;
@@ -2693,7 +2691,6 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
 	}
 
 	rest_addr = ha->fdt_block_size - 1;
-	sec_mask = ~rest_addr;
 
 	ret = qla82xx_unprotect_flash(ha);
 	if (ret) {
@@ -2789,7 +2786,6 @@ qla82xx_start_iocbs(scsi_qla_host_t *vha)
 {
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req = ha->req_q_map[0];
-	struct device_reg_82xx __iomem *reg;
 	uint32_t dbval;
 
 	/* Adjust ring index. */
@@ -2800,18 +2796,16 @@ qla82xx_start_iocbs(scsi_qla_host_t *vha)
 	} else
 		req->ring_ptr++;
 
-	reg = &ha->iobase->isp82;
 	dbval = 0x04 | (ha->portnum << 5);
 
 	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
 	if (ql2xdbwr)
-		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
+		qla82xx_wr_32(ha, (unsigned long)ha->nxdb_wr_ptr, dbval);
 	else {
-		WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
+		WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
 		wmb();
-		while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
-			WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr,
-			    dbval);
+		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+			WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
 			wmb();
 		}
 	}
@@ -3842,8 +3836,7 @@ qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
 	loop_cnt = ocm_hdr->op_count;
 
 	for (i = 0; i < loop_cnt; i++) {
-		r_value = RD_REG_DWORD((void __iomem *)
-		    (r_addr + ha->nx_pcibase));
+		r_value = RD_REG_DWORD(r_addr + ha->nx_pcibase);
 		*data_ptr++ = cpu_to_le32(r_value);
 		r_addr += r_stride;
 	}
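
Two things happen in the qla_nx.c rework above: the CRB accessors stop punning an integer offset and an ioremapped address through one ulong (taking ulong off_in and returning void __iomem **off_out instead), and the conditionally taken hw_lock is wrapped in #ifndef __CHECKER__. The lock is acquired and released under the same runtime condition (rv == 1), which is correct but not statically provable, so hiding the calls keeps sparse's context tracking from reporting an imbalance. A runnable sketch of that conditional-locking shape, with a pthread mutex standing in for the kernel rwlock:

#include <pthread.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;

/* The lock is taken and dropped under the same condition, so pairing
 * is guaranteed at runtime even though a static checker that walks
 * each branch independently cannot see it.
 */
static int write_reg(int needs_window, unsigned int data)
{
	if (needs_window)
		pthread_mutex_lock(&hw_lock);

	/* ... select the register window and write 'data' here ... */
	(void)data;

	if (needs_window)
		pthread_mutex_unlock(&hw_lock);
	return 0;
}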
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 000c57e4d033..007192d7bad8 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -462,12 +462,11 @@ qla8044_flash_lock(scsi_qla_host_t *vha)
 static void
 qla8044_flash_unlock(scsi_qla_host_t *vha)
 {
-	int ret_val;
 	struct qla_hw_data *ha = vha->hw;
 
 	/* Reading FLASH_UNLOCK register unlocks the Flash */
 	qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, 0xFF);
-	ret_val = qla8044_rd_reg(ha, QLA8044_FLASH_UNLOCK);
+	qla8044_rd_reg(ha, QLA8044_FLASH_UNLOCK);
 }
 
 
@@ -561,7 +560,7 @@ qla8044_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
 	return buf;
 }
 
-inline int
+static inline int
 qla8044_need_reset(struct scsi_qla_host *vha)
 {
 	uint32_t drv_state, drv_active;
@@ -1130,9 +1129,9 @@ qla8044_ms_mem_write_128b(struct scsi_qla_host *vha,
 	}
 
 	for (i = 0; i < count; i++, addr += 16) {
-		if (!((QLA8044_ADDR_IN_RANGE(addr, QLA8044_ADDR_QDR_NET,
+		if (!((addr_in_range(addr, QLA8044_ADDR_QDR_NET,
 		    QLA8044_ADDR_QDR_NET_MAX)) ||
-		    (QLA8044_ADDR_IN_RANGE(addr, QLA8044_ADDR_DDR_NET,
+		    (addr_in_range(addr, QLA8044_ADDR_DDR_NET,
 		    QLA8044_ADDR_DDR_NET_MAX)))) {
 			ret_val = QLA_FUNCTION_FAILED;
 			goto exit_ms_mem_write_unlock;
@@ -1605,7 +1604,7 @@ qla8044_set_idc_dontreset(struct scsi_qla_host *vha)
 	qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
 }
 
-inline void
+static inline void
 qla8044_set_rst_ready(struct scsi_qla_host *vha)
 {
 	uint32_t drv_state;
@@ -2992,7 +2991,7 @@ qla8044_minidump_process_rddfe(struct scsi_qla_host *vha,
 	uint32_t addr1, addr2, value, data, temp, wrVal;
 	uint8_t stride, stride2;
 	uint16_t count;
-	uint32_t poll, mask, data_size, modify_mask;
+	uint32_t poll, mask, modify_mask;
 	uint32_t wait_count = 0;
 
 	uint32_t *data_ptr = *d_ptr;
@@ -3009,7 +3008,6 @@ qla8044_minidump_process_rddfe(struct scsi_qla_host *vha,
 	poll = rddfe->poll;
 	mask = rddfe->mask;
 	modify_mask = rddfe->modify_mask;
-	data_size = rddfe->data_size;
 
 	addr2 = addr1 + stride;
 
@@ -3091,7 +3089,7 @@ qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha,
 	uint8_t stride1, stride2;
 	uint32_t addr3, addr4, addr5, addr6, addr7;
 	uint16_t count, loop_cnt;
-	uint32_t poll, mask;
+	uint32_t mask;
 	uint32_t *data_ptr = *d_ptr;
 
 	struct qla8044_minidump_entry_rdmdio *rdmdio;
@@ -3105,7 +3103,6 @@ qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha,
 	stride2 = rdmdio->stride_2;
 	count = rdmdio->count;
 
-	poll = rdmdio->poll;
 	mask = rdmdio->mask;
 	value2 = rdmdio->value_2;
 
@@ -3164,7 +3161,7 @@ error:
 static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha,
 	struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
 {
-	uint32_t addr1, addr2, value1, value2, poll, mask, r_value;
+	uint32_t addr1, addr2, value1, value2, poll, r_value;
 	uint32_t wait_count = 0;
 	struct qla8044_minidump_entry_pollwr *pollwr_hdr;
 
@@ -3175,7 +3172,6 @@ static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha,
 	value2 = pollwr_hdr->value_2;
 
 	poll = pollwr_hdr->poll;
-	mask = pollwr_hdr->mask;
 
 	while (wait_count < poll) {
 		qla8044_rd_reg_indirect(vha, addr1, &r_value);
diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h
index ada36057d7cd..02fe3c4cdf55 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.h
+++ b/drivers/scsi/qla2xxx/qla_nx2.h
@@ -58,8 +58,10 @@
 #define QLA8044_PCI_QDR_NET_MAX		((unsigned long)0x043fffff)
 
 /* PCI Windowing for DDR regions. */
-#define QLA8044_ADDR_IN_RANGE(addr, low, high) \
-	(((addr) <= (high)) && ((addr) >= (low)))
+static inline bool addr_in_range(u64 addr, u64 low, u64 high)
+{
+	return addr <= high && addr >= low;
+}
 
 /* Indirectly Mapped Registers */
 #define QLA8044_FLASH_SPI_STATUS	0x2808E010
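
Replacing the QLA8044_ADDR_IN_RANGE macro with the addr_in_range() inline (shared with the QLA82XX call sites converted above) buys argument type checking and single evaluation. A short standalone contrast of the two forms; the macro is the hazard being removed:

#include <stdbool.h>
#include <stdint.h>

/* Macro form: 'addr' appears twice in the expansion, so a
 * side-effecting argument such as *p++ would be evaluated twice.
 */
#define ADDR_IN_RANGE_MACRO(addr, low, high) \
	(((addr) <= (high)) && ((addr) >= (low)))

/* Inline form: each argument evaluated exactly once, with real
 * 64-bit type checking at the call site.
 */
static inline bool addr_in_range_sketch(uint64_t addr, uint64_t low,
					uint64_t high)
{
	return addr <= high && addr >= low;
}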
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8a5cac8448c7..c2dd17b1d26f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -656,7 +656,7 @@ qla2x00_sp_compl(void *data, void *ptr, int res)
656 "SP reference-count to ZERO -- sp=%p cmd=%p.\n", 656 "SP reference-count to ZERO -- sp=%p cmd=%p.\n",
657 sp, GET_CMD_SP(sp)); 657 sp, GET_CMD_SP(sp));
658 if (ql2xextended_error_logging & ql_dbg_io) 658 if (ql2xextended_error_logging & ql_dbg_io)
659 BUG(); 659 WARN_ON(atomic_read(&sp->ref_count) == 0);
660 return; 660 return;
661 } 661 }
662 if (!atomic_dec_and_test(&sp->ref_count)) 662 if (!atomic_dec_and_test(&sp->ref_count))
@@ -958,8 +958,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
 	}
 
 	ql_dbg(ql_dbg_taskm, vha, 0x8002,
-	    "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p\n",
-	    vha->host_no, id, lun, sp, cmd);
+	    "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
+	    vha->host_no, id, lun, sp, cmd, sp->handle);
 
 	/* Get a reference to the sp and drop the lock.*/
 	sp_get(sp);
@@ -967,14 +967,9 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	rval = ha->isp_ops->abort_command(sp);
 	if (rval) {
-		if (rval == QLA_FUNCTION_PARAMETER_ERROR) {
-			/*
-			 * Decrement the ref_count since we can't find the
-			 * command
-			 */
-			atomic_dec(&sp->ref_count);
+		if (rval == QLA_FUNCTION_PARAMETER_ERROR)
 			ret = SUCCESS;
-		} else
+		else
 			ret = FAILED;
 
 		ql_dbg(ql_dbg_taskm, vha, 0x8003,
@@ -986,12 +981,6 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
 	}
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
-	/*
-	 * Clear the slot in the oustanding_cmds array if we can't find the
-	 * command to reclaim the resources.
-	 */
-	if (rval == QLA_FUNCTION_PARAMETER_ERROR)
-		vha->req->outstanding_cmds[sp->handle] = NULL;
 	sp->done(ha, sp, 0);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
@@ -2219,6 +2208,13 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
 		ha->device_type |= DT_IIDMA;
 		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
 		break;
+	case PCI_DEVICE_ID_QLOGIC_ISP2261:
+		ha->device_type |= DT_ISP2261;
+		ha->device_type |= DT_ZIO_SUPPORTED;
+		ha->device_type |= DT_FWI2;
+		ha->device_type |= DT_IIDMA;
+		ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+		break;
 	}
 
 	if (IS_QLA82XX(ha))
@@ -2296,7 +2292,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2296 pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 || 2292 pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
2297 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 || 2293 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
2298 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 || 2294 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
2299 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271) { 2295 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 ||
2296 pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261) {
2300 bars = pci_select_bars(pdev, IORESOURCE_MEM); 2297 bars = pci_select_bars(pdev, IORESOURCE_MEM);
2301 mem_only = 1; 2298 mem_only = 1;
2302 ql_dbg_pci(ql_dbg_init, pdev, 0x0007, 2299 ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2974,7 +2971,6 @@ qla2x00_shutdown(struct pci_dev *pdev)
2974static void 2971static void
2975qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha) 2972qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
2976{ 2973{
2977 struct Scsi_Host *scsi_host;
2978 scsi_qla_host_t *vha; 2974 scsi_qla_host_t *vha;
2979 unsigned long flags; 2975 unsigned long flags;
2980 2976
@@ -2985,7 +2981,7 @@ qla2x00_delete_all_vps(struct qla_hw_data *ha, scsi_qla_host_t *base_vha)
2985 BUG_ON(base_vha->list.next == &ha->vp_list); 2981 BUG_ON(base_vha->list.next == &ha->vp_list);
2986 /* This assumes first entry in ha->vp_list is always base vha */ 2982 /* This assumes first entry in ha->vp_list is always base vha */
2987 vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list); 2983 vha = list_first_entry(&base_vha->list, scsi_qla_host_t, list);
2988 scsi_host = scsi_host_get(vha->host); 2984 scsi_host_get(vha->host);
2989 2985
2990 spin_unlock_irqrestore(&ha->vport_slock, flags); 2986 spin_unlock_irqrestore(&ha->vport_slock, flags);
2991 mutex_unlock(&ha->vport_lock); 2987 mutex_unlock(&ha->vport_lock);
@@ -3275,9 +3271,10 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
3275 if (!do_login) 3271 if (!do_login)
3276 return; 3272 return;
3277 3273
3274 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
3275
3278 if (fcport->login_retry == 0) { 3276 if (fcport->login_retry == 0) {
3279 fcport->login_retry = vha->hw->login_retry_count; 3277 fcport->login_retry = vha->hw->login_retry_count;
3280 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
3281 3278
3282 ql_dbg(ql_dbg_disc, vha, 0x2067, 3279 ql_dbg(ql_dbg_disc, vha, 0x2067,
3283 "Port login retry %8phN, id = 0x%04x retry cnt=%d.\n", 3280 "Port login retry %8phN, id = 0x%04x retry cnt=%d.\n",
@@ -4801,7 +4798,6 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
4801static int 4798static int
4802qla2x00_do_dpc(void *data) 4799qla2x00_do_dpc(void *data)
4803{ 4800{
4804 int rval;
4805 scsi_qla_host_t *base_vha; 4801 scsi_qla_host_t *base_vha;
4806 struct qla_hw_data *ha; 4802 struct qla_hw_data *ha;
4807 4803
@@ -5033,7 +5029,7 @@ loop_resync_check:
5033 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, 5029 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE,
5034 &base_vha->dpc_flags))) { 5030 &base_vha->dpc_flags))) {
5035 5031
5036 rval = qla2x00_loop_resync(base_vha); 5032 qla2x00_loop_resync(base_vha);
5037 5033
5038 clear_bit(LOOP_RESYNC_ACTIVE, 5034 clear_bit(LOOP_RESYNC_ACTIVE,
5039 &base_vha->dpc_flags); 5035 &base_vha->dpc_flags);
@@ -5717,6 +5713,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
5717 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) }, 5713 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
5718 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) }, 5714 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
5719 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) }, 5715 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },
5716 { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) },
5720 { 0 }, 5717 { 0 },
5721}; 5718};
5722MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); 5719MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
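
The qla_os.c hunks above show the three places a new PCI ID lands in this driver: the device-type switch, the memory-only BAR selection in probe, and the ID table that drives module autoloading. A minimal sketch of that last piece under generic-driver assumptions (only PCI_VENDOR_ID_QLOGIC and device ID 0x2261 are taken from the diff; everything else is illustrative):

    #include <linux/module.h>
    #include <linux/pci.h>

    /* The table is exported via MODULE_DEVICE_TABLE so udev/modprobe can
     * autoload the module when a matching device is enumerated. */
    static const struct pci_device_id example_pci_tbl[] = {
    	{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x2261) },	/* ISP2261 */
    	{ 0 },						/* terminator */
    };
    MODULE_DEVICE_TABLE(pci, example_pci_tbl);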
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 2feb5f38edcd..3272ed5bbcc7 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -316,7 +316,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
 
 	wprot_old = cpu_to_le16(qla2x00_get_nvram_word(ha, ha->nvram_base));
 	stat = qla2x00_write_nvram_word_tmo(ha, ha->nvram_base,
-	    __constant_cpu_to_le16(0x1234), 100000);
+	    cpu_to_le16(0x1234), 100000);
 	wprot = cpu_to_le16(qla2x00_get_nvram_word(ha, ha->nvram_base));
 	if (stat != QLA_SUCCESS || wprot != 0x1234) {
 		/* Write enable. */
@@ -691,9 +691,9 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
 	region = (struct qla_flt_region *)&flt[1];
 	ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
 	    flt_addr << 2, OPTROM_BURST_SIZE);
-	if (*wptr == __constant_cpu_to_le16(0xffff))
+	if (*wptr == cpu_to_le16(0xffff))
 		goto no_flash_data;
-	if (flt->version != __constant_cpu_to_le16(1)) {
+	if (flt->version != cpu_to_le16(1)) {
 		ql_log(ql_log_warn, vha, 0x0047,
 		    "Unsupported FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
 		    le16_to_cpu(flt->version), le16_to_cpu(flt->length),
@@ -892,7 +892,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
 	fdt = (struct qla_fdt_layout *)req->ring;
 	ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
 	    ha->flt_region_fdt << 2, OPTROM_BURST_SIZE);
-	if (*wptr == __constant_cpu_to_le16(0xffff))
+	if (*wptr == cpu_to_le16(0xffff))
 		goto no_flash_data;
 	if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
 	    fdt->sig[3] != 'D')
@@ -991,7 +991,7 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha)
 	ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring,
 	    QLA82XX_IDC_PARAM_ADDR , 8);
 
-	if (*wptr == __constant_cpu_to_le32(0xffffffff)) {
+	if (*wptr == cpu_to_le32(0xffffffff)) {
 		ha->fcoe_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT;
 		ha->fcoe_reset_timeout = QLA82XX_ROM_DRV_RESET_ACK_TIMEOUT;
 	} else {
@@ -1051,9 +1051,9 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
 
 	ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
 	    ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
-	if (hdr.version == __constant_cpu_to_le16(0xffff))
+	if (hdr.version == cpu_to_le16(0xffff))
 		return;
-	if (hdr.version != __constant_cpu_to_le16(1)) {
+	if (hdr.version != cpu_to_le16(1)) {
 		ql_dbg(ql_dbg_user, vha, 0x7090,
 		    "Unsupported NPIV-Config "
 		    "detected: version=0x%x entries=0x%x checksum=0x%x.\n",
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 58651ecbd88c..75514a15bea0 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1141,7 +1141,7 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
 	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
 	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
 		nack->u.isp24.flags = ntfy->u.isp24.flags &
-		    __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+		    cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
 	}
 	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
 	nack->u.isp24.status = ntfy->u.isp24.status;
@@ -1199,7 +1199,7 @@ static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
 	resp->sof_type = abts->sof_type;
 	resp->exchange_address = abts->exchange_address;
 	resp->fcp_hdr_le = abts->fcp_hdr_le;
-	f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
+	f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
 	    F_CTL_LAST_SEQ | F_CTL_END_SEQ |
 	    F_CTL_SEQ_INITIATIVE);
 	p = (uint8_t *)&f_ctl;
@@ -1274,15 +1274,14 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
 	ctio->entry_count = 1;
 	ctio->nport_handle = entry->nport_handle;
 	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
-	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
 	ctio->vp_index = vha->vp_idx;
 	ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
 	ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
 	ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
 	ctio->exchange_addr = entry->exchange_addr_to_abort;
-	ctio->u.status1.flags =
-	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
-	    CTIO7_FLAGS_TERMINATE);
+	ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+	    CTIO7_FLAGS_TERMINATE);
 	ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
 
 	/* Memory Barrier */
@@ -1522,20 +1521,19 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
 	ctio->entry_count = 1;
 	ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
 	ctio->nport_handle = mcmd->sess->loop_id;
-	ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+	ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
 	ctio->vp_index = ha->vp_idx;
 	ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
 	ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
 	ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
 	ctio->exchange_addr = atio->u.isp24.exchange_addr;
 	ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
-	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
-	    CTIO7_FLAGS_SEND_STATUS);
+	    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
 	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
 	ctio->u.status1.ox_id = cpu_to_le16(temp);
 	ctio->u.status1.scsi_status =
-	    __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
-	ctio->u.status1.response_len = __constant_cpu_to_le16(8);
+	    cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
+	ctio->u.status1.response_len = cpu_to_le16(8);
 	ctio->u.status1.sense_data[0] = resp_code;
 
 	/* Memory Barrier */
@@ -1786,7 +1784,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
 
 	pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
 	pkt->nport_handle = prm->cmd->loop_id;
-	pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
 	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
 	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
 	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
@@ -2087,10 +2085,9 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
 {
 	prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
 	    (uint32_t)sizeof(ctio->u.status1.sense_data));
-	ctio->u.status0.flags |=
-	    __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
+	ctio->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
 	if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
-		ctio->u.status0.flags |= __constant_cpu_to_le16(
+		ctio->u.status0.flags |= cpu_to_le16(
 		    CTIO7_FLAGS_EXPLICIT_CONFORM |
 		    CTIO7_FLAGS_CONFORM_REQ);
 	}
@@ -2107,17 +2104,17 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
 			    "non GOOD status\n");
 			goto skip_explict_conf;
 		}
-		ctio->u.status1.flags |= __constant_cpu_to_le16(
+		ctio->u.status1.flags |= cpu_to_le16(
 		    CTIO7_FLAGS_EXPLICIT_CONFORM |
 		    CTIO7_FLAGS_CONFORM_REQ);
 	}
 skip_explict_conf:
 	ctio->u.status1.flags &=
-	    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
+	    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
 	ctio->u.status1.flags |=
-	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
+	    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
 	ctio->u.status1.scsi_status |=
-	    __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
+	    cpu_to_le16(SS_SENSE_LEN_VALID);
 	ctio->u.status1.sense_length =
 	    cpu_to_le16(prm->sense_buffer_len);
 	for (i = 0; i < prm->sense_buffer_len/4; i++)
@@ -2137,9 +2134,9 @@ skip_explict_conf:
 #endif
 	} else {
 		ctio->u.status1.flags &=
-		    ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
+		    ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
 		ctio->u.status1.flags |=
-		    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
+		    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
 		ctio->u.status1.sense_length = 0;
 		memset(ctio->u.status1.sense_data, 0,
 		    sizeof(ctio->u.status1.sense_data));
@@ -2261,7 +2258,6 @@ static inline int
 qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
 {
 	uint32_t *cur_dsd;
-	int sgc;
 	uint32_t transfer_length = 0;
 	uint32_t data_bytes;
 	uint32_t dif_bytes;
@@ -2278,7 +2274,6 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
 	struct atio_from_isp *atio = &prm->cmd->atio;
 	uint16_t t16;
 
-	sgc = 0;
 	ha = vha->hw;
 
 	pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
@@ -2368,7 +2363,7 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
 
 	pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
 	pkt->nport_handle = prm->cmd->loop_id;
-	pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+	pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
 	pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
 	pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
 	pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
@@ -2384,9 +2379,9 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
 
 	/* Set transfer direction */
 	if (cmd->dma_data_direction == DMA_TO_DEVICE)
-		pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN);
+		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_IN);
 	else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
-		pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
+		pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
 
 
 	pkt->dseg_count = prm->tot_dsds;
@@ -2438,11 +2433,11 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
 	crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
 	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
 	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
-	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
+	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
 
 
 	/* Walks data segments */
-	pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
+	pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
 
 	if (!bundling && prm->prot_seg_cnt) {
 		if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
@@ -2548,7 +2543,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
 
 	if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
 		pkt->u.status0.flags |=
-		    __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
+		    cpu_to_le16(CTIO7_FLAGS_DATA_IN |
 		    CTIO7_FLAGS_STATUS_MODE_0);
 
 		if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
@@ -2560,11 +2555,11 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
 			    cpu_to_le16(prm.rq_result);
 			pkt->u.status0.residual =
 			    cpu_to_le32(prm.residual);
-			pkt->u.status0.flags |= __constant_cpu_to_le16(
+			pkt->u.status0.flags |= cpu_to_le16(
 			    CTIO7_FLAGS_SEND_STATUS);
 			if (qlt_need_explicit_conf(ha, cmd, 0)) {
 				pkt->u.status0.flags |=
-				    __constant_cpu_to_le16(
+				    cpu_to_le16(
 				    CTIO7_FLAGS_EXPLICIT_CONFORM |
 				    CTIO7_FLAGS_CONFORM_REQ);
 			}
@@ -2592,12 +2587,12 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
 			ctio->entry_count = 1;
 			ctio->entry_type = CTIO_TYPE7;
 			ctio->dseg_count = 0;
-			ctio->u.status1.flags &= ~__constant_cpu_to_le16(
+			ctio->u.status1.flags &= ~cpu_to_le16(
 			    CTIO7_FLAGS_DATA_IN);
 
 			/* Real finish is ctio_m1's finish */
 			pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
-			pkt->u.status0.flags |= __constant_cpu_to_le16(
+			pkt->u.status0.flags |= cpu_to_le16(
 			    CTIO7_FLAGS_DONT_RET_CTIO);
 
 			/* qlt_24xx_init_ctio_to_isp will correct
@@ -2687,7 +2682,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
 	}
 
 	pkt = (struct ctio7_to_24xx *)prm.pkt;
-	pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
+	pkt->u.status0.flags |= cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
 	    CTIO7_FLAGS_STATUS_MODE_0);
 
 	if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
@@ -2762,7 +2757,7 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
 
 	/* Update protection tag */
 	if (cmd->prot_sg_cnt) {
-		uint32_t i, j = 0, k = 0, num_ent;
+		uint32_t i, k = 0, num_ent;
 		struct scatterlist *sg, *sgl;
 
 
@@ -2775,7 +2770,6 @@ qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
 				k += num_ent;
 				continue;
 			}
-			j = blocks_done - k - 1;
 			k = blocks_done;
 			break;
 		}
@@ -2969,14 +2963,14 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
 	ctio24 = (struct ctio7_to_24xx *)pkt;
 	ctio24->entry_type = CTIO_TYPE7;
 	ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
-	ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
 	ctio24->vp_index = vha->vp_idx;
 	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
 	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
 	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
 	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
 	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
-	    __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+	    cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
 	    CTIO7_FLAGS_TERMINATE);
 	temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
 	ctio24->u.status1.ox_id = cpu_to_le16(temp);
@@ -3216,7 +3210,7 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
 	if (ctio != NULL) {
 		struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
 		term = !(c->flags &
-		    __constant_cpu_to_le16(OF_TERM_EXCH));
+		    cpu_to_le16(OF_TERM_EXCH));
 	} else
 		term = 1;
 
@@ -3364,7 +3358,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
 {
 	struct qla_hw_data *ha = vha->hw;
 	struct se_cmd *se_cmd;
-	const struct target_core_fabric_ops *tfo;
 	struct qla_tgt_cmd *cmd;
 
 	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
@@ -3382,7 +3375,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
 		return;
 
 	se_cmd = &cmd->se_cmd;
-	tfo = se_cmd->se_tfo;
 	cmd->cmd_sent_to_fw = 0;
 
 	qlt_unmap_sg(vha, cmd);
@@ -3480,13 +3472,9 @@ skip_term:
 	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
 		cmd->cmd_flags |= BIT_12;
 	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
-		int rx_status = 0;
-
 		cmd->state = QLA_TGT_STATE_DATA_IN;
 
-		if (unlikely(status != CTIO_SUCCESS))
-			rx_status = -EIO;
-		else
+		if (status == CTIO_SUCCESS)
 			cmd->write_data_transferred = 1;
 
 		ha->tgt.tgt_ops->handle_data(cmd);
@@ -3928,12 +3916,11 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
 	struct qla_tgt *tgt;
 	struct qla_tgt_sess *sess;
 	uint32_t lun, unpacked_lun;
-	int lun_size, fn;
+	int fn;
 
 	tgt = vha->vha_tgt.qla_tgt;
 
 	lun = a->u.isp24.fcp_cmnd.lun;
-	lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
 	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
 	sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
 	    a->u.isp24.fcp_hdr.s_id);
@@ -4578,16 +4565,20 @@ static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
 	struct qla_hw_data *ha = vha->hw;
 	unsigned long flags = 0;
 
+#ifndef __CHECKER__
 	if (!ha_locked)
 		spin_lock_irqsave(&ha->hardware_lock, flags);
+#endif
 
 	qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
 	    NOTIFY_ACK_SRR_FLAGS_REJECT,
 	    NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
 	    NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
 
+#ifndef __CHECKER__
 	if (!ha_locked)
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+#endif
 
 	kfree(imm);
 }
@@ -4931,14 +4922,14 @@ static int __qlt_send_busy(struct scsi_qla_host *vha,
 	ctio24 = (struct ctio7_to_24xx *)pkt;
 	ctio24->entry_type = CTIO_TYPE7;
 	ctio24->nport_handle = sess->loop_id;
-	ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+	ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
 	ctio24->vp_index = vha->vp_idx;
 	ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
 	ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
 	ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
 	ctio24->exchange_addr = atio->u.isp24.exchange_addr;
 	ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
-	    __constant_cpu_to_le16(
+	    cpu_to_le16(
 	    CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
 	    CTIO7_FLAGS_DONT_RET_CTIO);
 	/*
@@ -5266,7 +5257,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
 		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
 		int rc;
 		if (atio->u.isp2x.status !=
-		    __constant_cpu_to_le16(ATIO_CDB_VALID)) {
+		    cpu_to_le16(ATIO_CDB_VALID)) {
 			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
 			    "qla_target(%d): ATIO with error "
 			    "status %x received\n", vha->vp_idx,
@@ -5340,7 +5331,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
 			    le16_to_cpu(entry->u.isp2x.status));
 			tgt->notify_ack_expected--;
 			if (entry->u.isp2x.status !=
-			    __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
+			    cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
 				ql_dbg(ql_dbg_tgt, vha, 0xe061,
 				    "qla_target(%d): NOTIFY_ACK "
 				    "failed %x\n", vha->vp_idx,
@@ -5659,7 +5650,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
 	uint8_t *s_id = NULL; /* to hide compiler warnings */
 	int rc;
 	uint32_t lun, unpacked_lun;
-	int lun_size, fn;
+	int fn;
 	void *iocb;
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -5691,7 +5682,6 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
 
 	iocb = a;
 	lun = a->u.isp24.fcp_cmnd.lun;
-	lun_size = sizeof(lun);
 	fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
 	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
 
@@ -6215,19 +6205,19 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
 		ha->tgt.saved_set = 1;
 	}
 
-	nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
+	nv->exchange_count = cpu_to_le16(0xFFFF);
 
 	/* Enable target mode */
-	nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
+	nv->firmware_options_1 |= cpu_to_le32(BIT_4);
 
 	/* Disable ini mode, if requested */
 	if (!qla_ini_mode_enabled(vha))
-		nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);
+		nv->firmware_options_1 |= cpu_to_le32(BIT_5);
 
 	/* Disable Full Login after LIP */
-	nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+	nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
 	/* Enable initial LIP */
-	nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
+	nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
 	if (ql2xtgt_tape_enable)
 		/* Enable FC Tape support */
 		nv->firmware_options_2 |= cpu_to_le32(BIT_12);
@@ -6236,9 +6226,9 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
 		nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
 
 		/* Disable Full Login after LIP */
-		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+		nv->host_p &= cpu_to_le32(~BIT_10);
 		/* Enable target PRLI control */
-		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
+		nv->firmware_options_2 |= cpu_to_le32(BIT_14);
 	} else {
 		if (ha->tgt.saved_set) {
 			nv->exchange_count = ha->tgt.saved_exchange_count;
@@ -6260,12 +6250,12 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
 		fc_host_supported_classes(vha->host) =
 		    FC_COS_CLASS2 | FC_COS_CLASS3;
 
-		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
+		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
 	} else {
 		if (vha->flags.init_done)
 			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
 
-		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
+		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
 	}
 }
 
@@ -6277,7 +6267,7 @@ qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
 
 	if (ha->tgt.node_name_set) {
 		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
-		icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
+		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
 	}
 }
 
@@ -6302,20 +6292,19 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
 		ha->tgt.saved_set = 1;
 	}
 
-	nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
+	nv->exchange_count = cpu_to_le16(0xFFFF);
 
 	/* Enable target mode */
-	nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
+	nv->firmware_options_1 |= cpu_to_le32(BIT_4);
 
 	/* Disable ini mode, if requested */
 	if (!qla_ini_mode_enabled(vha))
-		nv->firmware_options_1 |=
-		    __constant_cpu_to_le32(BIT_5);
+		nv->firmware_options_1 |= cpu_to_le32(BIT_5);
 
 	/* Disable Full Login after LIP */
-	nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+	nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
 	/* Enable initial LIP */
-	nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
+	nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
 	if (ql2xtgt_tape_enable)
 		/* Enable FC tape support */
 		nv->firmware_options_2 |= cpu_to_le32(BIT_12);
@@ -6324,9 +6313,9 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
 		nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
 
 		/* Disable Full Login after LIP */
-		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+		nv->host_p &= cpu_to_le32(~BIT_10);
 		/* Enable target PRLI control */
-		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
+		nv->firmware_options_2 |= cpu_to_le32(BIT_14);
 	} else {
 		if (ha->tgt.saved_set) {
 			nv->exchange_count = ha->tgt.saved_exchange_count;
@@ -6348,12 +6337,12 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
 		fc_host_supported_classes(vha->host) =
 		    FC_COS_CLASS2 | FC_COS_CLASS3;
 
-		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
+		nv->firmware_options_2 |= cpu_to_le32(BIT_8);
 	} else {
 		if (vha->flags.init_done)
 			fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
 
-		nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
+		nv->firmware_options_2 &= ~cpu_to_le32(BIT_8);
 	}
 }
 
@@ -6368,7 +6357,7 @@ qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
 
 	if (ha->tgt.node_name_set) {
 		memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
-		icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
+		icb->firmware_options_1 |= cpu_to_le32(BIT_14);
 	}
 }
 
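
The #ifndef __CHECKER__ guards added in qlt_reject_free_srr_imm() above (and again in qla_tmpl.c below) work around a sparse limitation: its lock-context tracking cannot follow a lock that is taken and released only when a runtime flag says the caller does not already hold it, so the conditional pair is hidden from the checker while staying in the real build. The pattern, sketched with generic names:

    #include <linux/spinlock.h>

    /* 'locked' says whether the caller already holds 'lock'; sparse
     * cannot model that, so the conditional pair is compiled out for it. */
    static void do_work_maybe_locked(spinlock_t *lock, bool locked)
    {
    	unsigned long flags = 0;

    #ifndef __CHECKER__
    	if (!locked)
    		spin_lock_irqsave(lock, flags);
    #endif
    	/* ... work that requires the lock ... */
    #ifndef __CHECKER__
    	if (!locked)
    		spin_unlock_irqrestore(lock, flags);
    #endif
    }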
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 962cb89fe0ae..ddbe2e7ac14d 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -137,39 +137,39 @@ qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
 }
 
 static inline void
-qla27xx_read8(void *window, void *buf, ulong *len)
+qla27xx_read8(void __iomem *window, void *buf, ulong *len)
 {
 	uint8_t value = ~0;
 
 	if (buf) {
-		value = RD_REG_BYTE((__iomem void *)window);
+		value = RD_REG_BYTE(window);
 	}
 	qla27xx_insert32(value, buf, len);
 }
 
 static inline void
-qla27xx_read16(void *window, void *buf, ulong *len)
+qla27xx_read16(void __iomem *window, void *buf, ulong *len)
 {
 	uint16_t value = ~0;
 
 	if (buf) {
-		value = RD_REG_WORD((__iomem void *)window);
+		value = RD_REG_WORD(window);
 	}
 	qla27xx_insert32(value, buf, len);
 }
 
 static inline void
-qla27xx_read32(void *window, void *buf, ulong *len)
+qla27xx_read32(void __iomem *window, void *buf, ulong *len)
 {
 	uint32_t value = ~0;
 
 	if (buf) {
-		value = RD_REG_DWORD((__iomem void *)window);
+		value = RD_REG_DWORD(window);
 	}
 	qla27xx_insert32(value, buf, len);
 }
 
-static inline void (*qla27xx_read_vector(uint width))(void *, void *, ulong *)
+static inline void (*qla27xx_read_vector(uint width))(void __iomem*, void *, ulong *)
 {
 	return
 	    (width == 1) ? qla27xx_read8 :
@@ -181,7 +181,7 @@ static inline void
 qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
 	uint offset, void *buf, ulong *len)
 {
-	void *window = (void *)reg + offset;
+	void __iomem *window = (void __iomem *)reg + offset;
 
 	qla27xx_read32(window, buf, len);
 }
@@ -202,8 +202,8 @@ qla27xx_read_window(__iomem struct device_reg_24xx *reg,
 	uint32_t addr, uint offset, uint count, uint width, void *buf,
 	ulong *len)
 {
-	void *window = (void *)reg + offset;
-	void (*readn)(void *, void *, ulong *) = qla27xx_read_vector(width);
+	void __iomem *window = (void __iomem *)reg + offset;
+	void (*readn)(void __iomem*, void *, ulong *) = qla27xx_read_vector(width);
 
 	qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf);
 	while (count--) {
@@ -805,9 +805,8 @@ static void
 qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
 {
 	uint8_t v[] = { 0, 0, 0, 0, 0, 0 };
-	int rval = 0;
 
-	rval = sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
+	sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
 	    v+0, v+1, v+2, v+3, v+4, v+5);
 
 	tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
@@ -940,8 +939,10 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
 {
 	ulong flags = 0;
 
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+#endif
 
 	if (!vha->hw->fw_dump)
 		ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n");
@@ -954,6 +955,8 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
 	else
 		qla27xx_execute_fwdt_template(vha);
 
+#ifndef __CHECKER__
 	if (!hardware_locked)
 		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+#endif
 }
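
The qla_tmpl.c changes push the __iomem address-space annotation into the function signatures instead of casting at every call site. __iomem is another sparse annotation: it marks MMIO pointers that must only be touched through accessors such as readl()/writel(), and sparse warns when one is dereferenced directly or mixed with ordinary pointers. A minimal sketch of the convention, assuming a generic register window rather than this driver's RD_REG_* wrappers:

    #include <linux/io.h>
    #include <linux/types.h>

    /* The annotation travels with the pointer type, so no per-call casts
     * are needed and sparse can check every access along the way. */
    static u32 read_reg32(void __iomem *base, unsigned int offset)
    {
    	return readl(base + offset);	/* accessor, never a plain *ptr */
    }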
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 2ed9ab90a455..6d31faa8c57b 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.07.00.18-k"
+#define QLA2XXX_VERSION      "8.07.00.26-k"
 
 #define QLA_DRIVER_MAJOR_VER	8
 #define QLA_DRIVER_MINOR_VER	7
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 9224a06646e6..7ed7bae6172b 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -420,6 +420,12 @@ static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
 
 static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
 {
+	if (!(se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
+		struct qla_tgt_cmd *cmd = container_of(se_cmd,
+				struct qla_tgt_cmd, se_cmd);
+		return cmd->state;
+	}
+
 	return 0;
 }
 
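
The new tcm_qla2xxx_get_cmd_state() body uses the standard embedded-structure recovery idiom: the target core hands back a pointer to the se_cmd member it knows about, and container_of() walks back to the driver's enclosing qla_tgt_cmd. A generic sketch of the idiom (the types here are illustrative, not from the driver):

    #include <linux/kernel.h>

    struct se_like { int flags; };

    struct drv_cmd {
    	int state;
    	struct se_like se;	/* the member the midlayer sees */
    };

    /* Recover the enclosing command from a pointer to its embedded
     * member: container_of() is pointer arithmetic on the member offset. */
    static int drv_cmd_state(struct se_like *se)
    {
    	struct drv_cmd *cmd = container_of(se, struct drv_cmd, se);

    	return cmd->state;
    }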
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 6457a8a0db9c..afd34a608fe7 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -420,6 +420,10 @@ static void scsi_report_sense(struct scsi_device *sdev,
 		evt_type = SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED;
 		sdev_printk(KERN_WARNING, sdev,
 			    "Mode parameters changed");
+	} else if (sshdr->asc == 0x2a && sshdr->ascq == 0x06) {
+		evt_type = SDEV_EVT_ALUA_STATE_CHANGE_REPORTED;
+		sdev_printk(KERN_WARNING, sdev,
+			    "Asymmetric access state changed");
 	} else if (sshdr->asc == 0x2a && sshdr->ascq == 0x09) {
 		evt_type = SDEV_EVT_CAPACITY_CHANGE_REPORTED;
 		sdev_printk(KERN_WARNING, sdev,
@@ -1155,8 +1159,13 @@ int scsi_eh_get_sense(struct list_head *work_q,
 	struct Scsi_Host *shost;
 	int rtn;
 
+	/*
+	 * If SCSI_EH_ABORT_SCHEDULED has been set, it is timeout IO,
+	 * should not get sense.
+	 */
 	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
 		if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) ||
+		    (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) ||
 		    SCSI_SENSE_VALID(scmd))
 			continue;
 
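
The new event fires on the additional-sense pair ASC 0x2a / ASCQ 0x06, which T10 SPC defines as "asymmetric access state changed": the unit attention an ALUA array raises after a path-state transition. For reference, a sketch of where those bytes sit in fixed-format sense data (the code above works on the already-decoded scsi_sense_hdr instead):

    #include <linux/types.h>

    /* Fixed-format sense (response codes 0x70/0x71): the sense key is
     * the low nibble of byte 2, ASC is byte 12, ASCQ is byte 13. */
    static bool sense_is_alua_state_change(const u8 *sense)
    {
    	return (sense[2] & 0x0f) == 0x06 &&	/* UNIT ATTENTION */
    	       sense[12] == 0x2a && sense[13] == 0x06;
    }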
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 448ebdaa3d69..882864f5cbae 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2423,7 +2423,7 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
 	unsigned char cmd[12];
 	int use_10_for_ms;
 	int header_length;
-	int result;
+	int result, retry_count = retries;
 	struct scsi_sense_hdr my_sshdr;
 
 	memset(data, 0, sizeof(*data));
@@ -2502,6 +2502,11 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
 			data->block_descriptor_length = buffer[3];
 		}
 		data->header_length = header_length;
+	} else if ((status_byte(result) == CHECK_CONDITION) &&
+		   scsi_sense_valid(sshdr) &&
+		   sshdr->sense_key == UNIT_ATTENTION && retry_count) {
+		retry_count--;
+		goto retry;
 	}
 
 	return result;
@@ -2707,6 +2712,9 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
 	case SDEV_EVT_LUN_CHANGE_REPORTED:
 		envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
 		break;
+	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
+		envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED";
+		break;
 	default:
 		/* do nothing */
 		break;
@@ -2810,6 +2818,7 @@ struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
 	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
 	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
 	case SDEV_EVT_LUN_CHANGE_REPORTED:
+	case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED:
 	default:
 		/* do nothing */
 		break;
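
The scsi_mode_sense() change adds a bounded retry when the device answers with a unit attention, which generally means "something changed since you last looked" and the command is worth reissuing. The added lines follow the classic budgeted-retry shape; a self-contained sketch with a hypothetical submission helper (issue_once() is a placeholder, not a kernel API):

    #include <stdbool.h>

    /* Placeholder for real command submission; returns nonzero on error
     * and sets *ua when the device reported a UNIT ATTENTION. */
    extern int issue_once(bool *ua);

    int run_with_retries(int retries)
    {
    	int result, retry_count = retries;
    	bool ua;

    retry:
    	result = issue_once(&ua);
    	if (result && ua && retry_count) {
    		retry_count--;		/* spend one unit of the budget */
    		goto retry;
    	}
    	return result;
    }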
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index da73d5524602..e4b3d8f4fd85 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2042,6 +2042,7 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
 	session->transport = transport;
 	session->creator = -1;
 	session->recovery_tmo = 120;
+	session->recovery_tmo_sysfs_override = false;
 	session->state = ISCSI_SESSION_FREE;
 	INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
 	INIT_LIST_HEAD(&session->sess_list);
@@ -2786,7 +2787,8 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
 	switch (ev->u.set_param.param) {
 	case ISCSI_PARAM_SESS_RECOVERY_TMO:
 		sscanf(data, "%d", &value);
-		session->recovery_tmo = value;
+		if (!session->recovery_tmo_sysfs_override)
+			session->recovery_tmo = value;
 		break;
 	default:
 		err = transport->set_param(conn, ev->u.set_param.param,
@@ -4049,13 +4051,15 @@ store_priv_session_##field(struct device *dev, \
 	if ((session->state == ISCSI_SESSION_FREE) || \
 	    (session->state == ISCSI_SESSION_FAILED)) \
 		return -EBUSY; \
-	if (strncmp(buf, "off", 3) == 0) \
+	if (strncmp(buf, "off", 3) == 0) { \
 		session->field = -1; \
-	else { \
+		session->field##_sysfs_override = true; \
+	} else { \
 		val = simple_strtoul(buf, &cp, 0); \
 		if (*cp != '\0' && *cp != '\n') \
 			return -EINVAL; \
 		session->field = val; \
+		session->field##_sysfs_override = true; \
 	} \
 	return count; \
 }
@@ -4066,6 +4070,7 @@ store_priv_session_##field(struct device *dev, \
 static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUSR, \
 	show_priv_session_##field, \
 	store_priv_session_##field)
+
 iscsi_priv_session_rw_attr(recovery_tmo, "%d");
 
 static struct attribute *iscsi_session_attrs[] = {
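
The recovery_tmo change is a precedence rule between two writers of the same field: a sysfs store latches recovery_tmo_sysfs_override, and from then on the iscsid netlink path (iscsi_set_param()) leaves the admin's value alone. Reduced to its essentials, the rule is just:

    #include <stdbool.h>

    struct session {
    	int recovery_tmo;
    	bool recovery_tmo_sysfs_override;
    };

    /* Sysfs store: the admin's value wins and is latched. */
    static void set_tmo_from_sysfs(struct session *s, int val)
    {
    	s->recovery_tmo = val;
    	s->recovery_tmo_sysfs_override = true;
    }

    /* Netlink set_param: honored only while no override is latched. */
    static void set_tmo_from_netlink(struct session *s, int val)
    {
    	if (!s->recovery_tmo_sysfs_override)
    		s->recovery_tmo = val;
    }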
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 871f3553987d..b37b9b00c4b4 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -85,6 +85,7 @@ static int debug_flag;
85 85
86static struct class st_sysfs_class; 86static struct class st_sysfs_class;
87static const struct attribute_group *st_dev_groups[]; 87static const struct attribute_group *st_dev_groups[];
88static const struct attribute_group *st_drv_groups[];
88 89
89MODULE_AUTHOR("Kai Makisara"); 90MODULE_AUTHOR("Kai Makisara");
90MODULE_DESCRIPTION("SCSI tape (st) driver"); 91MODULE_DESCRIPTION("SCSI tape (st) driver");
@@ -198,15 +199,13 @@ static int sgl_unmap_user_pages(struct st_buffer *, const unsigned int, int);
198static int st_probe(struct device *); 199static int st_probe(struct device *);
199static int st_remove(struct device *); 200static int st_remove(struct device *);
200 201
201static int do_create_sysfs_files(void);
202static void do_remove_sysfs_files(void);
203
204static struct scsi_driver st_template = { 202static struct scsi_driver st_template = {
205 .gendrv = { 203 .gendrv = {
206 .name = "st", 204 .name = "st",
207 .owner = THIS_MODULE, 205 .owner = THIS_MODULE,
208 .probe = st_probe, 206 .probe = st_probe,
209 .remove = st_remove, 207 .remove = st_remove,
208 .groups = st_drv_groups,
210 }, 209 },
211}; 210};
212 211
@@ -4404,14 +4403,8 @@ static int __init init_st(void)
4404 if (err) 4403 if (err)
4405 goto err_chrdev; 4404 goto err_chrdev;
4406 4405
4407 err = do_create_sysfs_files();
4408 if (err)
4409 goto err_scsidrv;
4410
4411 return 0; 4406 return 0;
4412 4407
4413err_scsidrv:
4414 scsi_unregister_driver(&st_template.gendrv);
4415err_chrdev: 4408err_chrdev:
 	unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
 				 ST_MAX_TAPE_ENTRIES);
@@ -4422,11 +4415,11 @@ err_class:
 
 static void __exit exit_st(void)
 {
-	do_remove_sysfs_files();
 	scsi_unregister_driver(&st_template.gendrv);
 	unregister_chrdev_region(MKDEV(SCSI_TAPE_MAJOR, 0),
 				 ST_MAX_TAPE_ENTRIES);
 	class_unregister(&st_sysfs_class);
+	idr_destroy(&st_index_idr);
 	printk(KERN_INFO "st: Unloaded.\n");
 }
 
@@ -4435,68 +4428,38 @@ module_exit(exit_st);
 
 
 /* The sysfs driver interface. Read-only at the moment */
-static ssize_t st_try_direct_io_show(struct device_driver *ddp, char *buf)
+static ssize_t try_direct_io_show(struct device_driver *ddp, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%d\n", try_direct_io);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", try_direct_io);
 }
-static DRIVER_ATTR(try_direct_io, S_IRUGO, st_try_direct_io_show, NULL);
+static DRIVER_ATTR_RO(try_direct_io);
 
-static ssize_t st_fixed_buffer_size_show(struct device_driver *ddp, char *buf)
+static ssize_t fixed_buffer_size_show(struct device_driver *ddp, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%d\n", st_fixed_buffer_size);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", st_fixed_buffer_size);
 }
-static DRIVER_ATTR(fixed_buffer_size, S_IRUGO, st_fixed_buffer_size_show, NULL);
+static DRIVER_ATTR_RO(fixed_buffer_size);
 
-static ssize_t st_max_sg_segs_show(struct device_driver *ddp, char *buf)
+static ssize_t max_sg_segs_show(struct device_driver *ddp, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "%d\n", st_max_sg_segs);
+	return scnprintf(buf, PAGE_SIZE, "%d\n", st_max_sg_segs);
 }
-static DRIVER_ATTR(max_sg_segs, S_IRUGO, st_max_sg_segs_show, NULL);
+static DRIVER_ATTR_RO(max_sg_segs);
 
-static ssize_t st_version_show(struct device_driver *ddd, char *buf)
+static ssize_t version_show(struct device_driver *ddd, char *buf)
 {
-	return snprintf(buf, PAGE_SIZE, "[%s]\n", verstr);
+	return scnprintf(buf, PAGE_SIZE, "[%s]\n", verstr);
 }
-static DRIVER_ATTR(version, S_IRUGO, st_version_show, NULL);
-
-static int do_create_sysfs_files(void)
-{
-	struct device_driver *sysfs = &st_template.gendrv;
-	int err;
+static DRIVER_ATTR_RO(version);
 
-	err = driver_create_file(sysfs, &driver_attr_try_direct_io);
-	if (err)
-		return err;
-	err = driver_create_file(sysfs, &driver_attr_fixed_buffer_size);
-	if (err)
-		goto err_try_direct_io;
-	err = driver_create_file(sysfs, &driver_attr_max_sg_segs);
-	if (err)
-		goto err_attr_fixed_buf;
-	err = driver_create_file(sysfs, &driver_attr_version);
-	if (err)
-		goto err_attr_max_sg;
-
-	return 0;
-
-err_attr_max_sg:
-	driver_remove_file(sysfs, &driver_attr_max_sg_segs);
-err_attr_fixed_buf:
-	driver_remove_file(sysfs, &driver_attr_fixed_buffer_size);
-err_try_direct_io:
-	driver_remove_file(sysfs, &driver_attr_try_direct_io);
-	return err;
-}
-
-static void do_remove_sysfs_files(void)
-{
-	struct device_driver *sysfs = &st_template.gendrv;
-
-	driver_remove_file(sysfs, &driver_attr_version);
-	driver_remove_file(sysfs, &driver_attr_max_sg_segs);
-	driver_remove_file(sysfs, &driver_attr_fixed_buffer_size);
-	driver_remove_file(sysfs, &driver_attr_try_direct_io);
-}
+static struct attribute *st_drv_attrs[] = {
+	&driver_attr_try_direct_io.attr,
+	&driver_attr_fixed_buffer_size.attr,
+	&driver_attr_max_sg_segs.attr,
+	&driver_attr_version.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(st_drv);
 
 /* The sysfs simple class interface */
 static ssize_t
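
The conversion above replaces the hand-rolled do_create_sysfs_files()/do_remove_sysfs_files() pair with a static attribute group: ATTRIBUTE_GROUPS(st_drv) generates a NULL-terminated st_drv_groups array, and once that array is plugged into the driver's struct device_driver, the driver core creates and removes the files during driver registration, including the error unwinding that the old code open-coded. A minimal sketch of the pattern, using hypothetical demo_* names rather than the st driver's actual registration code:

	static ssize_t version_show(struct device_driver *ddp, char *buf)
	{
		return scnprintf(buf, PAGE_SIZE, "1.0\n");
	}
	static DRIVER_ATTR_RO(version);		/* emits driver_attr_version */

	static struct attribute *demo_drv_attrs[] = {
		&driver_attr_version.attr,
		NULL,
	};
	ATTRIBUTE_GROUPS(demo_drv);		/* emits demo_drv_groups */

	static struct device_driver demo_driver = {
		.name	= "demo",
		.groups	= demo_drv_groups,	/* core creates/removes the files */
	};
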
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 3c6584ff65c1..40c43aeb4ff3 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -56,15 +56,18 @@
  * V1 RC > 2008/1/31: 2.0
  * Win7: 4.2
  * Win8: 5.1
+ * Win8.1: 6.0
+ * Win10: 6.2
  */
 
+#define VMSTOR_PROTO_VERSION(MAJOR_, MINOR_)	((((MAJOR_) & 0xff) << 8) | \
+						(((MINOR_) & 0xff)))
 
-#define VMSTOR_WIN7_MAJOR 4
-#define VMSTOR_WIN7_MINOR 2
-
-#define VMSTOR_WIN8_MAJOR 5
-#define VMSTOR_WIN8_MINOR 1
-
+#define VMSTOR_PROTO_VERSION_WIN6	VMSTOR_PROTO_VERSION(2, 0)
+#define VMSTOR_PROTO_VERSION_WIN7	VMSTOR_PROTO_VERSION(4, 2)
+#define VMSTOR_PROTO_VERSION_WIN8	VMSTOR_PROTO_VERSION(5, 1)
+#define VMSTOR_PROTO_VERSION_WIN8_1	VMSTOR_PROTO_VERSION(6, 0)
+#define VMSTOR_PROTO_VERSION_WIN10	VMSTOR_PROTO_VERSION(6, 2)
 
 /* Packet structure describing virtual storage requests. */
 enum vstor_packet_operation {
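
VMSTOR_PROTO_VERSION() packs the major number into the high byte and the minor into the low byte, so the resulting constants compare correctly as plain integers; the ordered checks later in this patch (e.g. vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN8) depend on that. A small standalone check of the arithmetic, assuming only the macro above:

	#include <assert.h>

	#define VMSTOR_PROTO_VERSION(MAJOR_, MINOR_) ((((MAJOR_) & 0xff) << 8) | \
						      (((MINOR_) & 0xff)))

	int main(void)
	{
		assert(VMSTOR_PROTO_VERSION(4, 2) == 0x0402);	/* Win7  */
		assert(VMSTOR_PROTO_VERSION(6, 2) == 0x0602);	/* Win10 */
		/* a newer (major, minor) pair always yields a larger value */
		assert(VMSTOR_PROTO_VERSION(6, 2) > VMSTOR_PROTO_VERSION(5, 1));
		return 0;
	}
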
@@ -148,21 +151,18 @@ struct hv_fc_wwn_packet {
 
 /*
  * Sense buffer size changed in win8; have a run-time
- * variable to track the size we should use.
+ * variable to track the size we should use. This value will
+ * likely change during protocol negotiation but it is valid
+ * to start by assuming pre-Win8.
  */
-static int sense_buffer_size;
+static int sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
 
 /*
- * The size of the vmscsi_request has changed in win8. The
- * additional size is because of new elements added to the
- * structure. These elements are valid only when we are talking
- * to a win8 host.
- * Track the correction to size we need to apply.
+ * The storage protocol version is determined during the
+ * initial exchange with the host. It will indicate which
+ * storage functionality is available in the host.
  */
-
-static int vmscsi_size_delta;
-static int vmstor_current_major;
-static int vmstor_current_minor;
+static int vmstor_proto_version;
 
 struct vmscsi_win8_extension {
 	/*
@@ -207,6 +207,56 @@ struct vmscsi_request {
 
 
 /*
+ * The size of the vmscsi_request has changed in win8. The
+ * additional size is because of new elements added to the
+ * structure. These elements are valid only when we are talking
+ * to a win8 host.
+ * Track the correction to size we need to apply. This value
+ * will likely change during protocol negotiation but it is
+ * valid to start by assuming pre-Win8.
+ */
+static int vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
+
+/*
+ * The list of storage protocols in order of preference.
+ */
+struct vmstor_protocol {
+	int protocol_version;
+	int sense_buffer_size;
+	int vmscsi_size_delta;
+};
+
+
+static const struct vmstor_protocol vmstor_protocols[] = {
+	{
+		VMSTOR_PROTO_VERSION_WIN10,
+		POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
+		0
+	},
+	{
+		VMSTOR_PROTO_VERSION_WIN8_1,
+		POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
+		0
+	},
+	{
+		VMSTOR_PROTO_VERSION_WIN8,
+		POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
+		0
+	},
+	{
+		VMSTOR_PROTO_VERSION_WIN7,
+		PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
+		sizeof(struct vmscsi_win8_extension),
+	},
+	{
+		VMSTOR_PROTO_VERSION_WIN6,
+		PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
+		sizeof(struct vmscsi_win8_extension),
+	}
+};
+
+
+/*
  * This structure is sent during the intialization phase to get the different
  * properties of the channel.
  */
@@ -426,7 +476,6 @@ static void storvsc_host_scan(struct work_struct *work)
 	struct storvsc_scan_work *wrk;
 	struct Scsi_Host *host;
 	struct scsi_device *sdev;
-	unsigned long flags;
 
 	wrk = container_of(work, struct storvsc_scan_work, work);
 	host = wrk->host;
@@ -443,14 +492,8 @@ static void storvsc_host_scan(struct work_struct *work)
 	 * may have been removed this way.
 	 */
 	mutex_lock(&host->scan_mutex);
-	spin_lock_irqsave(host->host_lock, flags);
-	list_for_each_entry(sdev, &host->__devices, siblings) {
-		spin_unlock_irqrestore(host->host_lock, flags);
+	shost_for_each_device(sdev, host)
 		scsi_test_unit_ready(sdev, 1, 1, NULL);
-		spin_lock_irqsave(host->host_lock, flags);
-		continue;
-	}
-	spin_unlock_irqrestore(host->host_lock, flags);
 	mutex_unlock(&host->scan_mutex);
 	/*
 	 * Now scan the host to discover LUNs that may have been added.
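
shost_for_each_device() is the stock helper for exactly this situation: it pins each scsi_device with a reference while the loop body runs, so the sleeping scsi_test_unit_ready() call no longer needs the open-coded host-lock juggling that the old loop used to survive list changes. For reference, the iterator in include/scsi/scsi_device.h is roughly:

	/*
	 * Walk a host's device list; __scsi_iterate_devices() takes a
	 * reference on the device it returns and drops the previous one,
	 * so the loop body may safely sleep.
	 */
	#define shost_for_each_device(sdev, shost) \
		for ((sdev) = __scsi_iterate_devices((shost), NULL); \
		     (sdev); \
		     (sdev) = __scsi_iterate_devices((shost), (sdev)))
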
@@ -481,18 +524,6 @@ done:
 	kfree(wrk);
 }
 
-/*
- * Major/minor macros. Minor version is in LSB, meaning that earlier flat
- * version numbers will be interpreted as "0.x" (i.e., 1 becomes 0.1).
- */
-
-static inline u16 storvsc_get_version(u8 major, u8 minor)
-{
-	u16 version;
-
-	version = ((major << 8) | minor);
-	return version;
-}
 
 /*
  * We can get incoming messages from the host that are not in response to
@@ -885,7 +916,7 @@ static int storvsc_channel_init(struct hv_device *device)
 	struct storvsc_device *stor_device;
 	struct storvsc_cmd_request *request;
 	struct vstor_packet *vstor_packet;
-	int ret, t;
+	int ret, t, i;
 	int max_chns;
 	bool process_sub_channels = false;
 
@@ -921,41 +952,65 @@ static int storvsc_channel_init(struct hv_device *device)
 	}
 
 	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
-	    vstor_packet->status != 0)
+	    vstor_packet->status != 0) {
+		ret = -EINVAL;
 		goto cleanup;
+	}
 
 
-	/* reuse the packet for version range supported */
-	memset(vstor_packet, 0, sizeof(struct vstor_packet));
-	vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
-	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
+	for (i = 0; i < ARRAY_SIZE(vmstor_protocols); i++) {
+		/* reuse the packet for version range supported */
+		memset(vstor_packet, 0, sizeof(struct vstor_packet));
+		vstor_packet->operation =
+			VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
+		vstor_packet->flags = REQUEST_COMPLETION_FLAG;
 
-	vstor_packet->version.major_minor =
-		storvsc_get_version(vmstor_current_major, vmstor_current_minor);
+		vstor_packet->version.major_minor =
+			vmstor_protocols[i].protocol_version;
 
-	/*
-	 * The revision number is only used in Windows; set it to 0.
-	 */
-	vstor_packet->version.revision = 0;
+		/*
+		 * The revision number is only used in Windows; set it to 0.
+		 */
+		vstor_packet->version.revision = 0;
 
-	ret = vmbus_sendpacket(device->channel, vstor_packet,
-			       (sizeof(struct vstor_packet) -
-			       vmscsi_size_delta),
-			       (unsigned long)request,
-			       VM_PKT_DATA_INBAND,
-			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
-	if (ret != 0)
-		goto cleanup;
+		ret = vmbus_sendpacket(device->channel, vstor_packet,
+				       (sizeof(struct vstor_packet) -
+				       vmscsi_size_delta),
+				       (unsigned long)request,
+				       VM_PKT_DATA_INBAND,
+				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+		if (ret != 0)
+			goto cleanup;
 
-	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
-	if (t == 0) {
-		ret = -ETIMEDOUT;
-		goto cleanup;
+		t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+		if (t == 0) {
+			ret = -ETIMEDOUT;
+			goto cleanup;
+		}
+
+		if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO) {
+			ret = -EINVAL;
+			goto cleanup;
+		}
+
+		if (vstor_packet->status == 0) {
+			vmstor_proto_version =
+				vmstor_protocols[i].protocol_version;
+
+			sense_buffer_size =
+				vmstor_protocols[i].sense_buffer_size;
+
+			vmscsi_size_delta =
+				vmstor_protocols[i].vmscsi_size_delta;
+
+			break;
+		}
 	}
 
-	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
-	    vstor_packet->status != 0)
-		goto cleanup;
+	if (vstor_packet->status != 0) {
+		ret = -EINVAL;
+		goto cleanup;
+	}
 
 
 	memset(vstor_packet, 0, sizeof(struct vstor_packet));
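
The loop above offers each entry of vmstor_protocols[] to the host in order, newest first, and the first version the host accepts (status 0) supplies all three protocol-dependent values in one step. A compact userspace sketch of the same negotiate-from-a-preference-table pattern; offer_version() is a hypothetical stand-in for the QUERY_PROTOCOL_VERSION round-trip and the table values are illustrative:

	#include <stdbool.h>
	#include <stddef.h>

	struct proto { int version; int sense_size; int size_delta; };

	/* Preference-ordered: newest protocol first. */
	static const struct proto protos[] = {
		{ 0x0602, 20, 0 },	/* "Win10"-era */
		{ 0x0501, 20, 0 },	/* "Win8"-era  */
		{ 0x0402, 18, 8 },	/* "Win7"-era  */
	};

	/* Stub for the host round-trip: pretend only 0x0501 and older work. */
	static bool offer_version(int version)
	{
		return version <= 0x0501;
	}

	static const struct proto *negotiate(void)
	{
		for (size_t i = 0; i < sizeof(protos) / sizeof(protos[0]); i++)
			if (offer_version(protos[i].version))
				return &protos[i];	/* first accepted offer wins */
		return NULL;				/* no common protocol */
	}
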
@@ -979,8 +1034,10 @@ static int storvsc_channel_init(struct hv_device *device)
 	}
 
 	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
-	    vstor_packet->status != 0)
+	    vstor_packet->status != 0) {
+		ret = -EINVAL;
 		goto cleanup;
+	}
 
 	/*
 	 * Check to see if multi-channel support is there.
@@ -988,8 +1045,7 @@ static int storvsc_channel_init(struct hv_device *device)
 	 * support multi-channel.
 	 */
 	max_chns = vstor_packet->storage_channel_properties.max_channel_cnt;
-	if ((vmbus_proto_version != VERSION_WIN7) &&
-	    (vmbus_proto_version != VERSION_WS2008)) {
+	if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN8) {
 		if (vstor_packet->storage_channel_properties.flags &
 		    STORAGE_CHANNEL_SUPPORTS_MULTI_CHANNEL)
 			process_sub_channels = true;
@@ -1018,8 +1074,10 @@ static int storvsc_channel_init(struct hv_device *device)
 	}
 
 	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
-	    vstor_packet->status != 0)
+	    vstor_packet->status != 0) {
+		ret = -EINVAL;
 		goto cleanup;
+	}
 
 	if (process_sub_channels)
 		handle_multichannel_storage(device, max_chns);
@@ -1428,15 +1486,19 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
 
 	/*
 	 * If the host is WIN8 or WIN8 R2, claim conformance to SPC-3
-	 * if the device is a MSFT virtual device.
+	 * if the device is a MSFT virtual device.  If the host is
+	 * WIN10 or newer, allow write_same.
 	 */
 	if (!strncmp(sdevice->vendor, "Msft", 4)) {
-		switch (vmbus_proto_version) {
-		case VERSION_WIN8:
-		case VERSION_WIN8_1:
+		switch (vmstor_proto_version) {
+		case VMSTOR_PROTO_VERSION_WIN8:
+		case VMSTOR_PROTO_VERSION_WIN8_1:
 			sdevice->scsi_level = SCSI_SPC_3;
 			break;
 		}
+
+		if (vmstor_proto_version >= VMSTOR_PROTO_VERSION_WIN10)
+			sdevice->no_write_same = 0;
 	}
 
 	return 0;
@@ -1563,7 +1625,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 	u32 payload_sz;
 	u32 length;
 
-	if (vmstor_current_major <= VMSTOR_WIN8_MAJOR) {
+	if (vmstor_proto_version <= VMSTOR_PROTO_VERSION_WIN8) {
 		/*
 		 * On legacy hosts filter unimplemented commands.
 		 * Future hosts are expected to correctly handle
@@ -1598,10 +1660,18 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 		vm_srb->data_in = READ_TYPE;
 		vm_srb->win8_extension.srb_flags |= SRB_FLAGS_DATA_IN;
 		break;
-	default:
+	case DMA_NONE:
 		vm_srb->data_in = UNKNOWN_TYPE;
 		vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER;
 		break;
+	default:
+		/*
+		 * This is DMA_BIDIRECTIONAL or something else we are never
+		 * supposed to see here.
+		 */
+		WARN(1, "Unexpected data direction: %d\n",
+		     scmnd->sc_data_direction);
+		return -EINVAL;
 	}
 
 
@@ -1758,22 +1828,11 @@ static int storvsc_probe(struct hv_device *device,
 	 * set state to properly communicate with the host.
 	 */
 
-	switch (vmbus_proto_version) {
-	case VERSION_WS2008:
-	case VERSION_WIN7:
-		sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
-		vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
-		vmstor_current_major = VMSTOR_WIN7_MAJOR;
-		vmstor_current_minor = VMSTOR_WIN7_MINOR;
+	if (vmbus_proto_version < VERSION_WIN8) {
 		max_luns_per_target = STORVSC_IDE_MAX_LUNS_PER_TARGET;
 		max_targets = STORVSC_IDE_MAX_TARGETS;
 		max_channels = STORVSC_IDE_MAX_CHANNELS;
-		break;
-	default:
-		sense_buffer_size = POST_WIN7_STORVSC_SENSE_BUFFER_SIZE;
-		vmscsi_size_delta = 0;
-		vmstor_current_major = VMSTOR_WIN8_MAJOR;
-		vmstor_current_minor = VMSTOR_WIN8_MINOR;
+	} else {
 		max_luns_per_target = STORVSC_MAX_LUNS_PER_TARGET;
 		max_targets = STORVSC_MAX_TARGETS;
 		max_channels = STORVSC_MAX_CHANNELS;
@@ -1783,7 +1842,6 @@ static int storvsc_probe(struct hv_device *device,
 	 * VCPUs in the guest.
 	 */
 	max_sub_channels = (num_cpus / storvsc_vcpus_per_sub_channel);
-		break;
 	}
 
 	scsi_driver.can_queue = (max_outstanding_req_per_channel *
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index ae84b2214d40..50c2a363bc8f 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -57,9 +57,10 @@ enum scsi_device_event {
 	SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED,	/* 38 07  UA reported */
 	SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED,	/* 2A 01  UA reported */
 	SDEV_EVT_LUN_CHANGE_REPORTED,			/* 3F 0E  UA reported */
+	SDEV_EVT_ALUA_STATE_CHANGE_REPORTED,		/* 2A 06  UA reported */
 
 	SDEV_EVT_FIRST		= SDEV_EVT_MEDIA_CHANGE,
-	SDEV_EVT_LAST		= SDEV_EVT_LUN_CHANGE_REPORTED,
+	SDEV_EVT_LAST		= SDEV_EVT_ALUA_STATE_CHANGE_REPORTED,
 
 	SDEV_EVT_MAXBITS	= SDEV_EVT_LAST + 1
 };
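
With the new enum value in place, whatever code decodes the 2A/06 ("ASYMMETRIC ACCESS STATE CHANGED") unit attention can raise the event through the existing scsi_device event machinery; a sketch of the reporting side, assuming an sdev for which that sense pair was just seen:

	/* Notify udev listeners of an ALUA state change on this device. */
	static void report_alua_change(struct scsi_device *sdev)
	{
		sdev_evt_send_simple(sdev, SDEV_EVT_ALUA_STATE_CHANGE_REPORTED,
				     GFP_ATOMIC);
	}
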
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 2555ee5343fd..6183d20a01fb 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -241,6 +241,7 @@ struct iscsi_cls_session {
 
 	/* recovery fields */
 	int recovery_tmo;
+	bool recovery_tmo_sysfs_override;
 	struct delayed_work recovery_work;
 
 	unsigned int target_id;
diff --git a/include/uapi/scsi/Kbuild b/include/uapi/scsi/Kbuild
index 75746d52f208..d791e0ad509d 100644
--- a/include/uapi/scsi/Kbuild
+++ b/include/uapi/scsi/Kbuild
@@ -3,3 +3,4 @@ header-y += fc/
 header-y += scsi_bsg_fc.h
 header-y += scsi_netlink.h
 header-y += scsi_netlink_fc.h
+header-y += cxlflash_ioctl.h
diff --git a/include/uapi/scsi/cxlflash_ioctl.h b/include/uapi/scsi/cxlflash_ioctl.h
new file mode 100644
index 000000000000..831351b2e660
--- /dev/null
+++ b/include/uapi/scsi/cxlflash_ioctl.h
@@ -0,0 +1,174 @@
+/*
+ * CXL Flash Device Driver
+ *
+ * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
+ *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
+ *
+ * Copyright (C) 2015 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _CXLFLASH_IOCTL_H
+#define _CXLFLASH_IOCTL_H
+
+#include <linux/types.h>
+
+/*
+ * Structure and flag definitions CXL Flash superpipe ioctls
+ */
+
+#define DK_CXLFLASH_VERSION_0	0
+
+struct dk_cxlflash_hdr {
+	__u16 version;			/* Version data */
+	__u16 rsvd[3];			/* Reserved for future use */
+	__u64 flags;			/* Input flags */
+	__u64 return_flags;		/* Returned flags */
+};
+
+/*
+ * Notes:
+ * -----
+ * The 'context_id' field of all ioctl structures contains the context
+ * identifier for a context in the lower 32-bits (upper 32-bits are not
+ * to be used when identifying a context to the AFU). That said, the value
+ * in its entirety (all 64-bits) is to be treated as an opaque cookie and
+ * should be presented as such when issuing ioctls.
+ *
+ * For DK_CXLFLASH_ATTACH ioctl, user specifies read/write access
+ * permissions via the O_RDONLY, O_WRONLY, and O_RDWR flags defined in
+ * the fcntl.h header file.
+ */
+#define DK_CXLFLASH_ATTACH_REUSE_CONTEXT	0x8000000000000000ULL
+
+struct dk_cxlflash_attach {
+	struct dk_cxlflash_hdr hdr;	/* Common fields */
+	__u64 num_interrupts;		/* Requested number of interrupts */
+	__u64 context_id;		/* Returned context */
+	__u64 mmio_size;		/* Returned size of MMIO area */
+	__u64 block_size;		/* Returned block size, in bytes */
+	__u64 adap_fd;			/* Returned adapter file descriptor */
+	__u64 last_lba;			/* Returned last LBA on the device */
+	__u64 max_xfer;			/* Returned max transfer size, blocks */
+	__u64 reserved[8];		/* Reserved for future use */
+};
+
+struct dk_cxlflash_detach {
+	struct dk_cxlflash_hdr hdr;	/* Common fields */
+	__u64 context_id;		/* Context to detach */
+	__u64 reserved[8];		/* Reserved for future use */
+};
+
+struct dk_cxlflash_udirect {
+	struct dk_cxlflash_hdr hdr;	/* Common fields */
+	__u64 context_id;		/* Context to own physical resources */
+	__u64 rsrc_handle;		/* Returned resource handle */
+	__u64 last_lba;			/* Returned last LBA on the device */
+	__u64 reserved[8];		/* Reserved for future use */
+};
+
+#define DK_CXLFLASH_UVIRTUAL_NEED_WRITE_SAME	0x8000000000000000ULL
+
+struct dk_cxlflash_uvirtual {
+	struct dk_cxlflash_hdr hdr;	/* Common fields */
+	__u64 context_id;		/* Context to own virtual resources */
+	__u64 lun_size;			/* Requested size, in 4K blocks */
+	__u64 rsrc_handle;		/* Returned resource handle */
+	__u64 last_lba;			/* Returned last LBA of LUN */
+	__u64 reserved[8];		/* Reserved for future use */
+};
+
+struct dk_cxlflash_release {
+	struct dk_cxlflash_hdr hdr;	/* Common fields */
+	__u64 context_id;		/* Context owning resources */
+	__u64 rsrc_handle;		/* Resource handle to release */
+	__u64 reserved[8];		/* Reserved for future use */
+};
+
+struct dk_cxlflash_resize {
+	struct dk_cxlflash_hdr hdr;	/* Common fields */
+	__u64 context_id;		/* Context owning resources */
+	__u64 rsrc_handle;		/* Resource handle of LUN to resize */
+	__u64 req_size;			/* New requested size, in 4K blocks */
+	__u64 last_lba;			/* Returned last LBA of LUN */
+	__u64 reserved[8];		/* Reserved for future use */
+};
+
+struct dk_cxlflash_clone {
+	struct dk_cxlflash_hdr hdr;	/* Common fields */
+	__u64 context_id_src;		/* Context to clone from */
+	__u64 context_id_dst;		/* Context to clone to */
+	__u64 adap_fd_src;		/* Source context adapter fd */
+	__u64 reserved[8];		/* Reserved for future use */
+};
+
+#define DK_CXLFLASH_VERIFY_SENSE_LEN	18
+#define DK_CXLFLASH_VERIFY_HINT_SENSE	0x8000000000000000ULL
+
+struct dk_cxlflash_verify {
+	struct dk_cxlflash_hdr hdr;	/* Common fields */
+	__u64 context_id;		/* Context owning resources to verify */
+	__u64 rsrc_handle;		/* Resource handle of LUN */
+	__u64 hint;			/* Reasons for verify */
+	__u64 last_lba;			/* Returned last LBA of device */
+	__u8 sense_data[DK_CXLFLASH_VERIFY_SENSE_LEN]; /* SCSI sense data */
+	__u8 pad[6];			/* Pad to next 8-byte boundary */
+	__u64 reserved[8];		/* Reserved for future use */
+};
+
+#define DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET	0x8000000000000000ULL
+
+struct dk_cxlflash_recover_afu {
+	struct dk_cxlflash_hdr hdr;	/* Common fields */
+	__u64 reason;			/* Reason for recovery request */
+	__u64 context_id;		/* Context to recover / updated ID */
+	__u64 mmio_size;		/* Returned size of MMIO area */
+	__u64 adap_fd;			/* Returned adapter file descriptor */
+	__u64 reserved[8];		/* Reserved for future use */
+};
+
+#define DK_CXLFLASH_MANAGE_LUN_WWID_LEN			16
+#define DK_CXLFLASH_MANAGE_LUN_ENABLE_SUPERPIPE		0x8000000000000000ULL
+#define DK_CXLFLASH_MANAGE_LUN_DISABLE_SUPERPIPE	0x4000000000000000ULL
+#define DK_CXLFLASH_MANAGE_LUN_ALL_PORTS_ACCESSIBLE	0x2000000000000000ULL
+
+struct dk_cxlflash_manage_lun {
+	struct dk_cxlflash_hdr hdr;			/* Common fields */
+	__u8 wwid[DK_CXLFLASH_MANAGE_LUN_WWID_LEN];	/* Page83 WWID, NAA-6 */
+	__u64 reserved[8];				/* Rsvd, future use */
+};
+
+union cxlflash_ioctls {
+	struct dk_cxlflash_attach attach;
+	struct dk_cxlflash_detach detach;
+	struct dk_cxlflash_udirect udirect;
+	struct dk_cxlflash_uvirtual uvirtual;
+	struct dk_cxlflash_release release;
+	struct dk_cxlflash_resize resize;
+	struct dk_cxlflash_clone clone;
+	struct dk_cxlflash_verify verify;
+	struct dk_cxlflash_recover_afu recover_afu;
+	struct dk_cxlflash_manage_lun manage_lun;
+};
+
+#define MAX_CXLFLASH_IOCTL_SZ	(sizeof(union cxlflash_ioctls))
+
+#define CXL_MAGIC 0xCA
+#define CXL_IOWR(_n, _s)	_IOWR(CXL_MAGIC, _n, struct _s)
+
+#define DK_CXLFLASH_ATTACH		CXL_IOWR(0x80, dk_cxlflash_attach)
+#define DK_CXLFLASH_USER_DIRECT		CXL_IOWR(0x81, dk_cxlflash_udirect)
+#define DK_CXLFLASH_RELEASE		CXL_IOWR(0x82, dk_cxlflash_release)
+#define DK_CXLFLASH_DETACH		CXL_IOWR(0x83, dk_cxlflash_detach)
+#define DK_CXLFLASH_VERIFY		CXL_IOWR(0x84, dk_cxlflash_verify)
+#define DK_CXLFLASH_RECOVER_AFU		CXL_IOWR(0x85, dk_cxlflash_recover_afu)
+#define DK_CXLFLASH_MANAGE_LUN		CXL_IOWR(0x86, dk_cxlflash_manage_lun)
+#define DK_CXLFLASH_USER_VIRTUAL	CXL_IOWR(0x87, dk_cxlflash_uvirtual)
+#define DK_CXLFLASH_VLUN_RESIZE		CXL_IOWR(0x88, dk_cxlflash_resize)
+#define DK_CXLFLASH_VLUN_CLONE		CXL_IOWR(0x89, dk_cxlflash_clone)
+
+#endif /* ifndef _CXLFLASH_IOCTL_H */
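
This header is consumed from userspace through plain ioctl(2) calls on the disk node that cxlflash exports. A minimal sketch of the attach step; the device path and interrupt count are illustrative, and Documentation/powerpc/cxlflash.txt in this series describes the full flow:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <scsi/cxlflash_ioctl.h>

	int main(void)
	{
		struct dk_cxlflash_attach attach;
		int fd = open("/dev/sdc", O_RDWR);	/* cxlflash-backed disk (example) */

		if (fd < 0)
			return 1;

		memset(&attach, 0, sizeof(attach));
		attach.hdr.version = DK_CXLFLASH_VERSION_0;
		attach.num_interrupts = 4;		/* illustrative request */

		if (ioctl(fd, DK_CXLFLASH_ATTACH, &attach) == 0)
			printf("context %#llx, mmio %llu bytes, adapter fd %lld\n",
			       (unsigned long long)attach.context_id,
			       (unsigned long long)attach.mmio_size,
			       (long long)attach.adap_fd);

		close(fd);
		return 0;
	}
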