aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/powerpc/cxlflash.txt44
-rw-r--r--Documentation/scsi/00-INDEX2
-rw-r--r--Documentation/scsi/dtc3x80.txt43
-rw-r--r--Documentation/scsi/in2000.txt202
-rw-r--r--Documentation/scsi/scsi-parameters.txt18
-rw-r--r--Documentation/scsi/smartpqi.txt80
-rw-r--r--MAINTAINERS38
-rw-r--r--drivers/message/fusion/mptbase.c7
-rw-r--r--drivers/message/fusion/mptfc.c7
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c162
-rw-r--r--drivers/s390/scsi/zfcp_dbf.h14
-rw-r--r--drivers/s390/scsi/zfcp_erp.c12
-rw-r--r--drivers/s390/scsi/zfcp_ext.h8
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c22
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h4
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c8
-rw-r--r--drivers/scsi/Kconfig136
-rw-r--r--drivers/scsi/Makefile8
-rw-r--r--drivers/scsi/NCR5380.c15
-rw-r--r--drivers/scsi/NCR5380.h10
-rw-r--r--drivers/scsi/aacraid/src.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_hwi.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c12
-rw-r--r--drivers/scsi/be2iscsi/be.h15
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c1096
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.h142
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c408
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.h25
-rw-r--r--drivers/scsi/be2iscsi/be_main.c2478
-rw-r--r--drivers/scsi/be2iscsi/be_main.h220
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c1497
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.h51
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_els.c4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c12
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c2
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c5
-rw-r--r--drivers/scsi/cxlflash/main.c81
-rw-r--r--drivers/scsi/cxlflash/superpipe.c180
-rw-r--r--drivers/scsi/cxlflash/superpipe.h3
-rw-r--r--drivers/scsi/cxlflash/vlun.c13
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c1
-rw-r--r--drivers/scsi/dtc.c447
-rw-r--r--drivers/scsi/dtc.h42
-rw-r--r--drivers/scsi/esas2r/esas2r_init.c4
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c2
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c53
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h18
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c238
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c36
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c130
-rw-r--r--drivers/scsi/hosts.c12
-rw-r--r--drivers/scsi/hpsa.c139
-rw-r--r--drivers/scsi/hpsa.h1
-rw-r--r--drivers/scsi/hpsa_cmd.h1
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c12
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h1
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c37
-rw-r--r--drivers/scsi/in2000.c2302
-rw-r--r--drivers/scsi/in2000.h412
-rw-r--r--drivers/scsi/ipr.c134
-rw-r--r--drivers/scsi/ipr.h8
-rw-r--r--drivers/scsi/libfc/fc_exch.c1
-rw-r--r--drivers/scsi/libfc/fc_rport.c26
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c82
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c34
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c28
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h9
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c259
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h24
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c7
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c49
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c169
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c28
-rw-r--r--drivers/scsi/mvsas/mv_64xx.c19
-rw-r--r--drivers/scsi/mvsas/mv_94xx.c41
-rw-r--r--drivers/scsi/mvsas/mv_sas.c16
-rw-r--r--drivers/scsi/pas16.c565
-rw-r--r--drivers/scsi/pas16.h121
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c2
-rw-r--r--drivers/scsi/pmcraid.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h10
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c18
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c2
-rw-r--r--drivers/scsi/scsi_debug.c54
-rw-r--r--drivers/scsi/scsi_priv.h2
-rw-r--r--drivers/scsi/scsi_scan.c2
-rw-r--r--drivers/scsi/sd.c11
-rw-r--r--drivers/scsi/sd.h30
-rw-r--r--drivers/scsi/sd_dif.c10
-rw-r--r--drivers/scsi/sg.c20
-rw-r--r--drivers/scsi/smartpqi/Kconfig54
-rw-r--r--drivers/scsi/smartpqi/Makefile3
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h1136
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c6303
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sas_transport.c350
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.c404
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.h34
-rw-r--r--drivers/scsi/sr.c2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c2
-rw-r--r--drivers/scsi/t128.c407
-rw-r--r--drivers/scsi/t128.h97
-rw-r--r--drivers/scsi/u14-34f.c1971
-rw-r--r--drivers/scsi/ufs/tc-dwc-g210.c1
-rw-r--r--drivers/scsi/ufs/ufs.h1
-rw-r--r--drivers/scsi/ufs/ufshcd.c6
-rw-r--r--drivers/scsi/ultrastor.c1210
-rw-r--r--drivers/scsi/ultrastor.h80
-rw-r--r--drivers/scsi/virtio_scsi.c2
-rw-r--r--drivers/scsi/wd7000.c1657
-rw-r--r--include/linux/t10-pi.h20
-rw-r--r--include/uapi/scsi/cxlflash_ioctl.h19
117 files changed, 12922 insertions, 13622 deletions
diff --git a/Documentation/powerpc/cxlflash.txt b/Documentation/powerpc/cxlflash.txt
index 4202d1bc583c..6d9a2ed32cad 100644
--- a/Documentation/powerpc/cxlflash.txt
+++ b/Documentation/powerpc/cxlflash.txt
@@ -121,7 +121,7 @@ Block library API
121 below. 121 below.
122 122
123 The block library can be found on GitHub: 123 The block library can be found on GitHub:
124 http://www.github.com/mikehollinger/ibmcapikv 124 http://github.com/open-power/capiflash
125 125
126 126
127CXL Flash Driver IOCTLs 127CXL Flash Driver IOCTLs
@@ -171,11 +171,30 @@ DK_CXLFLASH_ATTACH
171 destroyed, the tokens are to be considered stale and subsequent 171 destroyed, the tokens are to be considered stale and subsequent
172 usage will result in errors. 172 usage will result in errors.
173 173
174 - A valid adapter file descriptor (fd2 >= 0) is only returned on
175 the initial attach for a context. Subsequent attaches to an
176 existing context (DK_CXLFLASH_ATTACH_REUSE_CONTEXT flag present)
177 do not provide the adapter file descriptor as it was previously
178 made known to the application.
179
174 - When a context is no longer needed, the user shall detach from 180 - When a context is no longer needed, the user shall detach from
175 the context via the DK_CXLFLASH_DETACH ioctl. 181 the context via the DK_CXLFLASH_DETACH ioctl. When this ioctl
182 returns with a valid adapter file descriptor and the return flag
183 DK_CXLFLASH_APP_CLOSE_ADAP_FD is present, the application _must_
184 close the adapter file descriptor following a successful detach.
185
186 - When this ioctl returns with a valid fd2 and the return flag
187 DK_CXLFLASH_APP_CLOSE_ADAP_FD is present, the application _must_
188 close fd2 in the following circumstances:
189
190 + Following a successful detach of the last user of the context
191 + Following a successful recovery on the context's original fd2
192 + In the child process of a fork(), following a clone ioctl,
193 on the fd2 associated with the source context
176 194
177 - A close on fd2 will invalidate the tokens. This operation is not 195 - At any time, a close on fd2 will invalidate the tokens. Applications
178 required by the user. 196 should exercise caution to only close fd2 when appropriate (outlined
197 in the previous bullet) to avoid premature loss of I/O.
179 198
180DK_CXLFLASH_USER_DIRECT 199DK_CXLFLASH_USER_DIRECT
181----------------------- 200-----------------------
@@ -254,6 +273,10 @@ DK_CXLFLASH_DETACH
254 success, all "tokens" which had been provided to the user from the 273 success, all "tokens" which had been provided to the user from the
255 DK_CXLFLASH_ATTACH onward are no longer valid. 274 DK_CXLFLASH_ATTACH onward are no longer valid.
256 275
276 When the DK_CXLFLASH_APP_CLOSE_ADAP_FD flag was returned on a successful
277 attach, the application _must_ close the fd2 associated with the context
278 following the detach of the final user of the context.
279
257DK_CXLFLASH_VLUN_CLONE 280DK_CXLFLASH_VLUN_CLONE
258---------------------- 281----------------------
259 This ioctl is responsible for cloning a previously created 282 This ioctl is responsible for cloning a previously created
@@ -261,7 +284,7 @@ DK_CXLFLASH_VLUN_CLONE
261 support maintaining user space access to storage after a process 284 support maintaining user space access to storage after a process
262 forks. Upon success, the child process (which invoked the ioctl) 285 forks. Upon success, the child process (which invoked the ioctl)
263 will have access to the same LUNs via the same resource handle(s) 286 will have access to the same LUNs via the same resource handle(s)
264 and fd2 as the parent, but under a different context. 287 as the parent, but under a different context.
265 288
266 Context sharing across processes is not supported with CXL and 289 Context sharing across processes is not supported with CXL and
267 therefore each fork must be met with establishing a new context 290 therefore each fork must be met with establishing a new context
@@ -275,6 +298,12 @@ DK_CXLFLASH_VLUN_CLONE
275 translation tables are copied from the parent context to the child's 298 translation tables are copied from the parent context to the child's
276 and then synced with the AFU. 299 and then synced with the AFU.
277 300
301 When the DK_CXLFLASH_APP_CLOSE_ADAP_FD flag was returned on a successful
302 attach, the application _must_ close the fd2 associated with the source
303 context (still resident/accessible in the parent process) following the
304 clone. This is to avoid a stale entry in the file descriptor table of the
305 child process.
306
278DK_CXLFLASH_VERIFY 307DK_CXLFLASH_VERIFY
279------------------ 308------------------
280 This ioctl is used to detect various changes such as the capacity of 309 This ioctl is used to detect various changes such as the capacity of
@@ -309,6 +338,11 @@ DK_CXLFLASH_RECOVER_AFU
309 at which time the context/resources they held will be freed as part of 338 at which time the context/resources they held will be freed as part of
310 the release fop. 339 the release fop.
311 340
341 When the DK_CXLFLASH_APP_CLOSE_ADAP_FD flag was returned on a successful
342 attach, the application _must_ unmap and close the fd2 associated with the
343 original context following this ioctl returning success and indicating that
344 the context was recovered (DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET).
345
312DK_CXLFLASH_MANAGE_LUN 346DK_CXLFLASH_MANAGE_LUN
313---------------------- 347----------------------
314 This ioctl is used to switch a LUN from a mode where it is available 348 This ioctl is used to switch a LUN from a mode where it is available
diff --git a/Documentation/scsi/00-INDEX b/Documentation/scsi/00-INDEX
index c4b978a72f78..bb4a76f823e1 100644
--- a/Documentation/scsi/00-INDEX
+++ b/Documentation/scsi/00-INDEX
@@ -64,8 +64,6 @@ hpsa.txt
64 - HP Smart Array Controller SCSI driver. 64 - HP Smart Array Controller SCSI driver.
65hptiop.txt 65hptiop.txt
66 - HIGHPOINT ROCKETRAID 3xxx RAID DRIVER 66 - HIGHPOINT ROCKETRAID 3xxx RAID DRIVER
67in2000.txt
68 - info on in2000 driver
69libsas.txt 67libsas.txt
70 - Serial Attached SCSI management layer. 68 - Serial Attached SCSI management layer.
71link_power_management_policy.txt 69link_power_management_policy.txt
diff --git a/Documentation/scsi/dtc3x80.txt b/Documentation/scsi/dtc3x80.txt
deleted file mode 100644
index 1d7af9f9a8ed..000000000000
--- a/Documentation/scsi/dtc3x80.txt
+++ /dev/null
@@ -1,43 +0,0 @@
1README file for the Linux DTC3180/3280 scsi driver.
2by Ray Van Tassle (rayvt@comm.mot.com) March 1996
3Based on the generic & core NCR5380 code by Drew Eckhard
4
5SCSI device driver for the DTC 3180/3280.
6Data Technology Corp---a division of Qume.
7
8The 3280 has a standard floppy interface.
9
10The 3180 does not. Otherwise, they are identical.
11
12The DTC3x80 does not support DMA but it does have Pseudo-DMA which is
13supported by the driver.
14
15Its DTC406 scsi chip is supposedly compatible with the NCR 53C400.
16It is memory mapped, uses an IRQ, but no dma or io-port. There is
17internal DMA, between SCSI bus and an on-chip 128-byte buffer. Double
18buffering is done automagically by the chip. Data is transferred
19between the on-chip buffer and CPU/RAM via memory moves.
20
21The driver detects the possible memory addresses (jumper selectable):
22 CC00, DC00, C800, and D800
23The possible IRQ's (jumper selectable) are:
24 IRQ 10, 11, 12, 15
25Parity is supported by the chip, but not by this driver.
26Information can be obtained from /proc/scsi/dtc3c80/N.
27
28Note on interrupts:
29
30The documentation says that it can be set to interrupt whenever the
31on-chip buffer needs CPU attention. I couldn't get this to work. So
32the driver polls for data-ready in the pseudo-DMA transfer routine.
33The interrupt support routines in the NCR3280.c core modules handle
34scsi disconnect/reconnect, and this (mostly) works. However..... I
35have tested it with 4 totally different hard drives (both SCSI-1 and
36SCSI-2), and one CDROM drive. Interrupts works great for all but one
37specific hard drive. For this one, the driver will eventually hang in
38the transfer state. I have tested with: "dd bs=4k count=2k
39of=/dev/null if=/dev/sdb". It reads ok for a while, then hangs.
40After beating my head against this for a couple of weeks, getting
41nowhere, I give up. So.....This driver does NOT use interrupts, even
42if you have the card jumpered to an IRQ. Probably nobody will ever
43care.
diff --git a/Documentation/scsi/in2000.txt b/Documentation/scsi/in2000.txt
deleted file mode 100644
index c3e2a90475d2..000000000000
--- a/Documentation/scsi/in2000.txt
+++ /dev/null
@@ -1,202 +0,0 @@
1
2UPDATE NEWS: version 1.33 - 26 Aug 98
3
4 Interrupt management in this driver has become, over
5 time, increasingly odd and difficult to explain - this
6 has been mostly due to my own mental inadequacies. In
7 recent kernels, it has failed to function at all when
8 compiled for SMP. I've fixed that problem, and after
9 taking a fresh look at interrupts in general, greatly
10 reduced the number of places where they're fiddled
11 with. Done some heavy testing and it looks very good.
12 The driver now makes use of the __initfunc() and
13 __initdata macros to save about 4k of kernel memory.
14 Once again, the same code works for both 2.0.xx and
15 2.1.xx kernels.
16
17UPDATE NEWS: version 1.32 - 28 Mar 98
18
19 Removed the check for legal IN2000 hardware versions:
20 It appears that the driver works fine with serial
21 EPROMs (the 8-pin chip that defines hardware rev) as
22 old as 2.1, so we'll assume that all cards are OK.
23
24UPDATE NEWS: version 1.31 - 6 Jul 97
25
26 Fixed a bug that caused incorrect SCSI status bytes to be
27 returned from commands sent to LUNs greater than 0. This
28 means that CDROM changers work now! Fixed a bug in the
29 handling of command-line arguments when loaded as a module.
30 Also put all the header data in in2000.h where it belongs.
31 There are no longer any differences between this driver in
32 the 2.1.xx source tree and the 2.0.xx tree, as of 2.0.31
33 and 2.1.45 (or is it .46?) - this makes things much easier
34 for me...
35
36UPDATE NEWS: version 1.30 - 14 Oct 96
37
38 Fixed a bug in the code that sets the transfer direction
39 bit (DESTID_DPD in the WD_DESTINATION_ID register). There
40 are quite a few SCSI commands that do a write-to-device;
41 now we deal with all of them correctly. Thanks to Joerg
42 Dorchain for catching this one.
43
44UPDATE NEWS: version 1.29 - 24 Sep 96
45
46 The memory-mapped hardware on the card is now accessed via
47 the 'readb()' and 'readl()' macros - required by the new
48 memory management scheme in the 2.1.x kernel series.
49 As suggested by Andries Brouwer, 'bios_param()' no longer
50 forces an artificial 1023 track limit on drives. Also
51 removed some kludge-code left over from struggles with
52 older (buggy) compilers.
53
54UPDATE NEWS: version 1.28 - 07 May 96
55
56 Tightened up the "interrupts enabled/disabled" discipline
57 in 'in2000_queuecommand()' and maybe 1 or 2 other places.
58 I _think_ it may have been a little too lax, causing an
59 occasional crash during full moon. A fully functional
60 /proc interface is now in place - if you want to play
61 with it, start by doing 'cat /proc/scsi/in2000/0'. You
62 can also use it to change a few run-time parameters on
63 the fly, but it's mostly for debugging. The curious
64 should take a good look at 'in2000_proc_info()' in the
65 in2000.c file to get an understanding of what it's all
66 about; I figure that people who are really into it will
67 want to add features suited to their own needs...
68 Also, sync is now DISABLED by default.
69
70UPDATE NEWS: version 1.27 - 10 Apr 96
71
72 Fixed a well-hidden bug in the adaptive-disconnect code
73 that would show up every now and then during extreme
74 heavy loads involving 2 or more simultaneously active
75 devices. Thanks to Joe Mack for keeping my nose to the
76 grindstone on this one.
77
78UPDATE NEWS: version 1.26 - 07 Mar 96
79
80 1.25 had a nasty bug that bit people with swap partitions
81 and tape drives. Also, in my attempt to guess my way
82 through Intel assembly language, I made an error in the
83 inline code for IO writes. Made a few other changes and
84 repairs - this version (fingers crossed) should work well.
85
86UPDATE NEWS: version 1.25 - 05 Mar 96
87
88 Kernel 1.3.70 interrupt mods added; old kernels still OK.
89 Big help from Bill Earnest and David Willmore on speed
90 testing and optimizing: I think there's a real improvement
91 in this area.
92 New! User-friendly command-line interface for LILO and
93 module loading - the old method is gone, so you'll need
94 to read the comments for 'setup_strings' near the top
95 of in2000.c. For people with CDROM's or other devices
96 that have a tough time with sync negotiation, you can
97 now selectively disable sync on individual devices -
98 search for the 'nosync' keyword in the command-line
99 comments. Some of you disable the BIOS on the card, which
100 caused the auto-detect function to fail; there is now a
101 command-line option to force detection of a ROM-less card.
102
103UPDATE NEWS: version 1.24a - 24 Feb 96
104
105 There was a bug in the synchronous transfer code. Only
106 a few people downloaded before I caught it - could have
107 been worse.
108
109UPDATE NEWS: version 1.24 - 23 Feb 96
110
111 Lots of good changes. Advice from Bill Earnest resulted
112 in much better detection of cards, more efficient usage
113 of the fifo, and (hopefully) faster data transfers. The
114 jury is still out on speed - I hope it's improved some.
115 One nifty new feature is a cool way of doing disconnect/
116 reselect. The driver defaults to what I'm calling
117 'adaptive disconnect' - meaning that each command is
118 evaluated individually as to whether or not it should be
119 run with the option to disconnect/reselect (if the device
120 chooses), or as a "SCSI-bus-hog". When several devices
121 are operating simultaneously, disconnects are usually an
122 advantage. In a single device system, or if only 1 device
123 is being accessed, transfers usually go faster if disconnects
124 are not allowed.
125
126
127
128The default arguments (you get these when you don't give an 'in2000'
129command-line argument, or you give a blank argument) will cause
130the driver to do adaptive disconnect, synchronous transfers, and a
131minimum of debug messages. If you want to fool with the options,
132search for 'setup_strings' near the top of the in2000.c file and
133check the 'hostdata->args' section in in2000.h - but be warned! Not
134everything is working yet (some things will never work, probably).
135I believe that disabling disconnects (DIS_NEVER) will allow you
136to choose a LEVEL2 value higher than 'L2_BASIC', but I haven't
137spent a lot of time testing this. You might try 'ENABLE_CLUSTERING'
138to see what happens: my tests showed little difference either way.
139There's also a define called 'DEFAULT_SX_PER'; this sets the data
140transfer speed for the asynchronous mode. I've put it at 500 ns
141despite the fact that the card could handle settings of 376 or
142252, because higher speeds may be a problem with poor quality
143cables or improper termination; 500 ns is a compromise. You can
144choose your own default through the command-line with the
145'period' keyword.
146
147
148------------------------------------------------
149*********** DIP switch settings **************
150------------------------------------------------
151
152 sw1-1 sw1-2 BIOS address (hex)
153 -----------------------------------------
154 off off C8000 - CBFF0
155 on off D8000 - DBFF0
156 off on D0000 - D3FF0
157 on on BIOS disabled
158
159 sw1-3 sw1-4 IO port address (hex)
160 ------------------------------------
161 off off 220 - 22F
162 on off 200 - 20F
163 off on 110 - 11F
164 on on 100 - 10F
165
166 sw1-5 sw1-6 sw1-7 Interrupt
167 ------------------------------
168 off off off 15
169 off on off 14
170 off off on 11
171 off on on 10
172 on - - disabled
173
174 sw1-8 function depends on BIOS version. In earlier versions this
175 controlled synchronous data transfer support for MSDOS:
176 off = disabled
177 on = enabled
178 In later ROMs (starting with 01.3 in April 1994) sw1-8 controls
179 the "greater than 2 disk drive" feature that first appeared in
180 MSDOS 5.0 (ignored by Linux):
181 off = 2 drives maximum
182 on = 7 drives maximum
183
184 sw1-9 Floppy controller
185 --------------------------
186 off disabled
187 on enabled
188
189------------------------------------------------
190
191 I should mention that Drew Eckhardt's 'Generic NCR5380' sources
192 were my main inspiration, with lots of reference to the IN2000
193 driver currently distributed in the kernel source. I also owe
194 much to a driver written by Hamish Macdonald for Linux-m68k(!).
195 And to Eric Wright for being an ALPHA guinea pig. And to Bill
196 Earnest for 2 tons of great input and information. And to David
197 Willmore for extensive 'bonnie' testing. And to Joe Mack for
198 continual testing and feedback.
199
200
201 John Shifflett jshiffle@netcom.com
202
diff --git a/Documentation/scsi/scsi-parameters.txt b/Documentation/scsi/scsi-parameters.txt
index d5ae6ced6be3..8e66dafa41e1 100644
--- a/Documentation/scsi/scsi-parameters.txt
+++ b/Documentation/scsi/scsi-parameters.txt
@@ -34,9 +34,6 @@ parameters may be changed at runtime by the command
34 See drivers/scsi/BusLogic.c, comment before function 34 See drivers/scsi/BusLogic.c, comment before function
35 BusLogic_ParseDriverOptions(). 35 BusLogic_ParseDriverOptions().
36 36
37 dtc3181e= [HW,SCSI]
38 See Documentation/scsi/g_NCR5380.txt.
39
40 eata= [HW,SCSI] 37 eata= [HW,SCSI]
41 38
42 fdomain= [HW,SCSI] 39 fdomain= [HW,SCSI]
@@ -47,9 +44,6 @@ parameters may be changed at runtime by the command
47 44
48 gvp11= [HW,SCSI] 45 gvp11= [HW,SCSI]
49 46
50 in2000= [HW,SCSI]
51 See header of drivers/scsi/in2000.c.
52
53 ips= [HW,SCSI] Adaptec / IBM ServeRAID controller 47 ips= [HW,SCSI] Adaptec / IBM ServeRAID controller
54 See header of drivers/scsi/ips.c. 48 See header of drivers/scsi/ips.c.
55 49
@@ -83,9 +77,6 @@ parameters may be changed at runtime by the command
83 Format: <buffer_size>,<write_threshold> 77 Format: <buffer_size>,<write_threshold>
84 See also Documentation/scsi/st.txt. 78 See also Documentation/scsi/st.txt.
85 79
86 pas16= [HW,SCSI]
87 See header of drivers/scsi/pas16.c.
88
89 scsi_debug_*= [SCSI] 80 scsi_debug_*= [SCSI]
90 See drivers/scsi/scsi_debug.c. 81 See drivers/scsi/scsi_debug.c.
91 82
@@ -119,18 +110,9 @@ parameters may be changed at runtime by the command
119 sym53c416= [HW,SCSI] 110 sym53c416= [HW,SCSI]
120 See header of drivers/scsi/sym53c416.c. 111 See header of drivers/scsi/sym53c416.c.
121 112
122 t128= [HW,SCSI]
123 See header of drivers/scsi/t128.c.
124
125 tmscsim= [HW,SCSI] 113 tmscsim= [HW,SCSI]
126 See comment before function dc390_setup() in 114 See comment before function dc390_setup() in
127 drivers/scsi/tmscsim.c. 115 drivers/scsi/tmscsim.c.
128 116
129 u14-34f= [HW,SCSI] UltraStor 14F/34F SCSI host adapter
130 See header of drivers/scsi/u14-34f.c.
131
132 wd33c93= [HW,SCSI] 117 wd33c93= [HW,SCSI]
133 See header of drivers/scsi/wd33c93.c. 118 See header of drivers/scsi/wd33c93.c.
134
135 wd7000= [HW,SCSI]
136 See header of drivers/scsi/wd7000.c.
diff --git a/Documentation/scsi/smartpqi.txt b/Documentation/scsi/smartpqi.txt
new file mode 100644
index 000000000000..ab377d9e5d1b
--- /dev/null
+++ b/Documentation/scsi/smartpqi.txt
@@ -0,0 +1,80 @@
1
2SMARTPQI - Microsemi Smart PQI Driver
3-----------------------------------------
4
5This file describes the smartpqi SCSI driver for Microsemi
6(http://www.microsemi.com) PQI controllers. The smartpqi driver
7is the next generation SCSI driver for Microsemi Corp. The smartpqi
8driver is the first SCSI driver to implement the PQI queuing model.
9
10The smartpqi driver will replace the aacraid driver for Adaptec Series 9
11controllers. Customers running an older kernel (Pre-4.9) using an Adaptec
12Series 9 controller will have to configure the smartpqi driver or their
13volumes will not be added to the OS.
14
15For Microsemi smartpqi controller support, enable the smartpqi driver
16when configuring the kernel.
17
18For more information on the PQI Queuing Interface, please see:
19http://www.t10.org/drafts.htm
20http://www.t10.org/members/w_pqi2.htm
21
22Supported devices:
23------------------
24<Controller names to be added as they become publically available.>
25
26smartpqi specific entries in /sys
27-----------------------------
28
29 smartpqi host attributes:
30 -------------------------
31 /sys/class/scsi_host/host*/rescan
32 /sys/class/scsi_host/host*/version
33
34 The host rescan attribute is a write only attribute. Writing to this
35 attribute will trigger the driver to scan for new, changed, or removed
36 devices and notify the SCSI mid-layer of any changes detected.
37
38 The version attribute is read-only and will return the driver version
39 and the controller firmware version.
40 For example:
41 driver: 0.9.13-370
42 firmware: 0.01-522
43
44 smartpqi sas device attributes
45 ------------------------------
46 HBA devices are added to the SAS transport layer. These attributes are
47 automatically added by the SAS transport layer.
48
49 /sys/class/sas_device/end_device-X:X/sas_address
50 /sys/class/sas_device/end_device-X:X/enclosure_identifier
51 /sys/class/sas_device/end_device-X:X/scsi_target_id
52
53smartpqi specific ioctls:
54-------------------------
55
56 For compatibility with applications written for the cciss protocol.
57
58 CCISS_DEREGDISK
59 CCISS_REGNEWDISK
60 CCISS_REGNEWD
61
62 The above three ioctls all do exactly the same thing, which is to cause the driver
63 to rescan for new devices. This does exactly the same thing as writing to the
64 smartpqi specific host "rescan" attribute.
65
66 CCISS_GETPCIINFO
67
68 Returns PCI domain, bus, device and function and "board ID" (PCI subsystem ID).
69
70 CCISS_GETDRIVVER
71
72 Returns driver version in three bytes encoded as:
73 (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) | (DRIVER_RELEASE << 16) | DRIVER_REVISION;
74
75 CCISS_PASSTHRU
76
77 Allows "BMIC" and "CISS" commands to be passed through to the Smart Storage Array.
78 These are used extensively by the SSA Array Configuration Utility, SNMP storage
79 agents, etc.
80
diff --git a/MAINTAINERS b/MAINTAINERS
index 58c520517107..b9a0d48989ab 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7973,6 +7973,18 @@ W: http://www.melexis.com
7973S: Supported 7973S: Supported
7974F: drivers/iio/temperature/mlx90614.c 7974F: drivers/iio/temperature/mlx90614.c
7975 7975
7976MICROSEMI SMART ARRAY SMARTPQI DRIVER (smartpqi)
7977M: Don Brace <don.brace@microsemi.com>
7978L: esc.storagedev@microsemi.com
7979L: linux-scsi@vger.kernel.org
7980S: Supported
7981F: drivers/scsi/smartpqi/smartpqi*.[ch]
7982F: drivers/scsi/smartpqi/Kconfig
7983F: drivers/scsi/smartpqi/Makefile
7984F: include/linux/cciss*.h
7985F: include/uapi/linux/cciss*.h
7986F: Documentation/scsi/smartpqi.txt
7987
7976MN88472 MEDIA DRIVER 7988MN88472 MEDIA DRIVER
7977M: Antti Palosaari <crope@iki.fi> 7989M: Antti Palosaari <crope@iki.fi>
7978L: linux-media@vger.kernel.org 7990L: linux-media@vger.kernel.org
@@ -8185,20 +8197,16 @@ M: Michael Schmitz <schmitzmic@gmail.com>
8185L: linux-scsi@vger.kernel.org 8197L: linux-scsi@vger.kernel.org
8186S: Maintained 8198S: Maintained
8187F: Documentation/scsi/g_NCR5380.txt 8199F: Documentation/scsi/g_NCR5380.txt
8188F: Documentation/scsi/dtc3x80.txt
8189F: drivers/scsi/NCR5380.* 8200F: drivers/scsi/NCR5380.*
8190F: drivers/scsi/arm/cumana_1.c 8201F: drivers/scsi/arm/cumana_1.c
8191F: drivers/scsi/arm/oak.c 8202F: drivers/scsi/arm/oak.c
8192F: drivers/scsi/atari_scsi.* 8203F: drivers/scsi/atari_scsi.*
8193F: drivers/scsi/dmx3191d.c 8204F: drivers/scsi/dmx3191d.c
8194F: drivers/scsi/dtc.*
8195F: drivers/scsi/g_NCR5380.* 8205F: drivers/scsi/g_NCR5380.*
8196F: drivers/scsi/g_NCR5380_mmio.c 8206F: drivers/scsi/g_NCR5380_mmio.c
8197F: drivers/scsi/mac_scsi.* 8207F: drivers/scsi/mac_scsi.*
8198F: drivers/scsi/pas16.*
8199F: drivers/scsi/sun3_scsi.* 8208F: drivers/scsi/sun3_scsi.*
8200F: drivers/scsi/sun3_scsi_vme.c 8209F: drivers/scsi/sun3_scsi_vme.c
8201F: drivers/scsi/t128.*
8202 8210
8203NCR DUAL 700 SCSI DRIVER (MICROCHANNEL) 8211NCR DUAL 700 SCSI DRIVER (MICROCHANNEL)
8204M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com> 8212M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
@@ -10740,12 +10748,12 @@ S: Maintained
10740F: drivers/misc/phantom.c 10748F: drivers/misc/phantom.c
10741F: include/uapi/linux/phantom.h 10749F: include/uapi/linux/phantom.h
10742 10750
10743SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER 10751Emulex 10Gbps iSCSI - OneConnect DRIVER
10744M: Jayamohan Kallickal <jayamohan.kallickal@avagotech.com> 10752M: Subbu Seetharaman <subbu.seetharaman@broadcom.com>
10745M: Ketan Mukadam <ketan.mukadam@avagotech.com> 10753M: Ketan Mukadam <ketan.mukadam@broadcom.com>
10746M: John Soni Jose <sony.john@avagotech.com> 10754M: Jitendra Bhivare <jitendra.bhivare@broadcom.com>
10747L: linux-scsi@vger.kernel.org 10755L: linux-scsi@vger.kernel.org
10748W: http://www.avagotech.com 10756W: http://www.broadcom.com
10749S: Supported 10757S: Supported
10750F: drivers/scsi/be2iscsi/ 10758F: drivers/scsi/be2iscsi/
10751 10759
@@ -12143,12 +12151,6 @@ S: Maintained
12143F: drivers/tc/ 12151F: drivers/tc/
12144F: include/linux/tc.h 12152F: include/linux/tc.h
12145 12153
12146U14-34F SCSI DRIVER
12147M: Dario Ballabio <ballabio_dario@emc.com>
12148L: linux-scsi@vger.kernel.org
12149S: Maintained
12150F: drivers/scsi/u14-34f.c
12151
12152UBI FILE SYSTEM (UBIFS) 12154UBI FILE SYSTEM (UBIFS)
12153M: Richard Weinberger <richard@nod.at> 12155M: Richard Weinberger <richard@nod.at>
12154M: Artem Bityutskiy <dedekind1@gmail.com> 12156M: Artem Bityutskiy <dedekind1@gmail.com>
@@ -12876,12 +12878,6 @@ F: drivers/watchdog/
12876F: include/linux/watchdog.h 12878F: include/linux/watchdog.h
12877F: include/uapi/linux/watchdog.h 12879F: include/uapi/linux/watchdog.h
12878 12880
12879WD7000 SCSI DRIVER
12880M: Miroslav Zagorac <zaga@fly.cc.fer.hr>
12881L: linux-scsi@vger.kernel.org
12882S: Maintained
12883F: drivers/scsi/wd7000.c
12884
12885WIIMOTE HID DRIVER 12881WIIMOTE HID DRIVER
12886M: David Herrmann <dh.herrmann@googlemail.com> 12882M: David Herrmann <dh.herrmann@googlemail.com>
12887L: linux-input@vger.kernel.org 12883L: linux-input@vger.kernel.org
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 5537f8df8512..89c7ed16b4df 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1865,8 +1865,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1865 1865
1866 snprintf(ioc->reset_work_q_name, MPT_KOBJ_NAME_LEN, 1866 snprintf(ioc->reset_work_q_name, MPT_KOBJ_NAME_LEN,
1867 "mpt_poll_%d", ioc->id); 1867 "mpt_poll_%d", ioc->id);
1868 ioc->reset_work_q = 1868 ioc->reset_work_q = alloc_workqueue(ioc->reset_work_q_name,
1869 create_singlethread_workqueue(ioc->reset_work_q_name); 1869 WQ_MEM_RECLAIM, 0);
1870 if (!ioc->reset_work_q) { 1870 if (!ioc->reset_work_q) {
1871 printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n", 1871 printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
1872 ioc->name); 1872 ioc->name);
@@ -1992,7 +1992,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1992 INIT_LIST_HEAD(&ioc->fw_event_list); 1992 INIT_LIST_HEAD(&ioc->fw_event_list);
1993 spin_lock_init(&ioc->fw_event_lock); 1993 spin_lock_init(&ioc->fw_event_lock);
1994 snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id); 1994 snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id);
1995 ioc->fw_event_q = create_singlethread_workqueue(ioc->fw_event_q_name); 1995 ioc->fw_event_q = alloc_workqueue(ioc->fw_event_q_name,
1996 WQ_MEM_RECLAIM, 0);
1996 if (!ioc->fw_event_q) { 1997 if (!ioc->fw_event_q) {
1997 printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n", 1998 printk(MYIOC_s_ERR_FMT "Insufficient memory to add adapter!\n",
1998 ioc->name); 1999 ioc->name);
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index d8bf84aef602..add6a3a6ef0d 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -1324,9 +1324,12 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1324 snprintf(ioc->fc_rescan_work_q_name, sizeof(ioc->fc_rescan_work_q_name), 1324 snprintf(ioc->fc_rescan_work_q_name, sizeof(ioc->fc_rescan_work_q_name),
1325 "mptfc_wq_%d", sh->host_no); 1325 "mptfc_wq_%d", sh->host_no);
1326 ioc->fc_rescan_work_q = 1326 ioc->fc_rescan_work_q =
1327 create_singlethread_workqueue(ioc->fc_rescan_work_q_name); 1327 alloc_ordered_workqueue(ioc->fc_rescan_work_q_name,
1328 if (!ioc->fc_rescan_work_q) 1328 WQ_MEM_RECLAIM);
1329 if (!ioc->fc_rescan_work_q) {
1330 error = -ENOMEM;
1329 goto out_mptfc_probe; 1331 goto out_mptfc_probe;
1332 }
1330 1333
1331 /* 1334 /*
1332 * Pre-fetch FC port WWN and stuff... 1335 * Pre-fetch FC port WWN and stuff...
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 5d7fbe4e907e..637cf8973c9e 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Debug traces for zfcp. 4 * Debug traces for zfcp.
5 * 5 *
6 * Copyright IBM Corp. 2002, 2013 6 * Copyright IBM Corp. 2002, 2016
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
@@ -65,7 +65,7 @@ void zfcp_dbf_pl_write(struct zfcp_dbf *dbf, void *data, u16 length, char *area,
65 * @tag: tag indicating which kind of unsolicited status has been received 65 * @tag: tag indicating which kind of unsolicited status has been received
66 * @req: request for which a response was received 66 * @req: request for which a response was received
67 */ 67 */
68void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req) 68void zfcp_dbf_hba_fsf_res(char *tag, int level, struct zfcp_fsf_req *req)
69{ 69{
70 struct zfcp_dbf *dbf = req->adapter->dbf; 70 struct zfcp_dbf *dbf = req->adapter->dbf;
71 struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix; 71 struct fsf_qtcb_prefix *q_pref = &req->qtcb->prefix;
@@ -85,6 +85,8 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
85 rec->u.res.req_issued = req->issued; 85 rec->u.res.req_issued = req->issued;
86 rec->u.res.prot_status = q_pref->prot_status; 86 rec->u.res.prot_status = q_pref->prot_status;
87 rec->u.res.fsf_status = q_head->fsf_status; 87 rec->u.res.fsf_status = q_head->fsf_status;
88 rec->u.res.port_handle = q_head->port_handle;
89 rec->u.res.lun_handle = q_head->lun_handle;
88 90
89 memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual, 91 memcpy(rec->u.res.prot_status_qual, &q_pref->prot_status_qual,
90 FSF_PROT_STATUS_QUAL_SIZE); 92 FSF_PROT_STATUS_QUAL_SIZE);
@@ -97,7 +99,7 @@ void zfcp_dbf_hba_fsf_res(char *tag, struct zfcp_fsf_req *req)
97 rec->pl_len, "fsf_res", req->req_id); 99 rec->pl_len, "fsf_res", req->req_id);
98 } 100 }
99 101
100 debug_event(dbf->hba, 1, rec, sizeof(*rec)); 102 debug_event(dbf->hba, level, rec, sizeof(*rec));
101 spin_unlock_irqrestore(&dbf->hba_lock, flags); 103 spin_unlock_irqrestore(&dbf->hba_lock, flags);
102} 104}
103 105
@@ -241,7 +243,8 @@ static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
241 if (sdev) { 243 if (sdev) {
242 rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status); 244 rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status);
243 rec->lun = zfcp_scsi_dev_lun(sdev); 245 rec->lun = zfcp_scsi_dev_lun(sdev);
244 } 246 } else
247 rec->lun = ZFCP_DBF_INVALID_LUN;
245} 248}
246 249
247/** 250/**
@@ -320,13 +323,48 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
320 spin_unlock_irqrestore(&dbf->rec_lock, flags); 323 spin_unlock_irqrestore(&dbf->rec_lock, flags);
321} 324}
322 325
326/**
327 * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
328 * @tag: identifier for event
329 * @wka_port: well known address port
330 * @req_id: request ID to correlate with potential HBA trace record
331 */
332void zfcp_dbf_rec_run_wka(char *tag, struct zfcp_fc_wka_port *wka_port,
333 u64 req_id)
334{
335 struct zfcp_dbf *dbf = wka_port->adapter->dbf;
336 struct zfcp_dbf_rec *rec = &dbf->rec_buf;
337 unsigned long flags;
338
339 spin_lock_irqsave(&dbf->rec_lock, flags);
340 memset(rec, 0, sizeof(*rec));
341
342 rec->id = ZFCP_DBF_REC_RUN;
343 memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
344 rec->port_status = wka_port->status;
345 rec->d_id = wka_port->d_id;
346 rec->lun = ZFCP_DBF_INVALID_LUN;
347
348 rec->u.run.fsf_req_id = req_id;
349 rec->u.run.rec_status = ~0;
350 rec->u.run.rec_step = ~0;
351 rec->u.run.rec_action = ~0;
352 rec->u.run.rec_count = ~0;
353
354 debug_event(dbf->rec, 1, rec, sizeof(*rec));
355 spin_unlock_irqrestore(&dbf->rec_lock, flags);
356}
357
323static inline 358static inline
324void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len, 359void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf,
325 u64 req_id, u32 d_id) 360 char *paytag, struct scatterlist *sg, u8 id, u16 len,
361 u64 req_id, u32 d_id, u16 cap_len)
326{ 362{
327 struct zfcp_dbf_san *rec = &dbf->san_buf; 363 struct zfcp_dbf_san *rec = &dbf->san_buf;
328 u16 rec_len; 364 u16 rec_len;
329 unsigned long flags; 365 unsigned long flags;
366 struct zfcp_dbf_pay *payload = &dbf->pay_buf;
367 u16 pay_sum = 0;
330 368
331 spin_lock_irqsave(&dbf->san_lock, flags); 369 spin_lock_irqsave(&dbf->san_lock, flags);
332 memset(rec, 0, sizeof(*rec)); 370 memset(rec, 0, sizeof(*rec));
@@ -334,10 +372,41 @@ void zfcp_dbf_san(char *tag, struct zfcp_dbf *dbf, void *data, u8 id, u16 len,
334 rec->id = id; 372 rec->id = id;
335 rec->fsf_req_id = req_id; 373 rec->fsf_req_id = req_id;
336 rec->d_id = d_id; 374 rec->d_id = d_id;
337 rec_len = min(len, (u16)ZFCP_DBF_SAN_MAX_PAYLOAD);
338 memcpy(rec->payload, data, rec_len);
339 memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN); 375 memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
376 rec->pl_len = len; /* full length even if we cap pay below */
377 if (!sg)
378 goto out;
379 rec_len = min_t(unsigned int, sg->length, ZFCP_DBF_SAN_MAX_PAYLOAD);
380 memcpy(rec->payload, sg_virt(sg), rec_len); /* part of 1st sg entry */
381 if (len <= rec_len)
382 goto out; /* skip pay record if full content in rec->payload */
383
384 /* if (len > rec_len):
385 * dump data up to cap_len ignoring small duplicate in rec->payload
386 */
387 spin_lock_irqsave(&dbf->pay_lock, flags);
388 memset(payload, 0, sizeof(*payload));
389 memcpy(payload->area, paytag, ZFCP_DBF_TAG_LEN);
390 payload->fsf_req_id = req_id;
391 payload->counter = 0;
392 for (; sg && pay_sum < cap_len; sg = sg_next(sg)) {
393 u16 pay_len, offset = 0;
394
395 while (offset < sg->length && pay_sum < cap_len) {
396 pay_len = min((u16)ZFCP_DBF_PAY_MAX_REC,
397 (u16)(sg->length - offset));
398 /* cap_len <= pay_sum < cap_len+ZFCP_DBF_PAY_MAX_REC */
399 memcpy(payload->data, sg_virt(sg) + offset, pay_len);
400 debug_event(dbf->pay, 1, payload,
401 zfcp_dbf_plen(pay_len));
402 payload->counter++;
403 offset += pay_len;
404 pay_sum += pay_len;
405 }
406 }
407 spin_unlock(&dbf->pay_lock);
340 408
409out:
341 debug_event(dbf->san, 1, rec, sizeof(*rec)); 410 debug_event(dbf->san, 1, rec, sizeof(*rec));
342 spin_unlock_irqrestore(&dbf->san_lock, flags); 411 spin_unlock_irqrestore(&dbf->san_lock, flags);
343} 412}
@@ -354,9 +423,62 @@ void zfcp_dbf_san_req(char *tag, struct zfcp_fsf_req *fsf, u32 d_id)
354 struct zfcp_fsf_ct_els *ct_els = fsf->data; 423 struct zfcp_fsf_ct_els *ct_els = fsf->data;
355 u16 length; 424 u16 length;
356 425
357 length = (u16)(ct_els->req->length + FC_CT_HDR_LEN); 426 length = (u16)zfcp_qdio_real_bytes(ct_els->req);
358 zfcp_dbf_san(tag, dbf, sg_virt(ct_els->req), ZFCP_DBF_SAN_REQ, length, 427 zfcp_dbf_san(tag, dbf, "san_req", ct_els->req, ZFCP_DBF_SAN_REQ,
359 fsf->req_id, d_id); 428 length, fsf->req_id, d_id, length);
429}
430
431static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
432 struct zfcp_fsf_req *fsf,
433 u16 len)
434{
435 struct zfcp_fsf_ct_els *ct_els = fsf->data;
436 struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
437 struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
438 struct scatterlist *resp_entry = ct_els->resp;
439 struct fc_gpn_ft_resp *acc;
440 int max_entries, x, last = 0;
441
442 if (!(memcmp(tag, "fsscth2", 7) == 0
443 && ct_els->d_id == FC_FID_DIR_SERV
444 && reqh->ct_rev == FC_CT_REV
445 && reqh->ct_in_id[0] == 0
446 && reqh->ct_in_id[1] == 0
447 && reqh->ct_in_id[2] == 0
448 && reqh->ct_fs_type == FC_FST_DIR
449 && reqh->ct_fs_subtype == FC_NS_SUBTYPE
450 && reqh->ct_options == 0
451 && reqh->_ct_resvd1 == 0
452 && reqh->ct_cmd == FC_NS_GPN_FT
453 /* reqh->ct_mr_size can vary so do not match but read below */
454 && reqh->_ct_resvd2 == 0
455 && reqh->ct_reason == 0
456 && reqh->ct_explan == 0
457 && reqh->ct_vendor == 0
458 && reqn->fn_resvd == 0
459 && reqn->fn_domain_id_scope == 0
460 && reqn->fn_area_id_scope == 0
461 && reqn->fn_fc4_type == FC_TYPE_FCP))
462 return len; /* not GPN_FT response so do not cap */
463
464 acc = sg_virt(resp_entry);
465 max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
466 + 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
467 * to account for header as 1st pseudo "entry" */;
468
469 /* the basic CT_IU preamble is the same size as one entry in the GPN_FT
470 * response, allowing us to skip special handling for it - just skip it
471 */
472 for (x = 1; x < max_entries && !last; x++) {
473 if (x % (ZFCP_FC_GPN_FT_ENT_PAGE + 1))
474 acc++;
475 else
476 acc = sg_virt(++resp_entry);
477
478 last = acc->fp_flags & FC_NS_FID_LAST;
479 }
480 len = min(len, (u16)(x * sizeof(struct fc_gpn_ft_resp)));
481 return len; /* cap after last entry */
360} 482}
361 483
362/** 484/**
@@ -370,9 +492,10 @@ void zfcp_dbf_san_res(char *tag, struct zfcp_fsf_req *fsf)
370 struct zfcp_fsf_ct_els *ct_els = fsf->data; 492 struct zfcp_fsf_ct_els *ct_els = fsf->data;
371 u16 length; 493 u16 length;
372 494
373 length = (u16)(ct_els->resp->length + FC_CT_HDR_LEN); 495 length = (u16)zfcp_qdio_real_bytes(ct_els->resp);
374 zfcp_dbf_san(tag, dbf, sg_virt(ct_els->resp), ZFCP_DBF_SAN_RES, length, 496 zfcp_dbf_san(tag, dbf, "san_res", ct_els->resp, ZFCP_DBF_SAN_RES,
375 fsf->req_id, 0); 497 length, fsf->req_id, ct_els->d_id,
498 zfcp_dbf_san_res_cap_len_if_gpn_ft(tag, fsf, length));
376} 499}
377 500
378/** 501/**
@@ -386,11 +509,13 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
386 struct fsf_status_read_buffer *srb = 509 struct fsf_status_read_buffer *srb =
387 (struct fsf_status_read_buffer *) fsf->data; 510 (struct fsf_status_read_buffer *) fsf->data;
388 u16 length; 511 u16 length;
512 struct scatterlist sg;
389 513
390 length = (u16)(srb->length - 514 length = (u16)(srb->length -
391 offsetof(struct fsf_status_read_buffer, payload)); 515 offsetof(struct fsf_status_read_buffer, payload));
392 zfcp_dbf_san(tag, dbf, srb->payload.data, ZFCP_DBF_SAN_ELS, length, 516 sg_init_one(&sg, srb->payload.data, length);
393 fsf->req_id, ntoh24(srb->d_id)); 517 zfcp_dbf_san(tag, dbf, "san_els", &sg, ZFCP_DBF_SAN_ELS, length,
518 fsf->req_id, ntoh24(srb->d_id), length);
394} 519}
395 520
396/** 521/**
@@ -399,7 +524,8 @@ void zfcp_dbf_san_in_els(char *tag, struct zfcp_fsf_req *fsf)
399 * @sc: pointer to struct scsi_cmnd 524 * @sc: pointer to struct scsi_cmnd
400 * @fsf: pointer to struct zfcp_fsf_req 525 * @fsf: pointer to struct zfcp_fsf_req
401 */ 526 */
402void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf) 527void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
528 struct zfcp_fsf_req *fsf)
403{ 529{
404 struct zfcp_adapter *adapter = 530 struct zfcp_adapter *adapter =
405 (struct zfcp_adapter *) sc->device->host->hostdata[0]; 531 (struct zfcp_adapter *) sc->device->host->hostdata[0];
@@ -442,7 +568,7 @@ void zfcp_dbf_scsi(char *tag, struct scsi_cmnd *sc, struct zfcp_fsf_req *fsf)
442 } 568 }
443 } 569 }
444 570
445 debug_event(dbf->scsi, 1, rec, sizeof(*rec)); 571 debug_event(dbf->scsi, level, rec, sizeof(*rec));
446 spin_unlock_irqrestore(&dbf->scsi_lock, flags); 572 spin_unlock_irqrestore(&dbf->scsi_lock, flags);
447} 573}
448 574
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 0be3d48681ae..36d07584271d 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -2,7 +2,7 @@
2 * zfcp device driver 2 * zfcp device driver
3 * debug feature declarations 3 * debug feature declarations
4 * 4 *
5 * Copyright IBM Corp. 2008, 2010 5 * Copyright IBM Corp. 2008, 2015
6 */ 6 */
7 7
8#ifndef ZFCP_DBF_H 8#ifndef ZFCP_DBF_H
@@ -17,6 +17,11 @@
17 17
18#define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull 18#define ZFCP_DBF_INVALID_LUN 0xFFFFFFFFFFFFFFFFull
19 19
20enum zfcp_dbf_pseudo_erp_act_type {
21 ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD = 0xff,
22 ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL = 0xfe,
23};
24
20/** 25/**
21 * struct zfcp_dbf_rec_trigger - trace record for triggered recovery action 26 * struct zfcp_dbf_rec_trigger - trace record for triggered recovery action
22 * @ready: number of ready recovery actions 27 * @ready: number of ready recovery actions
@@ -110,6 +115,7 @@ struct zfcp_dbf_san {
110 u32 d_id; 115 u32 d_id;
111#define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32) 116#define ZFCP_DBF_SAN_MAX_PAYLOAD (FC_CT_HDR_LEN + 32)
112 char payload[ZFCP_DBF_SAN_MAX_PAYLOAD]; 117 char payload[ZFCP_DBF_SAN_MAX_PAYLOAD];
118 u16 pl_len;
113} __packed; 119} __packed;
114 120
115/** 121/**
@@ -126,6 +132,8 @@ struct zfcp_dbf_hba_res {
126 u8 prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE]; 132 u8 prot_status_qual[FSF_PROT_STATUS_QUAL_SIZE];
127 u32 fsf_status; 133 u32 fsf_status;
128 u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE]; 134 u8 fsf_status_qual[FSF_STATUS_QUALIFIER_SIZE];
135 u32 port_handle;
136 u32 lun_handle;
129} __packed; 137} __packed;
130 138
131/** 139/**
@@ -279,7 +287,7 @@ static inline
279void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req) 287void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
280{ 288{
281 if (debug_level_enabled(req->adapter->dbf->hba, level)) 289 if (debug_level_enabled(req->adapter->dbf->hba, level))
282 zfcp_dbf_hba_fsf_res(tag, req); 290 zfcp_dbf_hba_fsf_res(tag, level, req);
283} 291}
284 292
285/** 293/**
@@ -318,7 +326,7 @@ void _zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *scmd,
318 scmd->device->host->hostdata[0]; 326 scmd->device->host->hostdata[0];
319 327
320 if (debug_level_enabled(adapter->dbf->scsi, level)) 328 if (debug_level_enabled(adapter->dbf->scsi, level))
321 zfcp_dbf_scsi(tag, scmd, req); 329 zfcp_dbf_scsi(tag, level, scmd, req);
322} 330}
323 331
324/** 332/**
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 3fb410977014..a59d678125bd 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Error Recovery Procedures (ERP). 4 * Error Recovery Procedures (ERP).
5 * 5 *
6 * Copyright IBM Corp. 2002, 2010 6 * Copyright IBM Corp. 2002, 2015
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
@@ -1217,8 +1217,14 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
1217 break; 1217 break;
1218 1218
1219 case ZFCP_ERP_ACTION_REOPEN_PORT: 1219 case ZFCP_ERP_ACTION_REOPEN_PORT:
1220 if (result == ZFCP_ERP_SUCCEEDED) 1220 /* This switch case might also happen after a forced reopen
1221 zfcp_scsi_schedule_rport_register(port); 1221 * was successfully done and thus overwritten with a new
1222 * non-forced reopen at `ersfs_2'. In this case, we must not
1223 * do the clean-up of the non-forced version.
1224 */
1225 if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
1226 if (result == ZFCP_ERP_SUCCEEDED)
1227 zfcp_scsi_schedule_rport_register(port);
1222 /* fall through */ 1228 /* fall through */
1223 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: 1229 case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
1224 put_device(&port->dev); 1230 put_device(&port->dev);
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 5b500652572b..c8fed9fa1cca 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * External function declarations. 4 * External function declarations.
5 * 5 *
6 * Copyright IBM Corp. 2002, 2010 6 * Copyright IBM Corp. 2002, 2015
7 */ 7 */
8 8
9#ifndef ZFCP_EXT_H 9#ifndef ZFCP_EXT_H
@@ -35,8 +35,9 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
35extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *, 35extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
36 struct zfcp_port *, struct scsi_device *, u8, u8); 36 struct zfcp_port *, struct scsi_device *, u8, u8);
37extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *); 37extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
38extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64);
38extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *); 39extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
39extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *); 40extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *);
40extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *); 41extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
41extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *); 42extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
42extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **); 43extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
@@ -44,7 +45,8 @@ extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
44extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32); 45extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
45extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *); 46extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
46extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *); 47extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
47extern void zfcp_dbf_scsi(char *, struct scsi_cmnd *, struct zfcp_fsf_req *); 48extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *,
49 struct zfcp_fsf_req *);
48 50
49/* zfcp_erp.c */ 51/* zfcp_erp.c */
50extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32); 52extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 522a633c866a..75f820ca17b7 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Implementation of FSF commands. 4 * Implementation of FSF commands.
5 * 5 *
6 * Copyright IBM Corp. 2002, 2013 6 * Copyright IBM Corp. 2002, 2015
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
@@ -508,7 +508,10 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
508 fc_host_port_type(shost) = FC_PORTTYPE_PTP; 508 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
509 break; 509 break;
510 case FSF_TOPO_FABRIC: 510 case FSF_TOPO_FABRIC:
511 fc_host_port_type(shost) = FC_PORTTYPE_NPORT; 511 if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
512 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
513 else
514 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
512 break; 515 break;
513 case FSF_TOPO_AL: 516 case FSF_TOPO_AL:
514 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT; 517 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
@@ -613,7 +616,6 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
613 616
614 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) { 617 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
615 fc_host_permanent_port_name(shost) = bottom->wwpn; 618 fc_host_permanent_port_name(shost) = bottom->wwpn;
616 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
617 } else 619 } else
618 fc_host_permanent_port_name(shost) = fc_host_port_name(shost); 620 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
619 fc_host_maxframe_size(shost) = bottom->maximum_frame_size; 621 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
@@ -982,8 +984,12 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
982 if (zfcp_adapter_multi_buffer_active(adapter)) { 984 if (zfcp_adapter_multi_buffer_active(adapter)) {
983 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req)) 985 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
984 return -EIO; 986 return -EIO;
987 qtcb->bottom.support.req_buf_length =
988 zfcp_qdio_real_bytes(sg_req);
985 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp)) 989 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
986 return -EIO; 990 return -EIO;
991 qtcb->bottom.support.resp_buf_length =
992 zfcp_qdio_real_bytes(sg_resp);
987 993
988 zfcp_qdio_set_data_div(qdio, &req->qdio_req, 994 zfcp_qdio_set_data_div(qdio, &req->qdio_req,
989 zfcp_qdio_sbale_count(sg_req)); 995 zfcp_qdio_sbale_count(sg_req));
@@ -1073,6 +1079,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1073 1079
1074 req->handler = zfcp_fsf_send_ct_handler; 1080 req->handler = zfcp_fsf_send_ct_handler;
1075 req->qtcb->header.port_handle = wka_port->handle; 1081 req->qtcb->header.port_handle = wka_port->handle;
1082 ct->d_id = wka_port->d_id;
1076 req->data = ct; 1083 req->data = ct;
1077 1084
1078 zfcp_dbf_san_req("fssct_1", req, wka_port->d_id); 1085 zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
@@ -1169,6 +1176,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1169 1176
1170 hton24(req->qtcb->bottom.support.d_id, d_id); 1177 hton24(req->qtcb->bottom.support.d_id, d_id);
1171 req->handler = zfcp_fsf_send_els_handler; 1178 req->handler = zfcp_fsf_send_els_handler;
1179 els->d_id = d_id;
1172 req->data = els; 1180 req->data = els;
1173 1181
1174 zfcp_dbf_san_req("fssels1", req, d_id); 1182 zfcp_dbf_san_req("fssels1", req, d_id);
@@ -1575,7 +1583,7 @@ out:
1575int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port) 1583int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1576{ 1584{
1577 struct zfcp_qdio *qdio = wka_port->adapter->qdio; 1585 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1578 struct zfcp_fsf_req *req; 1586 struct zfcp_fsf_req *req = NULL;
1579 int retval = -EIO; 1587 int retval = -EIO;
1580 1588
1581 spin_lock_irq(&qdio->req_q_lock); 1589 spin_lock_irq(&qdio->req_q_lock);
@@ -1604,6 +1612,8 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1604 zfcp_fsf_req_free(req); 1612 zfcp_fsf_req_free(req);
1605out: 1613out:
1606 spin_unlock_irq(&qdio->req_q_lock); 1614 spin_unlock_irq(&qdio->req_q_lock);
1615 if (req && !IS_ERR(req))
1616 zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
1607 return retval; 1617 return retval;
1608} 1618}
1609 1619
@@ -1628,7 +1638,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1628int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port) 1638int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1629{ 1639{
1630 struct zfcp_qdio *qdio = wka_port->adapter->qdio; 1640 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1631 struct zfcp_fsf_req *req; 1641 struct zfcp_fsf_req *req = NULL;
1632 int retval = -EIO; 1642 int retval = -EIO;
1633 1643
1634 spin_lock_irq(&qdio->req_q_lock); 1644 spin_lock_irq(&qdio->req_q_lock);
@@ -1657,6 +1667,8 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1657 zfcp_fsf_req_free(req); 1667 zfcp_fsf_req_free(req);
1658out: 1668out:
1659 spin_unlock_irq(&qdio->req_q_lock); 1669 spin_unlock_irq(&qdio->req_q_lock);
1670 if (req && !IS_ERR(req))
1671 zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
1660 return retval; 1672 return retval;
1661} 1673}
1662 1674
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 57ae3ae1046d..be1c04b334c5 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Interface to the FSF support functions. 4 * Interface to the FSF support functions.
5 * 5 *
6 * Copyright IBM Corp. 2002, 2010 6 * Copyright IBM Corp. 2002, 2015
7 */ 7 */
8 8
9#ifndef FSF_H 9#ifndef FSF_H
@@ -436,6 +436,7 @@ struct zfcp_blk_drv_data {
436 * @handler_data: data passed to handler function 436 * @handler_data: data passed to handler function
437 * @port: Optional pointer to port for zfcp internal ELS (only test link ADISC) 437 * @port: Optional pointer to port for zfcp internal ELS (only test link ADISC)
438 * @status: used to pass error status to calling function 438 * @status: used to pass error status to calling function
439 * @d_id: Destination ID of either open WKA port for CT or of D_ID for ELS
439 */ 440 */
440struct zfcp_fsf_ct_els { 441struct zfcp_fsf_ct_els {
441 struct scatterlist *req; 442 struct scatterlist *req;
@@ -444,6 +445,7 @@ struct zfcp_fsf_ct_els {
444 void *handler_data; 445 void *handler_data;
445 struct zfcp_port *port; 446 struct zfcp_port *port;
446 int status; 447 int status;
448 u32 d_id;
447}; 449};
448 450
449#endif /* FSF_H */ 451#endif /* FSF_H */
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index b3c6ff49103b..9069f98a1817 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Interface to Linux SCSI midlayer. 4 * Interface to Linux SCSI midlayer.
5 * 5 *
6 * Copyright IBM Corp. 2002, 2013 6 * Copyright IBM Corp. 2002, 2015
7 */ 7 */
8 8
9#define KMSG_COMPONENT "zfcp" 9#define KMSG_COMPONENT "zfcp"
@@ -556,6 +556,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
556 ids.port_id = port->d_id; 556 ids.port_id = port->d_id;
557 ids.roles = FC_RPORT_ROLE_FCP_TARGET; 557 ids.roles = FC_RPORT_ROLE_FCP_TARGET;
558 558
559 zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
560 ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
561 ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
559 rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids); 562 rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
560 if (!rport) { 563 if (!rport) {
561 dev_err(&port->adapter->ccw_device->dev, 564 dev_err(&port->adapter->ccw_device->dev,
@@ -577,6 +580,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
577 struct fc_rport *rport = port->rport; 580 struct fc_rport *rport = port->rport;
578 581
579 if (rport) { 582 if (rport) {
583 zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
584 ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
585 ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
580 fc_remote_port_delete(rport); 586 fc_remote_port_delete(rport);
581 port->rport = NULL; 587 port->rport = NULL;
582 } 588 }
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 7d1b4317eccc..3e2bdb90813c 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -396,18 +396,6 @@ config SCSI_3W_SAS
396 Please read the comments at the top of 396 Please read the comments at the top of
397 <file:drivers/scsi/3w-sas.c>. 397 <file:drivers/scsi/3w-sas.c>.
398 398
399config SCSI_7000FASST
400 tristate "7000FASST SCSI support"
401 depends on ISA && SCSI && ISA_DMA_API
402 select CHECK_SIGNATURE
403 help
404 This driver supports the Western Digital 7000 SCSI host adapter
405 family. Some information is in the source:
406 <file:drivers/scsi/wd7000.c>.
407
408 To compile this driver as a module, choose M here: the
409 module will be called wd7000.
410
411config SCSI_ACARD 399config SCSI_ACARD
412 tristate "ACARD SCSI support" 400 tristate "ACARD SCSI support"
413 depends on PCI && SCSI 401 depends on PCI && SCSI
@@ -512,18 +500,6 @@ config SCSI_ADVANSYS
512 To compile this driver as a module, choose M here: the 500 To compile this driver as a module, choose M here: the
513 module will be called advansys. 501 module will be called advansys.
514 502
515config SCSI_IN2000
516 tristate "Always IN2000 SCSI support"
517 depends on ISA && SCSI
518 help
519 This is support for an ISA bus SCSI host adapter. You'll find more
520 information in <file:Documentation/scsi/in2000.txt>. If it doesn't work
521 out of the box, you may have to change the jumpers for IRQ or
522 address selection.
523
524 To compile this driver as a module, choose M here: the
525 module will be called in2000.
526
527config SCSI_ARCMSR 503config SCSI_ARCMSR
528 tristate "ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID Host Adapter" 504 tristate "ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID Host Adapter"
529 depends on PCI && SCSI 505 depends on PCI && SCSI
@@ -540,6 +516,7 @@ config SCSI_ARCMSR
540source "drivers/scsi/esas2r/Kconfig" 516source "drivers/scsi/esas2r/Kconfig"
541source "drivers/scsi/megaraid/Kconfig.megaraid" 517source "drivers/scsi/megaraid/Kconfig.megaraid"
542source "drivers/scsi/mpt3sas/Kconfig" 518source "drivers/scsi/mpt3sas/Kconfig"
519source "drivers/scsi/smartpqi/Kconfig"
543source "drivers/scsi/ufs/Kconfig" 520source "drivers/scsi/ufs/Kconfig"
544 521
545config SCSI_HPTIOP 522config SCSI_HPTIOP
@@ -660,20 +637,6 @@ config SCSI_DMX3191D
660 To compile this driver as a module, choose M here: the 637 To compile this driver as a module, choose M here: the
661 module will be called dmx3191d. 638 module will be called dmx3191d.
662 639
663config SCSI_DTC3280
664 tristate "DTC3180/3280 SCSI support"
665 depends on ISA && SCSI
666 select SCSI_SPI_ATTRS
667 select CHECK_SIGNATURE
668 help
669 This is support for DTC 3180/3280 SCSI Host Adapters. Please read
670 the SCSI-HOWTO, available from
671 <http://www.tldp.org/docs.html#howto>, and the file
672 <file:Documentation/scsi/dtc3x80.txt>.
673
674 To compile this driver as a module, choose M here: the
675 module will be called dtc.
676
677config SCSI_EATA 640config SCSI_EATA
678 tristate "EATA ISA/EISA/PCI (DPT and generic EATA/DMA-compliant boards) support" 641 tristate "EATA ISA/EISA/PCI (DPT and generic EATA/DMA-compliant boards) support"
679 depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API 642 depends on (ISA || EISA || PCI) && SCSI && ISA_DMA_API
@@ -1248,20 +1211,6 @@ config SCSI_NCR53C8XX_NO_DISCONNECT
1248 not allow targets to disconnect is not reasonable if there is more 1211 not allow targets to disconnect is not reasonable if there is more
1249 than 1 device on a SCSI bus. The normal answer therefore is N. 1212 than 1 device on a SCSI bus. The normal answer therefore is N.
1250 1213
1251config SCSI_PAS16
1252 tristate "PAS16 SCSI support"
1253 depends on ISA && SCSI
1254 select SCSI_SPI_ATTRS
1255 ---help---
1256 This is support for a SCSI host adapter. It is explained in section
1257 3.10 of the SCSI-HOWTO, available from
1258 <http://www.tldp.org/docs.html#howto>. If it doesn't work out
1259 of the box, you may have to change some settings in
1260 <file:drivers/scsi/pas16.h>.
1261
1262 To compile this driver as a module, choose M here: the
1263 module will be called pas16.
1264
1265config SCSI_QLOGIC_FAS 1214config SCSI_QLOGIC_FAS
1266 tristate "Qlogic FAS SCSI support" 1215 tristate "Qlogic FAS SCSI support"
1267 depends on ISA && SCSI 1216 depends on ISA && SCSI
@@ -1382,89 +1331,6 @@ config SCSI_AM53C974
1382 To compile this driver as a module, choose M here: the 1331 To compile this driver as a module, choose M here: the
1383 module will be called am53c974. 1332 module will be called am53c974.
1384 1333
1385config SCSI_T128
1386 tristate "Trantor T128/T128F/T228 SCSI support"
1387 depends on ISA && SCSI
1388 select SCSI_SPI_ATTRS
1389 select CHECK_SIGNATURE
1390 ---help---
1391 This is support for a SCSI host adapter. It is explained in section
1392 3.11 of the SCSI-HOWTO, available from
1393 <http://www.tldp.org/docs.html#howto>. If it doesn't work out
1394 of the box, you may have to change some settings in
1395 <file:drivers/scsi/t128.h>. Note that Trantor was purchased by
1396 Adaptec, and some former Trantor products are being sold under the
1397 Adaptec name.
1398
1399 To compile this driver as a module, choose M here: the
1400 module will be called t128.
1401
1402config SCSI_U14_34F
1403 tristate "UltraStor 14F/34F support"
1404 depends on ISA && SCSI && ISA_DMA_API
1405 ---help---
1406 This is support for the UltraStor 14F and 34F SCSI-2 host adapters.
1407 The source at <file:drivers/scsi/u14-34f.c> contains some
1408 information about this hardware. If the driver doesn't work out of
1409 the box, you may have to change some settings in
1410 <file: drivers/scsi/u14-34f.c>. Read the SCSI-HOWTO, available from
1411 <http://www.tldp.org/docs.html#howto>. Note that there is also
1412 another driver for the same hardware: "UltraStor SCSI support",
1413 below. You should say Y to both only if you want 24F support as
1414 well.
1415
1416 To compile this driver as a module, choose M here: the
1417 module will be called u14-34f.
1418
1419config SCSI_U14_34F_TAGGED_QUEUE
1420 bool "enable tagged command queueing"
1421 depends on SCSI_U14_34F
1422 help
1423 This is a feature of SCSI-2 which improves performance: the host
1424 adapter can send several SCSI commands to a device's queue even if
1425 previous commands haven't finished yet.
1426 This is equivalent to the "u14-34f=tc:y" boot option.
1427
1428config SCSI_U14_34F_LINKED_COMMANDS
1429 bool "enable elevator sorting"
1430 depends on SCSI_U14_34F
1431 help
1432 This option enables elevator sorting for all probed SCSI disks and
1433 CD-ROMs. It definitely reduces the average seek distance when doing
1434 random seeks, but this does not necessarily result in a noticeable
1435 performance improvement: your mileage may vary...
1436 This is equivalent to the "u14-34f=lc:y" boot option.
1437
1438config SCSI_U14_34F_MAX_TAGS
1439 int "maximum number of queued commands"
1440 depends on SCSI_U14_34F
1441 default "8"
1442 help
1443 This specifies how many SCSI commands can be maximally queued for
1444 each probed SCSI device. You should reduce the default value of 8
1445 only if you have disks with buggy or limited tagged command support.
1446 Minimum is 2 and maximum is 14. This value is also the window size
1447 used by the elevator sorting option above. The effective value used
1448 by the driver for each probed SCSI device is reported at boot time.
1449 This is equivalent to the "u14-34f=mq:8" boot option.
1450
1451config SCSI_ULTRASTOR
1452 tristate "UltraStor SCSI support"
1453 depends on X86 && ISA && SCSI && ISA_DMA_API
1454 ---help---
1455 This is support for the UltraStor 14F, 24F and 34F SCSI-2 host
1456 adapter family. This driver is explained in section 3.12 of the
1457 SCSI-HOWTO, available from
1458 <http://www.tldp.org/docs.html#howto>. If it doesn't work out
1459 of the box, you may have to change some settings in
1460 <file:drivers/scsi/ultrastor.h>.
1461
1462 Note that there is also another driver for the same hardware:
1463 "UltraStor 14F/34F support", above.
1464
1465 To compile this driver as a module, choose M here: the
1466 module will be called ultrastor.
1467
1468config SCSI_NSP32 1334config SCSI_NSP32
1469 tristate "Workbit NinjaSCSI-32Bi/UDE support" 1335 tristate "Workbit NinjaSCSI-32Bi/UDE support"
1470 depends on PCI && SCSI && !64BIT 1336 depends on PCI && SCSI && !64BIT
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index d5397987e731..38d938d7fe67 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -61,9 +61,7 @@ obj-$(CONFIG_SCSI_SIM710) += 53c700.o sim710.o
61obj-$(CONFIG_SCSI_ADVANSYS) += advansys.o 61obj-$(CONFIG_SCSI_ADVANSYS) += advansys.o
62obj-$(CONFIG_SCSI_BUSLOGIC) += BusLogic.o 62obj-$(CONFIG_SCSI_BUSLOGIC) += BusLogic.o
63obj-$(CONFIG_SCSI_DPT_I2O) += dpt_i2o.o 63obj-$(CONFIG_SCSI_DPT_I2O) += dpt_i2o.o
64obj-$(CONFIG_SCSI_U14_34F) += u14-34f.o
65obj-$(CONFIG_SCSI_ARCMSR) += arcmsr/ 64obj-$(CONFIG_SCSI_ARCMSR) += arcmsr/
66obj-$(CONFIG_SCSI_ULTRASTOR) += ultrastor.o
67obj-$(CONFIG_SCSI_AHA152X) += aha152x.o 65obj-$(CONFIG_SCSI_AHA152X) += aha152x.o
68obj-$(CONFIG_SCSI_AHA1542) += aha1542.o 66obj-$(CONFIG_SCSI_AHA1542) += aha1542.o
69obj-$(CONFIG_SCSI_AHA1740) += aha1740.o 67obj-$(CONFIG_SCSI_AHA1740) += aha1740.o
@@ -75,7 +73,6 @@ obj-$(CONFIG_SCSI_PM8001) += pm8001/
75obj-$(CONFIG_SCSI_ISCI) += isci/ 73obj-$(CONFIG_SCSI_ISCI) += isci/
76obj-$(CONFIG_SCSI_IPS) += ips.o 74obj-$(CONFIG_SCSI_IPS) += ips.o
77obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o 75obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
78obj-$(CONFIG_SCSI_IN2000) += in2000.o
79obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o 76obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o
80obj-$(CONFIG_SCSI_GENERIC_NCR5380_MMIO) += g_NCR5380_mmio.o 77obj-$(CONFIG_SCSI_GENERIC_NCR5380_MMIO) += g_NCR5380_mmio.o
81obj-$(CONFIG_SCSI_NCR53C406A) += NCR53c406a.o 78obj-$(CONFIG_SCSI_NCR53C406A) += NCR53c406a.o
@@ -90,15 +87,12 @@ obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/
90obj-$(CONFIG_SCSI_LPFC) += lpfc/ 87obj-$(CONFIG_SCSI_LPFC) += lpfc/
91obj-$(CONFIG_SCSI_BFA_FC) += bfa/ 88obj-$(CONFIG_SCSI_BFA_FC) += bfa/
92obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor/ 89obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor/
93obj-$(CONFIG_SCSI_PAS16) += pas16.o
94obj-$(CONFIG_SCSI_T128) += t128.o
95obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o 90obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
96obj-$(CONFIG_SCSI_HPSA) += hpsa.o 91obj-$(CONFIG_SCSI_HPSA) += hpsa.o
97obj-$(CONFIG_SCSI_DTC3280) += dtc.o 92obj-$(CONFIG_SCSI_SMARTPQI) += smartpqi/
98obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/ 93obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/
99obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o 94obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o
100obj-$(CONFIG_SCSI_EATA_PIO) += eata_pio.o 95obj-$(CONFIG_SCSI_EATA_PIO) += eata_pio.o
101obj-$(CONFIG_SCSI_7000FASST) += wd7000.o
102obj-$(CONFIG_SCSI_EATA) += eata.o 96obj-$(CONFIG_SCSI_EATA) += eata.o
103obj-$(CONFIG_SCSI_DC395x) += dc395x.o 97obj-$(CONFIG_SCSI_DC395x) += dc395x.o
104obj-$(CONFIG_SCSI_AM53C974) += esp_scsi.o am53c974.o 98obj-$(CONFIG_SCSI_AM53C974) += esp_scsi.o am53c974.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 43908bbb3b23..db2739079cbb 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -230,13 +230,6 @@ static int NCR5380_poll_politely2(struct Scsi_Host *instance,
230 return -ETIMEDOUT; 230 return -ETIMEDOUT;
231} 231}
232 232
233static inline int NCR5380_poll_politely(struct Scsi_Host *instance,
234 int reg, int bit, int val, int wait)
235{
236 return NCR5380_poll_politely2(instance, reg, bit, val,
237 reg, bit, val, wait);
238}
239
240#if NDEBUG 233#if NDEBUG
241static struct { 234static struct {
242 unsigned char mask; 235 unsigned char mask;
@@ -1854,11 +1847,11 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
1854 /* XXX - need to source or sink data here, as appropriate */ 1847 /* XXX - need to source or sink data here, as appropriate */
1855 } 1848 }
1856 } else { 1849 } else {
1857 /* Break up transfer into 3 ms chunks, 1850 /* Transfer a small chunk so that the
1858 * presuming 6 accesses per handshake. 1851 * irq mode lock is not held too long.
1859 */ 1852 */
1860 transfersize = min((unsigned long)cmd->SCp.this_residual, 1853 transfersize = min(cmd->SCp.this_residual,
1861 hostdata->accesses_per_ms / 2); 1854 NCR5380_PIO_CHUNK_SIZE);
1862 len = transfersize; 1855 len = transfersize;
1863 NCR5380_transfer_pio(instance, &phase, &len, 1856 NCR5380_transfer_pio(instance, &phase, &len,
1864 (unsigned char **)&cmd->SCp.ptr); 1857 (unsigned char **)&cmd->SCp.ptr);
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index c60728785d89..965d92339455 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -250,6 +250,8 @@ struct NCR5380_cmd {
250 250
251#define NCR5380_CMD_SIZE (sizeof(struct NCR5380_cmd)) 251#define NCR5380_CMD_SIZE (sizeof(struct NCR5380_cmd))
252 252
253#define NCR5380_PIO_CHUNK_SIZE 256
254
253static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd_ptr) 255static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd_ptr)
254{ 256{
255 return ((struct scsi_cmnd *)ncmd_ptr) - 1; 257 return ((struct scsi_cmnd *)ncmd_ptr) - 1;
@@ -292,8 +294,14 @@ static void NCR5380_reselect(struct Scsi_Host *instance);
292static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *); 294static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
293static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data); 295static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
294static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data); 296static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
295static int NCR5380_poll_politely(struct Scsi_Host *, int, int, int, int);
296static int NCR5380_poll_politely2(struct Scsi_Host *, int, int, int, int, int, int, int); 297static int NCR5380_poll_politely2(struct Scsi_Host *, int, int, int, int, int, int, int);
297 298
299static inline int NCR5380_poll_politely(struct Scsi_Host *instance,
300 int reg, int bit, int val, int wait)
301{
302 return NCR5380_poll_politely2(instance, reg, bit, val,
303 reg, bit, val, wait);
304}
305
298#endif /* __KERNEL__ */ 306#endif /* __KERNEL__ */
299#endif /* NCR5380_H */ 307#endif /* NCR5380_H */
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 28f8b8a1b8a4..0c453880f214 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -613,7 +613,7 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled)
613 * @dev: Adapter 613 * @dev: Adapter
614 * @comm: communications method 614 * @comm: communications method
615 */ 615 */
616int aac_src_select_comm(struct aac_dev *dev, int comm) 616static int aac_src_select_comm(struct aac_dev *dev, int comm)
617{ 617{
618 switch (comm) { 618 switch (comm) {
619 case AAC_COMM_MESSAGE: 619 case AAC_COMM_MESSAGE:
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c
index 0fdc98bc2338..7c713f797535 100644
--- a/drivers/scsi/aic94xx/aic94xx_hwi.c
+++ b/drivers/scsi/aic94xx/aic94xx_hwi.c
@@ -632,7 +632,7 @@ int asd_init_hw(struct asd_ha_struct *asd_ha)
632 pci_name(asd_ha->pcidev)); 632 pci_name(asd_ha->pcidev));
633 return err; 633 return err;
634 } 634 }
635 pci_write_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL, 635 err = pci_write_config_dword(asd_ha->pcidev, PCIC_HSTPCIX_CNTRL,
636 v | SC_TMR_DIS); 636 v | SC_TMR_DIS);
637 if (err) { 637 if (err) {
638 asd_printk("couldn't disable split completion timer of %s\n", 638 asd_printk("couldn't disable split completion timer of %s\n",
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 7640498964a5..3d53d636b17b 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2388,15 +2388,23 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
2388 } 2388 }
2389 case ARCMSR_MESSAGE_WRITE_WQBUFFER: { 2389 case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
2390 unsigned char *ver_addr; 2390 unsigned char *ver_addr;
2391 int32_t user_len, cnt2end; 2391 uint32_t user_len;
2392 int32_t cnt2end;
2392 uint8_t *pQbuffer, *ptmpuserbuffer; 2393 uint8_t *pQbuffer, *ptmpuserbuffer;
2394
2395 user_len = pcmdmessagefld->cmdmessage.Length;
2396 if (user_len > ARCMSR_API_DATA_BUFLEN) {
2397 retvalue = ARCMSR_MESSAGE_FAIL;
2398 goto message_out;
2399 }
2400
2393 ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC); 2401 ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
2394 if (!ver_addr) { 2402 if (!ver_addr) {
2395 retvalue = ARCMSR_MESSAGE_FAIL; 2403 retvalue = ARCMSR_MESSAGE_FAIL;
2396 goto message_out; 2404 goto message_out;
2397 } 2405 }
2398 ptmpuserbuffer = ver_addr; 2406 ptmpuserbuffer = ver_addr;
2399 user_len = pcmdmessagefld->cmdmessage.Length; 2407
2400 memcpy(ptmpuserbuffer, 2408 memcpy(ptmpuserbuffer,
2401 pcmdmessagefld->messagedatabuffer, user_len); 2409 pcmdmessagefld->messagedatabuffer, user_len);
2402 spin_lock_irqsave(&acb->wqbuffer_lock, flags); 2410 spin_lock_irqsave(&acb->wqbuffer_lock, flags);
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index ee5ace873535..b1d0fdc5d5e1 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2015 Emulex 2 * Copyright (C) 2005 - 2016 Broadcom
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -8,7 +8,7 @@
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Contact Information: 10 * Contact Information:
11 * linux-drivers@avagotech.com 11 * linux-drivers@broadcom.com
12 * 12 *
13 * Emulex 13 * Emulex
14 * 3333 Susan Street 14 * 3333 Susan Street
@@ -89,7 +89,7 @@ struct be_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
89 u32 max_eqd; /* in usecs */ 89 u32 max_eqd; /* in usecs */
90 u32 prev_eqd; /* in usecs */ 90 u32 prev_eqd; /* in usecs */
91 u32 et_eqd; /* configured val when aic is off */ 91 u32 et_eqd; /* configured val when aic is off */
92 ulong jiffs; 92 ulong jiffies;
93 u64 eq_prev; /* Used to calculate eqe */ 93 u64 eq_prev; /* Used to calculate eqe */
94}; 94};
95 95
@@ -100,7 +100,7 @@ struct be_eq_obj {
100 struct be_queue_info q; 100 struct be_queue_info q;
101 struct beiscsi_hba *phba; 101 struct beiscsi_hba *phba;
102 struct be_queue_info *cq; 102 struct be_queue_info *cq;
103 struct work_struct work_cqs; /* Work Item */ 103 struct work_struct mcc_work; /* Work Item */
104 struct irq_poll iopoll; 104 struct irq_poll iopoll;
105}; 105};
106 106
@@ -111,8 +111,11 @@ struct be_mcc_obj {
111 111
112struct beiscsi_mcc_tag_state { 112struct beiscsi_mcc_tag_state {
113 unsigned long tag_state; 113 unsigned long tag_state;
114#define MCC_TAG_STATE_RUNNING 1 114#define MCC_TAG_STATE_RUNNING 0
115#define MCC_TAG_STATE_TIMEOUT 2 115#define MCC_TAG_STATE_TIMEOUT 1
116#define MCC_TAG_STATE_ASYNC 2
117#define MCC_TAG_STATE_IGNORE 3
118 void (*cbfn)(struct beiscsi_hba *, unsigned int);
116 struct be_dma_mem tag_mem_state; 119 struct be_dma_mem tag_mem_state;
117}; 120};
118 121
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index a55eaeea37e7..be65da2988fb 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2015 Emulex 2 * Copyright (C) 2005 - 2016 Broadcom
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -8,7 +8,7 @@
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Contact Information: 10 * Contact Information:
11 * linux-drivers@avagotech.com 11 * linux-drivers@broadcom.com
12 * 12 *
13 * Emulex 13 * Emulex
14 * 3333 Susan Street 14 * 3333 Susan Street
@@ -21,110 +21,77 @@
21#include "be.h" 21#include "be.h"
22#include "be_mgmt.h" 22#include "be_mgmt.h"
23 23
24int beiscsi_pci_soft_reset(struct beiscsi_hba *phba) 24/* UE Status Low CSR */
25{ 25static const char * const desc_ue_status_low[] = {
26 u32 sreset; 26 "CEV",
27 u8 *pci_reset_offset = 0; 27 "CTX",
28 u8 *pci_online0_offset = 0; 28 "DBUF",
29 u8 *pci_online1_offset = 0; 29 "ERX",
30 u32 pconline0 = 0; 30 "Host",
31 u32 pconline1 = 0; 31 "MPU",
32 u32 i; 32 "NDMA",
33 33 "PTC ",
34 pci_reset_offset = (u8 *)phba->pci_va + BE2_SOFT_RESET; 34 "RDMA ",
35 pci_online0_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE0; 35 "RXF ",
36 pci_online1_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE1; 36 "RXIPS ",
37 sreset = readl((void *)pci_reset_offset); 37 "RXULP0 ",
38 sreset |= BE2_SET_RESET; 38 "RXULP1 ",
39 writel(sreset, (void *)pci_reset_offset); 39 "RXULP2 ",
40 40 "TIM ",
41 i = 0; 41 "TPOST ",
42 while (sreset & BE2_SET_RESET) { 42 "TPRE ",
43 if (i > 64) 43 "TXIPS ",
44 break; 44 "TXULP0 ",
45 msleep(100); 45 "TXULP1 ",
46 sreset = readl((void *)pci_reset_offset); 46 "UC ",
47 i++; 47 "WDMA ",
48 } 48 "TXULP2 ",
49 49 "HOST1 ",
50 if (sreset & BE2_SET_RESET) { 50 "P0_OB_LINK ",
51 printk(KERN_ERR DRV_NAME 51 "P1_OB_LINK ",
52 " Soft Reset did not deassert\n"); 52 "HOST_GPIO ",
53 return -EIO; 53 "MBOX ",
54 } 54 "AXGMAC0",
55 pconline1 = BE2_MPU_IRAM_ONLINE; 55 "AXGMAC1",
56 writel(pconline0, (void *)pci_online0_offset); 56 "JTAG",
57 writel(pconline1, (void *)pci_online1_offset); 57 "MPU_INTPEND"
58 58};
59 sreset |= BE2_SET_RESET;
60 writel(sreset, (void *)pci_reset_offset);
61
62 i = 0;
63 while (sreset & BE2_SET_RESET) {
64 if (i > 64)
65 break;
66 msleep(1);
67 sreset = readl((void *)pci_reset_offset);
68 i++;
69 }
70 if (sreset & BE2_SET_RESET) {
71 printk(KERN_ERR DRV_NAME
72 " MPU Online Soft Reset did not deassert\n");
73 return -EIO;
74 }
75 return 0;
76}
77
78int be_chk_reset_complete(struct beiscsi_hba *phba)
79{
80 unsigned int num_loop;
81 u8 *mpu_sem = 0;
82 u32 status;
83
84 num_loop = 1000;
85 mpu_sem = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
86 msleep(5000);
87
88 while (num_loop) {
89 status = readl((void *)mpu_sem);
90
91 if ((status & 0x80000000) || (status & 0x0000FFFF) == 0xC000)
92 break;
93 msleep(60);
94 num_loop--;
95 }
96
97 if ((status & 0x80000000) || (!num_loop)) {
98 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
99 "BC_%d : Failed in be_chk_reset_complete"
100 "status = 0x%x\n", status);
101 return -EIO;
102 }
103
104 return 0;
105}
106
107unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
108{
109 unsigned int tag = 0;
110 59
111 spin_lock(&phba->ctrl.mcc_lock); 60/* UE Status High CSR */
112 if (phba->ctrl.mcc_tag_available) { 61static const char * const desc_ue_status_hi[] = {
113 tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index]; 62 "LPCMEMHOST",
114 phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0; 63 "MGMT_MAC",
115 phba->ctrl.mcc_tag_status[tag] = 0; 64 "PCS0ONLINE",
116 phba->ctrl.ptag_state[tag].tag_state = 0; 65 "MPU_IRAM",
117 } 66 "PCS1ONLINE",
118 if (tag) { 67 "PCTL0",
119 phba->ctrl.mcc_tag_available--; 68 "PCTL1",
120 if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1)) 69 "PMEM",
121 phba->ctrl.mcc_alloc_index = 0; 70 "RR",
122 else 71 "TXPB",
123 phba->ctrl.mcc_alloc_index++; 72 "RXPP",
124 } 73 "XAUI",
125 spin_unlock(&phba->ctrl.mcc_lock); 74 "TXP",
126 return tag; 75 "ARM",
127} 76 "IPC",
77 "HOST2",
78 "HOST3",
79 "HOST4",
80 "HOST5",
81 "HOST6",
82 "HOST7",
83 "HOST8",
84 "HOST9",
85 "NETC",
86 "Unknown",
87 "Unknown",
88 "Unknown",
89 "Unknown",
90 "Unknown",
91 "Unknown",
92 "Unknown",
93 "Unknown"
94};
128 95
129struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba, 96struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
130 unsigned int *ref_tag) 97 unsigned int *ref_tag)
@@ -133,7 +100,7 @@ struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
133 struct be_mcc_wrb *wrb = NULL; 100 struct be_mcc_wrb *wrb = NULL;
134 unsigned int tag; 101 unsigned int tag;
135 102
136 spin_lock_bh(&phba->ctrl.mcc_lock); 103 spin_lock(&phba->ctrl.mcc_lock);
137 if (mccq->used == mccq->len) { 104 if (mccq->used == mccq->len) {
138 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT | 105 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
139 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, 106 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
@@ -160,6 +127,7 @@ struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
160 phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0; 127 phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
161 phba->ctrl.mcc_tag_status[tag] = 0; 128 phba->ctrl.mcc_tag_status[tag] = 0;
162 phba->ctrl.ptag_state[tag].tag_state = 0; 129 phba->ctrl.ptag_state[tag].tag_state = 0;
130 phba->ctrl.ptag_state[tag].cbfn = NULL;
163 phba->ctrl.mcc_tag_available--; 131 phba->ctrl.mcc_tag_available--;
164 if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1)) 132 if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
165 phba->ctrl.mcc_alloc_index = 0; 133 phba->ctrl.mcc_alloc_index = 0;
@@ -174,7 +142,7 @@ struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
174 mccq->used++; 142 mccq->used++;
175 143
176alloc_failed: 144alloc_failed:
177 spin_unlock_bh(&phba->ctrl.mcc_lock); 145 spin_unlock(&phba->ctrl.mcc_lock);
178 return wrb; 146 return wrb;
179} 147}
180 148
@@ -182,7 +150,7 @@ void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag)
182{ 150{
183 struct be_queue_info *mccq = &ctrl->mcc_obj.q; 151 struct be_queue_info *mccq = &ctrl->mcc_obj.q;
184 152
185 spin_lock_bh(&ctrl->mcc_lock); 153 spin_lock(&ctrl->mcc_lock);
186 tag = tag & MCC_Q_CMD_TAG_MASK; 154 tag = tag & MCC_Q_CMD_TAG_MASK;
187 ctrl->mcc_tag[ctrl->mcc_free_index] = tag; 155 ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
188 if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1)) 156 if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
@@ -191,16 +159,71 @@ void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag)
191 ctrl->mcc_free_index++; 159 ctrl->mcc_free_index++;
192 ctrl->mcc_tag_available++; 160 ctrl->mcc_tag_available++;
193 mccq->used--; 161 mccq->used--;
194 spin_unlock_bh(&ctrl->mcc_lock); 162 spin_unlock(&ctrl->mcc_lock);
195} 163}
196 164
197/** 165/*
198 * beiscsi_fail_session(): Closing session with appropriate error 166 * beiscsi_mcc_compl_status - Return the status of MCC completion
199 * @cls_session: ptr to session 167 * @phba: Driver private structure
200 **/ 168 * @tag: Tag for the MBX Command
201void beiscsi_fail_session(struct iscsi_cls_session *cls_session) 169 * @wrb: the WRB used for the MBX Command
170 * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
171 *
172 * return
173 * Success: 0
174 * Failure: Non-Zero
175 */
176int __beiscsi_mcc_compl_status(struct beiscsi_hba *phba,
177 unsigned int tag,
178 struct be_mcc_wrb **wrb,
179 struct be_dma_mem *mbx_cmd_mem)
202{ 180{
203 iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED); 181 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
182 uint16_t status = 0, addl_status = 0, wrb_num = 0;
183 struct be_cmd_resp_hdr *mbx_resp_hdr;
184 struct be_cmd_req_hdr *mbx_hdr;
185 struct be_mcc_wrb *temp_wrb;
186 uint32_t mcc_tag_status;
187 int rc = 0;
188
189 mcc_tag_status = phba->ctrl.mcc_tag_status[tag];
190 status = (mcc_tag_status & CQE_STATUS_MASK);
191 addl_status = ((mcc_tag_status & CQE_STATUS_ADDL_MASK) >>
192 CQE_STATUS_ADDL_SHIFT);
193
194 if (mbx_cmd_mem) {
195 mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
196 } else {
197 wrb_num = (mcc_tag_status & CQE_STATUS_WRB_MASK) >>
198 CQE_STATUS_WRB_SHIFT;
199 temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
200 mbx_hdr = embedded_payload(temp_wrb);
201
202 if (wrb)
203 *wrb = temp_wrb;
204 }
205
206 if (status || addl_status) {
207 beiscsi_log(phba, KERN_WARNING,
208 BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
209 BEISCSI_LOG_CONFIG,
210 "BC_%d : MBX Cmd Failed for Subsys : %d Opcode : %d with Status : %d and Extd_Status : %d\n",
211 mbx_hdr->subsystem, mbx_hdr->opcode,
212 status, addl_status);
213 rc = -EIO;
214 if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
215 mbx_resp_hdr = (struct be_cmd_resp_hdr *)mbx_hdr;
216 beiscsi_log(phba, KERN_WARNING,
217 BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
218 BEISCSI_LOG_CONFIG,
219 "BC_%d : Insufficient Buffer Error Resp_Len : %d Actual_Resp_Len : %d\n",
220 mbx_resp_hdr->response_length,
221 mbx_resp_hdr->actual_resp_len);
222 rc = -EAGAIN;
223 }
224 }
225
226 return rc;
204} 227}
205 228
206/* 229/*
@@ -217,26 +240,34 @@ void beiscsi_fail_session(struct iscsi_cls_session *cls_session)
217 * Failure: Non-Zero 240 * Failure: Non-Zero
218 **/ 241 **/
219int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba, 242int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
220 uint32_t tag, struct be_mcc_wrb **wrb, 243 unsigned int tag,
244 struct be_mcc_wrb **wrb,
221 struct be_dma_mem *mbx_cmd_mem) 245 struct be_dma_mem *mbx_cmd_mem)
222{ 246{
223 int rc = 0; 247 int rc = 0;
224 uint32_t mcc_tag_status;
225 uint16_t status = 0, addl_status = 0, wrb_num = 0;
226 struct be_mcc_wrb *temp_wrb;
227 struct be_cmd_req_hdr *mbx_hdr;
228 struct be_cmd_resp_hdr *mbx_resp_hdr;
229 struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
230 248
231 if (beiscsi_error(phba)) 249 if (beiscsi_hba_in_error(phba)) {
232 return -EPERM; 250 clear_bit(MCC_TAG_STATE_RUNNING,
251 &phba->ctrl.ptag_state[tag].tag_state);
252 return -EIO;
253 }
233 254
234 /* wait for the mccq completion */ 255 /* wait for the mccq completion */
235 rc = wait_event_interruptible_timeout( 256 rc = wait_event_interruptible_timeout(phba->ctrl.mcc_wait[tag],
236 phba->ctrl.mcc_wait[tag], 257 phba->ctrl.mcc_tag_status[tag],
237 phba->ctrl.mcc_tag_status[tag], 258 msecs_to_jiffies(
238 msecs_to_jiffies( 259 BEISCSI_HOST_MBX_TIMEOUT));
239 BEISCSI_HOST_MBX_TIMEOUT)); 260 /**
261 * Return EIO if port is being disabled. Associated DMA memory, if any,
262 * is freed by the caller. When port goes offline, MCCQ is cleaned up
263 * so does WRB.
264 */
265 if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
266 clear_bit(MCC_TAG_STATE_RUNNING,
267 &phba->ctrl.ptag_state[tag].tag_state);
268 return -EIO;
269 }
270
240 /** 271 /**
241 * If MBOX cmd timeout expired, tag and resource allocated 272 * If MBOX cmd timeout expired, tag and resource allocated
242 * for cmd is not freed until FW returns completion. 273 * for cmd is not freed until FW returns completion.
@@ -270,47 +301,7 @@ int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
270 return -EBUSY; 301 return -EBUSY;
271 } 302 }
272 303
273 rc = 0; 304 rc = __beiscsi_mcc_compl_status(phba, tag, wrb, mbx_cmd_mem);
274 mcc_tag_status = phba->ctrl.mcc_tag_status[tag];
275 status = (mcc_tag_status & CQE_STATUS_MASK);
276 addl_status = ((mcc_tag_status & CQE_STATUS_ADDL_MASK) >>
277 CQE_STATUS_ADDL_SHIFT);
278
279 if (mbx_cmd_mem) {
280 mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
281 } else {
282 wrb_num = (mcc_tag_status & CQE_STATUS_WRB_MASK) >>
283 CQE_STATUS_WRB_SHIFT;
284 temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
285 mbx_hdr = embedded_payload(temp_wrb);
286
287 if (wrb)
288 *wrb = temp_wrb;
289 }
290
291 if (status || addl_status) {
292 beiscsi_log(phba, KERN_WARNING,
293 BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
294 BEISCSI_LOG_CONFIG,
295 "BC_%d : MBX Cmd Failed for "
296 "Subsys : %d Opcode : %d with "
297 "Status : %d and Extd_Status : %d\n",
298 mbx_hdr->subsystem,
299 mbx_hdr->opcode,
300 status, addl_status);
301 rc = -EIO;
302 if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
303 mbx_resp_hdr = (struct be_cmd_resp_hdr *) mbx_hdr;
304 beiscsi_log(phba, KERN_WARNING,
305 BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
306 BEISCSI_LOG_CONFIG,
307 "BC_%d : Insufficient Buffer Error "
308 "Resp_Len : %d Actual_Resp_Len : %d\n",
309 mbx_resp_hdr->response_length,
310 mbx_resp_hdr->actual_resp_len);
311 rc = -EAGAIN;
312 }
313 }
314 305
315 free_mcc_wrb(&phba->ctrl, tag); 306 free_mcc_wrb(&phba->ctrl, tag);
316 return rc; 307 return rc;
@@ -330,11 +321,10 @@ int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
330static int beiscsi_process_mbox_compl(struct be_ctrl_info *ctrl, 321static int beiscsi_process_mbox_compl(struct be_ctrl_info *ctrl,
331 struct be_mcc_compl *compl) 322 struct be_mcc_compl *compl)
332{ 323{
333 u16 compl_status, extd_status;
334 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); 324 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
335 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); 325 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
336 struct be_cmd_req_hdr *hdr = embedded_payload(wrb); 326 struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
337 struct be_cmd_resp_hdr *resp_hdr; 327 u16 compl_status, extd_status;
338 328
339 /** 329 /**
340 * To check if valid bit is set, check the entire word as we don't know 330 * To check if valid bit is set, check the entire word as we don't know
@@ -368,14 +358,7 @@ static int beiscsi_process_mbox_compl(struct be_ctrl_info *ctrl,
368 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, 358 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
369 "BC_%d : error in cmd completion: Subsystem : %d Opcode : %d status(compl/extd)=%d/%d\n", 359 "BC_%d : error in cmd completion: Subsystem : %d Opcode : %d status(compl/extd)=%d/%d\n",
370 hdr->subsystem, hdr->opcode, compl_status, extd_status); 360 hdr->subsystem, hdr->opcode, compl_status, extd_status);
371 361 return compl_status;
372 if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) {
373 /* if status is insufficient buffer, check the length */
374 resp_hdr = (struct be_cmd_resp_hdr *) hdr;
375 if (resp_hdr->response_length)
376 return 0;
377 }
378 return -EINVAL;
379} 362}
380 363
381static void beiscsi_process_async_link(struct beiscsi_hba *phba, 364static void beiscsi_process_async_link(struct beiscsi_hba *phba,
@@ -391,18 +374,19 @@ static void beiscsi_process_async_link(struct beiscsi_hba *phba,
391 * This has been newly introduced in SKH-R Firmware 10.0.338.45. 374 * This has been newly introduced in SKH-R Firmware 10.0.338.45.
392 **/ 375 **/
393 if (evt->port_link_status & BE_ASYNC_LINK_UP_MASK) { 376 if (evt->port_link_status & BE_ASYNC_LINK_UP_MASK) {
394 phba->state = BE_ADAPTER_LINK_UP | BE_ADAPTER_CHECK_BOOT; 377 set_bit(BEISCSI_HBA_LINK_UP, &phba->state);
395 phba->get_boot = BE_GET_BOOT_RETRIES; 378 if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
379 beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
396 __beiscsi_log(phba, KERN_ERR, 380 __beiscsi_log(phba, KERN_ERR,
397 "BC_%d : Link Up on Port %d tag 0x%x\n", 381 "BC_%d : Link Up on Port %d tag 0x%x\n",
398 evt->physical_port, evt->event_tag); 382 evt->physical_port, evt->event_tag);
399 } else { 383 } else {
400 phba->state = BE_ADAPTER_LINK_DOWN; 384 clear_bit(BEISCSI_HBA_LINK_UP, &phba->state);
401 __beiscsi_log(phba, KERN_ERR, 385 __beiscsi_log(phba, KERN_ERR,
402 "BC_%d : Link Down on Port %d tag 0x%x\n", 386 "BC_%d : Link Down on Port %d tag 0x%x\n",
403 evt->physical_port, evt->event_tag); 387 evt->physical_port, evt->event_tag);
404 iscsi_host_for_each_session(phba->shost, 388 iscsi_host_for_each_session(phba->shost,
405 beiscsi_fail_session); 389 beiscsi_session_fail);
406 } 390 }
407} 391}
408 392
@@ -482,8 +466,8 @@ void beiscsi_process_async_event(struct beiscsi_hba *phba,
482 beiscsi_process_async_link(phba, compl); 466 beiscsi_process_async_link(phba, compl);
483 break; 467 break;
484 case ASYNC_EVENT_CODE_ISCSI: 468 case ASYNC_EVENT_CODE_ISCSI:
485 phba->state |= BE_ADAPTER_CHECK_BOOT; 469 if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
486 phba->get_boot = BE_GET_BOOT_RETRIES; 470 beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
487 sev = KERN_ERR; 471 sev = KERN_ERR;
488 break; 472 break;
489 case ASYNC_EVENT_CODE_SLI: 473 case ASYNC_EVENT_CODE_SLI:
@@ -519,6 +503,9 @@ int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
519 return 0; 503 return 0;
520 } 504 }
521 505
506 /* end MCC with this tag */
507 clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state);
508
522 if (test_bit(MCC_TAG_STATE_TIMEOUT, &ctrl->ptag_state[tag].tag_state)) { 509 if (test_bit(MCC_TAG_STATE_TIMEOUT, &ctrl->ptag_state[tag].tag_state)) {
523 beiscsi_log(phba, KERN_WARNING, 510 beiscsi_log(phba, KERN_WARNING,
524 BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT | 511 BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
@@ -529,9 +516,11 @@ int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
529 * Only for non-embedded cmd, PCI resource is allocated. 516 * Only for non-embedded cmd, PCI resource is allocated.
530 **/ 517 **/
531 tag_mem = &ctrl->ptag_state[tag].tag_mem_state; 518 tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
532 if (tag_mem->size) 519 if (tag_mem->size) {
533 pci_free_consistent(ctrl->pdev, tag_mem->size, 520 pci_free_consistent(ctrl->pdev, tag_mem->size,
534 tag_mem->va, tag_mem->dma); 521 tag_mem->va, tag_mem->dma);
522 tag_mem->size = 0;
523 }
535 free_mcc_wrb(ctrl, tag); 524 free_mcc_wrb(ctrl, tag);
536 return 0; 525 return 0;
537 } 526 }
@@ -550,57 +539,25 @@ int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
550 CQE_STATUS_ADDL_MASK; 539 CQE_STATUS_ADDL_MASK;
551 ctrl->mcc_tag_status[tag] |= (compl_status & CQE_STATUS_MASK); 540 ctrl->mcc_tag_status[tag] |= (compl_status & CQE_STATUS_MASK);
552 541
553 /* write ordering forced in wake_up_interruptible */ 542 if (test_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state)) {
554 clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state); 543 if (ctrl->ptag_state[tag].cbfn)
555 wake_up_interruptible(&ctrl->mcc_wait[tag]); 544 ctrl->ptag_state[tag].cbfn(phba, tag);
556 return 0; 545 else
557} 546 __beiscsi_log(phba, KERN_ERR,
558 547 "BC_%d : MBX ASYNC command with no callback\n");
559/* 548 free_mcc_wrb(ctrl, tag);
560 * be_mcc_compl_poll()- Wait for MBX completion
561 * @phba: driver private structure
562 *
563 * Wait till no more pending mcc requests are present
564 *
565 * return
566 * Success: 0
567 * Failure: Non-Zero
568 *
569 **/
570int be_mcc_compl_poll(struct beiscsi_hba *phba, unsigned int tag)
571{
572 struct be_ctrl_info *ctrl = &phba->ctrl;
573 int i;
574
575 if (!test_bit(MCC_TAG_STATE_RUNNING,
576 &ctrl->ptag_state[tag].tag_state)) {
577 beiscsi_log(phba, KERN_ERR,
578 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
579 "BC_%d: tag %u state not running\n", tag);
580 return 0; 549 return 0;
581 } 550 }
582 for (i = 0; i < mcc_timeout; i++) {
583 if (beiscsi_error(phba))
584 return -EIO;
585 551
586 beiscsi_process_mcc_cq(phba); 552 if (test_bit(MCC_TAG_STATE_IGNORE, &ctrl->ptag_state[tag].tag_state)) {
587 /* after polling, wrb and tag need to be released */ 553 /* just check completion status and free wrb */
588 if (!test_bit(MCC_TAG_STATE_RUNNING, 554 __beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
589 &ctrl->ptag_state[tag].tag_state)) { 555 free_mcc_wrb(ctrl, tag);
590 free_mcc_wrb(ctrl, tag);
591 break;
592 }
593 udelay(100);
594 }
595
596 if (i < mcc_timeout)
597 return 0; 556 return 0;
557 }
598 558
599 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, 559 wake_up_interruptible(&ctrl->mcc_wait[tag]);
600 "BC_%d : FW Timed Out\n"); 560 return 0;
601 phba->fw_timeout = true;
602 beiscsi_ue_detect(phba);
603 return -EBUSY;
604} 561}
605 562
606void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag) 563void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag)
@@ -642,7 +599,7 @@ static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl)
642 */ 599 */
643 timeout = jiffies + msecs_to_jiffies(BEISCSI_MBX_RDY_BIT_TIMEOUT); 600 timeout = jiffies + msecs_to_jiffies(BEISCSI_MBX_RDY_BIT_TIMEOUT);
644 do { 601 do {
645 if (beiscsi_error(phba)) 602 if (beiscsi_hba_in_error(phba))
646 return -EIO; 603 return -EIO;
647 604
648 ready = ioread32(db); 605 ready = ioread32(db);
@@ -655,16 +612,14 @@ static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl)
655 612
656 if (time_after(jiffies, timeout)) 613 if (time_after(jiffies, timeout))
657 break; 614 break;
658 msleep(20); 615 /* 1ms sleep is enough in most cases */
616 schedule_timeout_uninterruptible(msecs_to_jiffies(1));
659 } while (!ready); 617 } while (!ready);
660 618
661 beiscsi_log(phba, KERN_ERR, 619 beiscsi_log(phba, KERN_ERR,
662 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, 620 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
663 "BC_%d : FW Timed Out\n"); 621 "BC_%d : FW Timed Out\n");
664 622 set_bit(BEISCSI_HBA_FW_TIMEOUT, &phba->state);
665 phba->fw_timeout = true;
666 beiscsi_ue_detect(phba);
667
668 return -EBUSY; 623 return -EBUSY;
669} 624}
670 625
@@ -679,7 +634,7 @@ static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl)
679 * Success: 0 634 * Success: 0
680 * Failure: Non-Zero 635 * Failure: Non-Zero
681 **/ 636 **/
682int be_mbox_notify(struct be_ctrl_info *ctrl) 637static int be_mbox_notify(struct be_ctrl_info *ctrl)
683{ 638{
684 int status; 639 int status;
685 u32 val = 0; 640 u32 val = 0;
@@ -819,87 +774,6 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
819 return status; 774 return status;
820} 775}
821 776
822/**
823 * be_cmd_fw_initialize()- Initialize FW
824 * @ctrl: Pointer to function control structure
825 *
826 * Send FW initialize pattern for the function.
827 *
828 * return
829 * Success: 0
830 * Failure: Non-Zero value
831 **/
832int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
833{
834 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
835 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
836 int status;
837 u8 *endian_check;
838
839 mutex_lock(&ctrl->mbox_lock);
840 memset(wrb, 0, sizeof(*wrb));
841
842 endian_check = (u8 *) wrb;
843 *endian_check++ = 0xFF;
844 *endian_check++ = 0x12;
845 *endian_check++ = 0x34;
846 *endian_check++ = 0xFF;
847 *endian_check++ = 0xFF;
848 *endian_check++ = 0x56;
849 *endian_check++ = 0x78;
850 *endian_check++ = 0xFF;
851 be_dws_cpu_to_le(wrb, sizeof(*wrb));
852
853 status = be_mbox_notify(ctrl);
854 if (status)
855 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
856 "BC_%d : be_cmd_fw_initialize Failed\n");
857
858 mutex_unlock(&ctrl->mbox_lock);
859 return status;
860}
861
862/**
863 * be_cmd_fw_uninit()- Uinitialize FW
864 * @ctrl: Pointer to function control structure
865 *
866 * Send FW uninitialize pattern for the function
867 *
868 * return
869 * Success: 0
870 * Failure: Non-Zero value
871 **/
872int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
873{
874 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
875 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
876 int status;
877 u8 *endian_check;
878
879 mutex_lock(&ctrl->mbox_lock);
880 memset(wrb, 0, sizeof(*wrb));
881
882 endian_check = (u8 *) wrb;
883 *endian_check++ = 0xFF;
884 *endian_check++ = 0xAA;
885 *endian_check++ = 0xBB;
886 *endian_check++ = 0xFF;
887 *endian_check++ = 0xFF;
888 *endian_check++ = 0xCC;
889 *endian_check++ = 0xDD;
890 *endian_check = 0xFF;
891
892 be_dws_cpu_to_le(wrb, sizeof(*wrb));
893
894 status = be_mbox_notify(ctrl);
895 if (status)
896 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
897 "BC_%d : be_cmd_fw_uninit Failed\n");
898
899 mutex_unlock(&ctrl->mbox_lock);
900 return status;
901}
902
903int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, 777int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
904 struct be_queue_info *cq, struct be_queue_info *eq, 778 struct be_queue_info *cq, struct be_queue_info *eq,
905 bool sol_evts, bool no_delay, int coalesce_wm) 779 bool sol_evts, bool no_delay, int coalesce_wm)
@@ -1343,25 +1217,6 @@ error:
1343 return status; 1217 return status;
1344} 1218}
1345 1219
1346int beiscsi_cmd_reset_function(struct beiscsi_hba *phba)
1347{
1348 struct be_ctrl_info *ctrl = &phba->ctrl;
1349 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1350 struct be_post_sgl_pages_req *req = embedded_payload(wrb);
1351 int status;
1352
1353 mutex_lock(&ctrl->mbox_lock);
1354
1355 req = embedded_payload(wrb);
1356 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1357 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1358 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
1359 status = be_mbox_notify(ctrl);
1360
1361 mutex_unlock(&ctrl->mbox_lock);
1362 return status;
1363}
1364
1365/** 1220/**
1366 * be_cmd_set_vlan()- Configure VLAN paramters on the adapter 1221 * be_cmd_set_vlan()- Configure VLAN paramters on the adapter
1367 * @phba: device priv structure instance 1222 * @phba: device priv structure instance
@@ -1402,3 +1257,564 @@ int be_cmd_set_vlan(struct beiscsi_hba *phba,
1402 1257
1403 return tag; 1258 return tag;
1404} 1259}
1260
1261int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
1262 struct beiscsi_hba *phba)
1263{
1264 struct be_dma_mem nonemb_cmd;
1265 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1266 struct be_mgmt_controller_attributes *req;
1267 struct be_sge *sge = nonembedded_sgl(wrb);
1268 int status = 0;
1269
1270 nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
1271 sizeof(struct be_mgmt_controller_attributes),
1272 &nonemb_cmd.dma);
1273 if (nonemb_cmd.va == NULL) {
1274 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1275 "BG_%d : pci_alloc_consistent failed in %s\n",
1276 __func__);
1277 return -ENOMEM;
1278 }
1279 nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
1280 req = nonemb_cmd.va;
1281 memset(req, 0, sizeof(*req));
1282 mutex_lock(&ctrl->mbox_lock);
1283 memset(wrb, 0, sizeof(*wrb));
1284 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
1285 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1286 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req));
1287 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
1288 sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
1289 sge->len = cpu_to_le32(nonemb_cmd.size);
1290 status = be_mbox_notify(ctrl);
1291 if (!status) {
1292 struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
1293
1294 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1295 "BG_%d : Firmware Version of CMD : %s\n"
1296 "Firmware Version is : %s\n"
1297 "Developer Build, not performing version check...\n",
1298 resp->params.hba_attribs
1299 .flashrom_version_string,
1300 resp->params.hba_attribs.
1301 firmware_version_string);
1302
1303 phba->fw_config.iscsi_features =
1304 resp->params.hba_attribs.iscsi_features;
1305 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1306 "BM_%d : phba->fw_config.iscsi_features = %d\n",
1307 phba->fw_config.iscsi_features);
1308 memcpy(phba->fw_ver_str, resp->params.hba_attribs.
1309 firmware_version_string, BEISCSI_VER_STRLEN);
1310 } else
1311 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1312 "BG_%d : Failed in beiscsi_check_supported_fw\n");
1313 mutex_unlock(&ctrl->mbox_lock);
1314 if (nonemb_cmd.va)
1315 pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
1316 nonemb_cmd.va, nonemb_cmd.dma);
1317
1318 return status;
1319}
1320
1321/**
1322 * beiscsi_get_fw_config()- Get the FW config for the function
1323 * @ctrl: ptr to Ctrl Info
1324 * @phba: ptr to the dev priv structure
1325 *
1326 * Get the FW config and resources available for the function.
1327 * The resources are created based on the count received here.
1328 *
1329 * return
1330 * Success: 0
1331 * Failure: Non-Zero Value
1332 **/
1333int beiscsi_get_fw_config(struct be_ctrl_info *ctrl,
1334 struct beiscsi_hba *phba)
1335{
1336 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1337 struct be_fw_cfg *pfw_cfg = embedded_payload(wrb);
1338 uint32_t cid_count, icd_count;
1339 int status = -EINVAL;
1340 uint8_t ulp_num = 0;
1341
1342 mutex_lock(&ctrl->mbox_lock);
1343 memset(wrb, 0, sizeof(*wrb));
1344 be_wrb_hdr_prepare(wrb, sizeof(*pfw_cfg), true, 0);
1345
1346 be_cmd_hdr_prepare(&pfw_cfg->hdr, CMD_SUBSYSTEM_COMMON,
1347 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
1348 EMBED_MBX_MAX_PAYLOAD_SIZE);
1349
1350 if (be_mbox_notify(ctrl)) {
1351 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1352 "BG_%d : Failed in beiscsi_get_fw_config\n");
1353 goto fail_init;
1354 }
1355
1356 /* FW response formats depend on port id */
1357 phba->fw_config.phys_port = pfw_cfg->phys_port;
1358 if (phba->fw_config.phys_port >= BEISCSI_PHYS_PORT_MAX) {
1359 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1360 "BG_%d : invalid physical port id %d\n",
1361 phba->fw_config.phys_port);
1362 goto fail_init;
1363 }
1364
1365 /* populate and check FW config against min and max values */
1366 if (!is_chip_be2_be3r(phba)) {
1367 phba->fw_config.eqid_count = pfw_cfg->eqid_count;
1368 phba->fw_config.cqid_count = pfw_cfg->cqid_count;
1369 if (phba->fw_config.eqid_count == 0 ||
1370 phba->fw_config.eqid_count > 2048) {
1371 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1372 "BG_%d : invalid EQ count %d\n",
1373 phba->fw_config.eqid_count);
1374 goto fail_init;
1375 }
1376 if (phba->fw_config.cqid_count == 0 ||
1377 phba->fw_config.cqid_count > 4096) {
1378 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1379 "BG_%d : invalid CQ count %d\n",
1380 phba->fw_config.cqid_count);
1381 goto fail_init;
1382 }
1383 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1384 "BG_%d : EQ_Count : %d CQ_Count : %d\n",
1385 phba->fw_config.eqid_count,
1386 phba->fw_config.cqid_count);
1387 }
1388
1389 /**
1390 * Check on which all ULP iSCSI Protocol is loaded.
1391 * Set the Bit for those ULP. This set flag is used
1392 * at all places in the code to check on which ULP
1393 * iSCSi Protocol is loaded
1394 **/
1395 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
1396 if (pfw_cfg->ulp[ulp_num].ulp_mode &
1397 BEISCSI_ULP_ISCSI_INI_MODE) {
1398 set_bit(ulp_num, &phba->fw_config.ulp_supported);
1399
1400 /* Get the CID, ICD and Chain count for each ULP */
1401 phba->fw_config.iscsi_cid_start[ulp_num] =
1402 pfw_cfg->ulp[ulp_num].sq_base;
1403 phba->fw_config.iscsi_cid_count[ulp_num] =
1404 pfw_cfg->ulp[ulp_num].sq_count;
1405
1406 phba->fw_config.iscsi_icd_start[ulp_num] =
1407 pfw_cfg->ulp[ulp_num].icd_base;
1408 phba->fw_config.iscsi_icd_count[ulp_num] =
1409 pfw_cfg->ulp[ulp_num].icd_count;
1410
1411 phba->fw_config.iscsi_chain_start[ulp_num] =
1412 pfw_cfg->chain_icd[ulp_num].chain_base;
1413 phba->fw_config.iscsi_chain_count[ulp_num] =
1414 pfw_cfg->chain_icd[ulp_num].chain_count;
1415
1416 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1417 "BG_%d : Function loaded on ULP : %d\n"
1418 "\tiscsi_cid_count : %d\n"
1419 "\tiscsi_cid_start : %d\n"
1420 "\t iscsi_icd_count : %d\n"
1421 "\t iscsi_icd_start : %d\n",
1422 ulp_num,
1423 phba->fw_config.
1424 iscsi_cid_count[ulp_num],
1425 phba->fw_config.
1426 iscsi_cid_start[ulp_num],
1427 phba->fw_config.
1428 iscsi_icd_count[ulp_num],
1429 phba->fw_config.
1430 iscsi_icd_start[ulp_num]);
1431 }
1432 }
1433
1434 if (phba->fw_config.ulp_supported == 0) {
1435 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1436 "BG_%d : iSCSI initiator mode not set: ULP0 %x ULP1 %x\n",
1437 pfw_cfg->ulp[BEISCSI_ULP0].ulp_mode,
1438 pfw_cfg->ulp[BEISCSI_ULP1].ulp_mode);
1439 goto fail_init;
1440 }
1441
1442 /**
1443 * ICD is shared among ULPs. Use icd_count of any one loaded ULP
1444 **/
1445 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
1446 if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
1447 break;
1448 icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
1449 if (icd_count == 0 || icd_count > 65536) {
1450 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1451 "BG_%d: invalid ICD count %d\n", icd_count);
1452 goto fail_init;
1453 }
1454
1455 cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
1456 BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
1457 if (cid_count == 0 || cid_count > 4096) {
1458 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1459 "BG_%d: invalid CID count %d\n", cid_count);
1460 goto fail_init;
1461 }
1462
1463 /**
1464 * Check FW is dual ULP aware i.e. can handle either
1465 * of the protocols.
1466 */
1467 phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
1468 BEISCSI_FUNC_DUA_MODE);
1469
1470 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1471 "BG_%d : DUA Mode : 0x%x\n",
1472 phba->fw_config.dual_ulp_aware);
1473
1474 /* all set, continue using this FW config */
1475 status = 0;
1476fail_init:
1477 mutex_unlock(&ctrl->mbox_lock);
1478 return status;
1479}
1480
1481/**
1482 * beiscsi_get_port_name()- Get port name for the function
1483 * @ctrl: ptr to Ctrl Info
1484 * @phba: ptr to the dev priv structure
1485 *
1486 * Get the alphanumeric character for port
1487 *
1488 **/
1489int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba)
1490{
1491 int ret = 0;
1492 struct be_mcc_wrb *wrb;
1493 struct be_cmd_get_port_name *ioctl;
1494
1495 mutex_lock(&ctrl->mbox_lock);
1496 wrb = wrb_from_mbox(&ctrl->mbox_mem);
1497 memset(wrb, 0, sizeof(*wrb));
1498 ioctl = embedded_payload(wrb);
1499
1500 be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
1501 be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
1502 OPCODE_COMMON_GET_PORT_NAME,
1503 EMBED_MBX_MAX_PAYLOAD_SIZE);
1504 ret = be_mbox_notify(ctrl);
1505 phba->port_name = 0;
1506 if (!ret) {
1507 phba->port_name = ioctl->p.resp.port_names >>
1508 (phba->fw_config.phys_port * 8) & 0xff;
1509 } else {
1510 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1511 "BG_%d : GET_PORT_NAME ret 0x%x status 0x%x\n",
1512 ret, ioctl->h.resp_hdr.status);
1513 }
1514
1515 if (phba->port_name == 0)
1516 phba->port_name = '?';
1517
1518 mutex_unlock(&ctrl->mbox_lock);
1519 return ret;
1520}
1521
1522int beiscsi_set_uer_feature(struct beiscsi_hba *phba)
1523{
1524 struct be_ctrl_info *ctrl = &phba->ctrl;
1525 struct be_cmd_set_features *ioctl;
1526 struct be_mcc_wrb *wrb;
1527 int ret = 0;
1528
1529 mutex_lock(&ctrl->mbox_lock);
1530 wrb = wrb_from_mbox(&ctrl->mbox_mem);
1531 memset(wrb, 0, sizeof(*wrb));
1532 ioctl = embedded_payload(wrb);
1533
1534 be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
1535 be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
1536 OPCODE_COMMON_SET_FEATURES,
1537 EMBED_MBX_MAX_PAYLOAD_SIZE);
1538 ioctl->feature = BE_CMD_SET_FEATURE_UER;
1539 ioctl->param_len = sizeof(ioctl->param.req);
1540 ioctl->param.req.uer = BE_CMD_UER_SUPP_BIT;
1541 ret = be_mbox_notify(ctrl);
1542 if (!ret) {
1543 phba->ue2rp = ioctl->param.resp.ue2rp;
1544 set_bit(BEISCSI_HBA_UER_SUPP, &phba->state);
1545 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1546 "BG_%d : HBA error recovery supported\n");
1547 } else {
1548 /**
1549 * Check "MCC_STATUS_INVALID_LENGTH" for SKH.
1550 * Older FW versions return this error.
1551 */
1552 if (ret == MCC_STATUS_ILLEGAL_REQUEST ||
1553 ret == MCC_STATUS_INVALID_LENGTH)
1554 __beiscsi_log(phba, KERN_INFO,
1555 "BG_%d : HBA error recovery not supported\n");
1556 }
1557
1558 mutex_unlock(&ctrl->mbox_lock);
1559 return ret;
1560}
1561
1562static u32 beiscsi_get_post_stage(struct beiscsi_hba *phba)
1563{
1564 u32 sem;
1565
1566 if (is_chip_be2_be3r(phba))
1567 sem = ioread32(phba->csr_va + SLIPORT_SEMAPHORE_OFFSET_BEx);
1568 else
1569 pci_read_config_dword(phba->pcidev,
1570 SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
1571 return sem;
1572}
1573
1574int beiscsi_check_fw_rdy(struct beiscsi_hba *phba)
1575{
1576 u32 loop, post, rdy = 0;
1577
1578 loop = 1000;
1579 while (loop--) {
1580 post = beiscsi_get_post_stage(phba);
1581 if (post & POST_ERROR_BIT)
1582 break;
1583 if ((post & POST_STAGE_MASK) == POST_STAGE_ARMFW_RDY) {
1584 rdy = 1;
1585 break;
1586 }
1587 msleep(60);
1588 }
1589
1590 if (!rdy) {
1591 __beiscsi_log(phba, KERN_ERR,
1592 "BC_%d : FW not ready 0x%x\n", post);
1593 }
1594
1595 return rdy;
1596}
1597
1598int beiscsi_cmd_function_reset(struct beiscsi_hba *phba)
1599{
1600 struct be_ctrl_info *ctrl = &phba->ctrl;
1601 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1602 struct be_post_sgl_pages_req *req = embedded_payload(wrb);
1603 int status;
1604
1605 mutex_lock(&ctrl->mbox_lock);
1606
1607 req = embedded_payload(wrb);
1608 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1609 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1610 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
1611 status = be_mbox_notify(ctrl);
1612
1613 mutex_unlock(&ctrl->mbox_lock);
1614 return status;
1615}
1616
1617int beiscsi_cmd_special_wrb(struct be_ctrl_info *ctrl, u32 load)
1618{
1619 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
1620 struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
1621 u8 *endian_check;
1622 int status;
1623
1624 mutex_lock(&ctrl->mbox_lock);
1625 memset(wrb, 0, sizeof(*wrb));
1626
1627 endian_check = (u8 *) wrb;
1628 if (load) {
1629 /* to start communicating */
1630 *endian_check++ = 0xFF;
1631 *endian_check++ = 0x12;
1632 *endian_check++ = 0x34;
1633 *endian_check++ = 0xFF;
1634 *endian_check++ = 0xFF;
1635 *endian_check++ = 0x56;
1636 *endian_check++ = 0x78;
1637 *endian_check++ = 0xFF;
1638 } else {
1639 /* to stop communicating */
1640 *endian_check++ = 0xFF;
1641 *endian_check++ = 0xAA;
1642 *endian_check++ = 0xBB;
1643 *endian_check++ = 0xFF;
1644 *endian_check++ = 0xFF;
1645 *endian_check++ = 0xCC;
1646 *endian_check++ = 0xDD;
1647 *endian_check = 0xFF;
1648 }
1649 be_dws_cpu_to_le(wrb, sizeof(*wrb));
1650
1651 status = be_mbox_notify(ctrl);
1652 if (status)
1653 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
1654 "BC_%d : special WRB message failed\n");
1655 mutex_unlock(&ctrl->mbox_lock);
1656 return status;
1657}
1658
1659int beiscsi_init_sliport(struct beiscsi_hba *phba)
1660{
1661 int status;
1662
1663 /* check POST stage before talking to FW */
1664 status = beiscsi_check_fw_rdy(phba);
1665 if (!status)
1666 return -EIO;
1667
1668 /* clear all error states after checking FW rdy */
1669 phba->state &= ~BEISCSI_HBA_IN_ERR;
1670
1671 /* check again UER support */
1672 phba->state &= ~BEISCSI_HBA_UER_SUPP;
1673
1674 /*
1675 * SLI COMMON_FUNCTION_RESET completion is indicated by BMBX RDY bit.
1676 * It should clean up any stale info in FW for this fn.
1677 */
1678 status = beiscsi_cmd_function_reset(phba);
1679 if (status) {
1680 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
1681 "BC_%d : SLI Function Reset failed\n");
1682 return status;
1683 }
1684
1685 /* indicate driver is loading */
1686 return beiscsi_cmd_special_wrb(&phba->ctrl, 1);
1687}
1688
1689/**
1690 * beiscsi_cmd_iscsi_cleanup()- Inform FW to cleanup EP data structures.
1691 * @phba: pointer to dev priv structure
1692 * @ulp: ULP number.
1693 *
1694 * return
1695 * Success: 0
1696 * Failure: Non-Zero Value
1697 **/
1698int beiscsi_cmd_iscsi_cleanup(struct beiscsi_hba *phba, unsigned short ulp)
1699{
1700 struct be_ctrl_info *ctrl = &phba->ctrl;
1701 struct iscsi_cleanup_req_v1 *req_v1;
1702 struct iscsi_cleanup_req *req;
1703 struct be_mcc_wrb *wrb;
1704 int status;
1705
1706 mutex_lock(&ctrl->mbox_lock);
1707 wrb = wrb_from_mbox(&ctrl->mbox_mem);
1708 req = embedded_payload(wrb);
1709 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1710 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
1711 OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
1712
1713 /**
1714 * TODO: Check with FW folks the chute value to be set.
1715 * For now, use the ULP_MASK as the chute value.
1716 */
1717 if (is_chip_be2_be3r(phba)) {
1718 req->chute = (1 << ulp);
1719 req->hdr_ring_id = HWI_GET_DEF_HDRQ_ID(phba, ulp);
1720 req->data_ring_id = HWI_GET_DEF_BUFQ_ID(phba, ulp);
1721 } else {
1722 req_v1 = (struct iscsi_cleanup_req_v1 *)req;
1723 req_v1->hdr.version = 1;
1724 req_v1->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba,
1725 ulp));
1726 req_v1->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba,
1727 ulp));
1728 }
1729
1730 status = be_mbox_notify(ctrl);
1731 if (status)
1732 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
1733 "BG_%d : %s failed %d\n", __func__, ulp);
1734 mutex_unlock(&ctrl->mbox_lock);
1735 return status;
1736}
1737
1738/*
1739 * beiscsi_detect_ue()- Detect Unrecoverable Error on adapter
1740 * @phba: Driver priv structure
1741 *
1742 * Read registers linked to UE and check for the UE status
1743 **/
1744int beiscsi_detect_ue(struct beiscsi_hba *phba)
1745{
1746 uint32_t ue_mask_hi = 0, ue_mask_lo = 0;
1747 uint32_t ue_hi = 0, ue_lo = 0;
1748 uint8_t i = 0;
1749 int ret = 0;
1750
1751 pci_read_config_dword(phba->pcidev,
1752 PCICFG_UE_STATUS_LOW, &ue_lo);
1753 pci_read_config_dword(phba->pcidev,
1754 PCICFG_UE_STATUS_MASK_LOW,
1755 &ue_mask_lo);
1756 pci_read_config_dword(phba->pcidev,
1757 PCICFG_UE_STATUS_HIGH,
1758 &ue_hi);
1759 pci_read_config_dword(phba->pcidev,
1760 PCICFG_UE_STATUS_MASK_HI,
1761 &ue_mask_hi);
1762
1763 ue_lo = (ue_lo & ~ue_mask_lo);
1764 ue_hi = (ue_hi & ~ue_mask_hi);
1765
1766
1767 if (ue_lo || ue_hi) {
1768 set_bit(BEISCSI_HBA_IN_UE, &phba->state);
1769 __beiscsi_log(phba, KERN_ERR,
1770 "BC_%d : HBA error detected\n");
1771 ret = 1;
1772 }
1773
1774 if (ue_lo) {
1775 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
1776 if (ue_lo & 1)
1777 __beiscsi_log(phba, KERN_ERR,
1778 "BC_%d : UE_LOW %s bit set\n",
1779 desc_ue_status_low[i]);
1780 }
1781 }
1782
1783 if (ue_hi) {
1784 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
1785 if (ue_hi & 1)
1786 __beiscsi_log(phba, KERN_ERR,
1787 "BC_%d : UE_HIGH %s bit set\n",
1788 desc_ue_status_hi[i]);
1789 }
1790 }
1791 return ret;
1792}
1793
1794/*
1795 * beiscsi_detect_tpe()- Detect Transient Parity Error on adapter
1796 * @phba: Driver priv structure
1797 *
1798 * Read SLIPORT SEMAPHORE register to check for UER
1799 *
1800 **/
1801int beiscsi_detect_tpe(struct beiscsi_hba *phba)
1802{
1803 u32 post, status;
1804 int ret = 0;
1805
1806 post = beiscsi_get_post_stage(phba);
1807 status = post & POST_STAGE_MASK;
1808 if ((status & POST_ERR_RECOVERY_CODE_MASK) ==
1809 POST_STAGE_RECOVERABLE_ERR) {
1810 set_bit(BEISCSI_HBA_IN_TPE, &phba->state);
1811 __beiscsi_log(phba, KERN_INFO,
1812 "BC_%d : HBA error recoverable: 0x%x\n", post);
1813 ret = 1;
1814 } else {
1815 __beiscsi_log(phba, KERN_INFO,
1816 "BC_%d : HBA in UE: 0x%x\n", post);
1817 }
1818
1819 return ret;
1820}
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index deeb951e6874..328fb5b973cd 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2015 Emulex 2 * Copyright (C) 2005 - 2016 Broadcom
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -8,7 +8,7 @@
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Contact Information: 10 * Contact Information:
11 * linux-drivers@avagotech.com 11 * linux-drivers@broadcom.com
12 * 12 *
13 * Emulex 13 * Emulex
14 * 3333 Susan Street 14 * 3333 Susan Street
@@ -57,6 +57,7 @@ struct be_mcc_wrb {
57#define MCC_STATUS_ILLEGAL_REQUEST 0x2 57#define MCC_STATUS_ILLEGAL_REQUEST 0x2
58#define MCC_STATUS_ILLEGAL_FIELD 0x3 58#define MCC_STATUS_ILLEGAL_FIELD 0x3
59#define MCC_STATUS_INSUFFICIENT_BUFFER 0x4 59#define MCC_STATUS_INSUFFICIENT_BUFFER 0x4
60#define MCC_STATUS_INVALID_LENGTH 0x74
60 61
61#define CQE_STATUS_COMPL_MASK 0xFFFF 62#define CQE_STATUS_COMPL_MASK 0xFFFF
62#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */ 63#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
@@ -97,11 +98,23 @@ struct be_mcc_compl {
97#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */ 98#define MPU_MAILBOX_DB_RDY_MASK 0x1 /* bit 0 */
98#define MPU_MAILBOX_DB_HI_MASK 0x2 /* bit 1 */ 99#define MPU_MAILBOX_DB_HI_MASK 0x2 /* bit 1 */
99 100
100/********** MPU semphore ******************/ 101/********** MPU semphore: used for SH & BE ******************/
101#define MPU_EP_SEMAPHORE_OFFSET 0xac 102#define SLIPORT_SOFTRESET_OFFSET 0x5c /* CSR BAR offset */
102#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF 103#define SLIPORT_SEMAPHORE_OFFSET_BEx 0xac /* CSR BAR offset */
103#define EP_SEMAPHORE_POST_ERR_MASK 0x1 104#define SLIPORT_SEMAPHORE_OFFSET_SH 0x94 /* PCI-CFG offset */
104#define EP_SEMAPHORE_POST_ERR_SHIFT 31 105#define POST_STAGE_MASK 0x0000FFFF
106#define POST_ERROR_BIT 0x80000000
107#define POST_ERR_RECOVERY_CODE_MASK 0xF000
108
109/* Soft Reset register masks */
110#define SLIPORT_SOFTRESET_SR_MASK 0x00000080 /* SR bit */
111
112/* MPU semphore POST stage values */
113#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting goahead from host */
114#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahed to FW */
115#define POST_STAGE_BE_RESET 0x3 /* Host wants to reset chip */
116#define POST_STAGE_ARMFW_RDY 0xC000 /* FW is done with POST */
117#define POST_STAGE_RECOVERABLE_ERR 0xE000 /* Recoverable err detected */
105 118
106/********** MCC door bell ************/ 119/********** MCC door bell ************/
107#define DB_MCCQ_OFFSET 0x140 120#define DB_MCCQ_OFFSET 0x140
@@ -109,9 +122,6 @@ struct be_mcc_compl {
109/* Number of entries posted */ 122/* Number of entries posted */
110#define DB_MCCQ_NUM_POSTED_SHIFT 16 /* bits 16 - 29 */ 123#define DB_MCCQ_NUM_POSTED_SHIFT 16 /* bits 16 - 29 */
111 124
112/* MPU semphore POST stage values */
113#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
114
115/** 125/**
116 * When the async bit of mcc_compl is set, the last 4 bytes of 126 * When the async bit of mcc_compl is set, the last 4 bytes of
117 * mcc_compl is interpreted as follows: 127 * mcc_compl is interpreted as follows:
@@ -217,6 +227,7 @@ struct be_mcc_mailbox {
217#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58 227#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG 58
218#define OPCODE_COMMON_FUNCTION_RESET 61 228#define OPCODE_COMMON_FUNCTION_RESET 61
219#define OPCODE_COMMON_GET_PORT_NAME 77 229#define OPCODE_COMMON_GET_PORT_NAME 77
230#define OPCODE_COMMON_SET_FEATURES 191
220 231
221/** 232/**
222 * LIST of opcodes that are common between Initiator and Target 233 * LIST of opcodes that are common between Initiator and Target
@@ -345,8 +356,8 @@ struct be_cmd_req_logout_fw_sess {
345 356
346struct be_cmd_resp_logout_fw_sess { 357struct be_cmd_resp_logout_fw_sess {
347 struct be_cmd_resp_hdr hdr; /* dw[4] */ 358 struct be_cmd_resp_hdr hdr; /* dw[4] */
348#define BEISCSI_MGMT_SESSION_CLOSE 0x20
349 uint32_t session_status; 359 uint32_t session_status;
360#define BE_SESS_STATUS_CLOSE 0x20
350} __packed; 361} __packed;
351 362
352struct mgmt_conn_login_options { 363struct mgmt_conn_login_options {
@@ -365,6 +376,14 @@ struct ip_addr_format {
365 u16 size_of_structure; 376 u16 size_of_structure;
366 u8 reserved; 377 u8 reserved;
367 u8 ip_type; 378 u8 ip_type;
379#define BEISCSI_IP_TYPE_V4 0x1
380#define BEISCSI_IP_TYPE_STATIC_V4 0x3
381#define BEISCSI_IP_TYPE_DHCP_V4 0x5
382/* type v4 values < type v6 values */
383#define BEISCSI_IP_TYPE_V6 0x10
384#define BEISCSI_IP_TYPE_ROUTABLE_V6 0x30
385#define BEISCSI_IP_TYPE_LINK_LOCAL_V6 0x50
386#define BEISCSI_IP_TYPE_AUTO_V6 0x90
368 u8 addr[16]; 387 u8 addr[16];
369 u32 rsvd0; 388 u32 rsvd0;
370} __packed; 389} __packed;
@@ -430,8 +449,13 @@ struct be_cmd_get_boot_target_req {
430 449
431struct be_cmd_get_boot_target_resp { 450struct be_cmd_get_boot_target_resp {
432 struct be_cmd_resp_hdr hdr; 451 struct be_cmd_resp_hdr hdr;
433 u32 boot_session_count; 452 u32 boot_session_count;
434 int boot_session_handle; 453 u32 boot_session_handle;
454/**
455 * FW returns 0xffffffff if it couldn't establish connection with
456 * configured boot target.
457 */
458#define BE_BOOT_INVALID_SHANDLE 0xffffffff
435}; 459};
436 460
437struct be_cmd_reopen_session_req { 461struct be_cmd_reopen_session_req {
@@ -699,16 +723,59 @@ struct be_cmd_get_nic_conf_resp {
699 u8 mac_address[ETH_ALEN]; 723 u8 mac_address[ETH_ALEN];
700} __packed; 724} __packed;
701 725
702#define BEISCSI_ALIAS_LEN 32 726/******************** Get HBA NAME *******************/
703 727
704struct be_cmd_hba_name { 728struct be_cmd_hba_name {
705 struct be_cmd_req_hdr hdr; 729 struct be_cmd_req_hdr hdr;
706 u16 flags; 730 u16 flags;
707 u16 rsvd0; 731 u16 rsvd0;
708 u8 initiator_name[ISCSI_NAME_LEN]; 732 u8 initiator_name[ISCSI_NAME_LEN];
709 u8 initiator_alias[BEISCSI_ALIAS_LEN]; 733#define BE_INI_ALIAS_LEN 32
734 u8 initiator_alias[BE_INI_ALIAS_LEN];
710} __packed; 735} __packed;
711 736
737/******************** COMMON SET Features *******************/
738#define BE_CMD_SET_FEATURE_UER 0x10
739#define BE_CMD_UER_SUPP_BIT 0x1
740struct be_uer_req {
741 u32 uer;
742 u32 rsvd;
743};
744
745struct be_uer_resp {
746 u32 uer;
747 u16 ue2rp;
748 u16 ue2sr;
749};
750
751struct be_cmd_set_features {
752 union {
753 struct be_cmd_req_hdr req_hdr;
754 struct be_cmd_resp_hdr resp_hdr;
755 } h;
756 u32 feature;
757 u32 param_len;
758 union {
759 struct be_uer_req req;
760 struct be_uer_resp resp;
761 u32 rsvd[2];
762 } param;
763} __packed;
764
765int beiscsi_cmd_function_reset(struct beiscsi_hba *phba);
766
767int beiscsi_cmd_special_wrb(struct be_ctrl_info *ctrl, u32 load);
768
769int beiscsi_check_fw_rdy(struct beiscsi_hba *phba);
770
771int beiscsi_init_sliport(struct beiscsi_hba *phba);
772
773int beiscsi_cmd_iscsi_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num);
774
775int beiscsi_detect_ue(struct beiscsi_hba *phba);
776
777int beiscsi_detect_tpe(struct beiscsi_hba *phba);
778
712int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl, 779int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
713 struct be_queue_info *eq, int eq_delay); 780 struct be_queue_info *eq, int eq_delay);
714 781
@@ -723,24 +790,21 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
723 struct be_queue_info *mccq, 790 struct be_queue_info *mccq,
724 struct be_queue_info *cq); 791 struct be_queue_info *cq);
725 792
726int be_poll_mcc(struct be_ctrl_info *ctrl);
727int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
728 struct beiscsi_hba *phba);
729unsigned int be_cmd_get_initname(struct beiscsi_hba *phba); 793unsigned int be_cmd_get_initname(struct beiscsi_hba *phba);
730 794
731void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag); 795void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag);
732 796
733int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *, 797int beiscsi_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *,
734 int num); 798 int num);
735int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba, 799int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
736 uint32_t tag, struct be_mcc_wrb **wrb, 800 unsigned int tag,
801 struct be_mcc_wrb **wrb,
737 struct be_dma_mem *mbx_cmd_mem); 802 struct be_dma_mem *mbx_cmd_mem);
738/*ISCSI Functuions */ 803int __beiscsi_mcc_compl_status(struct beiscsi_hba *phba,
739int be_cmd_fw_initialize(struct be_ctrl_info *ctrl); 804 unsigned int tag,
740int be_cmd_fw_uninit(struct be_ctrl_info *ctrl); 805 struct be_mcc_wrb **wrb,
741 806 struct be_dma_mem *mbx_cmd_mem);
742struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem); 807struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem);
743int be_mcc_compl_poll(struct beiscsi_hba *phba, unsigned int tag);
744void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag); 808void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag);
745struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba, 809struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
746 unsigned int *ref_tag); 810 unsigned int *ref_tag);
@@ -749,9 +813,6 @@ void beiscsi_process_async_event(struct beiscsi_hba *phba,
749int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl, 813int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
750 struct be_mcc_compl *compl); 814 struct be_mcc_compl *compl);
751 815
752
753int be_mbox_notify(struct be_ctrl_info *ctrl);
754
755int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl, 816int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
756 struct be_queue_info *cq, 817 struct be_queue_info *cq,
757 struct be_queue_info *dq, int length, 818 struct be_queue_info *dq, int length,
@@ -767,8 +828,6 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
767 struct be_dma_mem *q_mem, u32 page_offset, 828 struct be_dma_mem *q_mem, u32 page_offset,
768 u32 num_pages); 829 u32 num_pages);
769 830
770int beiscsi_cmd_reset_function(struct beiscsi_hba *phba);
771
772int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem, 831int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
773 struct be_queue_info *wrbq, 832 struct be_queue_info *wrbq,
774 struct hwi_wrb_context *pwrb_context, 833 struct hwi_wrb_context *pwrb_context,
@@ -777,6 +836,15 @@ int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
777/* Configuration Functions */ 836/* Configuration Functions */
778int be_cmd_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag); 837int be_cmd_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
779 838
839int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
840 struct beiscsi_hba *phba);
841
842int beiscsi_get_fw_config(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba);
843
844int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba);
845
846int beiscsi_set_uer_feature(struct beiscsi_hba *phba);
847
780struct be_default_pdu_context { 848struct be_default_pdu_context {
781 u32 dw[4]; 849 u32 dw[4];
782} __packed; 850} __packed;
@@ -999,7 +1067,16 @@ struct iscsi_cleanup_req {
999 u16 chute; 1067 u16 chute;
1000 u8 hdr_ring_id; 1068 u8 hdr_ring_id;
1001 u8 data_ring_id; 1069 u8 data_ring_id;
1070} __packed;
1002 1071
1072struct iscsi_cleanup_req_v1 {
1073 struct be_cmd_req_hdr hdr;
1074 u16 chute;
1075 u16 rsvd1;
1076 u16 hdr_ring_id;
1077 u16 rsvd2;
1078 u16 data_ring_id;
1079 u16 rsvd3;
1003} __packed; 1080} __packed;
1004 1081
1005struct eq_delay { 1082struct eq_delay {
@@ -1368,14 +1445,9 @@ struct be_cmd_get_port_name {
1368 * the cxn 1445 * the cxn
1369 */ 1446 */
1370 1447
1371int beiscsi_pci_soft_reset(struct beiscsi_hba *phba);
1372int be_chk_reset_complete(struct beiscsi_hba *phba);
1373
1374void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len, 1448void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
1375 bool embedded, u8 sge_cnt); 1449 bool embedded, u8 sge_cnt);
1376 1450
1377void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, 1451void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
1378 u8 subsystem, u8 opcode, int cmd_len); 1452 u8 subsystem, u8 opcode, int cmd_len);
1379
1380void beiscsi_fail_session(struct iscsi_cls_session *cls_session);
1381#endif /* !BEISCSI_CMDS_H */ 1453#endif /* !BEISCSI_CMDS_H */
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 09f89a3eaa87..ba258217614e 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2015 Emulex 2 * Copyright (C) 2005 - 2016 Broadcom
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -7,10 +7,10 @@
7 * as published by the Free Software Foundation. The full GNU General 7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * linux-drivers@avagotech.com 13 * linux-drivers@broadcom.com
14 * 14 *
15 * Emulex 15 * Emulex
16 * 3333 Susan Street 16 * 3333 Susan Street
@@ -52,22 +52,20 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
52 52
53 53
54 if (!ep) { 54 if (!ep) {
55 printk(KERN_ERR 55 pr_err("beiscsi_session_create: invalid ep\n");
56 "beiscsi_session_create: invalid ep\n");
57 return NULL; 56 return NULL;
58 } 57 }
59 beiscsi_ep = ep->dd_data; 58 beiscsi_ep = ep->dd_data;
60 phba = beiscsi_ep->phba; 59 phba = beiscsi_ep->phba;
61 60
62 if (phba->state & BE_ADAPTER_PCI_ERR) { 61 if (!beiscsi_hba_is_online(phba)) {
63 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
64 "BS_%d : PCI_ERROR Recovery\n");
65 return NULL;
66 } else {
67 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 62 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
68 "BS_%d : In beiscsi_session_create\n"); 63 "BS_%d : HBA in error 0x%lx\n", phba->state);
64 return NULL;
69 } 65 }
70 66
67 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
68 "BS_%d : In beiscsi_session_create\n");
71 if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) { 69 if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) {
72 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 70 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
73 "BS_%d : Cannot handle %d cmds." 71 "BS_%d : Cannot handle %d cmds."
@@ -120,6 +118,16 @@ void beiscsi_session_destroy(struct iscsi_cls_session *cls_session)
120} 118}
121 119
122/** 120/**
121 * beiscsi_session_fail(): Closing session with appropriate error
122 * @cls_session: ptr to session
123 **/
124void beiscsi_session_fail(struct iscsi_cls_session *cls_session)
125{
126 iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
127}
128
129
130/**
123 * beiscsi_conn_create - create an instance of iscsi connection 131 * beiscsi_conn_create - create an instance of iscsi connection
124 * @cls_session: ptr to iscsi_cls_session 132 * @cls_session: ptr to iscsi_cls_session
125 * @cid: iscsi cid 133 * @cid: iscsi cid
@@ -237,7 +245,7 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
237 return beiscsi_bindconn_cid(phba, beiscsi_conn, beiscsi_ep->ep_cid); 245 return beiscsi_bindconn_cid(phba, beiscsi_conn, beiscsi_ep->ep_cid);
238} 246}
239 247
240static int beiscsi_create_ipv4_iface(struct beiscsi_hba *phba) 248static int beiscsi_iface_create_ipv4(struct beiscsi_hba *phba)
241{ 249{
242 if (phba->ipv4_iface) 250 if (phba->ipv4_iface)
243 return 0; 251 return 0;
@@ -256,7 +264,7 @@ static int beiscsi_create_ipv4_iface(struct beiscsi_hba *phba)
256 return 0; 264 return 0;
257} 265}
258 266
259static int beiscsi_create_ipv6_iface(struct beiscsi_hba *phba) 267static int beiscsi_iface_create_ipv6(struct beiscsi_hba *phba)
260{ 268{
261 if (phba->ipv6_iface) 269 if (phba->ipv6_iface)
262 return 0; 270 return 0;
@@ -275,79 +283,31 @@ static int beiscsi_create_ipv6_iface(struct beiscsi_hba *phba)
275 return 0; 283 return 0;
276} 284}
277 285
278void beiscsi_create_def_ifaces(struct beiscsi_hba *phba) 286void beiscsi_iface_create_default(struct beiscsi_hba *phba)
279{ 287{
280 struct be_cmd_get_if_info_resp *if_info; 288 struct be_cmd_get_if_info_resp *if_info;
281 289
282 if (!mgmt_get_if_info(phba, BE2_IPV4, &if_info)) { 290 if (!beiscsi_if_get_info(phba, BEISCSI_IP_TYPE_V4, &if_info)) {
283 beiscsi_create_ipv4_iface(phba); 291 beiscsi_iface_create_ipv4(phba);
284 kfree(if_info); 292 kfree(if_info);
285 } 293 }
286 294
287 if (!mgmt_get_if_info(phba, BE2_IPV6, &if_info)) { 295 if (!beiscsi_if_get_info(phba, BEISCSI_IP_TYPE_V6, &if_info)) {
288 beiscsi_create_ipv6_iface(phba); 296 beiscsi_iface_create_ipv6(phba);
289 kfree(if_info); 297 kfree(if_info);
290 } 298 }
291} 299}
292 300
293void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba) 301void beiscsi_iface_destroy_default(struct beiscsi_hba *phba)
294{ 302{
295 if (phba->ipv6_iface) 303 if (phba->ipv6_iface) {
296 iscsi_destroy_iface(phba->ipv6_iface); 304 iscsi_destroy_iface(phba->ipv6_iface);
297 if (phba->ipv4_iface) 305 phba->ipv6_iface = NULL;
298 iscsi_destroy_iface(phba->ipv4_iface);
299}
300
301static int
302beiscsi_set_static_ip(struct Scsi_Host *shost,
303 struct iscsi_iface_param_info *iface_param,
304 void *data, uint32_t dt_len)
305{
306 struct beiscsi_hba *phba = iscsi_host_priv(shost);
307 struct iscsi_iface_param_info *iface_ip = NULL;
308 struct iscsi_iface_param_info *iface_subnet = NULL;
309 struct nlattr *nla;
310 int ret;
311
312
313 switch (iface_param->param) {
314 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
315 nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_ADDR);
316 if (nla)
317 iface_ip = nla_data(nla);
318
319 nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_SUBNET);
320 if (nla)
321 iface_subnet = nla_data(nla);
322 break;
323 case ISCSI_NET_PARAM_IPV4_ADDR:
324 iface_ip = iface_param;
325 nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_SUBNET);
326 if (nla)
327 iface_subnet = nla_data(nla);
328 break;
329 case ISCSI_NET_PARAM_IPV4_SUBNET:
330 iface_subnet = iface_param;
331 nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_ADDR);
332 if (nla)
333 iface_ip = nla_data(nla);
334 break;
335 default:
336 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
337 "BS_%d : Unsupported param %d\n",
338 iface_param->param);
339 } 306 }
340 307 if (phba->ipv4_iface) {
341 if (!iface_ip || !iface_subnet) { 308 iscsi_destroy_iface(phba->ipv4_iface);
342 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 309 phba->ipv4_iface = NULL;
343 "BS_%d : IP and Subnet Mask required\n");
344 return -EINVAL;
345 } 310 }
346
347 ret = mgmt_set_ip(phba, iface_ip, iface_subnet,
348 ISCSI_BOOTPROTO_STATIC);
349
350 return ret;
351} 311}
352 312
353/** 313/**
@@ -363,137 +323,141 @@ beiscsi_set_static_ip(struct Scsi_Host *shost,
363 * Failure: Non-Zero Value 323 * Failure: Non-Zero Value
364 **/ 324 **/
365static int 325static int
366beiscsi_set_vlan_tag(struct Scsi_Host *shost, 326beiscsi_iface_config_vlan(struct Scsi_Host *shost,
367 struct iscsi_iface_param_info *iface_param) 327 struct iscsi_iface_param_info *iface_param)
368{ 328{
369 struct beiscsi_hba *phba = iscsi_host_priv(shost); 329 struct beiscsi_hba *phba = iscsi_host_priv(shost);
370 int ret; 330 int ret = -EPERM;
371
372 /* Get the Interface Handle */
373 ret = mgmt_get_all_if_id(phba);
374 if (ret) {
375 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
376 "BS_%d : Getting Interface Handle Failed\n");
377 return ret;
378 }
379 331
380 switch (iface_param->param) { 332 switch (iface_param->param) {
381 case ISCSI_NET_PARAM_VLAN_ENABLED: 333 case ISCSI_NET_PARAM_VLAN_ENABLED:
334 ret = 0;
382 if (iface_param->value[0] != ISCSI_VLAN_ENABLE) 335 if (iface_param->value[0] != ISCSI_VLAN_ENABLE)
383 ret = mgmt_set_vlan(phba, BEISCSI_VLAN_DISABLE); 336 ret = beiscsi_if_set_vlan(phba, BEISCSI_VLAN_DISABLE);
384 break; 337 break;
385 case ISCSI_NET_PARAM_VLAN_TAG: 338 case ISCSI_NET_PARAM_VLAN_TAG:
386 ret = mgmt_set_vlan(phba, 339 ret = beiscsi_if_set_vlan(phba,
387 *((uint16_t *)iface_param->value)); 340 *((uint16_t *)iface_param->value));
388 break; 341 break;
389 default:
390 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
391 "BS_%d : Unknown Param Type : %d\n",
392 iface_param->param);
393 return -ENOSYS;
394 } 342 }
395 return ret; 343 return ret;
396} 344}
397 345
398 346
399static int 347static int
400beiscsi_set_ipv4(struct Scsi_Host *shost, 348beiscsi_iface_config_ipv4(struct Scsi_Host *shost,
401 struct iscsi_iface_param_info *iface_param, 349 struct iscsi_iface_param_info *info,
402 void *data, uint32_t dt_len) 350 void *data, uint32_t dt_len)
403{ 351{
404 struct beiscsi_hba *phba = iscsi_host_priv(shost); 352 struct beiscsi_hba *phba = iscsi_host_priv(shost);
405 int ret = 0; 353 u8 *ip = NULL, *subnet = NULL, *gw;
354 struct nlattr *nla;
355 int ret = -EPERM;
406 356
407 /* Check the param */ 357 /* Check the param */
408 switch (iface_param->param) { 358 switch (info->param) {
359 case ISCSI_NET_PARAM_IFACE_ENABLE:
360 if (info->value[0] == ISCSI_IFACE_ENABLE)
361 ret = beiscsi_iface_create_ipv4(phba);
362 else {
363 iscsi_destroy_iface(phba->ipv4_iface);
364 phba->ipv4_iface = NULL;
365 }
366 break;
409 case ISCSI_NET_PARAM_IPV4_GW: 367 case ISCSI_NET_PARAM_IPV4_GW:
410 ret = mgmt_set_gateway(phba, iface_param); 368 gw = info->value;
369 ret = beiscsi_if_set_gw(phba, BEISCSI_IP_TYPE_V4, gw);
411 break; 370 break;
412 case ISCSI_NET_PARAM_IPV4_BOOTPROTO: 371 case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
413 if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP) 372 if (info->value[0] == ISCSI_BOOTPROTO_DHCP)
414 ret = mgmt_set_ip(phba, iface_param, 373 ret = beiscsi_if_en_dhcp(phba, BEISCSI_IP_TYPE_V4);
415 NULL, ISCSI_BOOTPROTO_DHCP); 374 else if (info->value[0] == ISCSI_BOOTPROTO_STATIC)
416 else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC) 375 /* release DHCP IP address */
417 ret = beiscsi_set_static_ip(shost, iface_param, 376 ret = beiscsi_if_en_static(phba, BEISCSI_IP_TYPE_V4,
418 data, dt_len); 377 NULL, NULL);
419 else 378 else
420 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 379 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
421 "BS_%d : Invalid BOOTPROTO: %d\n", 380 "BS_%d : Invalid BOOTPROTO: %d\n",
422 iface_param->value[0]); 381 info->value[0]);
423 break; 382 break;
424 case ISCSI_NET_PARAM_IFACE_ENABLE:
425 if (iface_param->value[0] == ISCSI_IFACE_ENABLE)
426 ret = beiscsi_create_ipv4_iface(phba);
427 else
428 iscsi_destroy_iface(phba->ipv4_iface);
429 break;
430 case ISCSI_NET_PARAM_IPV4_SUBNET:
431 case ISCSI_NET_PARAM_IPV4_ADDR: 383 case ISCSI_NET_PARAM_IPV4_ADDR:
432 ret = beiscsi_set_static_ip(shost, iface_param, 384 ip = info->value;
433 data, dt_len); 385 nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_SUBNET);
386 if (nla) {
387 info = nla_data(nla);
388 subnet = info->value;
389 }
390 ret = beiscsi_if_en_static(phba, BEISCSI_IP_TYPE_V4,
391 ip, subnet);
434 break; 392 break;
435 case ISCSI_NET_PARAM_VLAN_ENABLED: 393 case ISCSI_NET_PARAM_IPV4_SUBNET:
436 case ISCSI_NET_PARAM_VLAN_TAG: 394 /*
437 ret = beiscsi_set_vlan_tag(shost, iface_param); 395 * OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR ioctl needs IP
396 * and subnet both. Find IP to be applied for this subnet.
397 */
398 subnet = info->value;
399 nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_ADDR);
400 if (nla) {
401 info = nla_data(nla);
402 ip = info->value;
403 }
404 ret = beiscsi_if_en_static(phba, BEISCSI_IP_TYPE_V4,
405 ip, subnet);
438 break; 406 break;
439 default:
440 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
441 "BS_%d : Param %d not supported\n",
442 iface_param->param);
443 } 407 }
444 408
445 return ret; 409 return ret;
446} 410}
447 411
448static int 412static int
449beiscsi_set_ipv6(struct Scsi_Host *shost, 413beiscsi_iface_config_ipv6(struct Scsi_Host *shost,
450 struct iscsi_iface_param_info *iface_param, 414 struct iscsi_iface_param_info *iface_param,
451 void *data, uint32_t dt_len) 415 void *data, uint32_t dt_len)
452{ 416{
453 struct beiscsi_hba *phba = iscsi_host_priv(shost); 417 struct beiscsi_hba *phba = iscsi_host_priv(shost);
454 int ret = 0; 418 int ret = -EPERM;
455 419
456 switch (iface_param->param) { 420 switch (iface_param->param) {
457 case ISCSI_NET_PARAM_IFACE_ENABLE: 421 case ISCSI_NET_PARAM_IFACE_ENABLE:
458 if (iface_param->value[0] == ISCSI_IFACE_ENABLE) 422 if (iface_param->value[0] == ISCSI_IFACE_ENABLE)
459 ret = beiscsi_create_ipv6_iface(phba); 423 ret = beiscsi_iface_create_ipv6(phba);
460 else { 424 else {
461 iscsi_destroy_iface(phba->ipv6_iface); 425 iscsi_destroy_iface(phba->ipv6_iface);
462 ret = 0; 426 phba->ipv6_iface = NULL;
463 } 427 }
464 break; 428 break;
465 case ISCSI_NET_PARAM_IPV6_ADDR: 429 case ISCSI_NET_PARAM_IPV6_ADDR:
466 ret = mgmt_set_ip(phba, iface_param, NULL, 430 ret = beiscsi_if_en_static(phba, BEISCSI_IP_TYPE_V6,
467 ISCSI_BOOTPROTO_STATIC); 431 iface_param->value, NULL);
468 break; 432 break;
469 case ISCSI_NET_PARAM_VLAN_ENABLED:
470 case ISCSI_NET_PARAM_VLAN_TAG:
471 ret = beiscsi_set_vlan_tag(shost, iface_param);
472 break;
473 default:
474 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
475 "BS_%d : Param %d not supported\n",
476 iface_param->param);
477 } 433 }
478 434
479 return ret; 435 return ret;
480} 436}
481 437
482int be2iscsi_iface_set_param(struct Scsi_Host *shost, 438int beiscsi_iface_set_param(struct Scsi_Host *shost,
483 void *data, uint32_t dt_len) 439 void *data, uint32_t dt_len)
484{ 440{
485 struct iscsi_iface_param_info *iface_param = NULL; 441 struct iscsi_iface_param_info *iface_param = NULL;
486 struct beiscsi_hba *phba = iscsi_host_priv(shost); 442 struct beiscsi_hba *phba = iscsi_host_priv(shost);
487 struct nlattr *attrib; 443 struct nlattr *attrib;
488 uint32_t rm_len = dt_len; 444 uint32_t rm_len = dt_len;
489 int ret = 0 ; 445 int ret;
490 446
491 if (phba->state & BE_ADAPTER_PCI_ERR) { 447 if (!beiscsi_hba_is_online(phba)) {
492 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 448 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
493 "BS_%d : In PCI_ERROR Recovery\n"); 449 "BS_%d : HBA in error 0x%lx\n", phba->state);
494 return -EBUSY; 450 return -EBUSY;
495 } 451 }
496 452
453 /* update interface_handle */
454 ret = beiscsi_if_get_handle(phba);
455 if (ret) {
456 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
457 "BS_%d : Getting Interface Handle Failed\n");
458 return ret;
459 }
460
497 nla_for_each_attr(attrib, data, dt_len, rm_len) { 461 nla_for_each_attr(attrib, data, dt_len, rm_len) {
498 iface_param = nla_data(attrib); 462 iface_param = nla_data(attrib);
499 463
@@ -512,40 +476,58 @@ int be2iscsi_iface_set_param(struct Scsi_Host *shost,
512 return -EINVAL; 476 return -EINVAL;
513 } 477 }
514 478
515 switch (iface_param->iface_type) { 479 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
516 case ISCSI_IFACE_TYPE_IPV4: 480 "BS_%d : %s.0 set param %d",
517 ret = beiscsi_set_ipv4(shost, iface_param, 481 (iface_param->iface_type == ISCSI_IFACE_TYPE_IPV4) ?
518 data, dt_len); 482 "ipv4" : "ipv6", iface_param->param);
519 break; 483
520 case ISCSI_IFACE_TYPE_IPV6: 484 ret = -EPERM;
521 ret = beiscsi_set_ipv6(shost, iface_param, 485 switch (iface_param->param) {
522 data, dt_len); 486 case ISCSI_NET_PARAM_VLAN_ENABLED:
487 case ISCSI_NET_PARAM_VLAN_TAG:
488 ret = beiscsi_iface_config_vlan(shost, iface_param);
523 break; 489 break;
524 default: 490 default:
525 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 491 switch (iface_param->iface_type) {
526 "BS_%d : Invalid iface type :%d passed\n", 492 case ISCSI_IFACE_TYPE_IPV4:
527 iface_param->iface_type); 493 ret = beiscsi_iface_config_ipv4(shost,
528 break; 494 iface_param,
495 data, dt_len);
496 break;
497 case ISCSI_IFACE_TYPE_IPV6:
498 ret = beiscsi_iface_config_ipv6(shost,
499 iface_param,
500 data, dt_len);
501 break;
502 }
529 } 503 }
530 504
505 if (ret == -EPERM) {
506 __beiscsi_log(phba, KERN_ERR,
507 "BS_%d : %s.0 set param %d not permitted",
508 (iface_param->iface_type ==
509 ISCSI_IFACE_TYPE_IPV4) ? "ipv4" : "ipv6",
510 iface_param->param);
511 ret = 0;
512 }
531 if (ret) 513 if (ret)
532 return ret; 514 break;
533 } 515 }
534 516
535 return ret; 517 return ret;
536} 518}
537 519
538static int be2iscsi_get_if_param(struct beiscsi_hba *phba, 520static int __beiscsi_iface_get_param(struct beiscsi_hba *phba,
539 struct iscsi_iface *iface, int param, 521 struct iscsi_iface *iface,
540 char *buf) 522 int param, char *buf)
541{ 523{
542 struct be_cmd_get_if_info_resp *if_info; 524 struct be_cmd_get_if_info_resp *if_info;
543 int len, ip_type = BE2_IPV4; 525 int len, ip_type = BEISCSI_IP_TYPE_V4;
544 526
545 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) 527 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
546 ip_type = BE2_IPV6; 528 ip_type = BEISCSI_IP_TYPE_V6;
547 529
548 len = mgmt_get_if_info(phba, ip_type, &if_info); 530 len = beiscsi_if_get_info(phba, ip_type, &if_info);
549 if (len) 531 if (len)
550 return len; 532 return len;
551 533
@@ -567,24 +549,24 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
567 break; 549 break;
568 case ISCSI_NET_PARAM_VLAN_ENABLED: 550 case ISCSI_NET_PARAM_VLAN_ENABLED:
569 len = sprintf(buf, "%s\n", 551 len = sprintf(buf, "%s\n",
570 (if_info->vlan_priority == BEISCSI_VLAN_DISABLE) 552 (if_info->vlan_priority == BEISCSI_VLAN_DISABLE) ?
571 ? "Disabled\n" : "Enabled\n"); 553 "disable" : "enable");
572 break; 554 break;
573 case ISCSI_NET_PARAM_VLAN_ID: 555 case ISCSI_NET_PARAM_VLAN_ID:
574 if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE) 556 if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
575 len = -EINVAL; 557 len = -EINVAL;
576 else 558 else
577 len = sprintf(buf, "%d\n", 559 len = sprintf(buf, "%d\n",
578 (if_info->vlan_priority & 560 (if_info->vlan_priority &
579 ISCSI_MAX_VLAN_ID)); 561 ISCSI_MAX_VLAN_ID));
580 break; 562 break;
581 case ISCSI_NET_PARAM_VLAN_PRIORITY: 563 case ISCSI_NET_PARAM_VLAN_PRIORITY:
582 if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE) 564 if (if_info->vlan_priority == BEISCSI_VLAN_DISABLE)
583 len = -EINVAL; 565 len = -EINVAL;
584 else 566 else
585 len = sprintf(buf, "%d\n", 567 len = sprintf(buf, "%d\n",
586 ((if_info->vlan_priority >> 13) & 568 ((if_info->vlan_priority >> 13) &
587 ISCSI_MAX_VLAN_PRIORITY)); 569 ISCSI_MAX_VLAN_PRIORITY));
588 break; 570 break;
589 default: 571 default:
590 WARN_ON(1); 572 WARN_ON(1);
@@ -594,18 +576,20 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
594 return len; 576 return len;
595} 577}
596 578
597int be2iscsi_iface_get_param(struct iscsi_iface *iface, 579int beiscsi_iface_get_param(struct iscsi_iface *iface,
598 enum iscsi_param_type param_type, 580 enum iscsi_param_type param_type,
599 int param, char *buf) 581 int param, char *buf)
600{ 582{
601 struct Scsi_Host *shost = iscsi_iface_to_shost(iface); 583 struct Scsi_Host *shost = iscsi_iface_to_shost(iface);
602 struct beiscsi_hba *phba = iscsi_host_priv(shost); 584 struct beiscsi_hba *phba = iscsi_host_priv(shost);
603 struct be_cmd_get_def_gateway_resp gateway; 585 struct be_cmd_get_def_gateway_resp gateway;
604 int len = -ENOSYS; 586 int len = -EPERM;
605 587
606 if (phba->state & BE_ADAPTER_PCI_ERR) { 588 if (param_type != ISCSI_NET_PARAM)
607 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 589 return 0;
608 "BS_%d : In PCI_ERROR Recovery\n"); 590 if (!beiscsi_hba_is_online(phba)) {
591 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
592 "BS_%d : HBA in error 0x%lx\n", phba->state);
609 return -EBUSY; 593 return -EBUSY;
610 } 594 }
611 595
@@ -617,19 +601,22 @@ int be2iscsi_iface_get_param(struct iscsi_iface *iface,
617 case ISCSI_NET_PARAM_VLAN_ENABLED: 601 case ISCSI_NET_PARAM_VLAN_ENABLED:
618 case ISCSI_NET_PARAM_VLAN_ID: 602 case ISCSI_NET_PARAM_VLAN_ID:
619 case ISCSI_NET_PARAM_VLAN_PRIORITY: 603 case ISCSI_NET_PARAM_VLAN_PRIORITY:
620 len = be2iscsi_get_if_param(phba, iface, param, buf); 604 len = __beiscsi_iface_get_param(phba, iface, param, buf);
621 break; 605 break;
622 case ISCSI_NET_PARAM_IFACE_ENABLE: 606 case ISCSI_NET_PARAM_IFACE_ENABLE:
623 len = sprintf(buf, "enabled\n"); 607 if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4)
608 len = sprintf(buf, "%s\n",
609 phba->ipv4_iface ? "enable" : "disable");
610 else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6)
611 len = sprintf(buf, "%s\n",
612 phba->ipv6_iface ? "enable" : "disable");
624 break; 613 break;
625 case ISCSI_NET_PARAM_IPV4_GW: 614 case ISCSI_NET_PARAM_IPV4_GW:
626 memset(&gateway, 0, sizeof(gateway)); 615 memset(&gateway, 0, sizeof(gateway));
627 len = mgmt_get_gateway(phba, BE2_IPV4, &gateway); 616 len = beiscsi_if_get_gw(phba, BEISCSI_IP_TYPE_V4, &gateway);
628 if (!len) 617 if (!len)
629 len = sprintf(buf, "%pI4\n", &gateway.ip_addr.addr); 618 len = sprintf(buf, "%pI4\n", &gateway.ip_addr.addr);
630 break; 619 break;
631 default:
632 len = -ENOSYS;
633 } 620 }
634 621
635 return len; 622 return len;
@@ -647,7 +634,7 @@ int beiscsi_ep_get_param(struct iscsi_endpoint *ep,
647 enum iscsi_param param, char *buf) 634 enum iscsi_param param, char *buf)
648{ 635{
649 struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; 636 struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
650 int len = 0; 637 int len;
651 638
652 beiscsi_log(beiscsi_ep->phba, KERN_INFO, 639 beiscsi_log(beiscsi_ep->phba, KERN_INFO,
653 BEISCSI_LOG_CONFIG, 640 BEISCSI_LOG_CONFIG,
@@ -659,13 +646,13 @@ int beiscsi_ep_get_param(struct iscsi_endpoint *ep,
659 len = sprintf(buf, "%hu\n", beiscsi_ep->dst_tcpport); 646 len = sprintf(buf, "%hu\n", beiscsi_ep->dst_tcpport);
660 break; 647 break;
661 case ISCSI_PARAM_CONN_ADDRESS: 648 case ISCSI_PARAM_CONN_ADDRESS:
662 if (beiscsi_ep->ip_type == BE2_IPV4) 649 if (beiscsi_ep->ip_type == BEISCSI_IP_TYPE_V4)
663 len = sprintf(buf, "%pI4\n", &beiscsi_ep->dst_addr); 650 len = sprintf(buf, "%pI4\n", &beiscsi_ep->dst_addr);
664 else 651 else
665 len = sprintf(buf, "%pI6\n", &beiscsi_ep->dst6_addr); 652 len = sprintf(buf, "%pI6\n", &beiscsi_ep->dst6_addr);
666 break; 653 break;
667 default: 654 default:
668 return -ENOSYS; 655 len = -EPERM;
669 } 656 }
670 return len; 657 return len;
671} 658}
@@ -758,7 +745,7 @@ static void beiscsi_get_port_state(struct Scsi_Host *shost)
758 struct beiscsi_hba *phba = iscsi_host_priv(shost); 745 struct beiscsi_hba *phba = iscsi_host_priv(shost);
759 struct iscsi_cls_host *ihost = shost->shost_data; 746 struct iscsi_cls_host *ihost = shost->shost_data;
760 747
761 ihost->port_state = (phba->state & BE_ADAPTER_LINK_UP) ? 748 ihost->port_state = test_bit(BEISCSI_HBA_LINK_UP, &phba->state) ?
762 ISCSI_PORT_STATE_UP : ISCSI_PORT_STATE_DOWN; 749 ISCSI_PORT_STATE_UP : ISCSI_PORT_STATE_DOWN;
763} 750}
764 751
@@ -810,16 +797,13 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
810 struct beiscsi_hba *phba = iscsi_host_priv(shost); 797 struct beiscsi_hba *phba = iscsi_host_priv(shost);
811 int status = 0; 798 int status = 0;
812 799
813 800 if (!beiscsi_hba_is_online(phba)) {
814 if (phba->state & BE_ADAPTER_PCI_ERR) {
815 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
816 "BS_%d : In PCI_ERROR Recovery\n");
817 return -EBUSY;
818 } else {
819 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 801 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
820 "BS_%d : In beiscsi_get_host_param," 802 "BS_%d : HBA in error 0x%lx\n", phba->state);
821 " param = %d\n", param); 803 return -EBUSY;
822 } 804 }
805 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
806 "BS_%d : In beiscsi_get_host_param, param = %d\n", param);
823 807
824 switch (param) { 808 switch (param) {
825 case ISCSI_HOST_PARAM_HWADDRESS: 809 case ISCSI_HOST_PARAM_HWADDRESS:
@@ -961,15 +945,13 @@ int beiscsi_conn_start(struct iscsi_cls_conn *cls_conn)
961 945
962 phba = ((struct beiscsi_conn *)conn->dd_data)->phba; 946 phba = ((struct beiscsi_conn *)conn->dd_data)->phba;
963 947
964 if (phba->state & BE_ADAPTER_PCI_ERR) { 948 if (!beiscsi_hba_is_online(phba)) {
965 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 949 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
966 "BS_%d : In PCI_ERROR Recovery\n"); 950 "BS_%d : HBA in error 0x%lx\n", phba->state);
967 return -EBUSY; 951 return -EBUSY;
968 } else {
969 beiscsi_log(beiscsi_conn->phba, KERN_INFO,
970 BEISCSI_LOG_CONFIG,
971 "BS_%d : In beiscsi_conn_start\n");
972 } 952 }
953 beiscsi_log(beiscsi_conn->phba, KERN_INFO, BEISCSI_LOG_CONFIG,
954 "BS_%d : In beiscsi_conn_start\n");
973 955
974 memset(&params, 0, sizeof(struct beiscsi_offload_params)); 956 memset(&params, 0, sizeof(struct beiscsi_offload_params));
975 beiscsi_ep = beiscsi_conn->ep; 957 beiscsi_ep = beiscsi_conn->ep;
@@ -1186,28 +1168,20 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
1186 struct iscsi_endpoint *ep; 1168 struct iscsi_endpoint *ep;
1187 int ret; 1169 int ret;
1188 1170
1189 if (shost) 1171 if (!shost) {
1190 phba = iscsi_host_priv(shost);
1191 else {
1192 ret = -ENXIO; 1172 ret = -ENXIO;
1193 printk(KERN_ERR 1173 pr_err("beiscsi_ep_connect shost is NULL\n");
1194 "beiscsi_ep_connect shost is NULL\n");
1195 return ERR_PTR(ret); 1174 return ERR_PTR(ret);
1196 } 1175 }
1197 1176
1198 if (beiscsi_error(phba)) { 1177 phba = iscsi_host_priv(shost);
1178 if (!beiscsi_hba_is_online(phba)) {
1199 ret = -EIO; 1179 ret = -EIO;
1200 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, 1180 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1201 "BS_%d : The FW state Not Stable!!!\n"); 1181 "BS_%d : HBA in error 0x%lx\n", phba->state);
1202 return ERR_PTR(ret); 1182 return ERR_PTR(ret);
1203 } 1183 }
1204 1184 if (!test_bit(BEISCSI_HBA_LINK_UP, &phba->state)) {
1205 if (phba->state & BE_ADAPTER_PCI_ERR) {
1206 ret = -EBUSY;
1207 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
1208 "BS_%d : In PCI_ERROR Recovery\n");
1209 return ERR_PTR(ret);
1210 } else if (phba->state & BE_ADAPTER_LINK_DOWN) {
1211 ret = -EBUSY; 1185 ret = -EBUSY;
1212 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, 1186 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
1213 "BS_%d : The Adapter Port state is Down!!!\n"); 1187 "BS_%d : The Adapter Port state is Down!!!\n");
@@ -1361,9 +1335,9 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
1361 tcp_upload_flag = CONNECTION_UPLOAD_ABORT; 1335 tcp_upload_flag = CONNECTION_UPLOAD_ABORT;
1362 } 1336 }
1363 1337
1364 if (phba->state & BE_ADAPTER_PCI_ERR) { 1338 if (!beiscsi_hba_is_online(phba)) {
1365 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 1339 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1366 "BS_%d : PCI_ERROR Recovery\n"); 1340 "BS_%d : HBA in error 0x%lx\n", phba->state);
1367 goto free_ep; 1341 goto free_ep;
1368 } 1342 }
1369 1343
@@ -1386,7 +1360,7 @@ free_ep:
1386 iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep); 1360 iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
1387} 1361}
1388 1362
1389umode_t be2iscsi_attr_is_visible(int param_type, int param) 1363umode_t beiscsi_attr_is_visible(int param_type, int param)
1390{ 1364{
1391 switch (param_type) { 1365 switch (param_type) {
1392 case ISCSI_NET_PARAM: 1366 case ISCSI_NET_PARAM:
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 0c84e1c0763a..e4d67dfea4cb 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2015 Avago Technologies 2 * Copyright (C) 2005 - 2016 Broadcom
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -7,10 +7,10 @@
7 * as published by the Free Software Foundation. The full GNU General 7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * linux-drivers@avagotech.com 13 * linux-drivers@broadcom.com
14 * 14 *
15 * Avago Technologies 15 * Avago Technologies
16 * 3333 Susan Street 16 * 3333 Susan Street
@@ -23,25 +23,18 @@
23#include "be_main.h" 23#include "be_main.h"
24#include "be_mgmt.h" 24#include "be_mgmt.h"
25 25
26#define BE2_IPV4 0x1 26void beiscsi_iface_create_default(struct beiscsi_hba *phba);
27#define BE2_IPV6 0x10
28#define BE2_DHCP_V4 0x05
29 27
30#define NON_BLOCKING 0x0 28void beiscsi_iface_destroy_default(struct beiscsi_hba *phba);
31#define BLOCKING 0x1
32 29
33void beiscsi_create_def_ifaces(struct beiscsi_hba *phba); 30int beiscsi_iface_get_param(struct iscsi_iface *iface,
34
35void beiscsi_destroy_def_ifaces(struct beiscsi_hba *phba);
36
37int be2iscsi_iface_get_param(struct iscsi_iface *iface,
38 enum iscsi_param_type param_type, 31 enum iscsi_param_type param_type,
39 int param, char *buf); 32 int param, char *buf);
40 33
41int be2iscsi_iface_set_param(struct Scsi_Host *shost, 34int beiscsi_iface_set_param(struct Scsi_Host *shost,
42 void *data, uint32_t count); 35 void *data, uint32_t count);
43 36
44umode_t be2iscsi_attr_is_visible(int param_type, int param); 37umode_t beiscsi_attr_is_visible(int param_type, int param);
45 38
46void beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, 39void beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
47 struct beiscsi_offload_params *params); 40 struct beiscsi_offload_params *params);
@@ -57,6 +50,8 @@ struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
57 50
58void beiscsi_session_destroy(struct iscsi_cls_session *cls_session); 51void beiscsi_session_destroy(struct iscsi_cls_session *cls_session);
59 52
53void beiscsi_session_fail(struct iscsi_cls_session *cls_session);
54
60struct iscsi_cls_conn *beiscsi_conn_create(struct iscsi_cls_session 55struct iscsi_cls_conn *beiscsi_conn_create(struct iscsi_cls_session
61 *cls_session, uint32_t cid); 56 *cls_session, uint32_t cid);
62 57
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index f05e7737107d..6a6906f847db 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2015 Emulex 2 * Copyright (C) 2005 - 2016 Broadcom
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -7,10 +7,10 @@
7 * as published by the Free Software Foundation. The full GNU General 7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * linux-drivers@avagotech.com 13 * linux-drivers@broadcom.com
14 * 14 *
15 * Emulex 15 * Emulex
16 * 3333 Susan Street 16 * 3333 Susan Street
@@ -374,170 +374,6 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
374 return iscsi_eh_device_reset(sc); 374 return iscsi_eh_device_reset(sc);
375} 375}
376 376
377static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
378{
379 struct beiscsi_hba *phba = data;
380 struct mgmt_session_info *boot_sess = &phba->boot_sess;
381 struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
382 char *str = buf;
383 int rc;
384
385 switch (type) {
386 case ISCSI_BOOT_TGT_NAME:
387 rc = sprintf(buf, "%.*s\n",
388 (int)strlen(boot_sess->target_name),
389 (char *)&boot_sess->target_name);
390 break;
391 case ISCSI_BOOT_TGT_IP_ADDR:
392 if (boot_conn->dest_ipaddr.ip_type == 0x1)
393 rc = sprintf(buf, "%pI4\n",
394 (char *)&boot_conn->dest_ipaddr.addr);
395 else
396 rc = sprintf(str, "%pI6\n",
397 (char *)&boot_conn->dest_ipaddr.addr);
398 break;
399 case ISCSI_BOOT_TGT_PORT:
400 rc = sprintf(str, "%d\n", boot_conn->dest_port);
401 break;
402
403 case ISCSI_BOOT_TGT_CHAP_NAME:
404 rc = sprintf(str, "%.*s\n",
405 boot_conn->negotiated_login_options.auth_data.chap.
406 target_chap_name_length,
407 (char *)&boot_conn->negotiated_login_options.
408 auth_data.chap.target_chap_name);
409 break;
410 case ISCSI_BOOT_TGT_CHAP_SECRET:
411 rc = sprintf(str, "%.*s\n",
412 boot_conn->negotiated_login_options.auth_data.chap.
413 target_secret_length,
414 (char *)&boot_conn->negotiated_login_options.
415 auth_data.chap.target_secret);
416 break;
417 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
418 rc = sprintf(str, "%.*s\n",
419 boot_conn->negotiated_login_options.auth_data.chap.
420 intr_chap_name_length,
421 (char *)&boot_conn->negotiated_login_options.
422 auth_data.chap.intr_chap_name);
423 break;
424 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
425 rc = sprintf(str, "%.*s\n",
426 boot_conn->negotiated_login_options.auth_data.chap.
427 intr_secret_length,
428 (char *)&boot_conn->negotiated_login_options.
429 auth_data.chap.intr_secret);
430 break;
431 case ISCSI_BOOT_TGT_FLAGS:
432 rc = sprintf(str, "2\n");
433 break;
434 case ISCSI_BOOT_TGT_NIC_ASSOC:
435 rc = sprintf(str, "0\n");
436 break;
437 default:
438 rc = -ENOSYS;
439 break;
440 }
441 return rc;
442}
443
444static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
445{
446 struct beiscsi_hba *phba = data;
447 char *str = buf;
448 int rc;
449
450 switch (type) {
451 case ISCSI_BOOT_INI_INITIATOR_NAME:
452 rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
453 break;
454 default:
455 rc = -ENOSYS;
456 break;
457 }
458 return rc;
459}
460
461static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
462{
463 struct beiscsi_hba *phba = data;
464 char *str = buf;
465 int rc;
466
467 switch (type) {
468 case ISCSI_BOOT_ETH_FLAGS:
469 rc = sprintf(str, "2\n");
470 break;
471 case ISCSI_BOOT_ETH_INDEX:
472 rc = sprintf(str, "0\n");
473 break;
474 case ISCSI_BOOT_ETH_MAC:
475 rc = beiscsi_get_macaddr(str, phba);
476 break;
477 default:
478 rc = -ENOSYS;
479 break;
480 }
481 return rc;
482}
483
484
485static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
486{
487 umode_t rc;
488
489 switch (type) {
490 case ISCSI_BOOT_TGT_NAME:
491 case ISCSI_BOOT_TGT_IP_ADDR:
492 case ISCSI_BOOT_TGT_PORT:
493 case ISCSI_BOOT_TGT_CHAP_NAME:
494 case ISCSI_BOOT_TGT_CHAP_SECRET:
495 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
496 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
497 case ISCSI_BOOT_TGT_NIC_ASSOC:
498 case ISCSI_BOOT_TGT_FLAGS:
499 rc = S_IRUGO;
500 break;
501 default:
502 rc = 0;
503 break;
504 }
505 return rc;
506}
507
508static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
509{
510 umode_t rc;
511
512 switch (type) {
513 case ISCSI_BOOT_INI_INITIATOR_NAME:
514 rc = S_IRUGO;
515 break;
516 default:
517 rc = 0;
518 break;
519 }
520 return rc;
521}
522
523
524static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
525{
526 umode_t rc;
527
528 switch (type) {
529 case ISCSI_BOOT_ETH_FLAGS:
530 case ISCSI_BOOT_ETH_MAC:
531 case ISCSI_BOOT_ETH_INDEX:
532 rc = S_IRUGO;
533 break;
534 default:
535 rc = 0;
536 break;
537 }
538 return rc;
539}
540
541/*------------------- PCI Driver operations and data ----------------- */ 377/*------------------- PCI Driver operations and data ----------------- */
542static const struct pci_device_id beiscsi_pci_id_table[] = { 378static const struct pci_device_id beiscsi_pci_id_table[] = {
543 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, 379 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
@@ -850,12 +686,11 @@ static void hwi_ring_eq_db(struct beiscsi_hba *phba,
850static irqreturn_t be_isr_mcc(int irq, void *dev_id) 686static irqreturn_t be_isr_mcc(int irq, void *dev_id)
851{ 687{
852 struct beiscsi_hba *phba; 688 struct beiscsi_hba *phba;
853 struct be_eq_entry *eqe = NULL; 689 struct be_eq_entry *eqe;
854 struct be_queue_info *eq; 690 struct be_queue_info *eq;
855 struct be_queue_info *mcc; 691 struct be_queue_info *mcc;
856 unsigned int num_eq_processed; 692 unsigned int mcc_events;
857 struct be_eq_obj *pbe_eq; 693 struct be_eq_obj *pbe_eq;
858 unsigned long flags;
859 694
860 pbe_eq = dev_id; 695 pbe_eq = dev_id;
861 eq = &pbe_eq->q; 696 eq = &pbe_eq->q;
@@ -863,27 +698,23 @@ static irqreturn_t be_isr_mcc(int irq, void *dev_id)
863 mcc = &phba->ctrl.mcc_obj.cq; 698 mcc = &phba->ctrl.mcc_obj.cq;
864 eqe = queue_tail_node(eq); 699 eqe = queue_tail_node(eq);
865 700
866 num_eq_processed = 0; 701 mcc_events = 0;
867
868 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 702 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
869 & EQE_VALID_MASK) { 703 & EQE_VALID_MASK) {
870 if (((eqe->dw[offsetof(struct amap_eq_entry, 704 if (((eqe->dw[offsetof(struct amap_eq_entry,
871 resource_id) / 32] & 705 resource_id) / 32] &
872 EQE_RESID_MASK) >> 16) == mcc->id) { 706 EQE_RESID_MASK) >> 16) == mcc->id) {
873 spin_lock_irqsave(&phba->isr_lock, flags); 707 mcc_events++;
874 pbe_eq->todo_mcc_cq = true;
875 spin_unlock_irqrestore(&phba->isr_lock, flags);
876 } 708 }
877 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 709 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
878 queue_tail_inc(eq); 710 queue_tail_inc(eq);
879 eqe = queue_tail_node(eq); 711 eqe = queue_tail_node(eq);
880 num_eq_processed++;
881 } 712 }
882 if (pbe_eq->todo_mcc_cq)
883 queue_work(phba->wq, &pbe_eq->work_cqs);
884 if (num_eq_processed)
885 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
886 713
714 if (mcc_events) {
715 queue_work(phba->wq, &pbe_eq->mcc_work);
716 hwi_ring_eq_db(phba, eq->id, 1, mcc_events, 1, 1);
717 }
887 return IRQ_HANDLED; 718 return IRQ_HANDLED;
888} 719}
889 720
@@ -902,7 +733,6 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
902 eq = &pbe_eq->q; 733 eq = &pbe_eq->q;
903 734
904 phba = pbe_eq->phba; 735 phba = pbe_eq->phba;
905
906 /* disable interrupt till iopoll completes */ 736 /* disable interrupt till iopoll completes */
907 hwi_ring_eq_db(phba, eq->id, 1, 0, 0, 1); 737 hwi_ring_eq_db(phba, eq->id, 1, 0, 0, 1);
908 irq_poll_sched(&pbe_eq->iopoll); 738 irq_poll_sched(&pbe_eq->iopoll);
@@ -920,14 +750,13 @@ static irqreturn_t be_isr(int irq, void *dev_id)
920 struct beiscsi_hba *phba; 750 struct beiscsi_hba *phba;
921 struct hwi_controller *phwi_ctrlr; 751 struct hwi_controller *phwi_ctrlr;
922 struct hwi_context_memory *phwi_context; 752 struct hwi_context_memory *phwi_context;
923 struct be_eq_entry *eqe = NULL; 753 struct be_eq_entry *eqe;
924 struct be_queue_info *eq; 754 struct be_queue_info *eq;
925 struct be_queue_info *mcc; 755 struct be_queue_info *mcc;
926 unsigned long flags, index; 756 unsigned int mcc_events, io_events;
927 unsigned int num_mcceq_processed, num_ioeq_processed;
928 struct be_ctrl_info *ctrl; 757 struct be_ctrl_info *ctrl;
929 struct be_eq_obj *pbe_eq; 758 struct be_eq_obj *pbe_eq;
930 int isr; 759 int isr, rearm;
931 760
932 phba = dev_id; 761 phba = dev_id;
933 ctrl = &phba->ctrl; 762 ctrl = &phba->ctrl;
@@ -942,44 +771,35 @@ static irqreturn_t be_isr(int irq, void *dev_id)
942 771
943 eq = &phwi_context->be_eq[0].q; 772 eq = &phwi_context->be_eq[0].q;
944 mcc = &phba->ctrl.mcc_obj.cq; 773 mcc = &phba->ctrl.mcc_obj.cq;
945 index = 0;
946 eqe = queue_tail_node(eq); 774 eqe = queue_tail_node(eq);
947 775
948 num_ioeq_processed = 0; 776 io_events = 0;
949 num_mcceq_processed = 0; 777 mcc_events = 0;
950 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 778 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
951 & EQE_VALID_MASK) { 779 & EQE_VALID_MASK) {
952 if (((eqe->dw[offsetof(struct amap_eq_entry, 780 if (((eqe->dw[offsetof(struct amap_eq_entry,
953 resource_id) / 32] & 781 resource_id) / 32] & EQE_RESID_MASK) >> 16) == mcc->id)
954 EQE_RESID_MASK) >> 16) == mcc->id) { 782 mcc_events++;
955 spin_lock_irqsave(&phba->isr_lock, flags); 783 else
956 pbe_eq->todo_mcc_cq = true; 784 io_events++;
957 spin_unlock_irqrestore(&phba->isr_lock, flags);
958 num_mcceq_processed++;
959 } else {
960 irq_poll_sched(&pbe_eq->iopoll);
961 num_ioeq_processed++;
962 }
963 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 785 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
964 queue_tail_inc(eq); 786 queue_tail_inc(eq);
965 eqe = queue_tail_node(eq); 787 eqe = queue_tail_node(eq);
966 } 788 }
967 if (num_ioeq_processed || num_mcceq_processed) { 789 if (!io_events && !mcc_events)
968 if (pbe_eq->todo_mcc_cq)
969 queue_work(phba->wq, &pbe_eq->work_cqs);
970
971 if ((num_mcceq_processed) && (!num_ioeq_processed))
972 hwi_ring_eq_db(phba, eq->id, 0,
973 (num_ioeq_processed +
974 num_mcceq_processed) , 1, 1);
975 else
976 hwi_ring_eq_db(phba, eq->id, 0,
977 (num_ioeq_processed +
978 num_mcceq_processed), 0, 1);
979
980 return IRQ_HANDLED;
981 } else
982 return IRQ_NONE; 790 return IRQ_NONE;
791
792 /* no need to rearm if interrupt is only for IOs */
793 rearm = 0;
794 if (mcc_events) {
795 queue_work(phba->wq, &pbe_eq->mcc_work);
796 /* rearm for MCCQ */
797 rearm = 1;
798 }
799 if (io_events)
800 irq_poll_sched(&pbe_eq->iopoll);
801 hwi_ring_eq_db(phba, eq->id, 0, (io_events + mcc_events), rearm, 1);
802 return IRQ_HANDLED;
983} 803}
984 804
985 805
@@ -1077,57 +897,6 @@ void hwi_ring_cq_db(struct beiscsi_hba *phba,
1077 iowrite32(val, phba->db_va + DB_CQ_OFFSET); 897 iowrite32(val, phba->db_va + DB_CQ_OFFSET);
1078} 898}
1079 899
1080static unsigned int
1081beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
1082 struct beiscsi_hba *phba,
1083 struct pdu_base *ppdu,
1084 unsigned long pdu_len,
1085 void *pbuffer, unsigned long buf_len)
1086{
1087 struct iscsi_conn *conn = beiscsi_conn->conn;
1088 struct iscsi_session *session = conn->session;
1089 struct iscsi_task *task;
1090 struct beiscsi_io_task *io_task;
1091 struct iscsi_hdr *login_hdr;
1092
1093 switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
1094 PDUBASE_OPCODE_MASK) {
1095 case ISCSI_OP_NOOP_IN:
1096 pbuffer = NULL;
1097 buf_len = 0;
1098 break;
1099 case ISCSI_OP_ASYNC_EVENT:
1100 break;
1101 case ISCSI_OP_REJECT:
1102 WARN_ON(!pbuffer);
1103 WARN_ON(!(buf_len == 48));
1104 beiscsi_log(phba, KERN_ERR,
1105 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1106 "BM_%d : In ISCSI_OP_REJECT\n");
1107 break;
1108 case ISCSI_OP_LOGIN_RSP:
1109 case ISCSI_OP_TEXT_RSP:
1110 task = conn->login_task;
1111 io_task = task->dd_data;
1112 login_hdr = (struct iscsi_hdr *)ppdu;
1113 login_hdr->itt = io_task->libiscsi_itt;
1114 break;
1115 default:
1116 beiscsi_log(phba, KERN_WARNING,
1117 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1118 "BM_%d : Unrecognized opcode 0x%x in async msg\n",
1119 (ppdu->
1120 dw[offsetof(struct amap_pdu_base, opcode) / 32]
1121 & PDUBASE_OPCODE_MASK));
1122 return 1;
1123 }
1124
1125 spin_lock_bh(&session->back_lock);
1126 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
1127 spin_unlock_bh(&session->back_lock);
1128 return 0;
1129}
1130
1131static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba) 900static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
1132{ 901{
1133 struct sgl_handle *psgl_handle; 902 struct sgl_handle *psgl_handle;
@@ -1199,6 +968,9 @@ beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
1199 pwrb_context->alloc_index++; 968 pwrb_context->alloc_index++;
1200 spin_unlock_bh(&pwrb_context->wrb_lock); 969 spin_unlock_bh(&pwrb_context->wrb_lock);
1201 970
971 if (pwrb_handle)
972 memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb));
973
1202 return pwrb_handle; 974 return pwrb_handle;
1203} 975}
1204 976
@@ -1440,11 +1212,10 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
1440 struct beiscsi_hba *phba, struct sol_cqe *psol) 1212 struct beiscsi_hba *phba, struct sol_cqe *psol)
1441{ 1213{
1442 struct hwi_wrb_context *pwrb_context; 1214 struct hwi_wrb_context *pwrb_context;
1443 struct wrb_handle *pwrb_handle = NULL; 1215 uint16_t wrb_index, cid, cri_index;
1444 struct hwi_controller *phwi_ctrlr; 1216 struct hwi_controller *phwi_ctrlr;
1217 struct wrb_handle *pwrb_handle;
1445 struct iscsi_task *task; 1218 struct iscsi_task *task;
1446 struct beiscsi_io_task *io_task;
1447 uint16_t wrb_index, cid, cri_index;
1448 1219
1449 phwi_ctrlr = phba->phwi_ctrlr; 1220 phwi_ctrlr = phba->phwi_ctrlr;
1450 if (is_chip_be2_be3r(phba)) { 1221 if (is_chip_be2_be3r(phba)) {
@@ -1463,9 +1234,6 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
1463 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 1234 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
1464 pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index]; 1235 pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
1465 task = pwrb_handle->pio_handle; 1236 task = pwrb_handle->pio_handle;
1466
1467 io_task = task->dd_data;
1468 memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb));
1469 iscsi_put_task(task); 1237 iscsi_put_task(task);
1470} 1238}
1471 1239
@@ -1614,431 +1382,428 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1614 spin_unlock_bh(&session->back_lock); 1382 spin_unlock_bh(&session->back_lock);
1615} 1383}
1616 1384
1617static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context 1385/**
1618 *pasync_ctx, unsigned int is_header, 1386 * ASYNC PDUs include
1619 unsigned int host_write_ptr) 1387 * a. Unsolicited NOP-In (target initiated NOP-In)
1388 * b. ASYNC Messages
1389 * c. Reject PDU
1390 * d. Login response
1391 * These headers arrive unprocessed by the EP firmware.
1392 * iSCSI layer processes them.
1393 */
1394static unsigned int
1395beiscsi_complete_pdu(struct beiscsi_conn *beiscsi_conn,
1396 struct pdu_base *phdr, void *pdata, unsigned int dlen)
1620{ 1397{
1621 if (is_header) 1398 struct beiscsi_hba *phba = beiscsi_conn->phba;
1622 return &pasync_ctx->async_entry[host_write_ptr]. 1399 struct iscsi_conn *conn = beiscsi_conn->conn;
1623 header_busy_list; 1400 struct beiscsi_io_task *io_task;
1624 else 1401 struct iscsi_hdr *login_hdr;
1625 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list; 1402 struct iscsi_task *task;
1403 u8 code;
1404
1405 code = AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr);
1406 switch (code) {
1407 case ISCSI_OP_NOOP_IN:
1408 pdata = NULL;
1409 dlen = 0;
1410 break;
1411 case ISCSI_OP_ASYNC_EVENT:
1412 break;
1413 case ISCSI_OP_REJECT:
1414 WARN_ON(!pdata);
1415 WARN_ON(!(dlen == 48));
1416 beiscsi_log(phba, KERN_ERR,
1417 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1418 "BM_%d : In ISCSI_OP_REJECT\n");
1419 break;
1420 case ISCSI_OP_LOGIN_RSP:
1421 case ISCSI_OP_TEXT_RSP:
1422 task = conn->login_task;
1423 io_task = task->dd_data;
1424 login_hdr = (struct iscsi_hdr *)phdr;
1425 login_hdr->itt = io_task->libiscsi_itt;
1426 break;
1427 default:
1428 beiscsi_log(phba, KERN_WARNING,
1429 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1430 "BM_%d : unrecognized async PDU opcode 0x%x\n",
1431 code);
1432 return 1;
1433 }
1434 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)phdr, pdata, dlen);
1435 return 0;
1436}
1437
1438static inline void
1439beiscsi_hdl_put_handle(struct hd_async_context *pasync_ctx,
1440 struct hd_async_handle *pasync_handle)
1441{
1442 if (pasync_handle->is_header) {
1443 list_add_tail(&pasync_handle->link,
1444 &pasync_ctx->async_header.free_list);
1445 pasync_ctx->async_header.free_entries++;
1446 } else {
1447 list_add_tail(&pasync_handle->link,
1448 &pasync_ctx->async_data.free_list);
1449 pasync_ctx->async_data.free_entries++;
1450 }
1626} 1451}
1627 1452
1628static struct async_pdu_handle * 1453static struct hd_async_handle *
1629hwi_get_async_handle(struct beiscsi_hba *phba, 1454beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn,
1630 struct beiscsi_conn *beiscsi_conn, 1455 struct hd_async_context *pasync_ctx,
1631 struct hwi_async_pdu_context *pasync_ctx, 1456 struct i_t_dpdu_cqe *pdpdu_cqe)
1632 struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
1633{ 1457{
1458 struct beiscsi_hba *phba = beiscsi_conn->phba;
1459 struct hd_async_handle *pasync_handle;
1634 struct be_bus_address phys_addr; 1460 struct be_bus_address phys_addr;
1635 struct list_head *pbusy_list; 1461 u8 final, error = 0;
1636 struct async_pdu_handle *pasync_handle = NULL; 1462 u16 cid, code, ci;
1637 unsigned char is_header = 0; 1463 u32 dpl;
1638 unsigned int index, dpl;
1639 1464
1465 cid = beiscsi_conn->beiscsi_conn_cid;
1466 /**
1467 * This function is invoked to get the right async_handle structure
1468 * from a given DEF PDU CQ entry.
1469 *
1470 * - index in CQ entry gives the vertical index
1471 * - address in CQ entry is the offset where the DMA last ended
1472 * - final - no more notifications for this PDU
1473 */
1640 if (is_chip_be2_be3r(phba)) { 1474 if (is_chip_be2_be3r(phba)) {
1641 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1475 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
1642 dpl, pdpdu_cqe); 1476 dpl, pdpdu_cqe);
1643 index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1477 ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
1644 index, pdpdu_cqe); 1478 index, pdpdu_cqe);
1479 final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
1480 final, pdpdu_cqe);
1645 } else { 1481 } else {
1646 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, 1482 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
1647 dpl, pdpdu_cqe); 1483 dpl, pdpdu_cqe);
1648 index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, 1484 ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
1649 index, pdpdu_cqe); 1485 index, pdpdu_cqe);
1486 final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
1487 final, pdpdu_cqe);
1650 } 1488 }
1651 1489
1652 phys_addr.u.a32.address_lo = 1490 /**
1653 (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, 1491 * DB addr Hi/Lo is same for BE and SKH.
1654 db_addr_lo) / 32] - dpl); 1492 * Subtract the dataplacementlength to get to the base.
1655 phys_addr.u.a32.address_hi = 1493 */
1656 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, 1494 phys_addr.u.a32.address_lo = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
1657 db_addr_hi) / 32]; 1495 db_addr_lo, pdpdu_cqe);
1658 1496 phys_addr.u.a32.address_lo -= dpl;
1659 phys_addr.u.a64.address = 1497 phys_addr.u.a32.address_hi = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
1660 *((unsigned long long *)(&phys_addr.u.a64.address)); 1498 db_addr_hi, pdpdu_cqe);
1661 1499
1662 switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32] 1500 code = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, code, pdpdu_cqe);
1663 & PDUCQE_CODE_MASK) { 1501 switch (code) {
1664 case UNSOL_HDR_NOTIFY: 1502 case UNSOL_HDR_NOTIFY:
1665 is_header = 1; 1503 pasync_handle = pasync_ctx->async_entry[ci].header;
1666
1667 pbusy_list = hwi_get_async_busy_list(pasync_ctx,
1668 is_header, index);
1669 break; 1504 break;
1505 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
1506 error = 1;
1670 case UNSOL_DATA_NOTIFY: 1507 case UNSOL_DATA_NOTIFY:
1671 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1508 pasync_handle = pasync_ctx->async_entry[ci].data;
1672 is_header, index);
1673 break; 1509 break;
1510 /* called only for above codes */
1674 default: 1511 default:
1675 pbusy_list = NULL; 1512 pasync_handle = NULL;
1676 beiscsi_log(phba, KERN_WARNING, 1513 break;
1677 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1678 "BM_%d : Unexpected code=%d\n",
1679 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1680 code) / 32] & PDUCQE_CODE_MASK);
1681 return NULL;
1682 } 1514 }
1683 1515
1684 WARN_ON(list_empty(pbusy_list)); 1516 if (!pasync_handle) {
1685 list_for_each_entry(pasync_handle, pbusy_list, link) { 1517 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
1686 if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address) 1518 "BM_%d : cid %d async PDU handle not found - code %d ci %d addr %llx\n",
1687 break; 1519 cid, code, ci, phys_addr.u.a64.address);
1520 return pasync_handle;
1688 } 1521 }
1689 1522
1690 WARN_ON(!pasync_handle); 1523 if (pasync_handle->pa.u.a64.address != phys_addr.u.a64.address ||
1524 pasync_handle->index != ci) {
1525 /* driver bug - if ci does not match async handle index */
1526 error = 1;
1527 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
1528 "BM_%d : cid %u async PDU handle mismatch - addr in %cQE %llx at %u:addr in CQE %llx ci %u\n",
1529 cid, pasync_handle->is_header ? 'H' : 'D',
1530 pasync_handle->pa.u.a64.address,
1531 pasync_handle->index,
1532 phys_addr.u.a64.address, ci);
1533 /* FW has stale address - attempt continuing by dropping */
1534 }
1691 1535
1692 pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID( 1536 /**
1693 beiscsi_conn->beiscsi_conn_cid); 1537 * Each CID is associated with unique CRI.
1694 pasync_handle->is_header = is_header; 1538 * ASYNC_CRI_FROM_CID mapping and CRI_FROM_CID are totaly different.
1539 **/
1540 pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID(cid);
1541 pasync_handle->is_final = final;
1695 pasync_handle->buffer_len = dpl; 1542 pasync_handle->buffer_len = dpl;
1696 *pcq_index = index; 1543 /* empty the slot */
1544 if (pasync_handle->is_header)
1545 pasync_ctx->async_entry[ci].header = NULL;
1546 else
1547 pasync_ctx->async_entry[ci].data = NULL;
1697 1548
1549 /**
1550 * DEF PDU header and data buffers with errors should be simply
1551 * dropped as there are no consumers for it.
1552 */
1553 if (error) {
1554 beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
1555 pasync_handle = NULL;
1556 }
1698 return pasync_handle; 1557 return pasync_handle;
1699} 1558}
1700 1559
1701static unsigned int 1560static void
1702hwi_update_async_writables(struct beiscsi_hba *phba, 1561beiscsi_hdl_purge_handles(struct beiscsi_hba *phba,
1703 struct hwi_async_pdu_context *pasync_ctx, 1562 struct hd_async_context *pasync_ctx,
1704 unsigned int is_header, unsigned int cq_index) 1563 u16 cri)
1705{ 1564{
1706 struct list_head *pbusy_list; 1565 struct hd_async_handle *pasync_handle, *tmp_handle;
1707 struct async_pdu_handle *pasync_handle; 1566 struct list_head *plist;
1708 unsigned int num_entries, writables = 0;
1709 unsigned int *pep_read_ptr, *pwritables;
1710
1711 num_entries = pasync_ctx->num_entries;
1712 if (is_header) {
1713 pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
1714 pwritables = &pasync_ctx->async_header.writables;
1715 } else {
1716 pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
1717 pwritables = &pasync_ctx->async_data.writables;
1718 }
1719
1720 while ((*pep_read_ptr) != cq_index) {
1721 (*pep_read_ptr)++;
1722 *pep_read_ptr = (*pep_read_ptr) % num_entries;
1723
1724 pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
1725 *pep_read_ptr);
1726 if (writables == 0)
1727 WARN_ON(list_empty(pbusy_list));
1728
1729 if (!list_empty(pbusy_list)) {
1730 pasync_handle = list_entry(pbusy_list->next,
1731 struct async_pdu_handle,
1732 link);
1733 WARN_ON(!pasync_handle);
1734 pasync_handle->consumed = 1;
1735 }
1736
1737 writables++;
1738 }
1739 1567
1740 if (!writables) { 1568 plist = &pasync_ctx->async_entry[cri].wq.list;
1741 beiscsi_log(phba, KERN_ERR, 1569 list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1742 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1570 list_del(&pasync_handle->link);
1743 "BM_%d : Duplicate notification received - index 0x%x!!\n", 1571 beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
1744 cq_index);
1745 WARN_ON(1);
1746 } 1572 }
1747 1573
1748 *pwritables = *pwritables + writables; 1574 INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wq.list);
1749 return 0; 1575 pasync_ctx->async_entry[cri].wq.hdr_len = 0;
1576 pasync_ctx->async_entry[cri].wq.bytes_received = 0;
1577 pasync_ctx->async_entry[cri].wq.bytes_needed = 0;
1750} 1578}
1751 1579
1752static void hwi_free_async_msg(struct beiscsi_hba *phba, 1580static unsigned int
1753 struct hwi_async_pdu_context *pasync_ctx, 1581beiscsi_hdl_fwd_pdu(struct beiscsi_conn *beiscsi_conn,
1754 unsigned int cri) 1582 struct hd_async_context *pasync_ctx,
1583 u16 cri)
1755{ 1584{
1756 struct async_pdu_handle *pasync_handle, *tmp_handle; 1585 struct iscsi_session *session = beiscsi_conn->conn->session;
1586 struct hd_async_handle *pasync_handle, *plast_handle;
1587 struct beiscsi_hba *phba = beiscsi_conn->phba;
1588 void *phdr = NULL, *pdata = NULL;
1589 u32 dlen = 0, status = 0;
1757 struct list_head *plist; 1590 struct list_head *plist;
1758 1591
1759 plist = &pasync_ctx->async_entry[cri].wait_queue.list; 1592 plist = &pasync_ctx->async_entry[cri].wq.list;
1760 list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) { 1593 plast_handle = NULL;
1761 list_del(&pasync_handle->link); 1594 list_for_each_entry(pasync_handle, plist, link) {
1762 1595 plast_handle = pasync_handle;
1763 if (pasync_handle->is_header) { 1596 /* get the header, the first entry */
1764 list_add_tail(&pasync_handle->link, 1597 if (!phdr) {
1765 &pasync_ctx->async_header.free_list); 1598 phdr = pasync_handle->pbuffer;
1766 pasync_ctx->async_header.free_entries++; 1599 continue;
1767 } else { 1600 }
1768 list_add_tail(&pasync_handle->link, 1601 /* use first buffer to collect all the data */
1769 &pasync_ctx->async_data.free_list); 1602 if (!pdata) {
1770 pasync_ctx->async_data.free_entries++; 1603 pdata = pasync_handle->pbuffer;
1604 dlen = pasync_handle->buffer_len;
1605 continue;
1771 } 1606 }
1607 memcpy(pdata + dlen, pasync_handle->pbuffer,
1608 pasync_handle->buffer_len);
1609 dlen += pasync_handle->buffer_len;
1772 } 1610 }
1773 1611
1774 INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list); 1612 if (!plast_handle->is_final) {
1775 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0; 1613 /* last handle should have final PDU notification from FW */
1776 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0; 1614 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
1615 "BM_%d : cid %u %p fwd async PDU with last handle missing - HL%u:DN%u:DR%u\n",
1616 beiscsi_conn->beiscsi_conn_cid, plast_handle,
1617 pasync_ctx->async_entry[cri].wq.hdr_len,
1618 pasync_ctx->async_entry[cri].wq.bytes_needed,
1619 pasync_ctx->async_entry[cri].wq.bytes_received);
1620 }
1621 spin_lock_bh(&session->back_lock);
1622 status = beiscsi_complete_pdu(beiscsi_conn, phdr, pdata, dlen);
1623 spin_unlock_bh(&session->back_lock);
1624 beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
1625 return status;
1777} 1626}
1778 1627
1779static struct phys_addr * 1628static unsigned int
1780hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx, 1629beiscsi_hdl_gather_pdu(struct beiscsi_conn *beiscsi_conn,
1781 unsigned int is_header, unsigned int host_write_ptr) 1630 struct hd_async_context *pasync_ctx,
1631 struct hd_async_handle *pasync_handle)
1782{ 1632{
1783 struct phys_addr *pasync_sge = NULL; 1633 unsigned int bytes_needed = 0, status = 0;
1634 u16 cri = pasync_handle->cri;
1635 struct cri_wait_queue *wq;
1636 struct beiscsi_hba *phba;
1637 struct pdu_base *ppdu;
1638 char *err = "";
1784 1639
1785 if (is_header) 1640 phba = beiscsi_conn->phba;
1786 pasync_sge = pasync_ctx->async_header.ring_base; 1641 wq = &pasync_ctx->async_entry[cri].wq;
1787 else 1642 if (pasync_handle->is_header) {
1788 pasync_sge = pasync_ctx->async_data.ring_base; 1643 /* check if PDU hdr is rcv'd when old hdr not completed */
1644 if (wq->hdr_len) {
1645 err = "incomplete";
1646 goto drop_pdu;
1647 }
1648 ppdu = pasync_handle->pbuffer;
1649 bytes_needed = AMAP_GET_BITS(struct amap_pdu_base,
1650 data_len_hi, ppdu);
1651 bytes_needed <<= 16;
1652 bytes_needed |= be16_to_cpu(AMAP_GET_BITS(struct amap_pdu_base,
1653 data_len_lo, ppdu));
1654 wq->hdr_len = pasync_handle->buffer_len;
1655 wq->bytes_received = 0;
1656 wq->bytes_needed = bytes_needed;
1657 list_add_tail(&pasync_handle->link, &wq->list);
1658 if (!bytes_needed)
1659 status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
1660 pasync_ctx, cri);
1661 } else {
1662 /* check if data received has header and is needed */
1663 if (!wq->hdr_len || !wq->bytes_needed) {
1664 err = "header less";
1665 goto drop_pdu;
1666 }
1667 wq->bytes_received += pasync_handle->buffer_len;
1668 /* Something got overwritten? Better catch it here. */
1669 if (wq->bytes_received > wq->bytes_needed) {
1670 err = "overflow";
1671 goto drop_pdu;
1672 }
1673 list_add_tail(&pasync_handle->link, &wq->list);
1674 if (wq->bytes_received == wq->bytes_needed)
1675 status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
1676 pasync_ctx, cri);
1677 }
1678 return status;
1789 1679
1790 return pasync_sge + host_write_ptr; 1680drop_pdu:
1681 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
1682 "BM_%d : cid %u async PDU %s - def-%c:HL%u:DN%u:DR%u\n",
1683 beiscsi_conn->beiscsi_conn_cid, err,
1684 pasync_handle->is_header ? 'H' : 'D',
1685 wq->hdr_len, wq->bytes_needed,
1686 pasync_handle->buffer_len);
1687 /* discard this handle */
1688 beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
1689 /* free all the other handles in cri_wait_queue */
1690 beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
1691 /* try continuing */
1692 return status;
1791} 1693}
1792 1694
1793static void hwi_post_async_buffers(struct beiscsi_hba *phba, 1695static void
1794 unsigned int is_header, uint8_t ulp_num) 1696beiscsi_hdq_post_handles(struct beiscsi_hba *phba,
1697 u8 header, u8 ulp_num)
1795{ 1698{
1699 struct hd_async_handle *pasync_handle, *tmp, **slot;
1700 struct hd_async_context *pasync_ctx;
1796 struct hwi_controller *phwi_ctrlr; 1701 struct hwi_controller *phwi_ctrlr;
1797 struct hwi_async_pdu_context *pasync_ctx; 1702 struct list_head *hfree_list;
1798 struct async_pdu_handle *pasync_handle;
1799 struct list_head *pfree_link, *pbusy_list;
1800 struct phys_addr *pasync_sge; 1703 struct phys_addr *pasync_sge;
1801 unsigned int ring_id, num_entries; 1704 u32 ring_id, doorbell = 0;
1802 unsigned int host_write_num, doorbell_offset; 1705 u16 index, num_entries;
1803 unsigned int writables; 1706 u32 doorbell_offset;
1804 unsigned int i = 0; 1707 u16 prod = 0, cons;
1805 u32 doorbell = 0;
1806 1708
1807 phwi_ctrlr = phba->phwi_ctrlr; 1709 phwi_ctrlr = phba->phwi_ctrlr;
1808 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num); 1710 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
1809 num_entries = pasync_ctx->num_entries; 1711 num_entries = pasync_ctx->num_entries;
1810 1712 if (header) {
1811 if (is_header) { 1713 cons = pasync_ctx->async_header.free_entries;
1812 writables = min(pasync_ctx->async_header.writables, 1714 hfree_list = &pasync_ctx->async_header.free_list;
1813 pasync_ctx->async_header.free_entries);
1814 pfree_link = pasync_ctx->async_header.free_list.next;
1815 host_write_num = pasync_ctx->async_header.host_write_ptr;
1816 ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id; 1715 ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
1817 doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num]. 1716 doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num].
1818 doorbell_offset; 1717 doorbell_offset;
1819 } else { 1718 } else {
1820 writables = min(pasync_ctx->async_data.writables, 1719 cons = pasync_ctx->async_data.free_entries;
1821 pasync_ctx->async_data.free_entries); 1720 hfree_list = &pasync_ctx->async_data.free_list;
1822 pfree_link = pasync_ctx->async_data.free_list.next;
1823 host_write_num = pasync_ctx->async_data.host_write_ptr;
1824 ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id; 1721 ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
1825 doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num]. 1722 doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num].
1826 doorbell_offset; 1723 doorbell_offset;
1827 } 1724 }
1725 /* number of entries posted must be in multiples of 8 */
1726 if (cons % 8)
1727 return;
1828 1728
1829 writables = (writables / 8) * 8; 1729 list_for_each_entry_safe(pasync_handle, tmp, hfree_list, link) {
1830 if (writables) { 1730 list_del_init(&pasync_handle->link);
1831 for (i = 0; i < writables; i++) { 1731 pasync_handle->is_final = 0;
1832 pbusy_list = 1732 pasync_handle->buffer_len = 0;
1833 hwi_get_async_busy_list(pasync_ctx, is_header,
1834 host_write_num);
1835 pasync_handle =
1836 list_entry(pfree_link, struct async_pdu_handle,
1837 link);
1838 WARN_ON(!pasync_handle);
1839 pasync_handle->consumed = 0;
1840
1841 pfree_link = pfree_link->next;
1842
1843 pasync_sge = hwi_get_ring_address(pasync_ctx,
1844 is_header, host_write_num);
1845
1846 pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
1847 pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
1848
1849 list_move(&pasync_handle->link, pbusy_list);
1850
1851 host_write_num++;
1852 host_write_num = host_write_num % num_entries;
1853 }
1854
1855 if (is_header) {
1856 pasync_ctx->async_header.host_write_ptr =
1857 host_write_num;
1858 pasync_ctx->async_header.free_entries -= writables;
1859 pasync_ctx->async_header.writables -= writables;
1860 pasync_ctx->async_header.busy_entries += writables;
1861 } else {
1862 pasync_ctx->async_data.host_write_ptr = host_write_num;
1863 pasync_ctx->async_data.free_entries -= writables;
1864 pasync_ctx->async_data.writables -= writables;
1865 pasync_ctx->async_data.busy_entries += writables;
1866 }
1867
1868 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1869 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1870 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1871 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
1872 << DB_DEF_PDU_CQPROC_SHIFT;
1873
1874 iowrite32(doorbell, phba->db_va + doorbell_offset);
1875 }
1876}
1877
1878static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1879 struct beiscsi_conn *beiscsi_conn,
1880 struct i_t_dpdu_cqe *pdpdu_cqe)
1881{
1882 struct hwi_controller *phwi_ctrlr;
1883 struct hwi_async_pdu_context *pasync_ctx;
1884 struct async_pdu_handle *pasync_handle = NULL;
1885 unsigned int cq_index = -1;
1886 uint16_t cri_index = BE_GET_CRI_FROM_CID(
1887 beiscsi_conn->beiscsi_conn_cid);
1888
1889 phwi_ctrlr = phba->phwi_ctrlr;
1890 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
1891 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
1892 cri_index));
1893
1894 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1895 pdpdu_cqe, &cq_index);
1896 BUG_ON(pasync_handle->is_header != 0);
1897 if (pasync_handle->consumed == 0)
1898 hwi_update_async_writables(phba, pasync_ctx,
1899 pasync_handle->is_header, cq_index);
1900
1901 hwi_free_async_msg(phba, pasync_ctx, pasync_handle->cri);
1902 hwi_post_async_buffers(phba, pasync_handle->is_header,
1903 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
1904 cri_index));
1905}
1906
1907static unsigned int
1908hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1909 struct beiscsi_hba *phba,
1910 struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1911{
1912 struct list_head *plist;
1913 struct async_pdu_handle *pasync_handle;
1914 void *phdr = NULL;
1915 unsigned int hdr_len = 0, buf_len = 0;
1916 unsigned int status, index = 0, offset = 0;
1917 void *pfirst_buffer = NULL;
1918 unsigned int num_buf = 0;
1919
1920 plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1921 1733
1922 list_for_each_entry(pasync_handle, plist, link) { 1734 /* handles can be consumed out of order, use index in handle */
1923 if (index == 0) { 1735 index = pasync_handle->index;
1924 phdr = pasync_handle->pbuffer; 1736 WARN_ON(pasync_handle->is_header != header);
1925 hdr_len = pasync_handle->buffer_len; 1737 if (header)
1926 } else { 1738 slot = &pasync_ctx->async_entry[index].header;
1927 buf_len = pasync_handle->buffer_len; 1739 else
1928 if (!num_buf) { 1740 slot = &pasync_ctx->async_entry[index].data;
1929 pfirst_buffer = pasync_handle->pbuffer; 1741 /**
1930 num_buf++; 1742 * The slot just tracks handle's hold and release, so
1931 } 1743 * overwriting at the same index won't do any harm but
1932 memcpy(pfirst_buffer + offset, 1744 * needs to be caught.
1933 pasync_handle->pbuffer, buf_len); 1745 */
1934 offset += buf_len; 1746 if (*slot != NULL) {
1747 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
1748 "BM_%d : async PDU %s slot at %u not empty\n",
1749 header ? "header" : "data", index);
1935 } 1750 }
1936 index++; 1751 /**
1752 * We use same freed index as in completion to post so this
1753 * operation is not required for refills. Its required only
1754 * for ring creation.
1755 */
1756 if (header)
1757 pasync_sge = pasync_ctx->async_header.ring_base;
1758 else
1759 pasync_sge = pasync_ctx->async_data.ring_base;
1760 pasync_sge += index;
1761 /* if its a refill then address is same; hi is lo */
1762 WARN_ON(pasync_sge->hi &&
1763 pasync_sge->hi != pasync_handle->pa.u.a32.address_lo);
1764 WARN_ON(pasync_sge->lo &&
1765 pasync_sge->lo != pasync_handle->pa.u.a32.address_hi);
1766 pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
1767 pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
1768
1769 *slot = pasync_handle;
1770 if (++prod == cons)
1771 break;
1937 } 1772 }
1773 if (header)
1774 pasync_ctx->async_header.free_entries -= prod;
1775 else
1776 pasync_ctx->async_data.free_entries -= prod;
1938 1777
1939 status = beiscsi_process_async_pdu(beiscsi_conn, phba, 1778 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1940 phdr, hdr_len, pfirst_buffer, 1779 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1941 offset); 1780 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1942 1781 doorbell |= (prod & DB_DEF_PDU_CQPROC_MASK) << DB_DEF_PDU_CQPROC_SHIFT;
1943 hwi_free_async_msg(phba, pasync_ctx, cri); 1782 iowrite32(doorbell, phba->db_va + doorbell_offset);
1944 return 0;
1945}
1946
1947static unsigned int
1948hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1949 struct beiscsi_hba *phba,
1950 struct async_pdu_handle *pasync_handle)
1951{
1952 struct hwi_async_pdu_context *pasync_ctx;
1953 struct hwi_controller *phwi_ctrlr;
1954 unsigned int bytes_needed = 0, status = 0;
1955 unsigned short cri = pasync_handle->cri;
1956 struct pdu_base *ppdu;
1957
1958 phwi_ctrlr = phba->phwi_ctrlr;
1959 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
1960 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
1961 BE_GET_CRI_FROM_CID(beiscsi_conn->
1962 beiscsi_conn_cid)));
1963
1964 list_del(&pasync_handle->link);
1965 if (pasync_handle->is_header) {
1966 pasync_ctx->async_header.busy_entries--;
1967 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1968 hwi_free_async_msg(phba, pasync_ctx, cri);
1969 BUG();
1970 }
1971
1972 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1973 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
1974 pasync_ctx->async_entry[cri].wait_queue.hdr_len =
1975 (unsigned short)pasync_handle->buffer_len;
1976 list_add_tail(&pasync_handle->link,
1977 &pasync_ctx->async_entry[cri].wait_queue.list);
1978
1979 ppdu = pasync_handle->pbuffer;
1980 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
1981 data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
1982 0xFFFF0000) | ((be16_to_cpu((ppdu->
1983 dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
1984 & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
1985
1986 if (status == 0) {
1987 pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
1988 bytes_needed;
1989
1990 if (bytes_needed == 0)
1991 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1992 pasync_ctx, cri);
1993 }
1994 } else {
1995 pasync_ctx->async_data.busy_entries--;
1996 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1997 list_add_tail(&pasync_handle->link,
1998 &pasync_ctx->async_entry[cri].wait_queue.
1999 list);
2000 pasync_ctx->async_entry[cri].wait_queue.
2001 bytes_received +=
2002 (unsigned short)pasync_handle->buffer_len;
2003
2004 if (pasync_ctx->async_entry[cri].wait_queue.
2005 bytes_received >=
2006 pasync_ctx->async_entry[cri].wait_queue.
2007 bytes_needed)
2008 status = hwi_fwd_async_msg(beiscsi_conn, phba,
2009 pasync_ctx, cri);
2010 }
2011 }
2012 return status;
2013} 1783}
2014 1784
2015static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn, 1785static void
2016 struct beiscsi_hba *phba, 1786beiscsi_hdq_process_compl(struct beiscsi_conn *beiscsi_conn,
2017 struct i_t_dpdu_cqe *pdpdu_cqe) 1787 struct i_t_dpdu_cqe *pdpdu_cqe)
2018{ 1788{
1789 struct beiscsi_hba *phba = beiscsi_conn->phba;
1790 struct hd_async_handle *pasync_handle = NULL;
1791 struct hd_async_context *pasync_ctx;
2019 struct hwi_controller *phwi_ctrlr; 1792 struct hwi_controller *phwi_ctrlr;
2020 struct hwi_async_pdu_context *pasync_ctx; 1793 u16 cid_cri;
2021 struct async_pdu_handle *pasync_handle = NULL; 1794 u8 ulp_num;
2022 unsigned int cq_index = -1;
2023 uint16_t cri_index = BE_GET_CRI_FROM_CID(
2024 beiscsi_conn->beiscsi_conn_cid);
2025 1795
2026 phwi_ctrlr = phba->phwi_ctrlr; 1796 phwi_ctrlr = phba->phwi_ctrlr;
2027 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, 1797 cid_cri = BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
2028 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, 1798 ulp_num = BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cid_cri);
2029 cri_index)); 1799 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
2030 1800 pasync_handle = beiscsi_hdl_get_handle(beiscsi_conn, pasync_ctx,
2031 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx, 1801 pdpdu_cqe);
2032 pdpdu_cqe, &cq_index); 1802 if (!pasync_handle)
2033 1803 return;
2034 if (pasync_handle->consumed == 0)
2035 hwi_update_async_writables(phba, pasync_ctx,
2036 pasync_handle->is_header, cq_index);
2037 1804
2038 hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle); 1805 beiscsi_hdl_gather_pdu(beiscsi_conn, pasync_ctx, pasync_handle);
2039 hwi_post_async_buffers(phba, pasync_handle->is_header, 1806 beiscsi_hdq_post_handles(phba, pasync_handle->is_header, ulp_num);
2040 BEISCSI_GET_ULP_FROM_CRI(
2041 phwi_ctrlr, cri_index));
2042} 1807}
2043 1808
2044void beiscsi_process_mcc_cq(struct beiscsi_hba *phba) 1809void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
@@ -2051,6 +1816,9 @@ void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
2051 mcc_compl = queue_tail_node(mcc_cq); 1816 mcc_compl = queue_tail_node(mcc_cq);
2052 mcc_compl->flags = le32_to_cpu(mcc_compl->flags); 1817 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
2053 while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) { 1818 while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
1819 if (beiscsi_hba_in_error(phba))
1820 return;
1821
2054 if (num_processed >= 32) { 1822 if (num_processed >= 32) {
2055 hwi_ring_cq_db(phba, mcc_cq->id, 1823 hwi_ring_cq_db(phba, mcc_cq->id,
2056 num_processed, 0); 1824 num_processed, 0);
@@ -2073,6 +1841,19 @@ void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
2073 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1); 1841 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1);
2074} 1842}
2075 1843
1844static void beiscsi_mcc_work(struct work_struct *work)
1845{
1846 struct be_eq_obj *pbe_eq;
1847 struct beiscsi_hba *phba;
1848
1849 pbe_eq = container_of(work, struct be_eq_obj, mcc_work);
1850 phba = pbe_eq->phba;
1851 beiscsi_process_mcc_cq(phba);
1852 /* rearm EQ for further interrupts */
1853 if (!beiscsi_hba_in_error(phba))
1854 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
1855}
1856
2076/** 1857/**
2077 * beiscsi_process_cq()- Process the Completion Queue 1858 * beiscsi_process_cq()- Process the Completion Queue
2078 * @pbe_eq: Event Q on which the Completion has come 1859 * @pbe_eq: Event Q on which the Completion has come
@@ -2101,6 +1882,9 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
2101 1882
2102 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] & 1883 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
2103 CQE_VALID_MASK) { 1884 CQE_VALID_MASK) {
1885 if (beiscsi_hba_in_error(phba))
1886 return 0;
1887
2104 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe)); 1888 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
2105 1889
2106 code = (sol->dw[offsetof(struct amap_sol_cqe, code) / 1890 code = (sol->dw[offsetof(struct amap_sol_cqe, code) /
@@ -2165,8 +1949,8 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
2165 cqe_desc[code], code, cid); 1949 cqe_desc[code], code, cid);
2166 1950
2167 spin_lock_bh(&phba->async_pdu_lock); 1951 spin_lock_bh(&phba->async_pdu_lock);
2168 hwi_process_default_pdu_ring(beiscsi_conn, phba, 1952 beiscsi_hdq_process_compl(beiscsi_conn,
2169 (struct i_t_dpdu_cqe *)sol); 1953 (struct i_t_dpdu_cqe *)sol);
2170 spin_unlock_bh(&phba->async_pdu_lock); 1954 spin_unlock_bh(&phba->async_pdu_lock);
2171 break; 1955 break;
2172 case UNSOL_DATA_NOTIFY: 1956 case UNSOL_DATA_NOTIFY:
@@ -2176,8 +1960,8 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
2176 cqe_desc[code], code, cid); 1960 cqe_desc[code], code, cid);
2177 1961
2178 spin_lock_bh(&phba->async_pdu_lock); 1962 spin_lock_bh(&phba->async_pdu_lock);
2179 hwi_process_default_pdu_ring(beiscsi_conn, phba, 1963 beiscsi_hdq_process_compl(beiscsi_conn,
2180 (struct i_t_dpdu_cqe *)sol); 1964 (struct i_t_dpdu_cqe *)sol);
2181 spin_unlock_bh(&phba->async_pdu_lock); 1965 spin_unlock_bh(&phba->async_pdu_lock);
2182 break; 1966 break;
2183 case CXN_INVALIDATE_INDEX_NOTIFY: 1967 case CXN_INVALIDATE_INDEX_NOTIFY:
@@ -2213,8 +1997,9 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
2213 "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n", 1997 "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n",
2214 cqe_desc[code], code, cid); 1998 cqe_desc[code], code, cid);
2215 spin_lock_bh(&phba->async_pdu_lock); 1999 spin_lock_bh(&phba->async_pdu_lock);
2216 hwi_flush_default_pdu_buffer(phba, beiscsi_conn, 2000 /* driver consumes the entry and drops the contents */
2217 (struct i_t_dpdu_cqe *) sol); 2001 beiscsi_hdq_process_compl(beiscsi_conn,
2002 (struct i_t_dpdu_cqe *)sol);
2218 spin_unlock_bh(&phba->async_pdu_lock); 2003 spin_unlock_bh(&phba->async_pdu_lock);
2219 break; 2004 break;
2220 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL: 2005 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
@@ -2262,60 +2047,32 @@ proc_next_cqe:
2262 return total; 2047 return total;
2263} 2048}
2264 2049
2265void beiscsi_process_all_cqs(struct work_struct *work)
2266{
2267 unsigned long flags;
2268 struct hwi_controller *phwi_ctrlr;
2269 struct hwi_context_memory *phwi_context;
2270 struct beiscsi_hba *phba;
2271 struct be_eq_obj *pbe_eq =
2272 container_of(work, struct be_eq_obj, work_cqs);
2273
2274 phba = pbe_eq->phba;
2275 phwi_ctrlr = phba->phwi_ctrlr;
2276 phwi_context = phwi_ctrlr->phwi_ctxt;
2277
2278 if (pbe_eq->todo_mcc_cq) {
2279 spin_lock_irqsave(&phba->isr_lock, flags);
2280 pbe_eq->todo_mcc_cq = false;
2281 spin_unlock_irqrestore(&phba->isr_lock, flags);
2282 beiscsi_process_mcc_cq(phba);
2283 }
2284
2285 if (pbe_eq->todo_cq) {
2286 spin_lock_irqsave(&phba->isr_lock, flags);
2287 pbe_eq->todo_cq = false;
2288 spin_unlock_irqrestore(&phba->isr_lock, flags);
2289 beiscsi_process_cq(pbe_eq, BE2_MAX_NUM_CQ_PROC);
2290 }
2291
2292 /* rearm EQ for further interrupts */
2293 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
2294}
2295
2296static int be_iopoll(struct irq_poll *iop, int budget) 2050static int be_iopoll(struct irq_poll *iop, int budget)
2297{ 2051{
2298 unsigned int ret, num_eq_processed; 2052 unsigned int ret, io_events;
2299 struct beiscsi_hba *phba; 2053 struct beiscsi_hba *phba;
2300 struct be_eq_obj *pbe_eq; 2054 struct be_eq_obj *pbe_eq;
2301 struct be_eq_entry *eqe = NULL; 2055 struct be_eq_entry *eqe = NULL;
2302 struct be_queue_info *eq; 2056 struct be_queue_info *eq;
2303 2057
2304 num_eq_processed = 0;
2305 pbe_eq = container_of(iop, struct be_eq_obj, iopoll); 2058 pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
2306 phba = pbe_eq->phba; 2059 phba = pbe_eq->phba;
2060 if (beiscsi_hba_in_error(phba)) {
2061 irq_poll_complete(iop);
2062 return 0;
2063 }
2064
2065 io_events = 0;
2307 eq = &pbe_eq->q; 2066 eq = &pbe_eq->q;
2308 eqe = queue_tail_node(eq); 2067 eqe = queue_tail_node(eq);
2309
2310 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] & 2068 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] &
2311 EQE_VALID_MASK) { 2069 EQE_VALID_MASK) {
2312 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 2070 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
2313 queue_tail_inc(eq); 2071 queue_tail_inc(eq);
2314 eqe = queue_tail_node(eq); 2072 eqe = queue_tail_node(eq);
2315 num_eq_processed++; 2073 io_events++;
2316 } 2074 }
2317 2075 hwi_ring_eq_db(phba, eq->id, 1, io_events, 0, 1);
2318 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
2319 2076
2320 ret = beiscsi_process_cq(pbe_eq, budget); 2077 ret = beiscsi_process_cq(pbe_eq, budget);
2321 pbe_eq->cq_count += ret; 2078 pbe_eq->cq_count += ret;
@@ -2325,7 +2082,8 @@ static int be_iopoll(struct irq_poll *iop, int budget)
2325 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 2082 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
2326 "BM_%d : rearm pbe_eq->q.id =%d ret %d\n", 2083 "BM_%d : rearm pbe_eq->q.id =%d ret %d\n",
2327 pbe_eq->q.id, ret); 2084 pbe_eq->q.id, ret);
2328 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); 2085 if (!beiscsi_hba_in_error(phba))
2086 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
2329 } 2087 }
2330 return ret; 2088 return ret;
2331} 2089}
@@ -2691,20 +2449,20 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
2691 (ulp_num * MEM_DESCR_OFFSET)); 2449 (ulp_num * MEM_DESCR_OFFSET));
2692 phba->mem_req[mem_descr_index] = 2450 phba->mem_req[mem_descr_index] =
2693 BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2451 BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2694 sizeof(struct async_pdu_handle); 2452 sizeof(struct hd_async_handle);
2695 2453
2696 mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + 2454 mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
2697 (ulp_num * MEM_DESCR_OFFSET)); 2455 (ulp_num * MEM_DESCR_OFFSET));
2698 phba->mem_req[mem_descr_index] = 2456 phba->mem_req[mem_descr_index] =
2699 BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2457 BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2700 sizeof(struct async_pdu_handle); 2458 sizeof(struct hd_async_handle);
2701 2459
2702 mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + 2460 mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
2703 (ulp_num * MEM_DESCR_OFFSET)); 2461 (ulp_num * MEM_DESCR_OFFSET));
2704 phba->mem_req[mem_descr_index] = 2462 phba->mem_req[mem_descr_index] =
2705 sizeof(struct hwi_async_pdu_context) + 2463 sizeof(struct hd_async_context) +
2706 (BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2464 (BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2707 sizeof(struct hwi_async_entry)); 2465 sizeof(struct hd_async_entry));
2708 } 2466 }
2709 } 2467 }
2710} 2468}
@@ -2963,35 +2721,34 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2963 uint8_t ulp_num; 2721 uint8_t ulp_num;
2964 struct hwi_controller *phwi_ctrlr; 2722 struct hwi_controller *phwi_ctrlr;
2965 struct hba_parameters *p = &phba->params; 2723 struct hba_parameters *p = &phba->params;
2966 struct hwi_async_pdu_context *pasync_ctx; 2724 struct hd_async_context *pasync_ctx;
2967 struct async_pdu_handle *pasync_header_h, *pasync_data_h; 2725 struct hd_async_handle *pasync_header_h, *pasync_data_h;
2968 unsigned int index, idx, num_per_mem, num_async_data; 2726 unsigned int index, idx, num_per_mem, num_async_data;
2969 struct be_mem_descriptor *mem_descr; 2727 struct be_mem_descriptor *mem_descr;
2970 2728
2971 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 2729 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
2972 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 2730 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
2973 2731 /* get async_ctx for each ULP */
2974 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2732 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2975 mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + 2733 mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
2976 (ulp_num * MEM_DESCR_OFFSET)); 2734 (ulp_num * MEM_DESCR_OFFSET));
2977 2735
2978 phwi_ctrlr = phba->phwi_ctrlr; 2736 phwi_ctrlr = phba->phwi_ctrlr;
2979 phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] = 2737 phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] =
2980 (struct hwi_async_pdu_context *) 2738 (struct hd_async_context *)
2981 mem_descr->mem_array[0].virtual_address; 2739 mem_descr->mem_array[0].virtual_address;
2982 2740
2983 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num]; 2741 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
2984 memset(pasync_ctx, 0, sizeof(*pasync_ctx)); 2742 memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2985 2743
2986 pasync_ctx->async_entry = 2744 pasync_ctx->async_entry =
2987 (struct hwi_async_entry *) 2745 (struct hd_async_entry *)
2988 ((long unsigned int)pasync_ctx + 2746 ((long unsigned int)pasync_ctx +
2989 sizeof(struct hwi_async_pdu_context)); 2747 sizeof(struct hd_async_context));
2990 2748
2991 pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba, 2749 pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba,
2992 ulp_num); 2750 ulp_num);
2993 pasync_ctx->buffer_size = p->defpdu_hdr_sz; 2751 /* setup header buffers */
2994
2995 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2752 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2996 mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 + 2753 mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
2997 (ulp_num * MEM_DESCR_OFFSET); 2754 (ulp_num * MEM_DESCR_OFFSET);
@@ -3008,6 +2765,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
3008 "BM_%d : No Virtual address for ULP : %d\n", 2765 "BM_%d : No Virtual address for ULP : %d\n",
3009 ulp_num); 2766 ulp_num);
3010 2767
2768 pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
3011 pasync_ctx->async_header.va_base = 2769 pasync_ctx->async_header.va_base =
3012 mem_descr->mem_array[0].virtual_address; 2770 mem_descr->mem_array[0].virtual_address;
3013 2771
@@ -3015,6 +2773,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
3015 mem_descr->mem_array[0]. 2773 mem_descr->mem_array[0].
3016 bus_address.u.a64.address; 2774 bus_address.u.a64.address;
3017 2775
2776 /* setup header buffer sgls */
3018 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2777 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
3019 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + 2778 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
3020 (ulp_num * MEM_DESCR_OFFSET); 2779 (ulp_num * MEM_DESCR_OFFSET);
@@ -3034,6 +2793,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
3034 pasync_ctx->async_header.ring_base = 2793 pasync_ctx->async_header.ring_base =
3035 mem_descr->mem_array[0].virtual_address; 2794 mem_descr->mem_array[0].virtual_address;
3036 2795
2796 /* setup header buffer handles */
3037 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2797 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
3038 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + 2798 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
3039 (ulp_num * MEM_DESCR_OFFSET); 2799 (ulp_num * MEM_DESCR_OFFSET);
@@ -3052,9 +2812,9 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
3052 2812
3053 pasync_ctx->async_header.handle_base = 2813 pasync_ctx->async_header.handle_base =
3054 mem_descr->mem_array[0].virtual_address; 2814 mem_descr->mem_array[0].virtual_address;
3055 pasync_ctx->async_header.writables = 0;
3056 INIT_LIST_HEAD(&pasync_ctx->async_header.free_list); 2815 INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
3057 2816
2817 /* setup data buffer sgls */
3058 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2818 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
3059 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + 2819 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
3060 (ulp_num * MEM_DESCR_OFFSET); 2820 (ulp_num * MEM_DESCR_OFFSET);
@@ -3074,6 +2834,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
3074 pasync_ctx->async_data.ring_base = 2834 pasync_ctx->async_data.ring_base =
3075 mem_descr->mem_array[0].virtual_address; 2835 mem_descr->mem_array[0].virtual_address;
3076 2836
2837 /* setup data buffer handles */
3077 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2838 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
3078 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + 2839 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
3079 (ulp_num * MEM_DESCR_OFFSET); 2840 (ulp_num * MEM_DESCR_OFFSET);
@@ -3085,16 +2846,16 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
3085 2846
3086 pasync_ctx->async_data.handle_base = 2847 pasync_ctx->async_data.handle_base =
3087 mem_descr->mem_array[0].virtual_address; 2848 mem_descr->mem_array[0].virtual_address;
3088 pasync_ctx->async_data.writables = 0;
3089 INIT_LIST_HEAD(&pasync_ctx->async_data.free_list); 2849 INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
3090 2850
3091 pasync_header_h = 2851 pasync_header_h =
3092 (struct async_pdu_handle *) 2852 (struct hd_async_handle *)
3093 pasync_ctx->async_header.handle_base; 2853 pasync_ctx->async_header.handle_base;
3094 pasync_data_h = 2854 pasync_data_h =
3095 (struct async_pdu_handle *) 2855 (struct hd_async_handle *)
3096 pasync_ctx->async_data.handle_base; 2856 pasync_ctx->async_data.handle_base;
3097 2857
2858 /* setup data buffers */
3098 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2859 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
3099 mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 + 2860 mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 +
3100 (ulp_num * MEM_DESCR_OFFSET); 2861 (ulp_num * MEM_DESCR_OFFSET);
@@ -3112,6 +2873,7 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
3112 ulp_num); 2873 ulp_num);
3113 2874
3114 idx = 0; 2875 idx = 0;
2876 pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
3115 pasync_ctx->async_data.va_base = 2877 pasync_ctx->async_data.va_base =
3116 mem_descr->mem_array[idx].virtual_address; 2878 mem_descr->mem_array[idx].virtual_address;
3117 pasync_ctx->async_data.pa_base.u.a64.address = 2879 pasync_ctx->async_data.pa_base.u.a64.address =
@@ -3125,7 +2887,8 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
3125 for (index = 0; index < BEISCSI_GET_CID_COUNT 2887 for (index = 0; index < BEISCSI_GET_CID_COUNT
3126 (phba, ulp_num); index++) { 2888 (phba, ulp_num); index++) {
3127 pasync_header_h->cri = -1; 2889 pasync_header_h->cri = -1;
3128 pasync_header_h->index = (char)index; 2890 pasync_header_h->is_header = 1;
2891 pasync_header_h->index = index;
3129 INIT_LIST_HEAD(&pasync_header_h->link); 2892 INIT_LIST_HEAD(&pasync_header_h->link);
3130 pasync_header_h->pbuffer = 2893 pasync_header_h->pbuffer =
3131 (void *)((unsigned long) 2894 (void *)((unsigned long)
@@ -3142,14 +2905,13 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
3142 free_list); 2905 free_list);
3143 pasync_header_h++; 2906 pasync_header_h++;
3144 pasync_ctx->async_header.free_entries++; 2907 pasync_ctx->async_header.free_entries++;
3145 pasync_ctx->async_header.writables++;
3146
3147 INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 2908 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
3148 wait_queue.list); 2909 wq.list);
3149 INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 2910 pasync_ctx->async_entry[index].header = NULL;
3150 header_busy_list); 2911
3151 pasync_data_h->cri = -1; 2912 pasync_data_h->cri = -1;
3152 pasync_data_h->index = (char)index; 2913 pasync_data_h->is_header = 0;
2914 pasync_data_h->index = index;
3153 INIT_LIST_HEAD(&pasync_data_h->link); 2915 INIT_LIST_HEAD(&pasync_data_h->link);
3154 2916
3155 if (!num_async_data) { 2917 if (!num_async_data) {
@@ -3184,16 +2946,8 @@ static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
3184 free_list); 2946 free_list);
3185 pasync_data_h++; 2947 pasync_data_h++;
3186 pasync_ctx->async_data.free_entries++; 2948 pasync_ctx->async_data.free_entries++;
3187 pasync_ctx->async_data.writables++; 2949 pasync_ctx->async_entry[index].data = NULL;
3188
3189 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
3190 data_busy_list);
3191 } 2950 }
3192
3193 pasync_ctx->async_header.host_write_ptr = 0;
3194 pasync_ctx->async_header.ep_read_ptr = -1;
3195 pasync_ctx->async_data.host_write_ptr = 0;
3196 pasync_ctx->async_data.ep_read_ptr = -1;
3197 } 2951 }
3198 } 2952 }
3199 2953
@@ -3265,8 +3019,8 @@ static int be_fill_queue(struct be_queue_info *q,
3265static int beiscsi_create_eqs(struct beiscsi_hba *phba, 3019static int beiscsi_create_eqs(struct beiscsi_hba *phba,
3266 struct hwi_context_memory *phwi_context) 3020 struct hwi_context_memory *phwi_context)
3267{ 3021{
3022 int ret = -ENOMEM, eq_for_mcc;
3268 unsigned int i, num_eq_pages; 3023 unsigned int i, num_eq_pages;
3269 int ret = 0, eq_for_mcc;
3270 struct be_queue_info *eq; 3024 struct be_queue_info *eq;
3271 struct be_dma_mem *mem; 3025 struct be_dma_mem *mem;
3272 void *eq_vaddress; 3026 void *eq_vaddress;
@@ -3284,8 +3038,8 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
3284 mem = &eq->dma_mem; 3038 mem = &eq->dma_mem;
3285 phwi_context->be_eq[i].phba = phba; 3039 phwi_context->be_eq[i].phba = phba;
3286 eq_vaddress = pci_alloc_consistent(phba->pcidev, 3040 eq_vaddress = pci_alloc_consistent(phba->pcidev,
3287 num_eq_pages * PAGE_SIZE, 3041 num_eq_pages * PAGE_SIZE,
3288 &paddr); 3042 &paddr);
3289 if (!eq_vaddress) 3043 if (!eq_vaddress)
3290 goto create_eq_error; 3044 goto create_eq_error;
3291 3045
@@ -3313,6 +3067,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
3313 phwi_context->be_eq[i].q.id); 3067 phwi_context->be_eq[i].q.id);
3314 } 3068 }
3315 return 0; 3069 return 0;
3070
3316create_eq_error: 3071create_eq_error:
3317 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3072 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
3318 eq = &phwi_context->be_eq[i].q; 3073 eq = &phwi_context->be_eq[i].q;
@@ -3329,11 +3084,11 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
3329 struct hwi_context_memory *phwi_context) 3084 struct hwi_context_memory *phwi_context)
3330{ 3085{
3331 unsigned int i, num_cq_pages; 3086 unsigned int i, num_cq_pages;
3332 int ret = 0;
3333 struct be_queue_info *cq, *eq; 3087 struct be_queue_info *cq, *eq;
3334 struct be_dma_mem *mem; 3088 struct be_dma_mem *mem;
3335 struct be_eq_obj *pbe_eq; 3089 struct be_eq_obj *pbe_eq;
3336 void *cq_vaddress; 3090 void *cq_vaddress;
3091 int ret = -ENOMEM;
3337 dma_addr_t paddr; 3092 dma_addr_t paddr;
3338 3093
3339 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ 3094 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
@@ -3347,10 +3102,11 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
3347 pbe_eq->phba = phba; 3102 pbe_eq->phba = phba;
3348 mem = &cq->dma_mem; 3103 mem = &cq->dma_mem;
3349 cq_vaddress = pci_alloc_consistent(phba->pcidev, 3104 cq_vaddress = pci_alloc_consistent(phba->pcidev,
3350 num_cq_pages * PAGE_SIZE, 3105 num_cq_pages * PAGE_SIZE,
3351 &paddr); 3106 &paddr);
3352 if (!cq_vaddress) 3107 if (!cq_vaddress)
3353 goto create_cq_error; 3108 goto create_cq_error;
3109
3354 ret = be_fill_queue(cq, phba->params.num_cq_entries, 3110 ret = be_fill_queue(cq, phba->params.num_cq_entries,
3355 sizeof(struct sol_cqe), cq_vaddress); 3111 sizeof(struct sol_cqe), cq_vaddress);
3356 if (ret) { 3112 if (ret) {
@@ -3385,7 +3141,6 @@ create_cq_error:
3385 mem->va, mem->dma); 3141 mem->va, mem->dma);
3386 } 3142 }
3387 return ret; 3143 return ret;
3388
3389} 3144}
3390 3145
3391static int 3146static int
@@ -3437,7 +3192,6 @@ beiscsi_create_def_hdr(struct beiscsi_hba *phba,
3437 "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n", 3192 "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n",
3438 ulp_num, 3193 ulp_num,
3439 phwi_context->be_def_hdrq[ulp_num].id); 3194 phwi_context->be_def_hdrq[ulp_num].id);
3440 hwi_post_async_buffers(phba, BEISCSI_DEFQ_HDR, ulp_num);
3441 return 0; 3195 return 0;
3442} 3196}
3443 3197
@@ -3492,11 +3246,9 @@ beiscsi_create_def_data(struct beiscsi_hba *phba,
3492 ulp_num, 3246 ulp_num,
3493 phwi_context->be_def_dataq[ulp_num].id); 3247 phwi_context->be_def_dataq[ulp_num].id);
3494 3248
3495 hwi_post_async_buffers(phba, BEISCSI_DEFQ_DATA, ulp_num);
3496 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3249 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3497 "BM_%d : DEFAULT PDU DATA RING CREATED" 3250 "BM_%d : DEFAULT PDU DATA RING CREATED"
3498 "on ULP : %d\n", ulp_num); 3251 "on ULP : %d\n", ulp_num);
3499
3500 return 0; 3252 return 0;
3501} 3253}
3502 3254
@@ -3716,10 +3468,53 @@ static void free_wrb_handles(struct beiscsi_hba *phba)
3716 3468
3717static void be_mcc_queues_destroy(struct beiscsi_hba *phba) 3469static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
3718{ 3470{
3719 struct be_queue_info *q;
3720 struct be_ctrl_info *ctrl = &phba->ctrl; 3471 struct be_ctrl_info *ctrl = &phba->ctrl;
3472 struct be_dma_mem *ptag_mem;
3473 struct be_queue_info *q;
3474 int i, tag;
3721 3475
3722 q = &phba->ctrl.mcc_obj.q; 3476 q = &phba->ctrl.mcc_obj.q;
3477 for (i = 0; i < MAX_MCC_CMD; i++) {
3478 tag = i + 1;
3479 if (!test_bit(MCC_TAG_STATE_RUNNING,
3480 &ctrl->ptag_state[tag].tag_state))
3481 continue;
3482
3483 if (test_bit(MCC_TAG_STATE_TIMEOUT,
3484 &ctrl->ptag_state[tag].tag_state)) {
3485 ptag_mem = &ctrl->ptag_state[tag].tag_mem_state;
3486 if (ptag_mem->size) {
3487 pci_free_consistent(ctrl->pdev,
3488 ptag_mem->size,
3489 ptag_mem->va,
3490 ptag_mem->dma);
3491 ptag_mem->size = 0;
3492 }
3493 continue;
3494 }
3495 /**
3496 * If MCC is still active and waiting then wake up the process.
3497 * We are here only because port is going offline. The process
3498 * sees that (BEISCSI_HBA_ONLINE is cleared) and EIO error is
3499 * returned for the operation and allocated memory cleaned up.
3500 */
3501 if (waitqueue_active(&ctrl->mcc_wait[tag])) {
3502 ctrl->mcc_tag_status[tag] = MCC_STATUS_FAILED;
3503 ctrl->mcc_tag_status[tag] |= CQE_VALID_MASK;
3504 wake_up_interruptible(&ctrl->mcc_wait[tag]);
3505 /*
3506 * Control tag info gets reinitialized in enable
3507 * so wait for the process to clear running state.
3508 */
3509 while (test_bit(MCC_TAG_STATE_RUNNING,
3510 &ctrl->ptag_state[tag].tag_state))
3511 schedule_timeout_uninterruptible(HZ);
3512 }
3513 /**
3514 * For MCC with tag_states MCC_TAG_STATE_ASYNC and
3515 * MCC_TAG_STATE_IGNORE nothing needs to done.
3516 */
3517 }
3723 if (q->created) { 3518 if (q->created) {
3724 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ); 3519 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
3725 be_queue_free(phba, q); 3520 be_queue_free(phba, q);
@@ -3732,68 +3527,6 @@ static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
3732 } 3527 }
3733} 3528}
3734 3529
3735static void hwi_cleanup(struct beiscsi_hba *phba)
3736{
3737 struct be_queue_info *q;
3738 struct be_ctrl_info *ctrl = &phba->ctrl;
3739 struct hwi_controller *phwi_ctrlr;
3740 struct hwi_context_memory *phwi_context;
3741 struct hwi_async_pdu_context *pasync_ctx;
3742 int i, eq_for_mcc, ulp_num;
3743
3744 phwi_ctrlr = phba->phwi_ctrlr;
3745 phwi_context = phwi_ctrlr->phwi_ctxt;
3746
3747 be_cmd_iscsi_remove_template_hdr(ctrl);
3748
3749 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3750 q = &phwi_context->be_wrbq[i];
3751 if (q->created)
3752 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
3753 }
3754 kfree(phwi_context->be_wrbq);
3755 free_wrb_handles(phba);
3756
3757 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3758 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3759
3760 q = &phwi_context->be_def_hdrq[ulp_num];
3761 if (q->created)
3762 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3763
3764 q = &phwi_context->be_def_dataq[ulp_num];
3765 if (q->created)
3766 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3767
3768 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
3769 }
3770 }
3771
3772 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
3773
3774 for (i = 0; i < (phba->num_cpus); i++) {
3775 q = &phwi_context->be_cq[i];
3776 if (q->created) {
3777 be_queue_free(phba, q);
3778 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3779 }
3780 }
3781
3782 be_mcc_queues_destroy(phba);
3783 if (phba->msix_enabled)
3784 eq_for_mcc = 1;
3785 else
3786 eq_for_mcc = 0;
3787 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
3788 q = &phwi_context->be_eq[i].q;
3789 if (q->created) {
3790 be_queue_free(phba, q);
3791 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
3792 }
3793 }
3794 be_cmd_fw_uninit(ctrl);
3795}
3796
3797static int be_mcc_queues_create(struct beiscsi_hba *phba, 3530static int be_mcc_queues_create(struct beiscsi_hba *phba,
3798 struct hwi_context_memory *phwi_context) 3531 struct hwi_context_memory *phwi_context)
3799{ 3532{
@@ -3875,6 +3608,118 @@ static void find_num_cpus(struct beiscsi_hba *phba)
3875 } 3608 }
3876} 3609}
3877 3610
3611static void hwi_purge_eq(struct beiscsi_hba *phba)
3612{
3613 struct hwi_controller *phwi_ctrlr;
3614 struct hwi_context_memory *phwi_context;
3615 struct be_queue_info *eq;
3616 struct be_eq_entry *eqe = NULL;
3617 int i, eq_msix;
3618 unsigned int num_processed;
3619
3620 if (beiscsi_hba_in_error(phba))
3621 return;
3622
3623 phwi_ctrlr = phba->phwi_ctrlr;
3624 phwi_context = phwi_ctrlr->phwi_ctxt;
3625 if (phba->msix_enabled)
3626 eq_msix = 1;
3627 else
3628 eq_msix = 0;
3629
3630 for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3631 eq = &phwi_context->be_eq[i].q;
3632 eqe = queue_tail_node(eq);
3633 num_processed = 0;
3634 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3635 & EQE_VALID_MASK) {
3636 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3637 queue_tail_inc(eq);
3638 eqe = queue_tail_node(eq);
3639 num_processed++;
3640 }
3641
3642 if (num_processed)
3643 hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
3644 }
3645}
3646
3647static void hwi_cleanup_port(struct beiscsi_hba *phba)
3648{
3649 struct be_queue_info *q;
3650 struct be_ctrl_info *ctrl = &phba->ctrl;
3651 struct hwi_controller *phwi_ctrlr;
3652 struct hwi_context_memory *phwi_context;
3653 struct hd_async_context *pasync_ctx;
3654 int i, eq_for_mcc, ulp_num;
3655
3656 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
3657 if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
3658 beiscsi_cmd_iscsi_cleanup(phba, ulp_num);
3659
3660 /**
3661 * Purge all EQ entries that may have been left out. This is to
3662 * workaround a problem we've seen occasionally where driver gets an
3663 * interrupt with EQ entry bit set after stopping the controller.
3664 */
3665 hwi_purge_eq(phba);
3666
3667 phwi_ctrlr = phba->phwi_ctrlr;
3668 phwi_context = phwi_ctrlr->phwi_ctxt;
3669
3670 be_cmd_iscsi_remove_template_hdr(ctrl);
3671
3672 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3673 q = &phwi_context->be_wrbq[i];
3674 if (q->created)
3675 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
3676 }
3677 kfree(phwi_context->be_wrbq);
3678 free_wrb_handles(phba);
3679
3680 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3681 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3682
3683 q = &phwi_context->be_def_hdrq[ulp_num];
3684 if (q->created)
3685 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3686
3687 q = &phwi_context->be_def_dataq[ulp_num];
3688 if (q->created)
3689 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3690
3691 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
3692 }
3693 }
3694
3695 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
3696
3697 for (i = 0; i < (phba->num_cpus); i++) {
3698 q = &phwi_context->be_cq[i];
3699 if (q->created) {
3700 be_queue_free(phba, q);
3701 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3702 }
3703 }
3704
3705 be_mcc_queues_destroy(phba);
3706 if (phba->msix_enabled)
3707 eq_for_mcc = 1;
3708 else
3709 eq_for_mcc = 0;
3710 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
3711 q = &phwi_context->be_eq[i].q;
3712 if (q->created) {
3713 be_queue_free(phba, q);
3714 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
3715 }
3716 }
3717 /* this ensures complete FW cleanup */
3718 beiscsi_cmd_function_reset(phba);
3719 /* last communication, indicate driver is unloading */
3720 beiscsi_cmd_special_wrb(&phba->ctrl, 0);
3721}
3722
3878static int hwi_init_port(struct beiscsi_hba *phba) 3723static int hwi_init_port(struct beiscsi_hba *phba)
3879{ 3724{
3880 struct hwi_controller *phwi_ctrlr; 3725 struct hwi_controller *phwi_ctrlr;
@@ -3887,9 +3732,8 @@ static int hwi_init_port(struct beiscsi_hba *phba)
3887 phwi_context = phwi_ctrlr->phwi_ctxt; 3732 phwi_context = phwi_ctrlr->phwi_ctxt;
3888 phwi_context->max_eqd = 128; 3733 phwi_context->max_eqd = 128;
3889 phwi_context->min_eqd = 0; 3734 phwi_context->min_eqd = 0;
3890 phwi_context->cur_eqd = 0; 3735 phwi_context->cur_eqd = 32;
3891 be_cmd_fw_initialize(&phba->ctrl); 3736 /* set port optic state to unknown */
3892 /* set optic state to unknown */
3893 phba->optic_state = 0xff; 3737 phba->optic_state = 0xff;
3894 3738
3895 status = beiscsi_create_eqs(phba, phwi_context); 3739 status = beiscsi_create_eqs(phba, phwi_context);
@@ -3903,7 +3747,7 @@ static int hwi_init_port(struct beiscsi_hba *phba)
3903 if (status != 0) 3747 if (status != 0)
3904 goto error; 3748 goto error;
3905 3749
3906 status = mgmt_check_supported_fw(ctrl, phba); 3750 status = beiscsi_check_supported_fw(ctrl, phba);
3907 if (status != 0) { 3751 if (status != 0) {
3908 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3752 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3909 "BM_%d : Unsupported fw version\n"); 3753 "BM_%d : Unsupported fw version\n");
@@ -3919,7 +3763,6 @@ static int hwi_init_port(struct beiscsi_hba *phba)
3919 3763
3920 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3764 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3921 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3765 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3922
3923 def_pdu_ring_sz = 3766 def_pdu_ring_sz =
3924 BEISCSI_GET_CID_COUNT(phba, ulp_num) * 3767 BEISCSI_GET_CID_COUNT(phba, ulp_num) *
3925 sizeof(struct phys_addr); 3768 sizeof(struct phys_addr);
@@ -3945,6 +3788,15 @@ static int hwi_init_port(struct beiscsi_hba *phba)
3945 ulp_num); 3788 ulp_num);
3946 goto error; 3789 goto error;
3947 } 3790 }
3791 /**
3792 * Now that the default PDU rings have been created,
3793 * let EP know about it.
3794 * Call beiscsi_cmd_iscsi_cleanup before posting?
3795 */
3796 beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR,
3797 ulp_num);
3798 beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA,
3799 ulp_num);
3948 } 3800 }
3949 } 3801 }
3950 3802
@@ -3973,7 +3825,7 @@ static int hwi_init_port(struct beiscsi_hba *phba)
3973 3825
3974 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3826 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3975 uint16_t cri = 0; 3827 uint16_t cri = 0;
3976 struct hwi_async_pdu_context *pasync_ctx; 3828 struct hd_async_context *pasync_ctx;
3977 3829
3978 pasync_ctx = HWI_GET_ASYNC_PDU_CTX( 3830 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(
3979 phwi_ctrlr, ulp_num); 3831 phwi_ctrlr, ulp_num);
@@ -3985,6 +3837,14 @@ static int hwi_init_port(struct beiscsi_hba *phba)
3985 phwi_ctrlr->wrb_context[cri].cid] = 3837 phwi_ctrlr->wrb_context[cri].cid] =
3986 async_arr_idx++; 3838 async_arr_idx++;
3987 } 3839 }
3840 /**
3841 * Now that the default PDU rings have been created,
3842 * let EP know about it.
3843 */
3844 beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR,
3845 ulp_num);
3846 beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA,
3847 ulp_num);
3988 } 3848 }
3989 } 3849 }
3990 3850
@@ -3995,7 +3855,7 @@ static int hwi_init_port(struct beiscsi_hba *phba)
3995error: 3855error:
3996 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3856 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3997 "BM_%d : hwi_init_port failed"); 3857 "BM_%d : hwi_init_port failed");
3998 hwi_cleanup(phba); 3858 hwi_cleanup_port(phba);
3999 return status; 3859 return status;
4000} 3860}
4001 3861
@@ -4354,149 +4214,6 @@ static void hwi_disable_intr(struct beiscsi_hba *phba)
4354 "BM_%d : In hwi_disable_intr, Already Disabled\n"); 4214 "BM_%d : In hwi_disable_intr, Already Disabled\n");
4355} 4215}
4356 4216
4357/**
4358 * beiscsi_get_boot_info()- Get the boot session info
4359 * @phba: The device priv structure instance
4360 *
4361 * Get the boot target info and store in driver priv structure
4362 *
4363 * return values
4364 * Success: 0
4365 * Failure: Non-Zero Value
4366 **/
4367static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
4368{
4369 struct be_cmd_get_session_resp *session_resp;
4370 struct be_dma_mem nonemb_cmd;
4371 unsigned int tag;
4372 unsigned int s_handle;
4373 int ret = -ENOMEM;
4374
4375 /* Get the session handle of the boot target */
4376 ret = be_mgmt_get_boot_shandle(phba, &s_handle);
4377 if (ret) {
4378 beiscsi_log(phba, KERN_ERR,
4379 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
4380 "BM_%d : No boot session\n");
4381
4382 if (ret == -ENXIO)
4383 phba->get_boot = 0;
4384
4385
4386 return ret;
4387 }
4388 phba->get_boot = 0;
4389 nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev,
4390 sizeof(*session_resp),
4391 &nonemb_cmd.dma);
4392 if (nonemb_cmd.va == NULL) {
4393 beiscsi_log(phba, KERN_ERR,
4394 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
4395 "BM_%d : Failed to allocate memory for"
4396 "beiscsi_get_session_info\n");
4397
4398 return -ENOMEM;
4399 }
4400
4401 tag = mgmt_get_session_info(phba, s_handle,
4402 &nonemb_cmd);
4403 if (!tag) {
4404 beiscsi_log(phba, KERN_ERR,
4405 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
4406 "BM_%d : beiscsi_get_session_info"
4407 " Failed\n");
4408
4409 goto boot_freemem;
4410 }
4411
4412 ret = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
4413 if (ret) {
4414 beiscsi_log(phba, KERN_ERR,
4415 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
4416 "BM_%d : beiscsi_get_session_info Failed");
4417
4418 if (ret != -EBUSY)
4419 goto boot_freemem;
4420 else
4421 return ret;
4422 }
4423
4424 session_resp = nonemb_cmd.va ;
4425
4426 memcpy(&phba->boot_sess, &session_resp->session_info,
4427 sizeof(struct mgmt_session_info));
4428
4429 beiscsi_logout_fw_sess(phba,
4430 phba->boot_sess.session_handle);
4431 ret = 0;
4432
4433boot_freemem:
4434 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4435 nonemb_cmd.va, nonemb_cmd.dma);
4436 return ret;
4437}
4438
4439static void beiscsi_boot_release(void *data)
4440{
4441 struct beiscsi_hba *phba = data;
4442
4443 scsi_host_put(phba->shost);
4444}
4445
4446static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
4447{
4448 struct iscsi_boot_kobj *boot_kobj;
4449
4450 /* it has been created previously */
4451 if (phba->boot_kset)
4452 return 0;
4453
4454 /* get boot info using mgmt cmd */
4455 if (beiscsi_get_boot_info(phba))
4456 /* Try to see if we can carry on without this */
4457 return 0;
4458
4459 phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
4460 if (!phba->boot_kset)
4461 return -ENOMEM;
4462
4463 /* get a ref because the show function will ref the phba */
4464 if (!scsi_host_get(phba->shost))
4465 goto free_kset;
4466 boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
4467 beiscsi_show_boot_tgt_info,
4468 beiscsi_tgt_get_attr_visibility,
4469 beiscsi_boot_release);
4470 if (!boot_kobj)
4471 goto put_shost;
4472
4473 if (!scsi_host_get(phba->shost))
4474 goto free_kset;
4475 boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
4476 beiscsi_show_boot_ini_info,
4477 beiscsi_ini_get_attr_visibility,
4478 beiscsi_boot_release);
4479 if (!boot_kobj)
4480 goto put_shost;
4481
4482 if (!scsi_host_get(phba->shost))
4483 goto free_kset;
4484 boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
4485 beiscsi_show_boot_eth_info,
4486 beiscsi_eth_get_attr_visibility,
4487 beiscsi_boot_release);
4488 if (!boot_kobj)
4489 goto put_shost;
4490 return 0;
4491
4492put_shost:
4493 scsi_host_put(phba->shost);
4494free_kset:
4495 iscsi_boot_destroy_kset(phba->boot_kset);
4496 phba->boot_kset = NULL;
4497 return -ENOMEM;
4498}
4499
4500static int beiscsi_init_port(struct beiscsi_hba *phba) 4217static int beiscsi_init_port(struct beiscsi_hba *phba)
4501{ 4218{
4502 int ret; 4219 int ret;
@@ -4516,7 +4233,8 @@ static int beiscsi_init_port(struct beiscsi_hba *phba)
4516 goto do_cleanup_ctrlr; 4233 goto do_cleanup_ctrlr;
4517 } 4234 }
4518 4235
4519 if (hba_setup_cid_tbls(phba)) { 4236 ret = hba_setup_cid_tbls(phba);
4237 if (ret < 0) {
4520 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4238 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4521 "BM_%d : Failed in hba_setup_cid_tbls\n"); 4239 "BM_%d : Failed in hba_setup_cid_tbls\n");
4522 kfree(phba->io_sgl_hndl_base); 4240 kfree(phba->io_sgl_hndl_base);
@@ -4527,61 +4245,15 @@ static int beiscsi_init_port(struct beiscsi_hba *phba)
4527 return ret; 4245 return ret;
4528 4246
4529do_cleanup_ctrlr: 4247do_cleanup_ctrlr:
4530 hwi_cleanup(phba); 4248 hwi_cleanup_port(phba);
4531 return ret; 4249 return ret;
4532} 4250}
4533 4251
4534static void hwi_purge_eq(struct beiscsi_hba *phba) 4252static void beiscsi_cleanup_port(struct beiscsi_hba *phba)
4535{
4536 struct hwi_controller *phwi_ctrlr;
4537 struct hwi_context_memory *phwi_context;
4538 struct be_queue_info *eq;
4539 struct be_eq_entry *eqe = NULL;
4540 int i, eq_msix;
4541 unsigned int num_processed;
4542
4543 phwi_ctrlr = phba->phwi_ctrlr;
4544 phwi_context = phwi_ctrlr->phwi_ctxt;
4545 if (phba->msix_enabled)
4546 eq_msix = 1;
4547 else
4548 eq_msix = 0;
4549
4550 for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
4551 eq = &phwi_context->be_eq[i].q;
4552 eqe = queue_tail_node(eq);
4553 num_processed = 0;
4554 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
4555 & EQE_VALID_MASK) {
4556 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
4557 queue_tail_inc(eq);
4558 eqe = queue_tail_node(eq);
4559 num_processed++;
4560 }
4561
4562 if (num_processed)
4563 hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
4564 }
4565}
4566
4567static void beiscsi_clean_port(struct beiscsi_hba *phba)
4568{ 4253{
4569 int mgmt_status, ulp_num;
4570 struct ulp_cid_info *ptr_cid_info = NULL; 4254 struct ulp_cid_info *ptr_cid_info = NULL;
4255 int ulp_num;
4571 4256
4572 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4573 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4574 mgmt_status = mgmt_epfw_cleanup(phba, ulp_num);
4575 if (mgmt_status)
4576 beiscsi_log(phba, KERN_WARNING,
4577 BEISCSI_LOG_INIT,
4578 "BM_%d : mgmt_epfw_cleanup FAILED"
4579 " for ULP_%d\n", ulp_num);
4580 }
4581 }
4582
4583 hwi_purge_eq(phba);
4584 hwi_cleanup(phba);
4585 kfree(phba->io_sgl_hndl_base); 4257 kfree(phba->io_sgl_hndl_base);
4586 kfree(phba->eh_sgl_hndl_base); 4258 kfree(phba->eh_sgl_hndl_base);
4587 kfree(phba->ep_array); 4259 kfree(phba->ep_array);
@@ -4598,7 +4270,6 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba)
4598 } 4270 }
4599 } 4271 }
4600 } 4272 }
4601
4602} 4273}
4603 4274
4604/** 4275/**
@@ -4625,16 +4296,12 @@ beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
4625 io_task = task->dd_data; 4296 io_task = task->dd_data;
4626 4297
4627 if (io_task->pwrb_handle) { 4298 if (io_task->pwrb_handle) {
4628 memset(io_task->pwrb_handle->pwrb, 0, 4299 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
4629 sizeof(struct iscsi_wrb));
4630 free_wrb_handle(phba, pwrb_context,
4631 io_task->pwrb_handle);
4632 io_task->pwrb_handle = NULL; 4300 io_task->pwrb_handle = NULL;
4633 } 4301 }
4634 4302
4635 if (io_task->psgl_handle) { 4303 if (io_task->psgl_handle) {
4636 free_mgmt_sgl_handle(phba, 4304 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
4637 io_task->psgl_handle);
4638 io_task->psgl_handle = NULL; 4305 io_task->psgl_handle = NULL;
4639 } 4306 }
4640 4307
@@ -4671,6 +4338,7 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
4671 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4338 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
4672 io_task->bhs_pa.u.a64.address); 4339 io_task->bhs_pa.u.a64.address);
4673 io_task->cmd_bhs = NULL; 4340 io_task->cmd_bhs = NULL;
4341 task->hdr = NULL;
4674 } 4342 }
4675 4343
4676 if (task->sc) { 4344 if (task->sc) {
@@ -4686,7 +4354,8 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
4686 } 4354 }
4687 4355
4688 if (io_task->scsi_cmnd) { 4356 if (io_task->scsi_cmnd) {
4689 scsi_dma_unmap(io_task->scsi_cmnd); 4357 if (io_task->num_sg)
4358 scsi_dma_unmap(io_task->scsi_cmnd);
4690 io_task->scsi_cmnd = NULL; 4359 io_task->scsi_cmnd = NULL;
4691 } 4360 }
4692 } else { 4361 } else {
@@ -5051,7 +4720,6 @@ static int beiscsi_mtask(struct iscsi_task *task)
5051 4720
5052 cid = beiscsi_conn->beiscsi_conn_cid; 4721 cid = beiscsi_conn->beiscsi_conn_cid;
5053 pwrb = io_task->pwrb_handle->pwrb; 4722 pwrb = io_task->pwrb_handle->pwrb;
5054 memset(pwrb, 0, sizeof(*pwrb));
5055 4723
5056 if (is_chip_be2_be3r(phba)) { 4724 if (is_chip_be2_be3r(phba)) {
5057 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 4725 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
@@ -5165,6 +4833,15 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
5165 int num_sg; 4833 int num_sg;
5166 unsigned int writedir = 0, xferlen = 0; 4834 unsigned int writedir = 0, xferlen = 0;
5167 4835
4836 phba = io_task->conn->phba;
4837 /**
4838 * HBA in error includes BEISCSI_HBA_FW_TIMEOUT. IO path might be
4839 * operational if FW still gets heartbeat from EP FW. Is management
4840 * path really needed to continue further?
4841 */
4842 if (!beiscsi_hba_is_online(phba))
4843 return -EIO;
4844
5168 if (!io_task->conn->login_in_progress) 4845 if (!io_task->conn->login_in_progress)
5169 task->hdr->exp_statsn = 0; 4846 task->hdr->exp_statsn = 0;
5170 4847
@@ -5172,8 +4849,8 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
5172 return beiscsi_mtask(task); 4849 return beiscsi_mtask(task);
5173 4850
5174 io_task->scsi_cmnd = sc; 4851 io_task->scsi_cmnd = sc;
4852 io_task->num_sg = 0;
5175 num_sg = scsi_dma_map(sc); 4853 num_sg = scsi_dma_map(sc);
5176 phba = io_task->conn->phba;
5177 if (num_sg < 0) { 4854 if (num_sg < 0) {
5178 beiscsi_log(phba, KERN_ERR, 4855 beiscsi_log(phba, KERN_ERR,
5179 BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI, 4856 BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI,
@@ -5184,6 +4861,11 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
5184 4861
5185 return num_sg; 4862 return num_sg;
5186 } 4863 }
4864 /**
4865 * For scsi cmd task, check num_sg before unmapping in cleanup_task.
4866 * For management task, cleanup_task checks mtask_addr before unmapping.
4867 */
4868 io_task->num_sg = num_sg;
5187 xferlen = scsi_bufflen(sc); 4869 xferlen = scsi_bufflen(sc);
5188 sg = scsi_sglist(sc); 4870 sg = scsi_sglist(sc);
5189 if (sc->sc_data_direction == DMA_TO_DEVICE) 4871 if (sc->sc_data_direction == DMA_TO_DEVICE)
@@ -5213,6 +4895,12 @@ static int beiscsi_bsg_request(struct bsg_job *job)
5213 shost = iscsi_job_to_shost(job); 4895 shost = iscsi_job_to_shost(job);
5214 phba = iscsi_host_priv(shost); 4896 phba = iscsi_host_priv(shost);
5215 4897
4898 if (!beiscsi_hba_is_online(phba)) {
4899 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
4900 "BM_%d : HBA in error 0x%lx\n", phba->state);
4901 return -ENXIO;
4902 }
4903
5216 switch (bsg_req->msgcode) { 4904 switch (bsg_req->msgcode) {
5217 case ISCSI_BSG_HST_VENDOR: 4905 case ISCSI_BSG_HST_VENDOR:
5218 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 4906 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
@@ -5240,6 +4928,14 @@ static int beiscsi_bsg_request(struct bsg_job *job)
5240 phba->ctrl.mcc_tag_status[tag], 4928 phba->ctrl.mcc_tag_status[tag],
5241 msecs_to_jiffies( 4929 msecs_to_jiffies(
5242 BEISCSI_HOST_MBX_TIMEOUT)); 4930 BEISCSI_HOST_MBX_TIMEOUT));
4931
4932 if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
4933 clear_bit(MCC_TAG_STATE_RUNNING,
4934 &phba->ctrl.ptag_state[tag].tag_state);
4935 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4936 nonemb_cmd.va, nonemb_cmd.dma);
4937 return -EIO;
4938 }
5243 extd_status = (phba->ctrl.mcc_tag_status[tag] & 4939 extd_status = (phba->ctrl.mcc_tag_status[tag] &
5244 CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT; 4940 CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT;
5245 status = phba->ctrl.mcc_tag_status[tag] & CQE_STATUS_MASK; 4941 status = phba->ctrl.mcc_tag_status[tag] & CQE_STATUS_MASK;
@@ -5283,106 +4979,294 @@ void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
5283 beiscsi_log_enable_init(phba, beiscsi_log_enable); 4979 beiscsi_log_enable_init(phba, beiscsi_log_enable);
5284} 4980}
5285 4981
5286/* 4982void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle)
5287 * beiscsi_quiesce()- Cleanup Driver resources
5288 * @phba: Instance Priv structure
5289 * @unload_state:i Clean or EEH unload state
5290 *
5291 * Free the OS and HW resources held by the driver
5292 **/
5293static void beiscsi_quiesce(struct beiscsi_hba *phba,
5294 uint32_t unload_state)
5295{ 4983{
5296 struct hwi_controller *phwi_ctrlr; 4984 if (phba->boot_struct.boot_kset)
5297 struct hwi_context_memory *phwi_context; 4985 return;
5298 struct be_eq_obj *pbe_eq;
5299 unsigned int i, msix_vec;
5300 4986
5301 phwi_ctrlr = phba->phwi_ctrlr; 4987 /* skip if boot work is already in progress */
5302 phwi_context = phwi_ctrlr->phwi_ctxt; 4988 if (test_and_set_bit(BEISCSI_HBA_BOOT_WORK, &phba->state))
5303 hwi_disable_intr(phba); 4989 return;
5304 if (phba->msix_enabled) {
5305 for (i = 0; i <= phba->num_cpus; i++) {
5306 msix_vec = phba->msix_entries[i].vector;
5307 free_irq(msix_vec, &phwi_context->be_eq[i]);
5308 kfree(phba->msi_name[i]);
5309 }
5310 } else
5311 if (phba->pcidev->irq)
5312 free_irq(phba->pcidev->irq, phba);
5313 pci_disable_msix(phba->pcidev);
5314 cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
5315 4990
5316 for (i = 0; i < phba->num_cpus; i++) { 4991 phba->boot_struct.retry = 3;
5317 pbe_eq = &phwi_context->be_eq[i]; 4992 phba->boot_struct.tag = 0;
5318 irq_poll_disable(&pbe_eq->iopoll); 4993 phba->boot_struct.s_handle = s_handle;
4994 phba->boot_struct.action = BEISCSI_BOOT_GET_SHANDLE;
4995 schedule_work(&phba->boot_work);
4996}
4997
4998static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
4999{
5000 struct beiscsi_hba *phba = data;
5001 struct mgmt_session_info *boot_sess = &phba->boot_struct.boot_sess;
5002 struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
5003 char *str = buf;
5004 int rc = -EPERM;
5005
5006 switch (type) {
5007 case ISCSI_BOOT_TGT_NAME:
5008 rc = sprintf(buf, "%.*s\n",
5009 (int)strlen(boot_sess->target_name),
5010 (char *)&boot_sess->target_name);
5011 break;
5012 case ISCSI_BOOT_TGT_IP_ADDR:
5013 if (boot_conn->dest_ipaddr.ip_type == BEISCSI_IP_TYPE_V4)
5014 rc = sprintf(buf, "%pI4\n",
5015 (char *)&boot_conn->dest_ipaddr.addr);
5016 else
5017 rc = sprintf(str, "%pI6\n",
5018 (char *)&boot_conn->dest_ipaddr.addr);
5019 break;
5020 case ISCSI_BOOT_TGT_PORT:
5021 rc = sprintf(str, "%d\n", boot_conn->dest_port);
5022 break;
5023
5024 case ISCSI_BOOT_TGT_CHAP_NAME:
5025 rc = sprintf(str, "%.*s\n",
5026 boot_conn->negotiated_login_options.auth_data.chap.
5027 target_chap_name_length,
5028 (char *)&boot_conn->negotiated_login_options.
5029 auth_data.chap.target_chap_name);
5030 break;
5031 case ISCSI_BOOT_TGT_CHAP_SECRET:
5032 rc = sprintf(str, "%.*s\n",
5033 boot_conn->negotiated_login_options.auth_data.chap.
5034 target_secret_length,
5035 (char *)&boot_conn->negotiated_login_options.
5036 auth_data.chap.target_secret);
5037 break;
5038 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
5039 rc = sprintf(str, "%.*s\n",
5040 boot_conn->negotiated_login_options.auth_data.chap.
5041 intr_chap_name_length,
5042 (char *)&boot_conn->negotiated_login_options.
5043 auth_data.chap.intr_chap_name);
5044 break;
5045 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
5046 rc = sprintf(str, "%.*s\n",
5047 boot_conn->negotiated_login_options.auth_data.chap.
5048 intr_secret_length,
5049 (char *)&boot_conn->negotiated_login_options.
5050 auth_data.chap.intr_secret);
5051 break;
5052 case ISCSI_BOOT_TGT_FLAGS:
5053 rc = sprintf(str, "2\n");
5054 break;
5055 case ISCSI_BOOT_TGT_NIC_ASSOC:
5056 rc = sprintf(str, "0\n");
5057 break;
5319 } 5058 }
5059 return rc;
5060}
5320 5061
5321 if (unload_state == BEISCSI_CLEAN_UNLOAD) { 5062static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
5322 destroy_workqueue(phba->wq); 5063{
5323 beiscsi_clean_port(phba); 5064 struct beiscsi_hba *phba = data;
5324 beiscsi_free_mem(phba); 5065 char *str = buf;
5066 int rc = -EPERM;
5325 5067
5326 beiscsi_unmap_pci_function(phba); 5068 switch (type) {
5327 pci_free_consistent(phba->pcidev, 5069 case ISCSI_BOOT_INI_INITIATOR_NAME:
5328 phba->ctrl.mbox_mem_alloced.size, 5070 rc = sprintf(str, "%s\n",
5329 phba->ctrl.mbox_mem_alloced.va, 5071 phba->boot_struct.boot_sess.initiator_iscsiname);
5330 phba->ctrl.mbox_mem_alloced.dma); 5072 break;
5331 } else {
5332 hwi_purge_eq(phba);
5333 hwi_cleanup(phba);
5334 } 5073 }
5074 return rc;
5075}
5076
5077static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
5078{
5079 struct beiscsi_hba *phba = data;
5080 char *str = buf;
5081 int rc = -EPERM;
5335 5082
5083 switch (type) {
5084 case ISCSI_BOOT_ETH_FLAGS:
5085 rc = sprintf(str, "2\n");
5086 break;
5087 case ISCSI_BOOT_ETH_INDEX:
5088 rc = sprintf(str, "0\n");
5089 break;
5090 case ISCSI_BOOT_ETH_MAC:
5091 rc = beiscsi_get_macaddr(str, phba);
5092 break;
5093 }
5094 return rc;
5336} 5095}
5337 5096
5338static void beiscsi_remove(struct pci_dev *pcidev) 5097static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
5339{ 5098{
5340 struct beiscsi_hba *phba = NULL; 5099 umode_t rc = 0;
5341 5100
5342 phba = pci_get_drvdata(pcidev); 5101 switch (type) {
5343 if (!phba) { 5102 case ISCSI_BOOT_TGT_NAME:
5344 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n"); 5103 case ISCSI_BOOT_TGT_IP_ADDR:
5345 return; 5104 case ISCSI_BOOT_TGT_PORT:
5105 case ISCSI_BOOT_TGT_CHAP_NAME:
5106 case ISCSI_BOOT_TGT_CHAP_SECRET:
5107 case ISCSI_BOOT_TGT_REV_CHAP_NAME:
5108 case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
5109 case ISCSI_BOOT_TGT_NIC_ASSOC:
5110 case ISCSI_BOOT_TGT_FLAGS:
5111 rc = S_IRUGO;
5112 break;
5346 } 5113 }
5114 return rc;
5115}
5347 5116
5348 beiscsi_destroy_def_ifaces(phba); 5117static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
5349 iscsi_boot_destroy_kset(phba->boot_kset); 5118{
5350 iscsi_host_remove(phba->shost); 5119 umode_t rc = 0;
5351 beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD); 5120
5352 pci_dev_put(phba->pcidev); 5121 switch (type) {
5353 iscsi_host_free(phba->shost); 5122 case ISCSI_BOOT_INI_INITIATOR_NAME:
5354 pci_disable_pcie_error_reporting(pcidev); 5123 rc = S_IRUGO;
5355 pci_set_drvdata(pcidev, NULL); 5124 break;
5356 pci_release_regions(pcidev); 5125 }
5357 pci_disable_device(pcidev); 5126 return rc;
5358} 5127}
5359 5128
5360static void beiscsi_msix_enable(struct beiscsi_hba *phba) 5129static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
5361{ 5130{
5362 int i, status; 5131 umode_t rc = 0;
5363 5132
5364 for (i = 0; i <= phba->num_cpus; i++) 5133 switch (type) {
5365 phba->msix_entries[i].entry = i; 5134 case ISCSI_BOOT_ETH_FLAGS:
5135 case ISCSI_BOOT_ETH_MAC:
5136 case ISCSI_BOOT_ETH_INDEX:
5137 rc = S_IRUGO;
5138 break;
5139 }
5140 return rc;
5141}
5366 5142
5367 status = pci_enable_msix_range(phba->pcidev, phba->msix_entries, 5143static void beiscsi_boot_kobj_release(void *data)
5368 phba->num_cpus + 1, phba->num_cpus + 1); 5144{
5369 if (status > 0) 5145 struct beiscsi_hba *phba = data;
5370 phba->msix_enabled = true;
5371 5146
5372 return; 5147 scsi_host_put(phba->shost);
5373} 5148}
5374 5149
5375static void be_eqd_update(struct beiscsi_hba *phba) 5150static int beiscsi_boot_create_kset(struct beiscsi_hba *phba)
5376{ 5151{
5152 struct boot_struct *bs = &phba->boot_struct;
5153 struct iscsi_boot_kobj *boot_kobj;
5154
5155 if (bs->boot_kset) {
5156 __beiscsi_log(phba, KERN_ERR,
5157 "BM_%d: boot_kset already created\n");
5158 return 0;
5159 }
5160
5161 bs->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
5162 if (!bs->boot_kset) {
5163 __beiscsi_log(phba, KERN_ERR,
5164 "BM_%d: boot_kset alloc failed\n");
5165 return -ENOMEM;
5166 }
5167
5168 /* get shost ref because the show function will refer phba */
5169 if (!scsi_host_get(phba->shost))
5170 goto free_kset;
5171
5172 boot_kobj = iscsi_boot_create_target(bs->boot_kset, 0, phba,
5173 beiscsi_show_boot_tgt_info,
5174 beiscsi_tgt_get_attr_visibility,
5175 beiscsi_boot_kobj_release);
5176 if (!boot_kobj)
5177 goto put_shost;
5178
5179 if (!scsi_host_get(phba->shost))
5180 goto free_kset;
5181
5182 boot_kobj = iscsi_boot_create_initiator(bs->boot_kset, 0, phba,
5183 beiscsi_show_boot_ini_info,
5184 beiscsi_ini_get_attr_visibility,
5185 beiscsi_boot_kobj_release);
5186 if (!boot_kobj)
5187 goto put_shost;
5188
5189 if (!scsi_host_get(phba->shost))
5190 goto free_kset;
5191
5192 boot_kobj = iscsi_boot_create_ethernet(bs->boot_kset, 0, phba,
5193 beiscsi_show_boot_eth_info,
5194 beiscsi_eth_get_attr_visibility,
5195 beiscsi_boot_kobj_release);
5196 if (!boot_kobj)
5197 goto put_shost;
5198
5199 return 0;
5200
5201put_shost:
5202 scsi_host_put(phba->shost);
5203free_kset:
5204 iscsi_boot_destroy_kset(bs->boot_kset);
5205 bs->boot_kset = NULL;
5206 return -ENOMEM;
5207}
5208
5209static void beiscsi_boot_work(struct work_struct *work)
5210{
5211 struct beiscsi_hba *phba =
5212 container_of(work, struct beiscsi_hba, boot_work);
5213 struct boot_struct *bs = &phba->boot_struct;
5214 unsigned int tag = 0;
5215
5216 if (!beiscsi_hba_is_online(phba))
5217 return;
5218
5219 beiscsi_log(phba, KERN_INFO,
5220 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
5221 "BM_%d : %s action %d\n",
5222 __func__, phba->boot_struct.action);
5223
5224 switch (phba->boot_struct.action) {
5225 case BEISCSI_BOOT_REOPEN_SESS:
5226 tag = beiscsi_boot_reopen_sess(phba);
5227 break;
5228 case BEISCSI_BOOT_GET_SHANDLE:
5229 tag = __beiscsi_boot_get_shandle(phba, 1);
5230 break;
5231 case BEISCSI_BOOT_GET_SINFO:
5232 tag = beiscsi_boot_get_sinfo(phba);
5233 break;
5234 case BEISCSI_BOOT_LOGOUT_SESS:
5235 tag = beiscsi_boot_logout_sess(phba);
5236 break;
5237 case BEISCSI_BOOT_CREATE_KSET:
5238 beiscsi_boot_create_kset(phba);
5239 /**
5240 * updated boot_kset is made visible to all before
5241 * ending the boot work.
5242 */
5243 mb();
5244 clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state);
5245 return;
5246 }
5247 if (!tag) {
5248 if (bs->retry--)
5249 schedule_work(&phba->boot_work);
5250 else
5251 clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state);
5252 }
5253}
5254
5255static void beiscsi_eqd_update_work(struct work_struct *work)
5256{
5257 struct hwi_context_memory *phwi_context;
5377 struct be_set_eqd set_eqd[MAX_CPUS]; 5258 struct be_set_eqd set_eqd[MAX_CPUS];
5378 struct be_aic_obj *aic;
5379 struct be_eq_obj *pbe_eq;
5380 struct hwi_controller *phwi_ctrlr; 5259 struct hwi_controller *phwi_ctrlr;
5381 struct hwi_context_memory *phwi_context; 5260 struct be_eq_obj *pbe_eq;
5261 struct beiscsi_hba *phba;
5262 unsigned int pps, delta;
5263 struct be_aic_obj *aic;
5382 int eqd, i, num = 0; 5264 int eqd, i, num = 0;
5383 ulong now; 5265 unsigned long now;
5384 u32 pps, delta; 5266
5385 unsigned int tag; 5267 phba = container_of(work, struct beiscsi_hba, eqd_update.work);
5268 if (!beiscsi_hba_is_online(phba))
5269 return;
5386 5270
5387 phwi_ctrlr = phba->phwi_ctrlr; 5271 phwi_ctrlr = phba->phwi_ctrlr;
5388 phwi_context = phwi_ctrlr->phwi_ctxt; 5272 phwi_context = phwi_ctrlr->phwi_ctxt;
@@ -5391,13 +5275,13 @@ static void be_eqd_update(struct beiscsi_hba *phba)
5391 aic = &phba->aic_obj[i]; 5275 aic = &phba->aic_obj[i];
5392 pbe_eq = &phwi_context->be_eq[i]; 5276 pbe_eq = &phwi_context->be_eq[i];
5393 now = jiffies; 5277 now = jiffies;
5394 if (!aic->jiffs || time_before(now, aic->jiffs) || 5278 if (!aic->jiffies || time_before(now, aic->jiffies) ||
5395 pbe_eq->cq_count < aic->eq_prev) { 5279 pbe_eq->cq_count < aic->eq_prev) {
5396 aic->jiffs = now; 5280 aic->jiffies = now;
5397 aic->eq_prev = pbe_eq->cq_count; 5281 aic->eq_prev = pbe_eq->cq_count;
5398 continue; 5282 continue;
5399 } 5283 }
5400 delta = jiffies_to_msecs(now - aic->jiffs); 5284 delta = jiffies_to_msecs(now - aic->jiffies);
5401 pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta); 5285 pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta);
5402 eqd = (pps / 1500) << 2; 5286 eqd = (pps / 1500) << 2;
5403 5287
@@ -5406,7 +5290,7 @@ static void be_eqd_update(struct beiscsi_hba *phba)
5406 eqd = min_t(u32, eqd, phwi_context->max_eqd); 5290 eqd = min_t(u32, eqd, phwi_context->max_eqd);
5407 eqd = max_t(u32, eqd, phwi_context->min_eqd); 5291 eqd = max_t(u32, eqd, phwi_context->min_eqd);
5408 5292
5409 aic->jiffs = now; 5293 aic->jiffies = now;
5410 aic->eq_prev = pbe_eq->cq_count; 5294 aic->eq_prev = pbe_eq->cq_count;
5411 5295
5412 if (eqd != aic->prev_eqd) { 5296 if (eqd != aic->prev_eqd) {
@@ -5416,53 +5300,242 @@ static void be_eqd_update(struct beiscsi_hba *phba)
5416 num++; 5300 num++;
5417 } 5301 }
5418 } 5302 }
5419 if (num) { 5303 if (num)
5420 tag = be_cmd_modify_eq_delay(phba, set_eqd, num); 5304 /* completion of this is ignored */
5421 if (tag) 5305 beiscsi_modify_eq_delay(phba, set_eqd, num);
5422 beiscsi_mccq_compl_wait(phba, tag, NULL, NULL); 5306
5307 schedule_delayed_work(&phba->eqd_update,
5308 msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
5309}
5310
5311static void beiscsi_msix_enable(struct beiscsi_hba *phba)
5312{
5313 int i, status;
5314
5315 for (i = 0; i <= phba->num_cpus; i++)
5316 phba->msix_entries[i].entry = i;
5317
5318 status = pci_enable_msix_range(phba->pcidev, phba->msix_entries,
5319 phba->num_cpus + 1, phba->num_cpus + 1);
5320 if (status > 0)
5321 phba->msix_enabled = true;
5322}
5323
5324static void beiscsi_hw_tpe_check(unsigned long ptr)
5325{
5326 struct beiscsi_hba *phba;
5327 u32 wait;
5328
5329 phba = (struct beiscsi_hba *)ptr;
5330 /* if not TPE, do nothing */
5331 if (!beiscsi_detect_tpe(phba))
5332 return;
5333
5334 /* wait default 4000ms before recovering */
5335 wait = 4000;
5336 if (phba->ue2rp > BEISCSI_UE_DETECT_INTERVAL)
5337 wait = phba->ue2rp - BEISCSI_UE_DETECT_INTERVAL;
5338 queue_delayed_work(phba->wq, &phba->recover_port,
5339 msecs_to_jiffies(wait));
5340}
5341
5342static void beiscsi_hw_health_check(unsigned long ptr)
5343{
5344 struct beiscsi_hba *phba;
5345
5346 phba = (struct beiscsi_hba *)ptr;
5347 beiscsi_detect_ue(phba);
5348 if (beiscsi_detect_ue(phba)) {
5349 __beiscsi_log(phba, KERN_ERR,
5350 "BM_%d : port in error: %lx\n", phba->state);
5351 /* sessions are no longer valid, so first fail the sessions */
5352 queue_work(phba->wq, &phba->sess_work);
5353
5354 /* detect UER supported */
5355 if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state))
5356 return;
5357 /* modify this timer to check TPE */
5358 phba->hw_check.function = beiscsi_hw_tpe_check;
5423 } 5359 }
5360
5361 mod_timer(&phba->hw_check,
5362 jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
5424} 5363}
5425 5364
5426static void be_check_boot_session(struct beiscsi_hba *phba) 5365/*
5366 * beiscsi_enable_port()- Enables the disabled port.
5367 * Only port resources freed in disable function are reallocated.
5368 * This is called in HBA error handling path.
5369 *
5370 * @phba: Instance of driver private structure
5371 *
5372 **/
5373static int beiscsi_enable_port(struct beiscsi_hba *phba)
5427{ 5374{
5428 if (beiscsi_setup_boot_info(phba)) 5375 struct hwi_context_memory *phwi_context;
5429 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5376 struct hwi_controller *phwi_ctrlr;
5430 "BM_%d : Could not set up " 5377 struct be_eq_obj *pbe_eq;
5431 "iSCSI boot info on async event.\n"); 5378 int ret, i;
5379
5380 if (test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
5381 __beiscsi_log(phba, KERN_ERR,
5382 "BM_%d : %s : port is online %lx\n",
5383 __func__, phba->state);
5384 return 0;
5385 }
5386
5387 ret = beiscsi_init_sliport(phba);
5388 if (ret)
5389 return ret;
5390
5391 if (enable_msix)
5392 find_num_cpus(phba);
5393 else
5394 phba->num_cpus = 1;
5395 if (enable_msix) {
5396 beiscsi_msix_enable(phba);
5397 if (!phba->msix_enabled)
5398 phba->num_cpus = 1;
5399 }
5400
5401 beiscsi_get_params(phba);
5402 /* Re-enable UER. If different TPE occurs then it is recoverable. */
5403 beiscsi_set_uer_feature(phba);
5404
5405 phba->shost->max_id = phba->params.cxns_per_ctrl;
5406 phba->shost->can_queue = phba->params.ios_per_ctrl;
5407 ret = hwi_init_controller(phba);
5408 if (ret) {
5409 __beiscsi_log(phba, KERN_ERR,
5410 "BM_%d : init controller failed %d\n", ret);
5411 goto disable_msix;
5412 }
5413
5414 for (i = 0; i < MAX_MCC_CMD; i++) {
5415 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
5416 phba->ctrl.mcc_tag[i] = i + 1;
5417 phba->ctrl.mcc_tag_status[i + 1] = 0;
5418 phba->ctrl.mcc_tag_available++;
5419 }
5420
5421 phwi_ctrlr = phba->phwi_ctrlr;
5422 phwi_context = phwi_ctrlr->phwi_ctxt;
5423 for (i = 0; i < phba->num_cpus; i++) {
5424 pbe_eq = &phwi_context->be_eq[i];
5425 irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
5426 }
5427
5428 i = (phba->msix_enabled) ? i : 0;
5429 /* Work item for MCC handling */
5430 pbe_eq = &phwi_context->be_eq[i];
5431 INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);
5432
5433 ret = beiscsi_init_irqs(phba);
5434 if (ret < 0) {
5435 __beiscsi_log(phba, KERN_ERR,
5436 "BM_%d : setup IRQs failed %d\n", ret);
5437 goto cleanup_port;
5438 }
5439 hwi_enable_intr(phba);
5440 /* port operational: clear all error bits */
5441 set_bit(BEISCSI_HBA_ONLINE, &phba->state);
5442 __beiscsi_log(phba, KERN_INFO,
5443 "BM_%d : port online: 0x%lx\n", phba->state);
5444
5445 /* start hw_check timer and eqd_update work */
5446 schedule_delayed_work(&phba->eqd_update,
5447 msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
5448
5449 /**
5450 * Timer function gets modified for TPE detection.
5451 * Always reinit to do health check first.
5452 */
5453 phba->hw_check.function = beiscsi_hw_health_check;
5454 mod_timer(&phba->hw_check,
5455 jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
5456 return 0;
5457
5458cleanup_port:
5459 for (i = 0; i < phba->num_cpus; i++) {
5460 pbe_eq = &phwi_context->be_eq[i];
5461 irq_poll_disable(&pbe_eq->iopoll);
5462 }
5463 hwi_cleanup_port(phba);
5464
5465disable_msix:
5466 if (phba->msix_enabled)
5467 pci_disable_msix(phba->pcidev);
5468
5469 return ret;
5432} 5470}
5433 5471
5434/* 5472/*
5435 * beiscsi_hw_health_check()- Check adapter health 5473 * beiscsi_disable_port()- Disable port and cleanup driver resources.
5436 * @work: work item to check HW health 5474 * This is called in HBA error handling and driver removal.
5475 * @phba: Instance Priv structure
5476 * @unload: indicate driver is unloading
5437 * 5477 *
5438 * Check if adapter in an unrecoverable state or not. 5478 * Free the OS and HW resources held by the driver
5439 **/ 5479 **/
5440static void 5480static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload)
5441beiscsi_hw_health_check(struct work_struct *work)
5442{ 5481{
5443 struct beiscsi_hba *phba = 5482 struct hwi_context_memory *phwi_context;
5444 container_of(work, struct beiscsi_hba, 5483 struct hwi_controller *phwi_ctrlr;
5445 beiscsi_hw_check_task.work); 5484 struct be_eq_obj *pbe_eq;
5485 unsigned int i, msix_vec;
5446 5486
5447 be_eqd_update(phba); 5487 if (!test_and_clear_bit(BEISCSI_HBA_ONLINE, &phba->state))
5488 return;
5448 5489
5449 if (phba->state & BE_ADAPTER_CHECK_BOOT) { 5490 phwi_ctrlr = phba->phwi_ctrlr;
5450 if ((phba->get_boot > 0) && (!phba->boot_kset)) { 5491 phwi_context = phwi_ctrlr->phwi_ctxt;
5451 phba->get_boot--; 5492 hwi_disable_intr(phba);
5452 if (!(phba->get_boot % BE_GET_BOOT_TO)) 5493 if (phba->msix_enabled) {
5453 be_check_boot_session(phba); 5494 for (i = 0; i <= phba->num_cpus; i++) {
5454 } else { 5495 msix_vec = phba->msix_entries[i].vector;
5455 phba->state &= ~BE_ADAPTER_CHECK_BOOT; 5496 free_irq(msix_vec, &phwi_context->be_eq[i]);
5456 phba->get_boot = 0; 5497 kfree(phba->msi_name[i]);
5457 } 5498 }
5499 } else
5500 if (phba->pcidev->irq)
5501 free_irq(phba->pcidev->irq, phba);
5502 pci_disable_msix(phba->pcidev);
5503
5504 for (i = 0; i < phba->num_cpus; i++) {
5505 pbe_eq = &phwi_context->be_eq[i];
5506 irq_poll_disable(&pbe_eq->iopoll);
5458 } 5507 }
5508 cancel_delayed_work_sync(&phba->eqd_update);
5509 cancel_work_sync(&phba->boot_work);
5510 /* WQ might be running cancel queued mcc_work if we are not exiting */
5511 if (!unload && beiscsi_hba_in_error(phba)) {
5512 pbe_eq = &phwi_context->be_eq[i];
5513 cancel_work_sync(&pbe_eq->mcc_work);
5514 }
5515 hwi_cleanup_port(phba);
5516}
5459 5517
5460 beiscsi_ue_detect(phba); 5518static void beiscsi_sess_work(struct work_struct *work)
5519{
5520 struct beiscsi_hba *phba;
5461 5521
5462 schedule_delayed_work(&phba->beiscsi_hw_check_task, 5522 phba = container_of(work, struct beiscsi_hba, sess_work);
5463 msecs_to_jiffies(1000)); 5523 /*
5524 * This work gets scheduled only in case of HBA error.
5525 * Old sessions are gone so need to be re-established.
5526 * iscsi_session_failure needs process context hence this work.
5527 */
5528 iscsi_host_for_each_session(phba->shost, beiscsi_session_fail);
5464} 5529}
5465 5530
5531static void beiscsi_recover_port(struct work_struct *work)
5532{
5533 struct beiscsi_hba *phba;
5534
5535 phba = container_of(work, struct beiscsi_hba, recover_port.work);
5536 beiscsi_disable_port(phba, 0);
5537 beiscsi_enable_port(phba);
5538}
5466 5539
5467static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev, 5540static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
5468 pci_channel_state_t state) 5541 pci_channel_state_t state)
@@ -5470,12 +5543,18 @@ static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
5470 struct beiscsi_hba *phba = NULL; 5543 struct beiscsi_hba *phba = NULL;
5471 5544
5472 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5545 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
5473 phba->state |= BE_ADAPTER_PCI_ERR; 5546 set_bit(BEISCSI_HBA_PCI_ERR, &phba->state);
5474 5547
5475 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5548 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5476 "BM_%d : EEH error detected\n"); 5549 "BM_%d : EEH error detected\n");
5477 5550
5478 beiscsi_quiesce(phba, BEISCSI_EEH_UNLOAD); 5551 /* first stop UE detection when PCI error detected */
5552 del_timer_sync(&phba->hw_check);
5553 cancel_delayed_work_sync(&phba->recover_port);
5554
5555 /* sessions are no longer valid, so first fail the sessions */
5556 iscsi_host_for_each_session(phba->shost, beiscsi_session_fail);
5557 beiscsi_disable_port(phba, 0);
5479 5558
5480 if (state == pci_channel_io_perm_failure) { 5559 if (state == pci_channel_io_perm_failure) {
5481 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5560 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -5515,9 +5594,8 @@ static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
5515 pci_set_power_state(pdev, PCI_D0); 5594 pci_set_power_state(pdev, PCI_D0);
5516 pci_restore_state(pdev); 5595 pci_restore_state(pdev);
5517 5596
5518 /* Wait for the CHIP Reset to complete */ 5597 status = beiscsi_check_fw_rdy(phba);
5519 status = be_chk_reset_complete(phba); 5598 if (status) {
5520 if (!status) {
5521 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5599 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
5522 "BM_%d : EEH Reset Completed\n"); 5600 "BM_%d : EEH Reset Completed\n");
5523 } else { 5601 } else {
@@ -5532,87 +5610,16 @@ static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
5532 5610
5533static void beiscsi_eeh_resume(struct pci_dev *pdev) 5611static void beiscsi_eeh_resume(struct pci_dev *pdev)
5534{ 5612{
5535 int ret = 0, i; 5613 struct beiscsi_hba *phba;
5536 struct be_eq_obj *pbe_eq; 5614 int ret;
5537 struct beiscsi_hba *phba = NULL;
5538 struct hwi_controller *phwi_ctrlr;
5539 struct hwi_context_memory *phwi_context;
5540 5615
5541 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5616 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
5542 pci_save_state(pdev); 5617 pci_save_state(pdev);
5543 5618
5544 if (enable_msix) 5619 ret = beiscsi_enable_port(phba);
5545 find_num_cpus(phba); 5620 if (ret)
5546 else 5621 __beiscsi_log(phba, KERN_ERR,
5547 phba->num_cpus = 1; 5622 "BM_%d : AER EEH resume failed\n");
5548
5549 if (enable_msix) {
5550 beiscsi_msix_enable(phba);
5551 if (!phba->msix_enabled)
5552 phba->num_cpus = 1;
5553 }
5554
5555 ret = beiscsi_cmd_reset_function(phba);
5556 if (ret) {
5557 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5558 "BM_%d : Reset Failed\n");
5559 goto ret_err;
5560 }
5561
5562 ret = be_chk_reset_complete(phba);
5563 if (ret) {
5564 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5565 "BM_%d : Failed to get out of reset.\n");
5566 goto ret_err;
5567 }
5568
5569 beiscsi_get_params(phba);
5570 phba->shost->max_id = phba->params.cxns_per_ctrl;
5571 phba->shost->can_queue = phba->params.ios_per_ctrl;
5572 ret = hwi_init_controller(phba);
5573 if (ret) {
5574 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5575 "BM_%d : beiscsi_eeh_resume -"
5576 "Failed to initialize beiscsi_hba.\n");
5577 goto ret_err;
5578 }
5579
5580 for (i = 0; i < MAX_MCC_CMD; i++) {
5581 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
5582 phba->ctrl.mcc_tag[i] = i + 1;
5583 phba->ctrl.mcc_tag_status[i + 1] = 0;
5584 phba->ctrl.mcc_tag_available++;
5585 }
5586
5587 phwi_ctrlr = phba->phwi_ctrlr;
5588 phwi_context = phwi_ctrlr->phwi_ctxt;
5589
5590 for (i = 0; i < phba->num_cpus; i++) {
5591 pbe_eq = &phwi_context->be_eq[i];
5592 irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget,
5593 be_iopoll);
5594 }
5595
5596 i = (phba->msix_enabled) ? i : 0;
5597 /* Work item for MCC handling */
5598 pbe_eq = &phwi_context->be_eq[i];
5599 INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
5600
5601 ret = beiscsi_init_irqs(phba);
5602 if (ret < 0) {
5603 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5604 "BM_%d : beiscsi_eeh_resume - "
5605 "Failed to beiscsi_init_irqs\n");
5606 goto ret_err;
5607 }
5608
5609 hwi_enable_intr(phba);
5610 phba->state &= ~BE_ADAPTER_PCI_ERR;
5611
5612 return;
5613ret_err:
5614 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5615 "BM_%d : AER EEH Resume Failed\n");
5616} 5623}
5617 5624
5618static int beiscsi_dev_probe(struct pci_dev *pcidev, 5625static int beiscsi_dev_probe(struct pci_dev *pcidev,
@@ -5622,7 +5629,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
5622 struct hwi_controller *phwi_ctrlr; 5629 struct hwi_controller *phwi_ctrlr;
5623 struct hwi_context_memory *phwi_context; 5630 struct hwi_context_memory *phwi_context;
5624 struct be_eq_obj *pbe_eq; 5631 struct be_eq_obj *pbe_eq;
5625 int ret = 0, i; 5632 unsigned int s_handle;
5633 int ret, i;
5626 5634
5627 ret = beiscsi_enable_pci(pcidev); 5635 ret = beiscsi_enable_pci(pcidev);
5628 if (ret < 0) { 5636 if (ret < 0) {
@@ -5635,6 +5643,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
5635 if (!phba) { 5643 if (!phba) {
5636 dev_err(&pcidev->dev, 5644 dev_err(&pcidev->dev,
5637 "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n"); 5645 "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
5646 ret = -ENOMEM;
5638 goto disable_pci; 5647 goto disable_pci;
5639 } 5648 }
5640 5649
@@ -5650,10 +5659,8 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
5650 /* Initialize Driver configuration Paramters */ 5659 /* Initialize Driver configuration Paramters */
5651 beiscsi_hba_attrs_init(phba); 5660 beiscsi_hba_attrs_init(phba);
5652 5661
5653 phba->fw_timeout = false;
5654 phba->mac_addr_set = false; 5662 phba->mac_addr_set = false;
5655 5663
5656
5657 switch (pcidev->device) { 5664 switch (pcidev->device) {
5658 case BE_DEVICE_ID1: 5665 case BE_DEVICE_ID1:
5659 case OC_DEVICE_ID1: 5666 case OC_DEVICE_ID1:
@@ -5677,39 +5684,26 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
5677 ret = be_ctrl_init(phba, pcidev); 5684 ret = be_ctrl_init(phba, pcidev);
5678 if (ret) { 5685 if (ret) {
5679 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5686 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5680 "BM_%d : beiscsi_dev_probe-" 5687 "BM_%d : be_ctrl_init failed\n");
5681 "Failed in be_ctrl_init\n");
5682 goto hba_free; 5688 goto hba_free;
5683 } 5689 }
5684 5690
5685 /* 5691 ret = beiscsi_init_sliport(phba);
5686 * FUNCTION_RESET should clean up any stale info in FW for this fn 5692 if (ret)
5687 */
5688 ret = beiscsi_cmd_reset_function(phba);
5689 if (ret) {
5690 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5691 "BM_%d : Reset Failed\n");
5692 goto hba_free;
5693 }
5694 ret = be_chk_reset_complete(phba);
5695 if (ret) {
5696 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5697 "BM_%d : Failed to get out of reset.\n");
5698 goto hba_free; 5693 goto hba_free;
5699 }
5700 5694
5701 spin_lock_init(&phba->io_sgl_lock); 5695 spin_lock_init(&phba->io_sgl_lock);
5702 spin_lock_init(&phba->mgmt_sgl_lock); 5696 spin_lock_init(&phba->mgmt_sgl_lock);
5703 spin_lock_init(&phba->isr_lock);
5704 spin_lock_init(&phba->async_pdu_lock); 5697 spin_lock_init(&phba->async_pdu_lock);
5705 ret = mgmt_get_fw_config(&phba->ctrl, phba); 5698 ret = beiscsi_get_fw_config(&phba->ctrl, phba);
5706 if (ret != 0) { 5699 if (ret != 0) {
5707 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5700 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5708 "BM_%d : Error getting fw config\n"); 5701 "BM_%d : Error getting fw config\n");
5709 goto free_port; 5702 goto free_port;
5710 } 5703 }
5711 mgmt_get_port_name(&phba->ctrl, phba); 5704 beiscsi_get_port_name(&phba->ctrl, phba);
5712 beiscsi_get_params(phba); 5705 beiscsi_get_params(phba);
5706 beiscsi_set_uer_feature(phba);
5713 5707
5714 if (enable_msix) 5708 if (enable_msix)
5715 find_num_cpus(phba); 5709 find_num_cpus(phba);
@@ -5754,25 +5748,24 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
5754 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5748 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5755 "BM_%d : beiscsi_dev_probe-" 5749 "BM_%d : beiscsi_dev_probe-"
5756 "Failed to allocate work queue\n"); 5750 "Failed to allocate work queue\n");
5751 ret = -ENOMEM;
5757 goto free_twq; 5752 goto free_twq;
5758 } 5753 }
5759 5754
5760 INIT_DELAYED_WORK(&phba->beiscsi_hw_check_task, 5755 INIT_DELAYED_WORK(&phba->eqd_update, beiscsi_eqd_update_work);
5761 beiscsi_hw_health_check);
5762 5756
5763 phwi_ctrlr = phba->phwi_ctrlr; 5757 phwi_ctrlr = phba->phwi_ctrlr;
5764 phwi_context = phwi_ctrlr->phwi_ctxt; 5758 phwi_context = phwi_ctrlr->phwi_ctxt;
5765 5759
5766 for (i = 0; i < phba->num_cpus; i++) { 5760 for (i = 0; i < phba->num_cpus; i++) {
5767 pbe_eq = &phwi_context->be_eq[i]; 5761 pbe_eq = &phwi_context->be_eq[i];
5768 irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, 5762 irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
5769 be_iopoll);
5770 } 5763 }
5771 5764
5772 i = (phba->msix_enabled) ? i : 0; 5765 i = (phba->msix_enabled) ? i : 0;
5773 /* Work item for MCC handling */ 5766 /* Work item for MCC handling */
5774 pbe_eq = &phwi_context->be_eq[i]; 5767 pbe_eq = &phwi_context->be_eq[i];
5775 INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs); 5768 INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);
5776 5769
5777 ret = beiscsi_init_irqs(phba); 5770 ret = beiscsi_init_irqs(phba);
5778 if (ret < 0) { 5771 if (ret < 0) {
@@ -5783,22 +5776,42 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
5783 } 5776 }
5784 hwi_enable_intr(phba); 5777 hwi_enable_intr(phba);
5785 5778
5786 if (iscsi_host_add(phba->shost, &phba->pcidev->dev)) 5779 ret = iscsi_host_add(phba->shost, &phba->pcidev->dev);
5780 if (ret)
5787 goto free_blkenbld; 5781 goto free_blkenbld;
5788 5782
5789 if (beiscsi_setup_boot_info(phba)) 5783 /* set online bit after port is operational */
5790 /* 5784 set_bit(BEISCSI_HBA_ONLINE, &phba->state);
5791 * log error but continue, because we may not be using 5785 __beiscsi_log(phba, KERN_INFO,
5792 * iscsi boot. 5786 "BM_%d : port online: 0x%lx\n", phba->state);
5787
5788 INIT_WORK(&phba->boot_work, beiscsi_boot_work);
5789 ret = beiscsi_boot_get_shandle(phba, &s_handle);
5790 if (ret > 0) {
5791 beiscsi_start_boot_work(phba, s_handle);
5792 /**
5793 * Set this bit after starting the work to let
5794 * probe handle it first.
5795 * ASYNC event can too schedule this work.
5793 */ 5796 */
5794 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5797 set_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state);
5795 "BM_%d : Could not set up " 5798 }
5796 "iSCSI boot info.\n");
5797 5799
5798 beiscsi_create_def_ifaces(phba); 5800 beiscsi_iface_create_default(phba);
5799 schedule_delayed_work(&phba->beiscsi_hw_check_task, 5801 schedule_delayed_work(&phba->eqd_update,
5800 msecs_to_jiffies(1000)); 5802 msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
5801 5803
5804 INIT_WORK(&phba->sess_work, beiscsi_sess_work);
5805 INIT_DELAYED_WORK(&phba->recover_port, beiscsi_recover_port);
5806 /**
5807 * Start UE detection here. UE before this will cause stall in probe
5808 * and eventually fail the probe.
5809 */
5810 init_timer(&phba->hw_check);
5811 phba->hw_check.function = beiscsi_hw_health_check;
5812 phba->hw_check.data = (unsigned long)phba;
5813 mod_timer(&phba->hw_check,
5814 jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
5802 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 5815 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
5803 "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n"); 5816 "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
5804 return 0; 5817 return 0;
@@ -5810,7 +5823,8 @@ free_blkenbld:
5810 irq_poll_disable(&pbe_eq->iopoll); 5823 irq_poll_disable(&pbe_eq->iopoll);
5811 } 5824 }
5812free_twq: 5825free_twq:
5813 beiscsi_clean_port(phba); 5826 hwi_cleanup_port(phba);
5827 beiscsi_cleanup_port(phba);
5814 beiscsi_free_mem(phba); 5828 beiscsi_free_mem(phba);
5815free_port: 5829free_port:
5816 pci_free_consistent(phba->pcidev, 5830 pci_free_consistent(phba->pcidev,
@@ -5830,6 +5844,49 @@ disable_pci:
5830 return ret; 5844 return ret;
5831} 5845}
5832 5846
5847static void beiscsi_remove(struct pci_dev *pcidev)
5848{
5849 struct beiscsi_hba *phba = NULL;
5850
5851 phba = pci_get_drvdata(pcidev);
5852 if (!phba) {
5853 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
5854 return;
5855 }
5856
5857 /* first stop UE detection before unloading */
5858 del_timer_sync(&phba->hw_check);
5859 cancel_delayed_work_sync(&phba->recover_port);
5860 cancel_work_sync(&phba->sess_work);
5861
5862 beiscsi_iface_destroy_default(phba);
5863 iscsi_host_remove(phba->shost);
5864 beiscsi_disable_port(phba, 1);
5865
5866 /* after cancelling boot_work */
5867 iscsi_boot_destroy_kset(phba->boot_struct.boot_kset);
5868
5869 /* free all resources */
5870 destroy_workqueue(phba->wq);
5871 beiscsi_cleanup_port(phba);
5872 beiscsi_free_mem(phba);
5873
5874 /* ctrl uninit */
5875 beiscsi_unmap_pci_function(phba);
5876 pci_free_consistent(phba->pcidev,
5877 phba->ctrl.mbox_mem_alloced.size,
5878 phba->ctrl.mbox_mem_alloced.va,
5879 phba->ctrl.mbox_mem_alloced.dma);
5880
5881 pci_dev_put(phba->pcidev);
5882 iscsi_host_free(phba->shost);
5883 pci_disable_pcie_error_reporting(pcidev);
5884 pci_set_drvdata(pcidev, NULL);
5885 pci_release_regions(pcidev);
5886 pci_disable_device(pcidev);
5887}
5888
5889
5833static struct pci_error_handlers beiscsi_eeh_handlers = { 5890static struct pci_error_handlers beiscsi_eeh_handlers = {
5834 .error_detected = beiscsi_eeh_err_detected, 5891 .error_detected = beiscsi_eeh_err_detected,
5835 .slot_reset = beiscsi_eeh_reset, 5892 .slot_reset = beiscsi_eeh_reset,
@@ -5846,9 +5903,9 @@ struct iscsi_transport beiscsi_iscsi_transport = {
5846 .create_conn = beiscsi_conn_create, 5903 .create_conn = beiscsi_conn_create,
5847 .bind_conn = beiscsi_conn_bind, 5904 .bind_conn = beiscsi_conn_bind,
5848 .destroy_conn = iscsi_conn_teardown, 5905 .destroy_conn = iscsi_conn_teardown,
5849 .attr_is_visible = be2iscsi_attr_is_visible, 5906 .attr_is_visible = beiscsi_attr_is_visible,
5850 .set_iface_param = be2iscsi_iface_set_param, 5907 .set_iface_param = beiscsi_iface_set_param,
5851 .get_iface_param = be2iscsi_iface_get_param, 5908 .get_iface_param = beiscsi_iface_get_param,
5852 .set_param = beiscsi_set_param, 5909 .set_param = beiscsi_set_param,
5853 .get_conn_param = iscsi_conn_get_param, 5910 .get_conn_param = iscsi_conn_get_param,
5854 .get_session_param = iscsi_session_get_param, 5911 .get_session_param = iscsi_session_get_param,
@@ -5877,7 +5934,6 @@ static struct pci_driver beiscsi_pci_driver = {
5877 .err_handler = &beiscsi_eeh_handlers 5934 .err_handler = &beiscsi_eeh_handlers
5878}; 5935};
5879 5936
5880
5881static int __init beiscsi_module_init(void) 5937static int __init beiscsi_module_init(void)
5882{ 5938{
5883 int ret; 5939 int ret;
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 30a4606d9a3b..6376657e45f7 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2015 Emulex 2 * Copyright (C) 2005 - 2016 Broadcom
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -7,10 +7,10 @@
7 * as published by the Free Software Foundation. The full GNU General 7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * linux-drivers@avagotech.com 13 * linux-drivers@broadcom.com
14 * 14 *
15 * Emulex 15 * Emulex
16 * 3333 Susan Street 16 * 3333 Susan Street
@@ -36,7 +36,7 @@
36#include <scsi/scsi_transport_iscsi.h> 36#include <scsi/scsi_transport_iscsi.h>
37 37
38#define DRV_NAME "be2iscsi" 38#define DRV_NAME "be2iscsi"
39#define BUILD_STR "11.0.0.0" 39#define BUILD_STR "11.2.0.0"
40#define BE_NAME "Emulex OneConnect" \ 40#define BE_NAME "Emulex OneConnect" \
41 "Open-iSCSI Driver version" BUILD_STR 41 "Open-iSCSI Driver version" BUILD_STR
42#define DRV_DESC BE_NAME " " "Driver" 42#define DRV_DESC BE_NAME " " "Driver"
@@ -82,36 +82,12 @@
82#define BEISCSI_MAX_FRAGS_INIT 192 82#define BEISCSI_MAX_FRAGS_INIT 192
83#define BE_NUM_MSIX_ENTRIES 1 83#define BE_NUM_MSIX_ENTRIES 1
84 84
85#define MPU_EP_CONTROL 0
86#define MPU_EP_SEMAPHORE 0xac
87#define BE2_SOFT_RESET 0x5c
88#define BE2_PCI_ONLINE0 0xb0
89#define BE2_PCI_ONLINE1 0xb4
90#define BE2_SET_RESET 0x80
91#define BE2_MPU_IRAM_ONLINE 0x00000080
92
93#define BE_SENSE_INFO_SIZE 258 85#define BE_SENSE_INFO_SIZE 258
94#define BE_ISCSI_PDU_HEADER_SIZE 64 86#define BE_ISCSI_PDU_HEADER_SIZE 64
95#define BE_MIN_MEM_SIZE 16384 87#define BE_MIN_MEM_SIZE 16384
96#define MAX_CMD_SZ 65536 88#define MAX_CMD_SZ 65536
97#define IIOC_SCSI_DATA 0x05 /* Write Operation */ 89#define IIOC_SCSI_DATA 0x05 /* Write Operation */
98 90
99#define INVALID_SESS_HANDLE 0xFFFFFFFF
100
101/**
102 * Adapter States
103 **/
104#define BE_ADAPTER_LINK_UP 0x001
105#define BE_ADAPTER_LINK_DOWN 0x002
106#define BE_ADAPTER_PCI_ERR 0x004
107#define BE_ADAPTER_CHECK_BOOT 0x008
108
109
110#define BEISCSI_CLEAN_UNLOAD 0x01
111#define BEISCSI_EEH_UNLOAD 0x02
112
113#define BE_GET_BOOT_RETRIES 45
114#define BE_GET_BOOT_TO 20
115/** 91/**
116 * hardware needs the async PDU buffers to be posted in multiples of 8 92 * hardware needs the async PDU buffers to be posted in multiples of 8
117 * So have atleast 8 of them by default 93 * So have atleast 8 of them by default
@@ -378,7 +354,6 @@ struct beiscsi_hba {
378 struct sgl_handle **eh_sgl_hndl_base; 354 struct sgl_handle **eh_sgl_hndl_base;
379 spinlock_t io_sgl_lock; 355 spinlock_t io_sgl_lock;
380 spinlock_t mgmt_sgl_lock; 356 spinlock_t mgmt_sgl_lock;
381 spinlock_t isr_lock;
382 spinlock_t async_pdu_lock; 357 spinlock_t async_pdu_lock;
383 unsigned int age; 358 unsigned int age;
384 struct list_head hba_queue; 359 struct list_head hba_queue;
@@ -390,7 +365,6 @@ struct beiscsi_hba {
390 struct ulp_cid_info *cid_array_info[BEISCSI_ULP_COUNT]; 365 struct ulp_cid_info *cid_array_info[BEISCSI_ULP_COUNT];
391 struct iscsi_endpoint **ep_array; 366 struct iscsi_endpoint **ep_array;
392 struct beiscsi_conn **conn_table; 367 struct beiscsi_conn **conn_table;
393 struct iscsi_boot_kset *boot_kset;
394 struct Scsi_Host *shost; 368 struct Scsi_Host *shost;
395 struct iscsi_iface *ipv4_iface; 369 struct iscsi_iface *ipv4_iface;
396 struct iscsi_iface *ipv6_iface; 370 struct iscsi_iface *ipv6_iface;
@@ -418,12 +392,33 @@ struct beiscsi_hba {
418 unsigned long ulp_supported; 392 unsigned long ulp_supported;
419 } fw_config; 393 } fw_config;
420 394
421 unsigned int state; 395 unsigned long state;
396#define BEISCSI_HBA_ONLINE 0
397#define BEISCSI_HBA_LINK_UP 1
398#define BEISCSI_HBA_BOOT_FOUND 2
399#define BEISCSI_HBA_BOOT_WORK 3
400#define BEISCSI_HBA_UER_SUPP 4
401#define BEISCSI_HBA_PCI_ERR 5
402#define BEISCSI_HBA_FW_TIMEOUT 6
403#define BEISCSI_HBA_IN_UE 7
404#define BEISCSI_HBA_IN_TPE 8
405
406/* error bits */
407#define BEISCSI_HBA_IN_ERR ((1 << BEISCSI_HBA_PCI_ERR) | \
408 (1 << BEISCSI_HBA_FW_TIMEOUT) | \
409 (1 << BEISCSI_HBA_IN_UE) | \
410 (1 << BEISCSI_HBA_IN_TPE))
411
422 u8 optic_state; 412 u8 optic_state;
423 int get_boot; 413 struct delayed_work eqd_update;
424 bool fw_timeout; 414 /* update EQ delay timer every 1000ms */
425 bool ue_detected; 415#define BEISCSI_EQD_UPDATE_INTERVAL 1000
426 struct delayed_work beiscsi_hw_check_task; 416 struct timer_list hw_check;
417 /* check for UE every 1000ms */
418#define BEISCSI_UE_DETECT_INTERVAL 1000
419 u32 ue2rp;
420 struct delayed_work recover_port;
421 struct work_struct sess_work;
427 422
428 bool mac_addr_set; 423 bool mac_addr_set;
429 u8 mac_address[ETH_ALEN]; 424 u8 mac_address[ETH_ALEN];
@@ -435,7 +430,6 @@ struct beiscsi_hba {
435 struct be_ctrl_info ctrl; 430 struct be_ctrl_info ctrl;
436 unsigned int generation; 431 unsigned int generation;
437 unsigned int interface_handle; 432 unsigned int interface_handle;
438 struct mgmt_session_info boot_sess;
439 struct invalidate_command_table inv_tbl[128]; 433 struct invalidate_command_table inv_tbl[128];
440 434
441 struct be_aic_obj aic_obj[MAX_CPUS]; 435 struct be_aic_obj aic_obj[MAX_CPUS];
@@ -444,8 +438,29 @@ struct beiscsi_hba {
444 struct scatterlist *sg, 438 struct scatterlist *sg,
445 uint32_t num_sg, uint32_t xferlen, 439 uint32_t num_sg, uint32_t xferlen,
446 uint32_t writedir); 440 uint32_t writedir);
441 struct boot_struct {
442 int retry;
443 unsigned int tag;
444 unsigned int s_handle;
445 struct be_dma_mem nonemb_cmd;
446 enum {
447 BEISCSI_BOOT_REOPEN_SESS = 1,
448 BEISCSI_BOOT_GET_SHANDLE,
449 BEISCSI_BOOT_GET_SINFO,
450 BEISCSI_BOOT_LOGOUT_SESS,
451 BEISCSI_BOOT_CREATE_KSET,
452 } action;
453 struct mgmt_session_info boot_sess;
454 struct iscsi_boot_kset *boot_kset;
455 } boot_struct;
456 struct work_struct boot_work;
447}; 457};
448 458
459#define beiscsi_hba_in_error(phba) ((phba)->state & BEISCSI_HBA_IN_ERR)
460#define beiscsi_hba_is_online(phba) \
461 (!beiscsi_hba_in_error((phba)) && \
462 test_bit(BEISCSI_HBA_ONLINE, &phba->state))
463
449struct beiscsi_session { 464struct beiscsi_session {
450 struct pci_pool *bhs_pool; 465 struct pci_pool *bhs_pool;
451}; 466};
@@ -508,6 +523,7 @@ struct beiscsi_io_task {
508 struct sgl_handle *psgl_handle; 523 struct sgl_handle *psgl_handle;
509 struct beiscsi_conn *conn; 524 struct beiscsi_conn *conn;
510 struct scsi_cmnd *scsi_cmnd; 525 struct scsi_cmnd *scsi_cmnd;
526 int num_sg;
511 struct hwi_wrb_context *pwrb_context; 527 struct hwi_wrb_context *pwrb_context;
512 unsigned int cmd_sn; 528 unsigned int cmd_sn;
513 unsigned int flags; 529 unsigned int flags;
@@ -592,80 +608,81 @@ struct amap_beiscsi_offload_params {
592 u8 max_recv_data_segment_length[32]; 608 u8 max_recv_data_segment_length[32];
593}; 609};
594 610
595/* void hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, 611struct hd_async_handle {
596 struct beiscsi_hba *phba, struct sol_cqe *psol);*/
597
598struct async_pdu_handle {
599 struct list_head link; 612 struct list_head link;
600 struct be_bus_address pa; 613 struct be_bus_address pa;
601 void *pbuffer; 614 void *pbuffer;
602 unsigned int consumed; 615 u32 buffer_len;
603 unsigned char index; 616 u16 index;
604 unsigned char is_header; 617 u16 cri;
605 unsigned short cri; 618 u8 is_header;
606 unsigned long buffer_len; 619 u8 is_final;
607}; 620};
608 621
609struct hwi_async_entry { 622/**
610 struct { 623 * This has list of async PDUs that are waiting to be processed.
611 unsigned char hdr_received; 624 * Buffers live in this list for a brief duration before they get
612 unsigned char hdr_len; 625 * processed and posted back to hardware.
613 unsigned short bytes_received; 626 * Note that we don't really need one cri_wait_queue per async_entry.
627 * We need one cri_wait_queue per CRI. Its easier to manage if this
628 * is tagged along with the async_entry.
629 */
630struct hd_async_entry {
631 struct cri_wait_queue {
632 unsigned short hdr_len;
633 unsigned int bytes_received;
614 unsigned int bytes_needed; 634 unsigned int bytes_needed;
615 struct list_head list; 635 struct list_head list;
616 } wait_queue; 636 } wq;
617 637 /* handles posted to FW resides here */
618 struct list_head header_busy_list; 638 struct hd_async_handle *header;
619 struct list_head data_busy_list; 639 struct hd_async_handle *data;
620}; 640};
621 641
622struct hwi_async_pdu_context { 642struct hd_async_buf_context {
623 struct { 643 struct be_bus_address pa_base;
624 struct be_bus_address pa_base; 644 void *va_base;
625 void *va_base; 645 void *ring_base;
626 void *ring_base; 646 struct hd_async_handle *handle_base;
627 struct async_pdu_handle *handle_base; 647 u16 free_entries;
628 648 u32 buffer_size;
629 unsigned int host_write_ptr; 649 /**
630 unsigned int ep_read_ptr; 650 * Once iSCSI layer finishes processing an async PDU, the
631 unsigned int writables; 651 * handles used for the PDU are added to this list.
632 652 * They are posted back to FW in groups of 8.
633 unsigned int free_entries; 653 */
634 unsigned int busy_entries; 654 struct list_head free_list;
635 655};
636 struct list_head free_list;
637 } async_header;
638 656
639 struct { 657/**
640 struct be_bus_address pa_base; 658 * hd_async_context is declared for each ULP supporting iSCSI function.
641 void *va_base; 659 */
642 void *ring_base; 660struct hd_async_context {
643 struct async_pdu_handle *handle_base; 661 struct hd_async_buf_context async_header;
644 662 struct hd_async_buf_context async_data;
645 unsigned int host_write_ptr; 663 u16 num_entries;
646 unsigned int ep_read_ptr; 664 /**
647 unsigned int writables; 665 * When unsol PDU is in, it needs to be chained till all the bytes are
648 666 * received and then processing is done. hd_async_entry is created
649 unsigned int free_entries; 667 * based on the cid_count for each ULP. When unsol PDU comes in based
650 unsigned int busy_entries; 668 * on the conn_id it needs to be added to the correct async_entry wq.
651 struct list_head free_list; 669 * Below defined cid_to_async_cri_map is used to reterive the
652 } async_data; 670 * async_cri_map for a particular connection.
653 671 *
654 unsigned int buffer_size; 672 * This array is initialized after beiscsi_create_wrb_rings returns.
655 unsigned int num_entries; 673 *
674 * - this method takes more memory space, fixed to 2K
675 * - any support for connections greater than this the array size needs
676 * to be incremented
677 */
656#define BE_GET_ASYNC_CRI_FROM_CID(cid) (pasync_ctx->cid_to_async_cri_map[cid]) 678#define BE_GET_ASYNC_CRI_FROM_CID(cid) (pasync_ctx->cid_to_async_cri_map[cid])
657 unsigned short cid_to_async_cri_map[BE_MAX_SESSION]; 679 unsigned short cid_to_async_cri_map[BE_MAX_SESSION];
658 /** 680 /**
659 * This is a varying size list! Do not add anything 681 * This is a variable size array. Don`t add anything after this field!!
660 * after this entry!!
661 */ 682 */
662 struct hwi_async_entry *async_entry; 683 struct hd_async_entry *async_entry;
663}; 684};
664 685
665#define PDUCQE_CODE_MASK 0x0000003F
666#define PDUCQE_DPL_MASK 0xFFFF0000
667#define PDUCQE_INDEX_MASK 0x0000FFFF
668
669struct i_t_dpdu_cqe { 686struct i_t_dpdu_cqe {
670 u32 dw[4]; 687 u32 dw[4];
671} __packed; 688} __packed;
@@ -845,7 +862,6 @@ struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
845void 862void
846free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle); 863free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle);
847 864
848void beiscsi_process_all_cqs(struct work_struct *work);
849void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, 865void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
850 struct iscsi_task *task); 866 struct iscsi_task *task);
851 867
@@ -856,11 +872,6 @@ void hwi_ring_cq_db(struct beiscsi_hba *phba,
856unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget); 872unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget);
857void beiscsi_process_mcc_cq(struct beiscsi_hba *phba); 873void beiscsi_process_mcc_cq(struct beiscsi_hba *phba);
858 874
859static inline bool beiscsi_error(struct beiscsi_hba *phba)
860{
861 return phba->ue_detected || phba->fw_timeout;
862}
863
864struct pdu_nop_out { 875struct pdu_nop_out {
865 u32 dw[12]; 876 u32 dw[12];
866}; 877};
@@ -1067,11 +1078,18 @@ struct hwi_context_memory {
1067 struct be_queue_info be_cq[MAX_CPUS - 1]; 1078 struct be_queue_info be_cq[MAX_CPUS - 1];
1068 1079
1069 struct be_queue_info *be_wrbq; 1080 struct be_queue_info *be_wrbq;
1081 /**
1082 * Create array of ULP number for below entries as DEFQ
1083 * will be created for both ULP if iSCSI Protocol is
1084 * loaded on both ULP.
1085 */
1070 struct be_queue_info be_def_hdrq[BEISCSI_ULP_COUNT]; 1086 struct be_queue_info be_def_hdrq[BEISCSI_ULP_COUNT];
1071 struct be_queue_info be_def_dataq[BEISCSI_ULP_COUNT]; 1087 struct be_queue_info be_def_dataq[BEISCSI_ULP_COUNT];
1072 struct hwi_async_pdu_context *pasync_ctx[BEISCSI_ULP_COUNT]; 1088 struct hd_async_context *pasync_ctx[BEISCSI_ULP_COUNT];
1073}; 1089};
1074 1090
1091void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle);
1092
1075/* Logging related definitions */ 1093/* Logging related definitions */
1076#define BEISCSI_LOG_INIT 0x0001 /* Initialization events */ 1094#define BEISCSI_LOG_INIT 0x0001 /* Initialization events */
1077#define BEISCSI_LOG_MBOX 0x0002 /* Mailbox Events */ 1095#define BEISCSI_LOG_MBOX 0x0002 /* Mailbox Events */
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 83926e221f1e..aebc4ddb3060 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2015 Emulex 2 * Copyright (C) 2005 - 2016 Broadcom
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -7,10 +7,10 @@
7 * as published by the Free Software Foundation. The full GNU General 7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * linux-drivers@avagotech.com 13 * linux-drivers@broadcom.com
14 * 14 *
15 * Emulex 15 * Emulex
16 * 3333 Susan Street 16 * 3333 Susan Street
@@ -24,139 +24,9 @@
24#include "be_iscsi.h" 24#include "be_iscsi.h"
25#include "be_main.h" 25#include "be_main.h"
26 26
27/* UE Status Low CSR */ 27int beiscsi_modify_eq_delay(struct beiscsi_hba *phba,
28static const char * const desc_ue_status_low[] = { 28 struct be_set_eqd *set_eqd,
29 "CEV", 29 int num)
30 "CTX",
31 "DBUF",
32 "ERX",
33 "Host",
34 "MPU",
35 "NDMA",
36 "PTC ",
37 "RDMA ",
38 "RXF ",
39 "RXIPS ",
40 "RXULP0 ",
41 "RXULP1 ",
42 "RXULP2 ",
43 "TIM ",
44 "TPOST ",
45 "TPRE ",
46 "TXIPS ",
47 "TXULP0 ",
48 "TXULP1 ",
49 "UC ",
50 "WDMA ",
51 "TXULP2 ",
52 "HOST1 ",
53 "P0_OB_LINK ",
54 "P1_OB_LINK ",
55 "HOST_GPIO ",
56 "MBOX ",
57 "AXGMAC0",
58 "AXGMAC1",
59 "JTAG",
60 "MPU_INTPEND"
61};
62
63/* UE Status High CSR */
64static const char * const desc_ue_status_hi[] = {
65 "LPCMEMHOST",
66 "MGMT_MAC",
67 "PCS0ONLINE",
68 "MPU_IRAM",
69 "PCS1ONLINE",
70 "PCTL0",
71 "PCTL1",
72 "PMEM",
73 "RR",
74 "TXPB",
75 "RXPP",
76 "XAUI",
77 "TXP",
78 "ARM",
79 "IPC",
80 "HOST2",
81 "HOST3",
82 "HOST4",
83 "HOST5",
84 "HOST6",
85 "HOST7",
86 "HOST8",
87 "HOST9",
88 "NETC",
89 "Unknown",
90 "Unknown",
91 "Unknown",
92 "Unknown",
93 "Unknown",
94 "Unknown",
95 "Unknown",
96 "Unknown"
97};
98
99/*
100 * beiscsi_ue_detec()- Detect Unrecoverable Error on adapter
101 * @phba: Driver priv structure
102 *
103 * Read registers linked to UE and check for the UE status
104 **/
105void beiscsi_ue_detect(struct beiscsi_hba *phba)
106{
107 uint32_t ue_hi = 0, ue_lo = 0;
108 uint32_t ue_mask_hi = 0, ue_mask_lo = 0;
109 uint8_t i = 0;
110
111 if (phba->ue_detected)
112 return;
113
114 pci_read_config_dword(phba->pcidev,
115 PCICFG_UE_STATUS_LOW, &ue_lo);
116 pci_read_config_dword(phba->pcidev,
117 PCICFG_UE_STATUS_MASK_LOW,
118 &ue_mask_lo);
119 pci_read_config_dword(phba->pcidev,
120 PCICFG_UE_STATUS_HIGH,
121 &ue_hi);
122 pci_read_config_dword(phba->pcidev,
123 PCICFG_UE_STATUS_MASK_HI,
124 &ue_mask_hi);
125
126 ue_lo = (ue_lo & ~ue_mask_lo);
127 ue_hi = (ue_hi & ~ue_mask_hi);
128
129
130 if (ue_lo || ue_hi) {
131 phba->ue_detected = true;
132 beiscsi_log(phba, KERN_ERR,
133 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
134 "BG_%d : Error detected on the adapter\n");
135 }
136
137 if (ue_lo) {
138 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
139 if (ue_lo & 1)
140 beiscsi_log(phba, KERN_ERR,
141 BEISCSI_LOG_CONFIG,
142 "BG_%d : UE_LOW %s bit set\n",
143 desc_ue_status_low[i]);
144 }
145 }
146
147 if (ue_hi) {
148 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
149 if (ue_hi & 1)
150 beiscsi_log(phba, KERN_ERR,
151 BEISCSI_LOG_CONFIG,
152 "BG_%d : UE_HIGH %s bit set\n",
153 desc_ue_status_hi[i]);
154 }
155 }
156}
157
158int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
159 struct be_set_eqd *set_eqd, int num)
160{ 30{
161 struct be_ctrl_info *ctrl = &phba->ctrl; 31 struct be_ctrl_info *ctrl = &phba->ctrl;
162 struct be_mcc_wrb *wrb; 32 struct be_mcc_wrb *wrb;
@@ -174,7 +44,7 @@ int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
174 req = embedded_payload(wrb); 44 req = embedded_payload(wrb);
175 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); 45 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
176 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 46 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
177 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req)); 47 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
178 48
179 req->num_eq = cpu_to_le32(num); 49 req->num_eq = cpu_to_le32(num);
180 for (i = 0; i < num; i++) { 50 for (i = 0; i < num; i++) {
@@ -184,386 +54,13 @@ int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
184 cpu_to_le32(set_eqd[i].delay_multiplier); 54 cpu_to_le32(set_eqd[i].delay_multiplier);
185 } 55 }
186 56
57 /* ignore the completion of this mbox command */
58 set_bit(MCC_TAG_STATE_IGNORE, &ctrl->ptag_state[tag].tag_state);
187 be_mcc_notify(phba, tag); 59 be_mcc_notify(phba, tag);
188 mutex_unlock(&ctrl->mbox_lock); 60 mutex_unlock(&ctrl->mbox_lock);
189 return tag; 61 return tag;
190} 62}
191 63
192/**
193 * mgmt_reopen_session()- Reopen a session based on reopen_type
194 * @phba: Device priv structure instance
195 * @reopen_type: Type of reopen_session FW should do.
196 * @sess_handle: Session Handle of the session to be re-opened
197 *
198 * return
199 * the TAG used for MBOX Command
200 *
201 **/
202unsigned int mgmt_reopen_session(struct beiscsi_hba *phba,
203 unsigned int reopen_type,
204 unsigned int sess_handle)
205{
206 struct be_ctrl_info *ctrl = &phba->ctrl;
207 struct be_mcc_wrb *wrb;
208 struct be_cmd_reopen_session_req *req;
209 unsigned int tag;
210
211 beiscsi_log(phba, KERN_INFO,
212 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
213 "BG_%d : In bescsi_get_boot_target\n");
214
215 mutex_lock(&ctrl->mbox_lock);
216 wrb = alloc_mcc_wrb(phba, &tag);
217 if (!wrb) {
218 mutex_unlock(&ctrl->mbox_lock);
219 return 0;
220 }
221
222 req = embedded_payload(wrb);
223 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
224 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
225 OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS,
226 sizeof(struct be_cmd_reopen_session_resp));
227
228 /* set the reopen_type,sess_handle */
229 req->reopen_type = reopen_type;
230 req->session_handle = sess_handle;
231
232 be_mcc_notify(phba, tag);
233 mutex_unlock(&ctrl->mbox_lock);
234 return tag;
235}
236
237unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba)
238{
239 struct be_ctrl_info *ctrl = &phba->ctrl;
240 struct be_mcc_wrb *wrb;
241 struct be_cmd_get_boot_target_req *req;
242 unsigned int tag;
243
244 beiscsi_log(phba, KERN_INFO,
245 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
246 "BG_%d : In bescsi_get_boot_target\n");
247
248 mutex_lock(&ctrl->mbox_lock);
249 wrb = alloc_mcc_wrb(phba, &tag);
250 if (!wrb) {
251 mutex_unlock(&ctrl->mbox_lock);
252 return 0;
253 }
254
255 req = embedded_payload(wrb);
256 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
257 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
258 OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET,
259 sizeof(struct be_cmd_get_boot_target_resp));
260
261 be_mcc_notify(phba, tag);
262 mutex_unlock(&ctrl->mbox_lock);
263 return tag;
264}
265
266unsigned int mgmt_get_session_info(struct beiscsi_hba *phba,
267 u32 boot_session_handle,
268 struct be_dma_mem *nonemb_cmd)
269{
270 struct be_ctrl_info *ctrl = &phba->ctrl;
271 struct be_mcc_wrb *wrb;
272 unsigned int tag;
273 struct be_cmd_get_session_req *req;
274 struct be_cmd_get_session_resp *resp;
275 struct be_sge *sge;
276
277 beiscsi_log(phba, KERN_INFO,
278 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
279 "BG_%d : In beiscsi_get_session_info\n");
280
281 mutex_lock(&ctrl->mbox_lock);
282 wrb = alloc_mcc_wrb(phba, &tag);
283 if (!wrb) {
284 mutex_unlock(&ctrl->mbox_lock);
285 return 0;
286 }
287
288 nonemb_cmd->size = sizeof(*resp);
289 req = nonemb_cmd->va;
290 memset(req, 0, sizeof(*req));
291 sge = nonembedded_sgl(wrb);
292 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
293 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
294 OPCODE_ISCSI_INI_SESSION_GET_A_SESSION,
295 sizeof(*resp));
296 req->session_handle = boot_session_handle;
297 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
298 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
299 sge->len = cpu_to_le32(nonemb_cmd->size);
300
301 be_mcc_notify(phba, tag);
302 mutex_unlock(&ctrl->mbox_lock);
303 return tag;
304}
305
306/**
307 * mgmt_get_port_name()- Get port name for the function
308 * @ctrl: ptr to Ctrl Info
309 * @phba: ptr to the dev priv structure
310 *
311 * Get the alphanumeric character for port
312 *
313 **/
314int mgmt_get_port_name(struct be_ctrl_info *ctrl,
315 struct beiscsi_hba *phba)
316{
317 int ret = 0;
318 struct be_mcc_wrb *wrb;
319 struct be_cmd_get_port_name *ioctl;
320
321 mutex_lock(&ctrl->mbox_lock);
322 wrb = wrb_from_mbox(&ctrl->mbox_mem);
323 memset(wrb, 0, sizeof(*wrb));
324 ioctl = embedded_payload(wrb);
325
326 be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
327 be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
328 OPCODE_COMMON_GET_PORT_NAME,
329 EMBED_MBX_MAX_PAYLOAD_SIZE);
330 ret = be_mbox_notify(ctrl);
331 phba->port_name = 0;
332 if (!ret) {
333 phba->port_name = ioctl->p.resp.port_names >>
334 (phba->fw_config.phys_port * 8) & 0xff;
335 } else {
336 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
337 "BG_%d : GET_PORT_NAME ret 0x%x status 0x%x\n",
338 ret, ioctl->h.resp_hdr.status);
339 }
340
341 if (phba->port_name == 0)
342 phba->port_name = '?';
343
344 mutex_unlock(&ctrl->mbox_lock);
345 return ret;
346}
347
348/**
349 * mgmt_get_fw_config()- Get the FW config for the function
350 * @ctrl: ptr to Ctrl Info
351 * @phba: ptr to the dev priv structure
352 *
353 * Get the FW config and resources available for the function.
354 * The resources are created based on the count received here.
355 *
356 * return
357 * Success: 0
358 * Failure: Non-Zero Value
359 **/
360int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
361 struct beiscsi_hba *phba)
362{
363 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
364 struct be_fw_cfg *pfw_cfg = embedded_payload(wrb);
365 uint32_t cid_count, icd_count;
366 int status = -EINVAL;
367 uint8_t ulp_num = 0;
368
369 mutex_lock(&ctrl->mbox_lock);
370 memset(wrb, 0, sizeof(*wrb));
371 be_wrb_hdr_prepare(wrb, sizeof(*pfw_cfg), true, 0);
372
373 be_cmd_hdr_prepare(&pfw_cfg->hdr, CMD_SUBSYSTEM_COMMON,
374 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
375 EMBED_MBX_MAX_PAYLOAD_SIZE);
376
377 if (be_mbox_notify(ctrl)) {
378 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
379 "BG_%d : Failed in mgmt_get_fw_config\n");
380 goto fail_init;
381 }
382
383 /* FW response formats depend on port id */
384 phba->fw_config.phys_port = pfw_cfg->phys_port;
385 if (phba->fw_config.phys_port >= BEISCSI_PHYS_PORT_MAX) {
386 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
387 "BG_%d : invalid physical port id %d\n",
388 phba->fw_config.phys_port);
389 goto fail_init;
390 }
391
392 /* populate and check FW config against min and max values */
393 if (!is_chip_be2_be3r(phba)) {
394 phba->fw_config.eqid_count = pfw_cfg->eqid_count;
395 phba->fw_config.cqid_count = pfw_cfg->cqid_count;
396 if (phba->fw_config.eqid_count == 0 ||
397 phba->fw_config.eqid_count > 2048) {
398 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
399 "BG_%d : invalid EQ count %d\n",
400 phba->fw_config.eqid_count);
401 goto fail_init;
402 }
403 if (phba->fw_config.cqid_count == 0 ||
404 phba->fw_config.cqid_count > 4096) {
405 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
406 "BG_%d : invalid CQ count %d\n",
407 phba->fw_config.cqid_count);
408 goto fail_init;
409 }
410 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
411 "BG_%d : EQ_Count : %d CQ_Count : %d\n",
412 phba->fw_config.eqid_count,
413 phba->fw_config.cqid_count);
414 }
415
416 /**
417 * Check on which all ULP iSCSI Protocol is loaded.
418 * Set the Bit for those ULP. This set flag is used
419 * at all places in the code to check on which ULP
420 * iSCSi Protocol is loaded
421 **/
422 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
423 if (pfw_cfg->ulp[ulp_num].ulp_mode &
424 BEISCSI_ULP_ISCSI_INI_MODE) {
425 set_bit(ulp_num, &phba->fw_config.ulp_supported);
426
427 /* Get the CID, ICD and Chain count for each ULP */
428 phba->fw_config.iscsi_cid_start[ulp_num] =
429 pfw_cfg->ulp[ulp_num].sq_base;
430 phba->fw_config.iscsi_cid_count[ulp_num] =
431 pfw_cfg->ulp[ulp_num].sq_count;
432
433 phba->fw_config.iscsi_icd_start[ulp_num] =
434 pfw_cfg->ulp[ulp_num].icd_base;
435 phba->fw_config.iscsi_icd_count[ulp_num] =
436 pfw_cfg->ulp[ulp_num].icd_count;
437
438 phba->fw_config.iscsi_chain_start[ulp_num] =
439 pfw_cfg->chain_icd[ulp_num].chain_base;
440 phba->fw_config.iscsi_chain_count[ulp_num] =
441 pfw_cfg->chain_icd[ulp_num].chain_count;
442
443 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
444 "BG_%d : Function loaded on ULP : %d\n"
445 "\tiscsi_cid_count : %d\n"
446 "\tiscsi_cid_start : %d\n"
447 "\t iscsi_icd_count : %d\n"
448 "\t iscsi_icd_start : %d\n",
449 ulp_num,
450 phba->fw_config.
451 iscsi_cid_count[ulp_num],
452 phba->fw_config.
453 iscsi_cid_start[ulp_num],
454 phba->fw_config.
455 iscsi_icd_count[ulp_num],
456 phba->fw_config.
457 iscsi_icd_start[ulp_num]);
458 }
459 }
460
461 if (phba->fw_config.ulp_supported == 0) {
462 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
463 "BG_%d : iSCSI initiator mode not set: ULP0 %x ULP1 %x\n",
464 pfw_cfg->ulp[BEISCSI_ULP0].ulp_mode,
465 pfw_cfg->ulp[BEISCSI_ULP1].ulp_mode);
466 goto fail_init;
467 }
468
469 /**
470 * ICD is shared among ULPs. Use icd_count of any one loaded ULP
471 **/
472 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
473 if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
474 break;
475 icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
476 if (icd_count == 0 || icd_count > 65536) {
477 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
478 "BG_%d: invalid ICD count %d\n", icd_count);
479 goto fail_init;
480 }
481
482 cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
483 BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
484 if (cid_count == 0 || cid_count > 4096) {
485 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
486 "BG_%d: invalid CID count %d\n", cid_count);
487 goto fail_init;
488 }
489
490 /**
491 * Check FW is dual ULP aware i.e. can handle either
492 * of the protocols.
493 */
494 phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
495 BEISCSI_FUNC_DUA_MODE);
496
497 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
498 "BG_%d : DUA Mode : 0x%x\n",
499 phba->fw_config.dual_ulp_aware);
500
501 /* all set, continue using this FW config */
502 status = 0;
503fail_init:
504 mutex_unlock(&ctrl->mbox_lock);
505 return status;
506}
507
508int mgmt_check_supported_fw(struct be_ctrl_info *ctrl,
509 struct beiscsi_hba *phba)
510{
511 struct be_dma_mem nonemb_cmd;
512 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
513 struct be_mgmt_controller_attributes *req;
514 struct be_sge *sge = nonembedded_sgl(wrb);
515 int status = 0;
516
517 nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
518 sizeof(struct be_mgmt_controller_attributes),
519 &nonemb_cmd.dma);
520 if (nonemb_cmd.va == NULL) {
521 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
522 "BG_%d : Failed to allocate memory for "
523 "mgmt_check_supported_fw\n");
524 return -ENOMEM;
525 }
526 nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
527 req = nonemb_cmd.va;
528 memset(req, 0, sizeof(*req));
529 mutex_lock(&ctrl->mbox_lock);
530 memset(wrb, 0, sizeof(*wrb));
531 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
532 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
533 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req));
534 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
535 sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
536 sge->len = cpu_to_le32(nonemb_cmd.size);
537 status = be_mbox_notify(ctrl);
538 if (!status) {
539 struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
540 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
541 "BG_%d : Firmware Version of CMD : %s\n"
542 "Firmware Version is : %s\n"
543 "Developer Build, not performing version check...\n",
544 resp->params.hba_attribs
545 .flashrom_version_string,
546 resp->params.hba_attribs.
547 firmware_version_string);
548
549 phba->fw_config.iscsi_features =
550 resp->params.hba_attribs.iscsi_features;
551 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
552 "BM_%d : phba->fw_config.iscsi_features = %d\n",
553 phba->fw_config.iscsi_features);
554 memcpy(phba->fw_ver_str, resp->params.hba_attribs.
555 firmware_version_string, BEISCSI_VER_STRLEN);
556 } else
557 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
558 "BG_%d : Failed in mgmt_check_supported_fw\n");
559 mutex_unlock(&ctrl->mbox_lock);
560 if (nonemb_cmd.va)
561 pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
562 nonemb_cmd.va, nonemb_cmd.dma);
563
564 return status;
565}
566
567unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl, 64unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
568 struct beiscsi_hba *phba, 65 struct beiscsi_hba *phba,
569 struct bsg_job *job, 66 struct bsg_job *job,
@@ -609,7 +106,7 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
609 bsg_req->rqst_data.h_vendor.vendor_cmd[0]); 106 bsg_req->rqst_data.h_vendor.vendor_cmd[0]);
610 107
611 mutex_unlock(&ctrl->mbox_lock); 108 mutex_unlock(&ctrl->mbox_lock);
612 return -ENOSYS; 109 return -EPERM;
613 } 110 }
614 111
615 wrb = alloc_mcc_wrb(phba, &tag); 112 wrb = alloc_mcc_wrb(phba, &tag);
@@ -631,48 +128,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
631 return tag; 128 return tag;
632} 129}
633 130
634/**
635 * mgmt_epfw_cleanup()- Inform FW to cleanup data structures.
636 * @phba: pointer to dev priv structure
637 * @ulp_num: ULP number.
638 *
639 * return
640 * Success: 0
641 * Failure: Non-Zero Value
642 **/
643int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num)
644{
645 struct be_ctrl_info *ctrl = &phba->ctrl;
646 struct be_mcc_wrb *wrb;
647 struct iscsi_cleanup_req *req;
648 unsigned int tag;
649 int status;
650
651 mutex_lock(&ctrl->mbox_lock);
652 wrb = alloc_mcc_wrb(phba, &tag);
653 if (!wrb) {
654 mutex_unlock(&ctrl->mbox_lock);
655 return -EBUSY;
656 }
657
658 req = embedded_payload(wrb);
659 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
660 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
661 OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
662
663 req->chute = (1 << ulp_num);
664 req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba, ulp_num));
665 req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba, ulp_num));
666
667 be_mcc_notify(phba, tag);
668 status = be_mcc_compl_poll(phba, tag);
669 if (status)
670 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
671 "BG_%d : mgmt_epfw_cleanup , FAILED\n");
672 mutex_unlock(&ctrl->mbox_lock);
673 return status;
674}
675
676unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba, 131unsigned int mgmt_invalidate_icds(struct beiscsi_hba *phba,
677 struct invalidate_command_table *inv_tbl, 132 struct invalidate_command_table *inv_tbl,
678 unsigned int num_invalidate, unsigned int cid, 133 unsigned int num_invalidate, unsigned int cid,
@@ -844,7 +299,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
844 nonemb_cmd->size); 299 nonemb_cmd->size);
845 if (dst_addr->sa_family == PF_INET) { 300 if (dst_addr->sa_family == PF_INET) {
846 __be32 s_addr = daddr_in->sin_addr.s_addr; 301 __be32 s_addr = daddr_in->sin_addr.s_addr;
847 req->ip_address.ip_type = BE2_IPV4; 302 req->ip_address.ip_type = BEISCSI_IP_TYPE_V4;
848 req->ip_address.addr[0] = s_addr & 0x000000ff; 303 req->ip_address.addr[0] = s_addr & 0x000000ff;
849 req->ip_address.addr[1] = (s_addr & 0x0000ff00) >> 8; 304 req->ip_address.addr[1] = (s_addr & 0x0000ff00) >> 8;
850 req->ip_address.addr[2] = (s_addr & 0x00ff0000) >> 16; 305 req->ip_address.addr[2] = (s_addr & 0x00ff0000) >> 16;
@@ -852,17 +307,17 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
852 req->tcp_port = ntohs(daddr_in->sin_port); 307 req->tcp_port = ntohs(daddr_in->sin_port);
853 beiscsi_ep->dst_addr = daddr_in->sin_addr.s_addr; 308 beiscsi_ep->dst_addr = daddr_in->sin_addr.s_addr;
854 beiscsi_ep->dst_tcpport = ntohs(daddr_in->sin_port); 309 beiscsi_ep->dst_tcpport = ntohs(daddr_in->sin_port);
855 beiscsi_ep->ip_type = BE2_IPV4; 310 beiscsi_ep->ip_type = BEISCSI_IP_TYPE_V4;
856 } else { 311 } else {
857 /* else its PF_INET6 family */ 312 /* else its PF_INET6 family */
858 req->ip_address.ip_type = BE2_IPV6; 313 req->ip_address.ip_type = BEISCSI_IP_TYPE_V6;
859 memcpy(&req->ip_address.addr, 314 memcpy(&req->ip_address.addr,
860 &daddr_in6->sin6_addr.in6_u.u6_addr8, 16); 315 &daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
861 req->tcp_port = ntohs(daddr_in6->sin6_port); 316 req->tcp_port = ntohs(daddr_in6->sin6_port);
862 beiscsi_ep->dst_tcpport = ntohs(daddr_in6->sin6_port); 317 beiscsi_ep->dst_tcpport = ntohs(daddr_in6->sin6_port);
863 memcpy(&beiscsi_ep->dst6_addr, 318 memcpy(&beiscsi_ep->dst6_addr,
864 &daddr_in6->sin6_addr.in6_u.u6_addr8, 16); 319 &daddr_in6->sin6_addr.in6_u.u6_addr8, 16);
865 beiscsi_ep->ip_type = BE2_IPV6; 320 beiscsi_ep->ip_type = BEISCSI_IP_TYPE_V6;
866 } 321 }
867 req->cid = cid; 322 req->cid = cid;
868 i = phba->nxt_cqid++; 323 i = phba->nxt_cqid++;
@@ -883,7 +338,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
883 338
884 if (!is_chip_be2_be3r(phba)) { 339 if (!is_chip_be2_be3r(phba)) {
885 req->hdr.version = MBX_CMD_VER1; 340 req->hdr.version = MBX_CMD_VER1;
886 req->tcp_window_size = 0; 341 req->tcp_window_size = 0x8000;
887 req->tcp_window_scale_count = 2; 342 req->tcp_window_scale_count = 2;
888 } 343 }
889 344
@@ -892,44 +347,6 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
892 return tag; 347 return tag;
893} 348}
894 349
895unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba)
896{
897 struct be_ctrl_info *ctrl = &phba->ctrl;
898 struct be_mcc_wrb *wrb;
899 struct be_cmd_get_all_if_id_req *req;
900 struct be_cmd_get_all_if_id_req *pbe_allid;
901 unsigned int tag;
902 int status = 0;
903
904 if (mutex_lock_interruptible(&ctrl->mbox_lock))
905 return -EINTR;
906 wrb = alloc_mcc_wrb(phba, &tag);
907 if (!wrb) {
908 mutex_unlock(&ctrl->mbox_lock);
909 return -ENOMEM;
910 }
911
912 req = embedded_payload(wrb);
913 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
914 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
915 OPCODE_COMMON_ISCSI_NTWK_GET_ALL_IF_ID,
916 sizeof(*req));
917 be_mcc_notify(phba, tag);
918 mutex_unlock(&ctrl->mbox_lock);
919
920 status = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
921 if (status) {
922 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
923 "BG_%d : Failed in mgmt_get_all_if_id\n");
924 return -EBUSY;
925 }
926
927 pbe_allid = embedded_payload(wrb);
928 phba->interface_handle = pbe_allid->if_hndl_list[0];
929
930 return status;
931}
932
933/* 350/*
934 * mgmt_exec_nonemb_cmd()- Execute Non Embedded MBX Cmd 351 * mgmt_exec_nonemb_cmd()- Execute Non Embedded MBX Cmd
935 * @phba: Driver priv structure 352 * @phba: Driver priv structure
@@ -1001,72 +418,68 @@ static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
1001 } 418 }
1002 cmd->size = size; 419 cmd->size = size;
1003 be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size); 420 be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size);
421 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
422 "BG_%d : subsystem iSCSI cmd %d size %d\n",
423 iscsi_cmd, size);
1004 return 0; 424 return 0;
1005} 425}
1006 426
1007static int 427unsigned int beiscsi_if_get_handle(struct beiscsi_hba *phba)
1008mgmt_static_ip_modify(struct beiscsi_hba *phba,
1009 struct be_cmd_get_if_info_resp *if_info,
1010 struct iscsi_iface_param_info *ip_param,
1011 struct iscsi_iface_param_info *subnet_param,
1012 uint32_t ip_action)
1013{ 428{
1014 struct be_cmd_set_ip_addr_req *req; 429 struct be_ctrl_info *ctrl = &phba->ctrl;
1015 struct be_dma_mem nonemb_cmd; 430 struct be_mcc_wrb *wrb;
1016 uint32_t ip_type; 431 struct be_cmd_get_all_if_id_req *req;
1017 int rc; 432 struct be_cmd_get_all_if_id_req *pbe_allid;
433 unsigned int tag;
434 int status = 0;
1018 435
1019 rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, 436 if (mutex_lock_interruptible(&ctrl->mbox_lock))
1020 OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR, 437 return -EINTR;
1021 sizeof(*req)); 438 wrb = alloc_mcc_wrb(phba, &tag);
1022 if (rc) 439 if (!wrb) {
1023 return rc; 440 mutex_unlock(&ctrl->mbox_lock);
441 return -ENOMEM;
442 }
1024 443
1025 ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ? 444 req = embedded_payload(wrb);
1026 BE2_IPV6 : BE2_IPV4 ; 445 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
446 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
447 OPCODE_COMMON_ISCSI_NTWK_GET_ALL_IF_ID,
448 sizeof(*req));
449 be_mcc_notify(phba, tag);
450 mutex_unlock(&ctrl->mbox_lock);
1027 451
1028 req = nonemb_cmd.va; 452 status = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
1029 req->ip_params.record_entry_count = 1; 453 if (status) {
1030 req->ip_params.ip_record.action = ip_action; 454 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
1031 req->ip_params.ip_record.interface_hndl = 455 "BG_%d : %s failed: %d\n", __func__, status);
1032 phba->interface_handle; 456 return -EBUSY;
1033 req->ip_params.ip_record.ip_addr.size_of_structure = 457 }
1034 sizeof(struct be_ip_addr_subnet_format);
1035 req->ip_params.ip_record.ip_addr.ip_type = ip_type;
1036 458
1037 if (ip_action == IP_ACTION_ADD) { 459 pbe_allid = embedded_payload(wrb);
1038 memcpy(req->ip_params.ip_record.ip_addr.addr, ip_param->value, 460 /* we now support only one interface per function */
1039 sizeof(req->ip_params.ip_record.ip_addr.addr)); 461 phba->interface_handle = pbe_allid->if_hndl_list[0];
1040 462
1041 if (subnet_param) 463 return status;
1042 memcpy(req->ip_params.ip_record.ip_addr.subnet_mask, 464}
1043 subnet_param->value,
1044 sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
1045 } else {
1046 memcpy(req->ip_params.ip_record.ip_addr.addr,
1047 if_info->ip_addr.addr,
1048 sizeof(req->ip_params.ip_record.ip_addr.addr));
1049 465
1050 memcpy(req->ip_params.ip_record.ip_addr.subnet_mask, 466static inline bool beiscsi_if_zero_ip(u8 *ip, u32 ip_type)
1051 if_info->ip_addr.subnet_mask, 467{
1052 sizeof(req->ip_params.ip_record.ip_addr.subnet_mask)); 468 u32 len;
1053 }
1054 469
1055 rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); 470 len = (ip_type < BEISCSI_IP_TYPE_V6) ? IP_V4_LEN : IP_V6_LEN;
1056 if (rc < 0) 471 while (len && !ip[len - 1])
1057 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, 472 len--;
1058 "BG_%d : Failed to Modify existing IP Address\n"); 473 return (len == 0);
1059 return rc;
1060} 474}
1061 475
1062static int mgmt_modify_gateway(struct beiscsi_hba *phba, uint8_t *gt_addr, 476static int beiscsi_if_mod_gw(struct beiscsi_hba *phba,
1063 uint32_t gtway_action, uint32_t param_len) 477 u32 action, u32 ip_type, u8 *gw)
1064{ 478{
1065 struct be_cmd_set_def_gateway_req *req; 479 struct be_cmd_set_def_gateway_req *req;
1066 struct be_dma_mem nonemb_cmd; 480 struct be_dma_mem nonemb_cmd;
1067 int rt_val; 481 int rt_val;
1068 482
1069
1070 rt_val = mgmt_alloc_cmd_data(phba, &nonemb_cmd, 483 rt_val = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
1071 OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY, 484 OPCODE_COMMON_ISCSI_NTWK_MODIFY_DEFAULT_GATEWAY,
1072 sizeof(*req)); 485 sizeof(*req));
@@ -1074,200 +487,300 @@ static int mgmt_modify_gateway(struct beiscsi_hba *phba, uint8_t *gt_addr,
1074 return rt_val; 487 return rt_val;
1075 488
1076 req = nonemb_cmd.va; 489 req = nonemb_cmd.va;
1077 req->action = gtway_action; 490 req->action = action;
1078 req->ip_addr.ip_type = BE2_IPV4; 491 req->ip_addr.ip_type = ip_type;
492 memcpy(req->ip_addr.addr, gw,
493 (ip_type < BEISCSI_IP_TYPE_V6) ? IP_V4_LEN : IP_V6_LEN);
494 return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
495}
1079 496
1080 memcpy(req->ip_addr.addr, gt_addr, sizeof(req->ip_addr.addr)); 497int beiscsi_if_set_gw(struct beiscsi_hba *phba, u32 ip_type, u8 *gw)
498{
499 struct be_cmd_get_def_gateway_resp gw_resp;
500 int rt_val;
1081 501
1082 return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); 502 memset(&gw_resp, 0, sizeof(gw_resp));
503 rt_val = beiscsi_if_get_gw(phba, ip_type, &gw_resp);
504 if (rt_val) {
505 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
506 "BG_%d : Failed to Get Gateway Addr\n");
507 return rt_val;
508 }
509
510 if (!beiscsi_if_zero_ip(gw_resp.ip_addr.addr, ip_type)) {
511 rt_val = beiscsi_if_mod_gw(phba, IP_ACTION_DEL, ip_type,
512 gw_resp.ip_addr.addr);
513 if (rt_val) {
514 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
515 "BG_%d : Failed to clear Gateway Addr Set\n");
516 return rt_val;
517 }
518 }
519
520 rt_val = beiscsi_if_mod_gw(phba, IP_ACTION_ADD, ip_type, gw);
521 if (rt_val)
522 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
523 "BG_%d : Failed to Set Gateway Addr\n");
524
525 return rt_val;
1083} 526}
1084 527
1085int mgmt_set_ip(struct beiscsi_hba *phba, 528int beiscsi_if_get_gw(struct beiscsi_hba *phba, u32 ip_type,
1086 struct iscsi_iface_param_info *ip_param, 529 struct be_cmd_get_def_gateway_resp *resp)
1087 struct iscsi_iface_param_info *subnet_param,
1088 uint32_t boot_proto)
1089{ 530{
1090 struct be_cmd_get_def_gateway_resp gtway_addr_set; 531 struct be_cmd_get_def_gateway_req *req;
1091 struct be_cmd_get_if_info_resp *if_info;
1092 struct be_cmd_set_dhcp_req *dhcpreq;
1093 struct be_cmd_rel_dhcp_req *reldhcp;
1094 struct be_dma_mem nonemb_cmd; 532 struct be_dma_mem nonemb_cmd;
1095 uint8_t *gtway_addr;
1096 uint32_t ip_type;
1097 int rc; 533 int rc;
1098 534
1099 rc = mgmt_get_all_if_id(phba); 535 rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
536 OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY,
537 sizeof(*resp));
1100 if (rc) 538 if (rc)
1101 return rc; 539 return rc;
1102 540
1103 ip_type = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ? 541 req = nonemb_cmd.va;
1104 BE2_IPV6 : BE2_IPV4 ; 542 req->ip_type = ip_type;
543
544 return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, resp,
545 sizeof(*resp));
546}
547
548static int
549beiscsi_if_clr_ip(struct beiscsi_hba *phba,
550 struct be_cmd_get_if_info_resp *if_info)
551{
552 struct be_cmd_set_ip_addr_req *req;
553 struct be_dma_mem nonemb_cmd;
554 int rc;
1105 555
1106 rc = mgmt_get_if_info(phba, ip_type, &if_info); 556 rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
557 OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR,
558 sizeof(*req));
1107 if (rc) 559 if (rc)
1108 return rc; 560 return rc;
1109 561
1110 if (boot_proto == ISCSI_BOOTPROTO_DHCP) { 562 req = nonemb_cmd.va;
1111 if (if_info->dhcp_state) { 563 req->ip_params.record_entry_count = 1;
1112 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, 564 req->ip_params.ip_record.action = IP_ACTION_DEL;
1113 "BG_%d : DHCP Already Enabled\n"); 565 req->ip_params.ip_record.interface_hndl =
1114 goto exit; 566 phba->interface_handle;
1115 } 567 req->ip_params.ip_record.ip_addr.size_of_structure =
1116 /* The ip_param->len is 1 in DHCP case. Setting 568 sizeof(struct be_ip_addr_subnet_format);
1117 proper IP len as this it is used while 569 req->ip_params.ip_record.ip_addr.ip_type = if_info->ip_addr.ip_type;
1118 freeing the Static IP. 570 memcpy(req->ip_params.ip_record.ip_addr.addr,
1119 */ 571 if_info->ip_addr.addr,
1120 ip_param->len = (ip_param->param == ISCSI_NET_PARAM_IPV6_ADDR) ? 572 sizeof(if_info->ip_addr.addr));
1121 IP_V6_LEN : IP_V4_LEN; 573 memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
1122 574 if_info->ip_addr.subnet_mask,
1123 } else { 575 sizeof(if_info->ip_addr.subnet_mask));
1124 if (if_info->dhcp_state) { 576 rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
577 if (rc < 0 || req->ip_params.ip_record.status) {
578 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
579 "BG_%d : failed to clear IP: rc %d status %d\n",
580 rc, req->ip_params.ip_record.status);
581 }
582 return rc;
583}
1125 584
1126 memset(if_info, 0, sizeof(*if_info)); 585static int
1127 rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, 586beiscsi_if_set_ip(struct beiscsi_hba *phba, u8 *ip,
1128 OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR, 587 u8 *subnet, u32 ip_type)
1129 sizeof(*reldhcp)); 588{
589 struct be_cmd_set_ip_addr_req *req;
590 struct be_dma_mem nonemb_cmd;
591 uint32_t ip_len;
592 int rc;
1130 593
1131 if (rc) 594 rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
1132 goto exit; 595 OPCODE_COMMON_ISCSI_NTWK_MODIFY_IP_ADDR,
596 sizeof(*req));
597 if (rc)
598 return rc;
1133 599
1134 reldhcp = nonemb_cmd.va; 600 req = nonemb_cmd.va;
1135 reldhcp->interface_hndl = phba->interface_handle; 601 req->ip_params.record_entry_count = 1;
1136 reldhcp->ip_type = ip_type; 602 req->ip_params.ip_record.action = IP_ACTION_ADD;
603 req->ip_params.ip_record.interface_hndl =
604 phba->interface_handle;
605 req->ip_params.ip_record.ip_addr.size_of_structure =
606 sizeof(struct be_ip_addr_subnet_format);
607 req->ip_params.ip_record.ip_addr.ip_type = ip_type;
608 ip_len = (ip_type < BEISCSI_IP_TYPE_V6) ? IP_V4_LEN : IP_V6_LEN;
609 memcpy(req->ip_params.ip_record.ip_addr.addr, ip, ip_len);
610 if (subnet)
611 memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
612 subnet, ip_len);
1137 613
1138 rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0); 614 rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
1139 if (rc < 0) { 615 /**
1140 beiscsi_log(phba, KERN_WARNING, 616 * In some cases, host needs to look into individual record status
1141 BEISCSI_LOG_CONFIG, 617 * even though FW reported success for that IOCTL.
1142 "BG_%d : Failed to Delete existing dhcp\n"); 618 */
1143 goto exit; 619 if (rc < 0 || req->ip_params.ip_record.status) {
1144 } 620 __beiscsi_log(phba, KERN_ERR,
1145 } 621 "BG_%d : failed to set IP: rc %d status %d\n",
622 rc, req->ip_params.ip_record.status);
623 if (req->ip_params.ip_record.status)
624 rc = -EINVAL;
1146 } 625 }
626 return rc;
627}
1147 628
1148 /* Delete the Static IP Set */ 629int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type,
1149 if (if_info->ip_addr.addr[0]) { 630 u8 *ip, u8 *subnet)
1150 rc = mgmt_static_ip_modify(phba, if_info, ip_param, NULL, 631{
1151 IP_ACTION_DEL); 632 struct be_cmd_get_if_info_resp *if_info;
633 struct be_cmd_rel_dhcp_req *reldhcp;
634 struct be_dma_mem nonemb_cmd;
635 int rc;
636
637 rc = beiscsi_if_get_info(phba, ip_type, &if_info);
638 if (rc)
639 return rc;
640
641 if (if_info->dhcp_state) {
642 rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
643 OPCODE_COMMON_ISCSI_NTWK_REL_STATELESS_IP_ADDR,
644 sizeof(*reldhcp));
1152 if (rc) 645 if (rc)
1153 goto exit; 646 goto exit;
1154 }
1155 647
1156 /* Delete the Gateway settings if mode change is to DHCP */ 648 reldhcp = nonemb_cmd.va;
1157 if (boot_proto == ISCSI_BOOTPROTO_DHCP) { 649 reldhcp->interface_hndl = phba->interface_handle;
1158 memset(&gtway_addr_set, 0, sizeof(gtway_addr_set)); 650 reldhcp->ip_type = ip_type;
1159 rc = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set); 651 rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
1160 if (rc) { 652 if (rc < 0) {
1161 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, 653 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
1162 "BG_%d : Failed to Get Gateway Addr\n"); 654 "BG_%d : failed to release existing DHCP: %d\n",
655 rc);
1163 goto exit; 656 goto exit;
1164 } 657 }
1165
1166 if (gtway_addr_set.ip_addr.addr[0]) {
1167 gtway_addr = (uint8_t *)&gtway_addr_set.ip_addr.addr;
1168 rc = mgmt_modify_gateway(phba, gtway_addr,
1169 IP_ACTION_DEL, IP_V4_LEN);
1170
1171 if (rc) {
1172 beiscsi_log(phba, KERN_WARNING,
1173 BEISCSI_LOG_CONFIG,
1174 "BG_%d : Failed to clear Gateway Addr Set\n");
1175 goto exit;
1176 }
1177 }
1178 } 658 }
1179 659
1180 /* Set Adapter to DHCP/Static Mode */ 660 /* first delete any IP set */
1181 if (boot_proto == ISCSI_BOOTPROTO_DHCP) { 661 if (!beiscsi_if_zero_ip(if_info->ip_addr.addr, ip_type)) {
1182 rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, 662 rc = beiscsi_if_clr_ip(phba, if_info);
1183 OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR,
1184 sizeof(*dhcpreq));
1185 if (rc) 663 if (rc)
1186 goto exit; 664 goto exit;
1187
1188 dhcpreq = nonemb_cmd.va;
1189 dhcpreq->flags = BLOCKING;
1190 dhcpreq->retry_count = 1;
1191 dhcpreq->interface_hndl = phba->interface_handle;
1192 dhcpreq->ip_type = BE2_DHCP_V4;
1193
1194 rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
1195 } else {
1196 rc = mgmt_static_ip_modify(phba, if_info, ip_param,
1197 subnet_param, IP_ACTION_ADD);
1198 } 665 }
1199 666
667 /* if ip == NULL then this is called just to release DHCP IP */
668 if (ip)
669 rc = beiscsi_if_set_ip(phba, ip, subnet, ip_type);
1200exit: 670exit:
1201 kfree(if_info); 671 kfree(if_info);
1202 return rc; 672 return rc;
1203} 673}
1204 674
1205int mgmt_set_gateway(struct beiscsi_hba *phba, 675int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type)
1206 struct iscsi_iface_param_info *gateway_param)
1207{ 676{
1208 struct be_cmd_get_def_gateway_resp gtway_addr_set; 677 struct be_cmd_get_def_gateway_resp gw_resp;
1209 uint8_t *gtway_addr; 678 struct be_cmd_get_if_info_resp *if_info;
1210 int rt_val; 679 struct be_cmd_set_dhcp_req *dhcpreq;
680 struct be_dma_mem nonemb_cmd;
681 u8 *gw;
682 int rc;
1211 683
1212 memset(&gtway_addr_set, 0, sizeof(gtway_addr_set)); 684 rc = beiscsi_if_get_info(phba, ip_type, &if_info);
1213 rt_val = mgmt_get_gateway(phba, BE2_IPV4, &gtway_addr_set); 685 if (rc)
1214 if (rt_val) { 686 return rc;
687
688 if (if_info->dhcp_state) {
1215 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, 689 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
1216 "BG_%d : Failed to Get Gateway Addr\n"); 690 "BG_%d : DHCP Already Enabled\n");
1217 return rt_val; 691 goto exit;
1218 } 692 }
1219 693
1220 if (gtway_addr_set.ip_addr.addr[0]) { 694 /* first delete any IP set */
1221 gtway_addr = (uint8_t *)&gtway_addr_set.ip_addr.addr; 695 if (!beiscsi_if_zero_ip(if_info->ip_addr.addr, ip_type)) {
1222 rt_val = mgmt_modify_gateway(phba, gtway_addr, IP_ACTION_DEL, 696 rc = beiscsi_if_clr_ip(phba, if_info);
1223 gateway_param->len); 697 if (rc)
1224 if (rt_val) { 698 goto exit;
699 }
700
701 /* delete gateway settings if mode change is to DHCP */
702 memset(&gw_resp, 0, sizeof(gw_resp));
703 /* use ip_type provided in if_info */
704 rc = beiscsi_if_get_gw(phba, if_info->ip_addr.ip_type, &gw_resp);
705 if (rc) {
706 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
707 "BG_%d : Failed to Get Gateway Addr\n");
708 goto exit;
709 }
710 gw = (u8 *)&gw_resp.ip_addr.addr;
711 if (!beiscsi_if_zero_ip(gw, if_info->ip_addr.ip_type)) {
712 rc = beiscsi_if_mod_gw(phba, IP_ACTION_DEL,
713 if_info->ip_addr.ip_type, gw);
714 if (rc) {
1225 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, 715 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
1226 "BG_%d : Failed to clear Gateway Addr Set\n"); 716 "BG_%d : Failed to clear Gateway Addr Set\n");
1227 return rt_val; 717 goto exit;
1228 } 718 }
1229 } 719 }
1230 720
1231 gtway_addr = (uint8_t *)&gateway_param->value; 721 rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd,
1232 rt_val = mgmt_modify_gateway(phba, gtway_addr, IP_ACTION_ADD, 722 OPCODE_COMMON_ISCSI_NTWK_CONFIG_STATELESS_IP_ADDR,
1233 gateway_param->len); 723 sizeof(*dhcpreq));
724 if (rc)
725 goto exit;
1234 726
1235 if (rt_val) 727 dhcpreq = nonemb_cmd.va;
1236 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, 728 dhcpreq->flags = 1; /* 1 - blocking; 0 - non-blocking */
1237 "BG_%d : Failed to Set Gateway Addr\n"); 729 dhcpreq->retry_count = 1;
730 dhcpreq->interface_hndl = phba->interface_handle;
731 dhcpreq->ip_type = ip_type;
732 rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
1238 733
1239 return rt_val; 734exit:
735 kfree(if_info);
736 return rc;
1240} 737}
1241 738
1242int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type, 739/**
1243 struct be_cmd_get_def_gateway_resp *gateway) 740 * beiscsi_if_set_vlan()- Issue and wait for CMD completion
741 * @phba: device private structure instance
742 * @vlan_tag: VLAN tag
743 *
744 * Issue the MBX Cmd and wait for the completion of the
745 * command.
746 *
747 * returns
748 * Success: 0
749 * Failure: Non-Xero Value
750 **/
751int beiscsi_if_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag)
1244{ 752{
1245 struct be_cmd_get_def_gateway_req *req;
1246 struct be_dma_mem nonemb_cmd;
1247 int rc; 753 int rc;
754 unsigned int tag;
1248 755
1249 rc = mgmt_alloc_cmd_data(phba, &nonemb_cmd, 756 tag = be_cmd_set_vlan(phba, vlan_tag);
1250 OPCODE_COMMON_ISCSI_NTWK_GET_DEFAULT_GATEWAY, 757 if (!tag) {
1251 sizeof(*gateway)); 758 beiscsi_log(phba, KERN_ERR,
1252 if (rc) 759 (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
1253 return rc; 760 "BG_%d : VLAN Setting Failed\n");
1254 761 return -EBUSY;
1255 req = nonemb_cmd.va; 762 }
1256 req->ip_type = ip_type;
1257 763
1258 return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, gateway, 764 rc = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
1259 sizeof(*gateway)); 765 if (rc) {
766 beiscsi_log(phba, KERN_ERR,
767 (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
768 "BS_%d : VLAN MBX Cmd Failed\n");
769 return rc;
770 }
771 return rc;
1260} 772}
1261 773
1262int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type, 774
1263 struct be_cmd_get_if_info_resp **if_info) 775int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
776 struct be_cmd_get_if_info_resp **if_info)
1264{ 777{
1265 struct be_cmd_get_if_info_req *req; 778 struct be_cmd_get_if_info_req *req;
1266 struct be_dma_mem nonemb_cmd; 779 struct be_dma_mem nonemb_cmd;
1267 uint32_t ioctl_size = sizeof(struct be_cmd_get_if_info_resp); 780 uint32_t ioctl_size = sizeof(struct be_cmd_get_if_info_resp);
1268 int rc; 781 int rc;
1269 782
1270 rc = mgmt_get_all_if_id(phba); 783 rc = beiscsi_if_get_handle(phba);
1271 if (rc) 784 if (rc)
1272 return rc; 785 return rc;
1273 786
@@ -1364,123 +877,317 @@ unsigned int be_cmd_get_initname(struct beiscsi_hba *phba)
1364 return tag; 877 return tag;
1365} 878}
1366 879
880static void beiscsi_boot_process_compl(struct beiscsi_hba *phba,
881 unsigned int tag)
882{
883 struct be_cmd_get_boot_target_resp *boot_resp;
884 struct be_cmd_resp_logout_fw_sess *logo_resp;
885 struct be_cmd_get_session_resp *sess_resp;
886 struct be_mcc_wrb *wrb;
887 struct boot_struct *bs;
888 int boot_work, status;
889
890 if (!test_bit(BEISCSI_HBA_BOOT_WORK, &phba->state)) {
891 __beiscsi_log(phba, KERN_ERR,
892 "BG_%d : %s no boot work %lx\n",
893 __func__, phba->state);
894 return;
895 }
896
897 if (phba->boot_struct.tag != tag) {
898 __beiscsi_log(phba, KERN_ERR,
899 "BG_%d : %s tag mismatch %d:%d\n",
900 __func__, tag, phba->boot_struct.tag);
901 return;
902 }
903 bs = &phba->boot_struct;
904 boot_work = 1;
905 status = 0;
906 switch (bs->action) {
907 case BEISCSI_BOOT_REOPEN_SESS:
908 status = __beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
909 if (!status)
910 bs->action = BEISCSI_BOOT_GET_SHANDLE;
911 else
912 bs->retry--;
913 break;
914 case BEISCSI_BOOT_GET_SHANDLE:
915 status = __beiscsi_mcc_compl_status(phba, tag, &wrb, NULL);
916 if (!status) {
917 boot_resp = embedded_payload(wrb);
918 bs->s_handle = boot_resp->boot_session_handle;
919 }
920 if (bs->s_handle == BE_BOOT_INVALID_SHANDLE) {
921 bs->action = BEISCSI_BOOT_REOPEN_SESS;
922 bs->retry--;
923 } else {
924 bs->action = BEISCSI_BOOT_GET_SINFO;
925 }
926 break;
927 case BEISCSI_BOOT_GET_SINFO:
928 status = __beiscsi_mcc_compl_status(phba, tag, NULL,
929 &bs->nonemb_cmd);
930 if (!status) {
931 sess_resp = bs->nonemb_cmd.va;
932 memcpy(&bs->boot_sess, &sess_resp->session_info,
933 sizeof(struct mgmt_session_info));
934 bs->action = BEISCSI_BOOT_LOGOUT_SESS;
935 } else {
936 __beiscsi_log(phba, KERN_ERR,
937 "BG_%d : get boot session info error : 0x%x\n",
938 status);
939 boot_work = 0;
940 }
941 pci_free_consistent(phba->ctrl.pdev, bs->nonemb_cmd.size,
942 bs->nonemb_cmd.va, bs->nonemb_cmd.dma);
943 bs->nonemb_cmd.va = NULL;
944 break;
945 case BEISCSI_BOOT_LOGOUT_SESS:
946 status = __beiscsi_mcc_compl_status(phba, tag, &wrb, NULL);
947 if (!status) {
948 logo_resp = embedded_payload(wrb);
949 if (logo_resp->session_status != BE_SESS_STATUS_CLOSE) {
950 __beiscsi_log(phba, KERN_ERR,
951 "BG_%d : FW boot session logout error : 0x%x\n",
952 logo_resp->session_status);
953 }
954 }
955 /* continue to create boot_kset even if logout failed? */
956 bs->action = BEISCSI_BOOT_CREATE_KSET;
957 break;
958 default:
959 break;
960 }
961
962 /* clear the tag so no other completion matches this tag */
963 bs->tag = 0;
964 if (!bs->retry) {
965 boot_work = 0;
966 __beiscsi_log(phba, KERN_ERR,
967 "BG_%d : failed to setup boot target: status %d action %d\n",
968 status, bs->action);
969 }
970 if (!boot_work) {
971 /* wait for next event to start boot_work */
972 clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state);
973 return;
974 }
975 schedule_work(&phba->boot_work);
976}
977
1367/** 978/**
1368 * be_mgmt_get_boot_shandle()- Get the session handle 979 * beiscsi_boot_logout_sess()- Logout from boot FW session
1369 * @phba: device priv structure instance 980 * @phba: Device priv structure instance
1370 * @s_handle: session handle returned for boot session.
1371 * 981 *
1372 * Get the boot target session handle. In case of 982 * return
1373 * crashdump mode driver has to issue and MBX Cmd 983 * the TAG used for MBOX Command
1374 * for FW to login to boot target 984 *
985 */
986unsigned int beiscsi_boot_logout_sess(struct beiscsi_hba *phba)
987{
988 struct be_ctrl_info *ctrl = &phba->ctrl;
989 struct be_mcc_wrb *wrb;
990 struct be_cmd_req_logout_fw_sess *req;
991 unsigned int tag;
992
993 mutex_lock(&ctrl->mbox_lock);
994 wrb = alloc_mcc_wrb(phba, &tag);
995 if (!wrb) {
996 mutex_unlock(&ctrl->mbox_lock);
997 return 0;
998 }
999
1000 req = embedded_payload(wrb);
1001 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1002 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
1003 OPCODE_ISCSI_INI_SESSION_LOGOUT_TARGET,
1004 sizeof(struct be_cmd_req_logout_fw_sess));
1005 /* Use the session handle copied into boot_sess */
1006 req->session_handle = phba->boot_struct.boot_sess.session_handle;
1007
1008 phba->boot_struct.tag = tag;
1009 set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state);
1010 ctrl->ptag_state[tag].cbfn = beiscsi_boot_process_compl;
1011
1012 be_mcc_notify(phba, tag);
1013 mutex_unlock(&ctrl->mbox_lock);
1014
1015 return tag;
1016}
1017/**
1018 * beiscsi_boot_reopen_sess()- Reopen boot session
1019 * @phba: Device priv structure instance
1375 * 1020 *
1376 * return 1021 * return
1377 * Success: 0 1022 * the TAG used for MBOX Command
1378 * Failure: Non-Zero value
1379 * 1023 *
1380 **/ 1024 **/
1381int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba, 1025unsigned int beiscsi_boot_reopen_sess(struct beiscsi_hba *phba)
1382 unsigned int *s_handle)
1383{ 1026{
1384 struct be_cmd_get_boot_target_resp *boot_resp; 1027 struct be_ctrl_info *ctrl = &phba->ctrl;
1385 struct be_mcc_wrb *wrb; 1028 struct be_mcc_wrb *wrb;
1029 struct be_cmd_reopen_session_req *req;
1386 unsigned int tag; 1030 unsigned int tag;
1387 uint8_t boot_retry = 3;
1388 int rc;
1389 1031
1390 do { 1032 mutex_lock(&ctrl->mbox_lock);
1391 /* Get the Boot Target Session Handle and Count*/ 1033 wrb = alloc_mcc_wrb(phba, &tag);
1392 tag = mgmt_get_boot_target(phba); 1034 if (!wrb) {
1393 if (!tag) { 1035 mutex_unlock(&ctrl->mbox_lock);
1394 beiscsi_log(phba, KERN_ERR, 1036 return 0;
1395 BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, 1037 }
1396 "BG_%d : Getting Boot Target Info Failed\n");
1397 return -EAGAIN;
1398 }
1399 1038
1400 rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL); 1039 req = embedded_payload(wrb);
1401 if (rc) { 1040 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1402 beiscsi_log(phba, KERN_ERR, 1041 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
1403 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 1042 OPCODE_ISCSI_INI_DRIVER_REOPEN_ALL_SESSIONS,
1404 "BG_%d : MBX CMD get_boot_target Failed\n"); 1043 sizeof(struct be_cmd_reopen_session_resp));
1405 return -EBUSY; 1044 req->reopen_type = BE_REOPEN_BOOT_SESSIONS;
1406 } 1045 req->session_handle = BE_BOOT_INVALID_SHANDLE;
1407 1046
1408 boot_resp = embedded_payload(wrb); 1047 phba->boot_struct.tag = tag;
1048 set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state);
1049 ctrl->ptag_state[tag].cbfn = beiscsi_boot_process_compl;
1409 1050
1410 /* Check if the there are any Boot targets configured */ 1051 be_mcc_notify(phba, tag);
1411 if (!boot_resp->boot_session_count) { 1052 mutex_unlock(&ctrl->mbox_lock);
1412 beiscsi_log(phba, KERN_INFO, 1053 return tag;
1413 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 1054}
1414 "BG_%d ;No boot targets configured\n");
1415 return -ENXIO;
1416 }
1417 1055
1418 /* FW returns the session handle of the boot session */
1419 if (boot_resp->boot_session_handle != INVALID_SESS_HANDLE) {
1420 *s_handle = boot_resp->boot_session_handle;
1421 return 0;
1422 }
1423 1056
1424 /* Issue MBX Cmd to FW to login to the boot target */ 1057/**
1425 tag = mgmt_reopen_session(phba, BE_REOPEN_BOOT_SESSIONS, 1058 * beiscsi_boot_get_sinfo()- Get boot session info
1426 INVALID_SESS_HANDLE); 1059 * @phba: device priv structure instance
1427 if (!tag) { 1060 *
1428 beiscsi_log(phba, KERN_ERR, 1061 * Fetches the boot_struct.s_handle info from FW.
1429 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 1062 * return
1430 "BG_%d : mgmt_reopen_session Failed\n"); 1063 * the TAG used for MBOX Command
1431 return -EAGAIN; 1064 *
1432 } 1065 **/
1066unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba)
1067{
1068 struct be_ctrl_info *ctrl = &phba->ctrl;
1069 struct be_cmd_get_session_resp *resp;
1070 struct be_cmd_get_session_req *req;
1071 struct be_dma_mem *nonemb_cmd;
1072 struct be_mcc_wrb *wrb;
1073 struct be_sge *sge;
1074 unsigned int tag;
1433 1075
1434 rc = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL); 1076 mutex_lock(&ctrl->mbox_lock);
1435 if (rc) { 1077 wrb = alloc_mcc_wrb(phba, &tag);
1436 beiscsi_log(phba, KERN_ERR, 1078 if (!wrb) {
1437 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 1079 mutex_unlock(&ctrl->mbox_lock);
1438 "BG_%d : mgmt_reopen_session Failed"); 1080 return 0;
1439 return rc; 1081 }
1440 }
1441 } while (--boot_retry);
1442 1082
1443 /* Couldn't log into the boot target */ 1083 nonemb_cmd = &phba->boot_struct.nonemb_cmd;
1444 beiscsi_log(phba, KERN_ERR, 1084 nonemb_cmd->size = sizeof(*resp);
1445 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 1085 nonemb_cmd->va = pci_alloc_consistent(phba->ctrl.pdev,
1446 "BG_%d : Login to Boot Target Failed\n"); 1086 sizeof(nonemb_cmd->size),
1447 return -ENXIO; 1087 &nonemb_cmd->dma);
1088 if (!nonemb_cmd->va) {
1089 mutex_unlock(&ctrl->mbox_lock);
1090 return 0;
1091 }
1092
1093 req = nonemb_cmd->va;
1094 memset(req, 0, sizeof(*req));
1095 sge = nonembedded_sgl(wrb);
1096 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
1097 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
1098 OPCODE_ISCSI_INI_SESSION_GET_A_SESSION,
1099 sizeof(*resp));
1100 req->session_handle = phba->boot_struct.s_handle;
1101 sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
1102 sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
1103 sge->len = cpu_to_le32(nonemb_cmd->size);
1104
1105 phba->boot_struct.tag = tag;
1106 set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state);
1107 ctrl->ptag_state[tag].cbfn = beiscsi_boot_process_compl;
1108
1109 be_mcc_notify(phba, tag);
1110 mutex_unlock(&ctrl->mbox_lock);
1111 return tag;
1112}
1113
1114unsigned int __beiscsi_boot_get_shandle(struct beiscsi_hba *phba, int async)
1115{
1116 struct be_ctrl_info *ctrl = &phba->ctrl;
1117 struct be_mcc_wrb *wrb;
1118 struct be_cmd_get_boot_target_req *req;
1119 unsigned int tag;
1120
1121 mutex_lock(&ctrl->mbox_lock);
1122 wrb = alloc_mcc_wrb(phba, &tag);
1123 if (!wrb) {
1124 mutex_unlock(&ctrl->mbox_lock);
1125 return 0;
1126 }
1127
1128 req = embedded_payload(wrb);
1129 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1130 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
1131 OPCODE_ISCSI_INI_BOOT_GET_BOOT_TARGET,
1132 sizeof(struct be_cmd_get_boot_target_resp));
1133
1134 if (async) {
1135 phba->boot_struct.tag = tag;
1136 set_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state);
1137 ctrl->ptag_state[tag].cbfn = beiscsi_boot_process_compl;
1138 }
1139
1140 be_mcc_notify(phba, tag);
1141 mutex_unlock(&ctrl->mbox_lock);
1142 return tag;
1448} 1143}
1449 1144
1450/** 1145/**
1451 * mgmt_set_vlan()- Issue and wait for CMD completion 1146 * beiscsi_boot_get_shandle()- Get boot session handle
1452 * @phba: device private structure instance 1147 * @phba: device priv structure instance
1453 * @vlan_tag: VLAN tag 1148 * @s_handle: session handle returned for boot session.
1454 * 1149 *
1455 * Issue the MBX Cmd and wait for the completion of the 1150 * return
1456 * command. 1151 * Success: 1
1152 * Failure: negative
1457 * 1153 *
1458 * returns
1459 * Success: 0
1460 * Failure: Non-Xero Value
1461 **/ 1154 **/
1462int mgmt_set_vlan(struct beiscsi_hba *phba, 1155int beiscsi_boot_get_shandle(struct beiscsi_hba *phba, unsigned int *s_handle)
1463 uint16_t vlan_tag)
1464{ 1156{
1465 int rc; 1157 struct be_cmd_get_boot_target_resp *boot_resp;
1158 struct be_mcc_wrb *wrb;
1466 unsigned int tag; 1159 unsigned int tag;
1160 int rc;
1467 1161
1468 tag = be_cmd_set_vlan(phba, vlan_tag); 1162 *s_handle = BE_BOOT_INVALID_SHANDLE;
1163 /* get configured boot session count and handle */
1164 tag = __beiscsi_boot_get_shandle(phba, 0);
1469 if (!tag) { 1165 if (!tag) {
1470 beiscsi_log(phba, KERN_ERR, 1166 beiscsi_log(phba, KERN_ERR,
1471 (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX), 1167 BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
1472 "BG_%d : VLAN Setting Failed\n"); 1168 "BG_%d : Getting Boot Target Info Failed\n");
1473 return -EBUSY; 1169 return -EAGAIN;
1474 } 1170 }
1475 1171
1476 rc = beiscsi_mccq_compl_wait(phba, tag, NULL, NULL); 1172 rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
1477 if (rc) { 1173 if (rc) {
1478 beiscsi_log(phba, KERN_ERR, 1174 beiscsi_log(phba, KERN_ERR,
1479 (BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX), 1175 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
1480 "BS_%d : VLAN MBX Cmd Failed\n"); 1176 "BG_%d : MBX CMD get_boot_target Failed\n");
1481 return rc; 1177 return -EBUSY;
1482 } 1178 }
1483 return rc; 1179
1180 boot_resp = embedded_payload(wrb);
1181 /* check if there are any boot targets configured */
1182 if (!boot_resp->boot_session_count) {
1183 __beiscsi_log(phba, KERN_INFO,
1184 "BG_%d : No boot targets configured\n");
1185 return -ENXIO;
1186 }
1187
1188 /* only if FW has logged in to the boot target, s_handle is valid */
1189 *s_handle = boot_resp->boot_session_handle;
1190 return 1;
1484} 1191}
1485 1192
1486/** 1193/**
@@ -1645,7 +1352,6 @@ void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
1645{ 1352{
1646 struct iscsi_wrb *pwrb = pwrb_handle->pwrb; 1353 struct iscsi_wrb *pwrb = pwrb_handle->pwrb;
1647 1354
1648 memset(pwrb, 0, sizeof(*pwrb));
1649 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, 1355 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
1650 max_send_data_segment_length, pwrb, 1356 max_send_data_segment_length, pwrb,
1651 params->dw[offsetof(struct amap_beiscsi_offload_params, 1357 params->dw[offsetof(struct amap_beiscsi_offload_params,
@@ -1717,8 +1423,6 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
1717{ 1423{
1718 struct iscsi_wrb *pwrb = pwrb_handle->pwrb; 1424 struct iscsi_wrb *pwrb = pwrb_handle->pwrb;
1719 1425
1720 memset(pwrb, 0, sizeof(*pwrb));
1721
1722 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, 1426 AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
1723 max_burst_length, pwrb, params->dw[offsetof 1427 max_burst_length, pwrb, params->dw[offsetof
1724 (struct amap_beiscsi_offload_params, 1428 (struct amap_beiscsi_offload_params,
@@ -1790,70 +1494,3 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
1790 (params->dw[offsetof(struct amap_beiscsi_offload_params, 1494 (params->dw[offsetof(struct amap_beiscsi_offload_params,
1791 exp_statsn) / 32] + 1)); 1495 exp_statsn) / 32] + 1));
1792} 1496}
1793
1794/**
1795 * beiscsi_logout_fw_sess()- Firmware Session Logout
1796 * @phba: Device priv structure instance
1797 * @fw_sess_handle: FW session handle
1798 *
1799 * Logout from the FW established sessions.
1800 * returns
1801 * Success: 0
1802 * Failure: Non-Zero Value
1803 *
1804 */
1805int beiscsi_logout_fw_sess(struct beiscsi_hba *phba,
1806 uint32_t fw_sess_handle)
1807{
1808 struct be_ctrl_info *ctrl = &phba->ctrl;
1809 struct be_mcc_wrb *wrb;
1810 struct be_cmd_req_logout_fw_sess *req;
1811 struct be_cmd_resp_logout_fw_sess *resp;
1812 unsigned int tag;
1813 int rc;
1814
1815 beiscsi_log(phba, KERN_INFO,
1816 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
1817 "BG_%d : In bescsi_logout_fwboot_sess\n");
1818
1819 mutex_lock(&ctrl->mbox_lock);
1820 wrb = alloc_mcc_wrb(phba, &tag);
1821 if (!wrb) {
1822 mutex_unlock(&ctrl->mbox_lock);
1823 beiscsi_log(phba, KERN_INFO,
1824 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
1825 "BG_%d : MBX Tag Failure\n");
1826 return -EINVAL;
1827 }
1828
1829 req = embedded_payload(wrb);
1830 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1831 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI,
1832 OPCODE_ISCSI_INI_SESSION_LOGOUT_TARGET,
1833 sizeof(struct be_cmd_req_logout_fw_sess));
1834
1835 /* Set the session handle */
1836 req->session_handle = fw_sess_handle;
1837 be_mcc_notify(phba, tag);
1838 mutex_unlock(&ctrl->mbox_lock);
1839
1840 rc = beiscsi_mccq_compl_wait(phba, tag, &wrb, NULL);
1841 if (rc) {
1842 beiscsi_log(phba, KERN_ERR,
1843 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
1844 "BG_%d : MBX CMD FW_SESSION_LOGOUT_TARGET Failed\n");
1845 return -EBUSY;
1846 }
1847
1848 resp = embedded_payload(wrb);
1849 if (resp->session_status !=
1850 BEISCSI_MGMT_SESSION_CLOSE) {
1851 beiscsi_log(phba, KERN_ERR,
1852 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
1853 "BG_%d : FW_SESSION_LOGOUT_TARGET resp : 0x%x\n",
1854 resp->session_status);
1855 rc = -EINVAL;
1856 }
1857
1858 return rc;
1859}
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index f3a48a04b2ca..b897cfd57c72 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -1,5 +1,5 @@
1/** 1/**
2 * Copyright (C) 2005 - 2015 Emulex 2 * Copyright (C) 2005 - 2016 Broadcom
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -7,10 +7,10 @@
7 * as published by the Free Software Foundation. The full GNU General 7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING. 8 * Public License is included in this distribution in the file called COPYING.
9 * 9 *
10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
11 * 11 *
12 * Contact Information: 12 * Contact Information:
13 * linux-drivers@avagotech.com 13 * linux-drivers@broadcom.com
14 * 14 *
15 * Emulex 15 * Emulex
16 * 3333 Susan Street 16 * 3333 Susan Street
@@ -96,7 +96,6 @@ struct mcc_wrb {
96 struct mcc_wrb_payload payload; 96 struct mcc_wrb_payload payload;
97}; 97};
98 98
99int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute);
100int mgmt_open_connection(struct beiscsi_hba *phba, 99int mgmt_open_connection(struct beiscsi_hba *phba,
101 struct sockaddr *dst_addr, 100 struct sockaddr *dst_addr,
102 struct beiscsi_endpoint *beiscsi_ep, 101 struct beiscsi_endpoint *beiscsi_ep,
@@ -266,50 +265,41 @@ struct beiscsi_endpoint {
266 u16 cid_vld; 265 u16 cid_vld;
267}; 266};
268 267
269int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
270 struct beiscsi_hba *phba);
271int mgmt_get_port_name(struct be_ctrl_info *ctrl,
272 struct beiscsi_hba *phba);
273
274unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba, 268unsigned int mgmt_invalidate_connection(struct beiscsi_hba *phba,
275 struct beiscsi_endpoint *beiscsi_ep, 269 struct beiscsi_endpoint *beiscsi_ep,
276 unsigned short cid, 270 unsigned short cid,
277 unsigned short issue_reset, 271 unsigned short issue_reset,
278 unsigned short savecfg_flag); 272 unsigned short savecfg_flag);
279 273
280int mgmt_set_ip(struct beiscsi_hba *phba, 274int beiscsi_if_en_dhcp(struct beiscsi_hba *phba, u32 ip_type);
281 struct iscsi_iface_param_info *ip_param,
282 struct iscsi_iface_param_info *subnet_param,
283 uint32_t boot_proto);
284 275
285unsigned int mgmt_get_boot_target(struct beiscsi_hba *phba); 276int beiscsi_if_en_static(struct beiscsi_hba *phba, u32 ip_type,
277 u8 *ip, u8 *subnet);
286 278
287unsigned int mgmt_reopen_session(struct beiscsi_hba *phba, 279int beiscsi_if_set_gw(struct beiscsi_hba *phba, u32 ip_type, u8 *gw);
288 unsigned int reopen_type,
289 unsigned sess_handle);
290 280
291unsigned int mgmt_get_session_info(struct beiscsi_hba *phba, 281int beiscsi_if_get_gw(struct beiscsi_hba *phba, u32 ip_type,
292 u32 boot_session_handle, 282 struct be_cmd_get_def_gateway_resp *resp);
293 struct be_dma_mem *nonemb_cmd);
294 283
295int mgmt_get_nic_conf(struct beiscsi_hba *phba, 284int mgmt_get_nic_conf(struct beiscsi_hba *phba,
296 struct be_cmd_get_nic_conf_resp *mac); 285 struct be_cmd_get_nic_conf_resp *mac);
297 286
298int mgmt_get_if_info(struct beiscsi_hba *phba, int ip_type, 287int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
299 struct be_cmd_get_if_info_resp **if_info); 288 struct be_cmd_get_if_info_resp **if_info);
289
290unsigned int beiscsi_if_get_handle(struct beiscsi_hba *phba);
291
292int beiscsi_if_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
300 293
301int mgmt_get_gateway(struct beiscsi_hba *phba, int ip_type, 294unsigned int beiscsi_boot_logout_sess(struct beiscsi_hba *phba);
302 struct be_cmd_get_def_gateway_resp *gateway);
303 295
304int mgmt_set_gateway(struct beiscsi_hba *phba, 296unsigned int beiscsi_boot_reopen_sess(struct beiscsi_hba *phba);
305 struct iscsi_iface_param_info *gateway_param);
306 297
307int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba, 298unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba);
308 unsigned int *s_handle);
309 299
310unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba); 300unsigned int __beiscsi_boot_get_shandle(struct beiscsi_hba *phba, int async);
311 301
312int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag); 302int beiscsi_boot_get_shandle(struct beiscsi_hba *phba, unsigned int *s_handle);
313 303
314ssize_t beiscsi_drvr_ver_disp(struct device *dev, 304ssize_t beiscsi_drvr_ver_disp(struct device *dev,
315 struct device_attribute *attr, char *buf); 305 struct device_attribute *attr, char *buf);
@@ -339,7 +329,6 @@ void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
339 struct wrb_handle *pwrb_handle, 329 struct wrb_handle *pwrb_handle,
340 struct hwi_wrb_context *pwrb_context); 330 struct hwi_wrb_context *pwrb_context);
341 331
342void beiscsi_ue_detect(struct beiscsi_hba *phba);
343int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, 332int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
344 struct be_set_eqd *, int num); 333 struct be_set_eqd *, int num);
345 334
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 7733ad5305d4..4ddda72f60e6 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -5827,13 +5827,13 @@ bfa_fcs_lport_get_rport_max_speed(bfa_fcs_lport_t *port)
5827 bfa_port_speed_t max_speed = 0; 5827 bfa_port_speed_t max_speed = 0;
5828 struct bfa_port_attr_s port_attr; 5828 struct bfa_port_attr_s port_attr;
5829 bfa_port_speed_t port_speed, rport_speed; 5829 bfa_port_speed_t port_speed, rport_speed;
5830 bfa_boolean_t trl_enabled = bfa_fcport_is_ratelim(port->fcs->bfa); 5830 bfa_boolean_t trl_enabled;
5831
5832 5831
5833 if (port == NULL) 5832 if (port == NULL)
5834 return 0; 5833 return 0;
5835 5834
5836 fcs = port->fcs; 5835 fcs = port->fcs;
5836 trl_enabled = bfa_fcport_is_ratelim(port->fcs->bfa);
5837 5837
5838 /* Get Physical port's current speed */ 5838 /* Get Physical port's current speed */
5839 bfa_fcport_get_attr(port->fcs->bfa, &port_attr); 5839 bfa_fcport_get_attr(port->fcs->bfa, &port_attr);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index 5beea776b9f5..68ca518d34b0 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -254,7 +254,7 @@ int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
254 return rc; 254 return rc;
255} 255}
256 256
257void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg) 257static void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
258{ 258{
259 struct bnx2fc_mp_req *mp_req; 259 struct bnx2fc_mp_req *mp_req;
260 struct fc_frame_header *fc_hdr, *fh; 260 struct fc_frame_header *fc_hdr, *fh;
@@ -364,7 +364,7 @@ srr_compl_done:
364 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); 364 kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
365} 365}
366 366
367void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg) 367static void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
368{ 368{
369 struct bnx2fc_cmd *orig_io_req, *new_io_req; 369 struct bnx2fc_cmd *orig_io_req, *new_io_req;
370 struct bnx2fc_cmd *rec_req; 370 struct bnx2fc_cmd *rec_req;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index a5052dd8d7e6..f9ddb6156f14 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -625,7 +625,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
625 * 625 *
626 * @arg: ptr to bnx2fc_percpu_info structure 626 * @arg: ptr to bnx2fc_percpu_info structure
627 */ 627 */
628int bnx2fc_percpu_io_thread(void *arg) 628static int bnx2fc_percpu_io_thread(void *arg)
629{ 629{
630 struct bnx2fc_percpu_s *p = arg; 630 struct bnx2fc_percpu_s *p = arg;
631 struct bnx2fc_work *work, *tmp; 631 struct bnx2fc_work *work, *tmp;
@@ -1410,9 +1410,10 @@ bind_err:
1410 return NULL; 1410 return NULL;
1411} 1411}
1412 1412
1413struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba, 1413static struct bnx2fc_interface *
1414 struct net_device *netdev, 1414bnx2fc_interface_create(struct bnx2fc_hba *hba,
1415 enum fip_state fip_mode) 1415 struct net_device *netdev,
1416 enum fip_state fip_mode)
1416{ 1417{
1417 struct fcoe_ctlr_device *ctlr_dev; 1418 struct fcoe_ctlr_device *ctlr_dev;
1418 struct bnx2fc_interface *interface; 1419 struct bnx2fc_interface *interface;
@@ -2765,8 +2766,7 @@ static void __exit bnx2fc_mod_exit(void)
2765 * held. 2766 * held.
2766 */ 2767 */
2767 mutex_lock(&bnx2fc_dev_lock); 2768 mutex_lock(&bnx2fc_dev_lock);
2768 list_splice(&adapter_list, &to_be_deleted); 2769 list_splice_init(&adapter_list, &to_be_deleted);
2769 INIT_LIST_HEAD(&adapter_list);
2770 adapter_count = 0; 2770 adapter_count = 0;
2771 mutex_unlock(&bnx2fc_dev_lock); 2771 mutex_unlock(&bnx2fc_dev_lock);
2772 2772
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 28c671b609b2..5ff9f89c17c7 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -994,7 +994,7 @@ void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
994 994
995} 995}
996 996
997struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe) 997static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
998{ 998{
999 struct bnx2fc_work *work; 999 struct bnx2fc_work *work;
1000 work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC); 1000 work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 8f24d60f09d7..f501095f91ac 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1079,7 +1079,7 @@ int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
1079 return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET); 1079 return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
1080} 1080}
1081 1081
1082int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req) 1082static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req)
1083{ 1083{
1084 struct bnx2fc_rport *tgt = io_req->tgt; 1084 struct bnx2fc_rport *tgt = io_req->tgt;
1085 int rc = SUCCESS; 1085 int rc = SUCCESS;
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index c2a6f9f29427..89a52b941ea8 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -1721,7 +1721,7 @@ out:
1721 1721
1722 /* Wake up waiting threads */ 1722 /* Wake up waiting threads */
1723 csio_scsi_cmnd(req) = NULL; 1723 csio_scsi_cmnd(req) = NULL;
1724 complete_all(&req->cmplobj); 1724 complete(&req->cmplobj);
1725} 1725}
1726 1726
1727/* 1727/*
@@ -1945,6 +1945,7 @@ csio_eh_abort_handler(struct scsi_cmnd *cmnd)
1945 ready = csio_is_lnode_ready(ln); 1945 ready = csio_is_lnode_ready(ln);
1946 tmo = CSIO_SCSI_ABRT_TMO_MS; 1946 tmo = CSIO_SCSI_ABRT_TMO_MS;
1947 1947
1948 reinit_completion(&ioreq->cmplobj);
1948 spin_lock_irq(&hw->lock); 1949 spin_lock_irq(&hw->lock);
1949 rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE)); 1950 rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
1950 spin_unlock_irq(&hw->lock); 1951 spin_unlock_irq(&hw->lock);
@@ -1964,8 +1965,6 @@ csio_eh_abort_handler(struct scsi_cmnd *cmnd)
1964 goto inval_scmnd; 1965 goto inval_scmnd;
1965 } 1966 }
1966 1967
1967 /* Wait for completion */
1968 init_completion(&ioreq->cmplobj);
1969 wait_for_completion_timeout(&ioreq->cmplobj, msecs_to_jiffies(tmo)); 1968 wait_for_completion_timeout(&ioreq->cmplobj, msecs_to_jiffies(tmo));
1970 1969
1971 /* FW didnt respond to abort within our timeout */ 1970 /* FW didnt respond to abort within our timeout */
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 661bb94e2548..b301655f91cd 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -823,17 +823,6 @@ static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
823} 823}
824 824
825/** 825/**
826 * cxlflash_shutdown() - shutdown handler
827 * @pdev: PCI device associated with the host.
828 */
829static void cxlflash_shutdown(struct pci_dev *pdev)
830{
831 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
832
833 notify_shutdown(cfg, false);
834}
835
836/**
837 * cxlflash_remove() - PCI entry point to tear down host 826 * cxlflash_remove() - PCI entry point to tear down host
838 * @pdev: PCI device associated with the host. 827 * @pdev: PCI device associated with the host.
839 * 828 *
@@ -844,6 +833,11 @@ static void cxlflash_remove(struct pci_dev *pdev)
844 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); 833 struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
845 ulong lock_flags; 834 ulong lock_flags;
846 835
836 if (!pci_is_enabled(pdev)) {
837 pr_debug("%s: Device is disabled\n", __func__);
838 return;
839 }
840
847 /* If a Task Management Function is active, wait for it to complete 841 /* If a Task Management Function is active, wait for it to complete
848 * before continuing with remove. 842 * before continuing with remove.
849 */ 843 */
@@ -1046,6 +1040,8 @@ static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
1046 do { 1040 do {
1047 msleep(delay_us / 1000); 1041 msleep(delay_us / 1000);
1048 status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]); 1042 status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
1043 if (status == U64_MAX)
1044 nretry /= 2;
1049 } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE && 1045 } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
1050 nretry--); 1046 nretry--);
1051 1047
@@ -1077,6 +1073,8 @@ static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
1077 do { 1073 do {
1078 msleep(delay_us / 1000); 1074 msleep(delay_us / 1000);
1079 status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]); 1075 status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
1076 if (status == U64_MAX)
1077 nretry /= 2;
1080 } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE && 1078 } while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
1081 nretry--); 1079 nretry--);
1082 1080
@@ -1095,42 +1093,25 @@ static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
1095 * online. This toggling action can cause this routine to delay up to a few 1093 * online. This toggling action can cause this routine to delay up to a few
1096 * seconds. When configured to use the internal LUN feature of the AFU, a 1094 * seconds. When configured to use the internal LUN feature of the AFU, a
1097 * failure to come online is overridden. 1095 * failure to come online is overridden.
1098 *
1099 * Return:
1100 * 0 when the WWPN is successfully written and the port comes back online
1101 * -1 when the port fails to go offline or come back up online
1102 */ 1096 */
1103static int afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs, 1097static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
1104 u64 wwpn) 1098 u64 wwpn)
1105{ 1099{
1106 int rc = 0;
1107
1108 set_port_offline(fc_regs); 1100 set_port_offline(fc_regs);
1109
1110 if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, 1101 if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1111 FC_PORT_STATUS_RETRY_CNT)) { 1102 FC_PORT_STATUS_RETRY_CNT)) {
1112 pr_debug("%s: wait on port %d to go offline timed out\n", 1103 pr_debug("%s: wait on port %d to go offline timed out\n",
1113 __func__, port); 1104 __func__, port);
1114 rc = -1; /* but continue on to leave the port back online */
1115 } 1105 }
1116 1106
1117 if (rc == 0) 1107 writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
1118 writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
1119
1120 /* Always return success after programming WWPN */
1121 rc = 0;
1122 1108
1123 set_port_online(fc_regs); 1109 set_port_online(fc_regs);
1124
1125 if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US, 1110 if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1126 FC_PORT_STATUS_RETRY_CNT)) { 1111 FC_PORT_STATUS_RETRY_CNT)) {
1127 pr_err("%s: wait on port %d to go online timed out\n", 1112 pr_debug("%s: wait on port %d to go online timed out\n",
1128 __func__, port); 1113 __func__, port);
1129 } 1114 }
1130
1131 pr_debug("%s: returning rc=%d\n", __func__, rc);
1132
1133 return rc;
1134} 1115}
1135 1116
1136/** 1117/**
@@ -1187,7 +1168,7 @@ static const struct asyc_intr_info ainfo[] = {
1187 {SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR}, 1168 {SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
1188 {SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST}, 1169 {SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
1189 {SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0}, 1170 {SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
1190 {SISL_ASTATUS_FC0_LINK_UP, "link up", 0, SCAN_HOST}, 1171 {SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
1191 {SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET}, 1172 {SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
1192 {SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0}, 1173 {SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
1193 {SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET}, 1174 {SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
@@ -1195,7 +1176,7 @@ static const struct asyc_intr_info ainfo[] = {
1195 {SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR}, 1176 {SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
1196 {SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST}, 1177 {SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
1197 {SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0}, 1178 {SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
1198 {SISL_ASTATUS_FC1_LINK_UP, "link up", 1, SCAN_HOST}, 1179 {SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
1199 {0x0, "", 0, 0} /* terminator */ 1180 {0x0, "", 0, 0} /* terminator */
1200}; 1181};
1201 1182
@@ -1631,15 +1612,10 @@ static int init_global(struct cxlflash_cfg *cfg)
1631 [FC_CRC_THRESH / 8]); 1612 [FC_CRC_THRESH / 8]);
1632 1613
1633 /* Set WWPNs. If already programmed, wwpn[i] is 0 */ 1614 /* Set WWPNs. If already programmed, wwpn[i] is 0 */
1634 if (wwpn[i] != 0 && 1615 if (wwpn[i] != 0)
1635 afu_set_wwpn(afu, i, 1616 afu_set_wwpn(afu, i,
1636 &afu->afu_map->global.fc_regs[i][0], 1617 &afu->afu_map->global.fc_regs[i][0],
1637 wwpn[i])) { 1618 wwpn[i]);
1638 dev_err(dev, "%s: failed to set WWPN on port %d\n",
1639 __func__, i);
1640 rc = -EIO;
1641 goto out;
1642 }
1643 /* Programming WWPN back to back causes additional 1619 /* Programming WWPN back to back causes additional
1644 * offline/online transitions and a PLOGI 1620 * offline/online transitions and a PLOGI
1645 */ 1621 */
@@ -2048,6 +2024,11 @@ retry:
2048 * cxlflash_eh_host_reset_handler() - reset the host adapter 2024 * cxlflash_eh_host_reset_handler() - reset the host adapter
2049 * @scp: SCSI command from stack identifying host. 2025 * @scp: SCSI command from stack identifying host.
2050 * 2026 *
2027 * Following a reset, the state is evaluated again in case an EEH occurred
2028 * during the reset. In such a scenario, the host reset will either yield
2029 * until the EEH recovery is complete or return success or failure based
2030 * upon the current device state.
2031 *
2051 * Return: 2032 * Return:
2052 * SUCCESS as defined in scsi/scsi.h 2033 * SUCCESS as defined in scsi/scsi.h
2053 * FAILED as defined in scsi/scsi.h 2034 * FAILED as defined in scsi/scsi.h
@@ -2080,7 +2061,8 @@ static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
2080 } else 2061 } else
2081 cfg->state = STATE_NORMAL; 2062 cfg->state = STATE_NORMAL;
2082 wake_up_all(&cfg->reset_waitq); 2063 wake_up_all(&cfg->reset_waitq);
2083 break; 2064 ssleep(1);
2065 /* fall through */
2084 case STATE_RESET: 2066 case STATE_RESET:
2085 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); 2067 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2086 if (cfg->state == STATE_NORMAL) 2068 if (cfg->state == STATE_NORMAL)
@@ -2596,6 +2578,9 @@ out_remove:
2596 * @pdev: PCI device struct. 2578 * @pdev: PCI device struct.
2597 * @state: PCI channel state. 2579 * @state: PCI channel state.
2598 * 2580 *
2581 * When an EEH occurs during an active reset, wait until the reset is
2582 * complete and then take action based upon the device state.
2583 *
2599 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT 2584 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
2600 */ 2585 */
2601static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev, 2586static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
@@ -2609,6 +2594,10 @@ static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
2609 2594
2610 switch (state) { 2595 switch (state) {
2611 case pci_channel_io_frozen: 2596 case pci_channel_io_frozen:
2597 wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2598 if (cfg->state == STATE_FAILTERM)
2599 return PCI_ERS_RESULT_DISCONNECT;
2600
2612 cfg->state = STATE_RESET; 2601 cfg->state = STATE_RESET;
2613 scsi_block_requests(cfg->host); 2602 scsi_block_requests(cfg->host);
2614 drain_ioctls(cfg); 2603 drain_ioctls(cfg);
@@ -2685,7 +2674,7 @@ static struct pci_driver cxlflash_driver = {
2685 .id_table = cxlflash_pci_table, 2674 .id_table = cxlflash_pci_table,
2686 .probe = cxlflash_probe, 2675 .probe = cxlflash_probe,
2687 .remove = cxlflash_remove, 2676 .remove = cxlflash_remove,
2688 .shutdown = cxlflash_shutdown, 2677 .shutdown = cxlflash_remove,
2689 .err_handler = &cxlflash_err_handler, 2678 .err_handler = &cxlflash_err_handler,
2690}; 2679};
2691 2680
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index ce1507023132..9636970d9611 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -709,14 +709,13 @@ int cxlflash_disk_release(struct scsi_device *sdev,
709 * @cfg: Internal structure associated with the host. 709 * @cfg: Internal structure associated with the host.
710 * @ctxi: Context to release. 710 * @ctxi: Context to release.
711 * 711 *
712 * This routine is safe to be called with a a non-initialized context 712 * This routine is safe to be called with a a non-initialized context.
713 * and is tolerant of being called with the context's mutex held (it 713 * Also note that the routine conditionally checks for the existence
714 * will be unlocked if necessary before freeing). Also note that the 714 * of the context control map before clearing the RHT registers and
715 * routine conditionally checks for the existence of the context control 715 * context capabilities because it is possible to destroy a context
716 * map before clearing the RHT registers and context capabilities because 716 * while the context is in the error state (previous mapping was
717 * it is possible to destroy a context while the context is in the error 717 * removed [so there is no need to worry about clearing] and context
718 * state (previous mapping was removed [so there is no need to worry about 718 * is waiting for a new mapping).
719 * clearing] and context is waiting for a new mapping).
720 */ 719 */
721static void destroy_context(struct cxlflash_cfg *cfg, 720static void destroy_context(struct cxlflash_cfg *cfg,
722 struct ctx_info *ctxi) 721 struct ctx_info *ctxi)
@@ -732,9 +731,6 @@ static void destroy_context(struct cxlflash_cfg *cfg,
732 writeq_be(0, &ctxi->ctrl_map->rht_cnt_id); 731 writeq_be(0, &ctxi->ctrl_map->rht_cnt_id);
733 writeq_be(0, &ctxi->ctrl_map->ctx_cap); 732 writeq_be(0, &ctxi->ctrl_map->ctx_cap);
734 } 733 }
735
736 if (mutex_is_locked(&ctxi->mutex))
737 mutex_unlock(&ctxi->mutex);
738 } 734 }
739 735
740 /* Free memory associated with context */ 736 /* Free memory associated with context */
@@ -792,32 +788,58 @@ err:
792 * @cfg: Internal structure associated with the host. 788 * @cfg: Internal structure associated with the host.
793 * @ctx: Previously obtained CXL context reference. 789 * @ctx: Previously obtained CXL context reference.
794 * @ctxid: Previously obtained process element associated with CXL context. 790 * @ctxid: Previously obtained process element associated with CXL context.
795 * @adap_fd: Previously obtained adapter fd associated with CXL context.
796 * @file: Previously obtained file associated with CXL context. 791 * @file: Previously obtained file associated with CXL context.
797 * @perms: User-specified permissions. 792 * @perms: User-specified permissions.
798 *
799 * Upon return, the context is marked as initialized and the context's mutex
800 * is locked.
801 */ 793 */
802static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg, 794static void init_context(struct ctx_info *ctxi, struct cxlflash_cfg *cfg,
803 struct cxl_context *ctx, int ctxid, int adap_fd, 795 struct cxl_context *ctx, int ctxid, struct file *file,
804 struct file *file, u32 perms) 796 u32 perms)
805{ 797{
806 struct afu *afu = cfg->afu; 798 struct afu *afu = cfg->afu;
807 799
808 ctxi->rht_perms = perms; 800 ctxi->rht_perms = perms;
809 ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl; 801 ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl;
810 ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid); 802 ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
811 ctxi->lfd = adap_fd;
812 ctxi->pid = current->tgid; /* tgid = pid */ 803 ctxi->pid = current->tgid; /* tgid = pid */
813 ctxi->ctx = ctx; 804 ctxi->ctx = ctx;
805 ctxi->cfg = cfg;
814 ctxi->file = file; 806 ctxi->file = file;
815 ctxi->initialized = true; 807 ctxi->initialized = true;
816 mutex_init(&ctxi->mutex); 808 mutex_init(&ctxi->mutex);
809 kref_init(&ctxi->kref);
817 INIT_LIST_HEAD(&ctxi->luns); 810 INIT_LIST_HEAD(&ctxi->luns);
818 INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */ 811 INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */
812}
819 813
814/**
815 * remove_context() - context kref release handler
816 * @kref: Kernel reference associated with context to be removed.
817 *
818 * When a context no longer has any references it can safely be removed
819 * from global access and destroyed. Note that it is assumed the thread
820 * relinquishing access to the context holds its mutex.
821 */
822static void remove_context(struct kref *kref)
823{
824 struct ctx_info *ctxi = container_of(kref, struct ctx_info, kref);
825 struct cxlflash_cfg *cfg = ctxi->cfg;
826 u64 ctxid = DECODE_CTXID(ctxi->ctxid);
827
828 /* Remove context from table/error list */
829 WARN_ON(!mutex_is_locked(&ctxi->mutex));
830 ctxi->unavail = true;
831 mutex_unlock(&ctxi->mutex);
832 mutex_lock(&cfg->ctx_tbl_list_mutex);
820 mutex_lock(&ctxi->mutex); 833 mutex_lock(&ctxi->mutex);
834
835 if (!list_empty(&ctxi->list))
836 list_del(&ctxi->list);
837 cfg->ctx_tbl[ctxid] = NULL;
838 mutex_unlock(&cfg->ctx_tbl_list_mutex);
839 mutex_unlock(&ctxi->mutex);
840
841 /* Context now completely uncoupled/unreachable */
842 destroy_context(cfg, ctxi);
821} 843}
822 844
823/** 845/**
@@ -845,7 +867,6 @@ static int _cxlflash_disk_detach(struct scsi_device *sdev,
845 867
846 int i; 868 int i;
847 int rc = 0; 869 int rc = 0;
848 int lfd;
849 u64 ctxid = DECODE_CTXID(detach->context_id), 870 u64 ctxid = DECODE_CTXID(detach->context_id),
850 rctxid = detach->context_id; 871 rctxid = detach->context_id;
851 872
@@ -887,40 +908,13 @@ static int _cxlflash_disk_detach(struct scsi_device *sdev,
887 break; 908 break;
888 } 909 }
889 910
890 /* Tear down context following last LUN cleanup */ 911 /*
891 if (list_empty(&ctxi->luns)) { 912 * Release the context reference and the sdev reference that
892 ctxi->unavail = true; 913 * bound this LUN to the context.
893 mutex_unlock(&ctxi->mutex); 914 */
894 mutex_lock(&cfg->ctx_tbl_list_mutex); 915 if (kref_put(&ctxi->kref, remove_context))
895 mutex_lock(&ctxi->mutex);
896
897 /* Might not have been in error list so conditionally remove */
898 if (!list_empty(&ctxi->list))
899 list_del(&ctxi->list);
900 cfg->ctx_tbl[ctxid] = NULL;
901 mutex_unlock(&cfg->ctx_tbl_list_mutex);
902 mutex_unlock(&ctxi->mutex);
903
904 lfd = ctxi->lfd;
905 destroy_context(cfg, ctxi);
906 ctxi = NULL;
907 put_ctx = false; 916 put_ctx = false;
908
909 /*
910 * As a last step, clean up external resources when not
911 * already on an external cleanup thread, i.e.: close(adap_fd).
912 *
913 * NOTE: this will free up the context from the CXL services,
914 * allowing it to dole out the same context_id on a future
915 * (or even currently in-flight) disk_attach operation.
916 */
917 if (lfd != -1)
918 sys_close(lfd);
919 }
920
921 /* Release the sdev reference that bound this LUN to the context */
922 scsi_device_put(sdev); 917 scsi_device_put(sdev);
923
924out: 918out:
925 if (put_ctx) 919 if (put_ctx)
926 put_context(ctxi); 920 put_context(ctxi);
@@ -941,34 +935,18 @@ static int cxlflash_disk_detach(struct scsi_device *sdev,
941 * 935 *
942 * This routine is the release handler for the fops registered with 936 * This routine is the release handler for the fops registered with
943 * the CXL services on an initial attach for a context. It is called 937 * the CXL services on an initial attach for a context. It is called
944 * when a close is performed on the adapter file descriptor returned 938 * when a close (explicitly by the user or as part of a process tear
945 * to the user. Programmatically, the user is not required to perform 939 * down) is performed on the adapter file descriptor returned to the
946 * the close, as it is handled internally via the detach ioctl when 940 * user. The user should be aware that explicitly performing a close
947 * a context is being removed. Note that nothing prevents the user 941 * is considered catastrophic and subsequent usage of the superpipe API
948 * from performing a close, but the user should be aware that doing 942 * with previously saved off tokens will fail.
949 * so is considered catastrophic and subsequent usage of the superpipe
950 * API with previously saved off tokens will fail.
951 *
952 * When initiated from an external close (either by the user or via
953 * a process tear down), the routine derives the context reference
954 * and calls detach for each LUN associated with the context. The
955 * final detach operation will cause the context itself to be freed.
956 * Note that the saved off lfd is reset prior to calling detach to
957 * signify that the final detach should not perform a close.
958 *
959 * When initiated from a detach operation as part of the tear down
960 * of a context, the context is first completely freed and then the
961 * close is performed. This routine will fail to derive the context
962 * reference (due to the context having already been freed) and then
963 * call into the CXL release entry point.
964 * 943 *
965 * Thus, with exception to when the CXL process element (context id) 944 * This routine derives the context reference and calls detach for
966 * lookup fails (a case that should theoretically never occur), every 945 * each LUN associated with the context. The final detach operation
967 * call into this routine results in a complete freeing of a context. 946 * causes the context itself to be freed. With exception to when the
968 * 947 * CXL process element (context id) lookup fails (a case that should
969 * As part of the detach, all per-context resources associated with the LUN 948 * theoretically never occur), every call into this routine results
970 * are cleaned up. When detaching the last LUN for a context, the context 949 * in a complete freeing of a context.
971 * itself is cleaned up and released.
972 * 950 *
973 * Return: 0 on success 951 * Return: 0 on success
974 */ 952 */
@@ -1006,11 +984,8 @@ static int cxlflash_cxl_release(struct inode *inode, struct file *file)
1006 goto out; 984 goto out;
1007 } 985 }
1008 986
1009 dev_dbg(dev, "%s: close(%d) for context %d\n", 987 dev_dbg(dev, "%s: close for context %d\n", __func__, ctxid);
1010 __func__, ctxi->lfd, ctxid);
1011 988
1012 /* Reset the file descriptor to indicate we're on a close() thread */
1013 ctxi->lfd = -1;
1014 detach.context_id = ctxi->ctxid; 989 detach.context_id = ctxi->ctxid;
1015 list_for_each_entry_safe(lun_access, t, &ctxi->luns, list) 990 list_for_each_entry_safe(lun_access, t, &ctxi->luns, list)
1016 _cxlflash_disk_detach(lun_access->sdev, ctxi, &detach); 991 _cxlflash_disk_detach(lun_access->sdev, ctxi, &detach);
@@ -1110,8 +1085,7 @@ static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1110 goto err; 1085 goto err;
1111 } 1086 }
1112 1087
1113 dev_dbg(dev, "%s: fault(%d) for context %d\n", 1088 dev_dbg(dev, "%s: fault for context %d\n", __func__, ctxid);
1114 __func__, ctxi->lfd, ctxid);
1115 1089
1116 if (likely(!ctxi->err_recovery_active)) { 1090 if (likely(!ctxi->err_recovery_active)) {
1117 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 1091 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -1186,8 +1160,7 @@ static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma)
1186 goto out; 1160 goto out;
1187 } 1161 }
1188 1162
1189 dev_dbg(dev, "%s: mmap(%d) for context %d\n", 1163 dev_dbg(dev, "%s: mmap for context %d\n", __func__, ctxid);
1190 __func__, ctxi->lfd, ctxid);
1191 1164
1192 rc = cxl_fd_mmap(file, vma); 1165 rc = cxl_fd_mmap(file, vma);
1193 if (likely(!rc)) { 1166 if (likely(!rc)) {
@@ -1377,12 +1350,12 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
1377 lun_access->lli = lli; 1350 lun_access->lli = lli;
1378 lun_access->sdev = sdev; 1351 lun_access->sdev = sdev;
1379 1352
1380 /* Non-NULL context indicates reuse */ 1353 /* Non-NULL context indicates reuse (another context reference) */
1381 if (ctxi) { 1354 if (ctxi) {
1382 dev_dbg(dev, "%s: Reusing context for LUN! (%016llX)\n", 1355 dev_dbg(dev, "%s: Reusing context for LUN! (%016llX)\n",
1383 __func__, rctxid); 1356 __func__, rctxid);
1357 kref_get(&ctxi->kref);
1384 list_add(&lun_access->list, &ctxi->luns); 1358 list_add(&lun_access->list, &ctxi->luns);
1385 fd = ctxi->lfd;
1386 goto out_attach; 1359 goto out_attach;
1387 } 1360 }
1388 1361
@@ -1430,7 +1403,7 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
1430 perms = SISL_RHT_PERM(attach->hdr.flags + 1); 1403 perms = SISL_RHT_PERM(attach->hdr.flags + 1);
1431 1404
1432 /* Context mutex is locked upon return */ 1405 /* Context mutex is locked upon return */
1433 init_context(ctxi, cfg, ctx, ctxid, fd, file, perms); 1406 init_context(ctxi, cfg, ctx, ctxid, file, perms);
1434 1407
1435 rc = afu_attach(cfg, ctxi); 1408 rc = afu_attach(cfg, ctxi);
1436 if (unlikely(rc)) { 1409 if (unlikely(rc)) {
@@ -1445,7 +1418,6 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
1445 * knows about us yet; we can be the only one holding our mutex. 1418 * knows about us yet; we can be the only one holding our mutex.
1446 */ 1419 */
1447 list_add(&lun_access->list, &ctxi->luns); 1420 list_add(&lun_access->list, &ctxi->luns);
1448 mutex_unlock(&ctxi->mutex);
1449 mutex_lock(&cfg->ctx_tbl_list_mutex); 1421 mutex_lock(&cfg->ctx_tbl_list_mutex);
1450 mutex_lock(&ctxi->mutex); 1422 mutex_lock(&ctxi->mutex);
1451 cfg->ctx_tbl[ctxid] = ctxi; 1423 cfg->ctx_tbl[ctxid] = ctxi;
@@ -1453,7 +1425,11 @@ static int cxlflash_disk_attach(struct scsi_device *sdev,
1453 fd_install(fd, file); 1425 fd_install(fd, file);
1454 1426
1455out_attach: 1427out_attach:
1456 attach->hdr.return_flags = 0; 1428 if (fd != -1)
1429 attach->hdr.return_flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD;
1430 else
1431 attach->hdr.return_flags = 0;
1432
1457 attach->context_id = ctxi->ctxid; 1433 attach->context_id = ctxi->ctxid;
1458 attach->block_size = gli->blk_len; 1434 attach->block_size = gli->blk_len;
1459 attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea); 1435 attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
@@ -1494,7 +1470,7 @@ err:
1494 file = NULL; 1470 file = NULL;
1495 } 1471 }
1496 1472
1497 /* Cleanup our context; safe to call even with mutex locked */ 1473 /* Cleanup our context */
1498 if (ctxi) { 1474 if (ctxi) {
1499 destroy_context(cfg, ctxi); 1475 destroy_context(cfg, ctxi);
1500 ctxi = NULL; 1476 ctxi = NULL;
@@ -1509,16 +1485,19 @@ err:
1509 * recover_context() - recovers a context in error 1485 * recover_context() - recovers a context in error
1510 * @cfg: Internal structure associated with the host. 1486 * @cfg: Internal structure associated with the host.
1511 * @ctxi: Context to release. 1487 * @ctxi: Context to release.
1488 * @adap_fd: Adapter file descriptor associated with new/recovered context.
1512 * 1489 *
1513 * Reestablishes the state for a context-in-error. 1490 * Reestablishes the state for a context-in-error.
1514 * 1491 *
1515 * Return: 0 on success, -errno on failure 1492 * Return: 0 on success, -errno on failure
1516 */ 1493 */
1517static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi) 1494static int recover_context(struct cxlflash_cfg *cfg,
1495 struct ctx_info *ctxi,
1496 int *adap_fd)
1518{ 1497{
1519 struct device *dev = &cfg->dev->dev; 1498 struct device *dev = &cfg->dev->dev;
1520 int rc = 0; 1499 int rc = 0;
1521 int old_fd, fd = -1; 1500 int fd = -1;
1522 int ctxid = -1; 1501 int ctxid = -1;
1523 struct file *file; 1502 struct file *file;
1524 struct cxl_context *ctx; 1503 struct cxl_context *ctx;
@@ -1566,9 +1545,7 @@ static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
1566 * No error paths after this point. Once the fd is installed it's 1545 * No error paths after this point. Once the fd is installed it's
1567 * visible to user space and can't be undone safely on this thread. 1546 * visible to user space and can't be undone safely on this thread.
1568 */ 1547 */
1569 old_fd = ctxi->lfd;
1570 ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid); 1548 ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid);
1571 ctxi->lfd = fd;
1572 ctxi->ctx = ctx; 1549 ctxi->ctx = ctx;
1573 ctxi->file = file; 1550 ctxi->file = file;
1574 1551
@@ -1585,9 +1562,7 @@ static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi)
1585 cfg->ctx_tbl[ctxid] = ctxi; 1562 cfg->ctx_tbl[ctxid] = ctxi;
1586 mutex_unlock(&cfg->ctx_tbl_list_mutex); 1563 mutex_unlock(&cfg->ctx_tbl_list_mutex);
1587 fd_install(fd, file); 1564 fd_install(fd, file);
1588 1565 *adap_fd = fd;
1589 /* Release the original adapter fd and associated CXL resources */
1590 sys_close(old_fd);
1591out: 1566out:
1592 dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n", 1567 dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n",
1593 __func__, ctxid, fd, rc); 1568 __func__, ctxid, fd, rc);
@@ -1646,6 +1621,7 @@ static int cxlflash_afu_recover(struct scsi_device *sdev,
1646 rctxid = recover->context_id; 1621 rctxid = recover->context_id;
1647 long reg; 1622 long reg;
1648 int lretry = 20; /* up to 2 seconds */ 1623 int lretry = 20; /* up to 2 seconds */
1624 int new_adap_fd = -1;
1649 int rc = 0; 1625 int rc = 0;
1650 1626
1651 atomic_inc(&cfg->recovery_threads); 1627 atomic_inc(&cfg->recovery_threads);
@@ -1675,7 +1651,7 @@ retry:
1675 1651
1676 if (ctxi->err_recovery_active) { 1652 if (ctxi->err_recovery_active) {
1677retry_recover: 1653retry_recover:
1678 rc = recover_context(cfg, ctxi); 1654 rc = recover_context(cfg, ctxi, &new_adap_fd);
1679 if (unlikely(rc)) { 1655 if (unlikely(rc)) {
1680 dev_err(dev, "%s: Recovery failed for context %llu (rc=%d)\n", 1656 dev_err(dev, "%s: Recovery failed for context %llu (rc=%d)\n",
1681 __func__, ctxid, rc); 1657 __func__, ctxid, rc);
@@ -1697,9 +1673,9 @@ retry_recover:
1697 1673
1698 ctxi->err_recovery_active = false; 1674 ctxi->err_recovery_active = false;
1699 recover->context_id = ctxi->ctxid; 1675 recover->context_id = ctxi->ctxid;
1700 recover->adap_fd = ctxi->lfd; 1676 recover->adap_fd = new_adap_fd;
1701 recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea); 1677 recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea);
1702 recover->hdr.return_flags |= 1678 recover->hdr.return_flags = DK_CXLFLASH_APP_CLOSE_ADAP_FD |
1703 DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET; 1679 DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET;
1704 goto out; 1680 goto out;
1705 } 1681 }
diff --git a/drivers/scsi/cxlflash/superpipe.h b/drivers/scsi/cxlflash/superpipe.h
index 5f9a091fda95..9e62ff304e4b 100644
--- a/drivers/scsi/cxlflash/superpipe.h
+++ b/drivers/scsi/cxlflash/superpipe.h
@@ -100,13 +100,14 @@ struct ctx_info {
100 100
101 struct cxl_ioctl_start_work work; 101 struct cxl_ioctl_start_work work;
102 u64 ctxid; 102 u64 ctxid;
103 int lfd;
104 pid_t pid; 103 pid_t pid;
105 bool initialized; 104 bool initialized;
106 bool unavail; 105 bool unavail;
107 bool err_recovery_active; 106 bool err_recovery_active;
108 struct mutex mutex; /* Context protection */ 107 struct mutex mutex; /* Context protection */
108 struct kref kref;
109 struct cxl_context *ctx; 109 struct cxl_context *ctx;
110 struct cxlflash_cfg *cfg;
110 struct list_head luns; /* LUNs attached to this context */ 111 struct list_head luns; /* LUNs attached to this context */
111 const struct vm_operations_struct *cxl_mmap_vmops; 112 const struct vm_operations_struct *cxl_mmap_vmops;
112 struct file *file; 113 struct file *file;
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index 50f8e9300770..90c5d7f5278e 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -1135,14 +1135,13 @@ int cxlflash_disk_clone(struct scsi_device *sdev,
1135 ctxid_dst = DECODE_CTXID(clone->context_id_dst), 1135 ctxid_dst = DECODE_CTXID(clone->context_id_dst),
1136 rctxid_src = clone->context_id_src, 1136 rctxid_src = clone->context_id_src,
1137 rctxid_dst = clone->context_id_dst; 1137 rctxid_dst = clone->context_id_dst;
1138 int adap_fd_src = clone->adap_fd_src;
1139 int i, j; 1138 int i, j;
1140 int rc = 0; 1139 int rc = 0;
1141 bool found; 1140 bool found;
1142 LIST_HEAD(sidecar); 1141 LIST_HEAD(sidecar);
1143 1142
1144 pr_debug("%s: ctxid_src=%llu ctxid_dst=%llu adap_fd_src=%d\n", 1143 pr_debug("%s: ctxid_src=%llu ctxid_dst=%llu\n",
1145 __func__, ctxid_src, ctxid_dst, adap_fd_src); 1144 __func__, ctxid_src, ctxid_dst);
1146 1145
1147 /* Do not clone yourself */ 1146 /* Do not clone yourself */
1148 if (unlikely(rctxid_src == rctxid_dst)) { 1147 if (unlikely(rctxid_src == rctxid_dst)) {
@@ -1166,13 +1165,6 @@ int cxlflash_disk_clone(struct scsi_device *sdev,
1166 goto out; 1165 goto out;
1167 } 1166 }
1168 1167
1169 if (unlikely(adap_fd_src != ctxi_src->lfd)) {
1170 pr_debug("%s: Invalid source adapter fd! (%d)\n",
1171 __func__, adap_fd_src);
1172 rc = -EINVAL;
1173 goto out;
1174 }
1175
1176 /* Verify there is no open resource handle in the destination context */ 1168 /* Verify there is no open resource handle in the destination context */
1177 for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) 1169 for (i = 0; i < MAX_RHT_PER_CONTEXT; i++)
1178 if (ctxi_dst->rht_start[i].nmask != 0) { 1170 if (ctxi_dst->rht_start[i].nmask != 0) {
@@ -1257,7 +1249,6 @@ int cxlflash_disk_clone(struct scsi_device *sdev,
1257 1249
1258out_success: 1250out_success:
1259 list_splice(&sidecar, &ctxi_dst->luns); 1251 list_splice(&sidecar, &ctxi_dst->luns);
1260 sys_close(adap_fd_src);
1261 1252
1262 /* fall through */ 1253 /* fall through */
1263out: 1254out:
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 752b5c9d1ab2..241829e59668 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -583,6 +583,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
583 sdev_printk(KERN_ERR, sdev, "%s: rtpg retry\n", 583 sdev_printk(KERN_ERR, sdev, "%s: rtpg retry\n",
584 ALUA_DH_NAME); 584 ALUA_DH_NAME);
585 scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr); 585 scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
586 kfree(buff);
586 return err; 587 return err;
587 } 588 }
588 sdev_printk(KERN_ERR, sdev, "%s: rtpg failed\n", 589 sdev_printk(KERN_ERR, sdev, "%s: rtpg failed\n",
diff --git a/drivers/scsi/dtc.c b/drivers/scsi/dtc.c
deleted file mode 100644
index 459863f94e46..000000000000
--- a/drivers/scsi/dtc.c
+++ /dev/null
@@ -1,447 +0,0 @@
1/*
2 * DTC 3180/3280 driver, by
3 * Ray Van Tassle rayvt@comm.mot.com
4 *
5 * taken from ...
6 * Trantor T128/T128F/T228 driver by...
7 *
8 * Drew Eckhardt
9 * Visionary Computing
10 * (Unix and Linux consulting and custom programming)
11 * drew@colorado.edu
12 * +1 (303) 440-4894
13 */
14
15/*
16 * The card is detected and initialized in one of several ways :
17 * 1. Autoprobe (default) - since the board is memory mapped,
18 * a BIOS signature is scanned for to locate the registers.
19 * An interrupt is triggered to autoprobe for the interrupt
20 * line.
21 *
22 * 2. With command line overrides - dtc=address,irq may be
23 * used on the LILO command line to override the defaults.
24 *
25*/
26
27/*----------------------------------------------------------------*/
28/* the following will set the monitor border color (useful to find
29 where something crashed or gets stuck at */
30/* 1 = blue
31 2 = green
32 3 = cyan
33 4 = red
34 5 = magenta
35 6 = yellow
36 7 = white
37*/
38#if 0
39#define rtrc(i) {inb(0x3da); outb(0x31, 0x3c0); outb((i), 0x3c0);}
40#else
41#define rtrc(i) {}
42#endif
43
44
45#include <linux/module.h>
46#include <linux/blkdev.h>
47#include <linux/string.h>
48#include <linux/init.h>
49#include <linux/interrupt.h>
50#include <linux/io.h>
51#include <scsi/scsi_host.h>
52
53#include "dtc.h"
54#include "NCR5380.h"
55
56/*
57 * The DTC3180 & 3280 boards are memory mapped.
58 *
59 */
60
61/*
62 */
63/* Offset from DTC_5380_OFFSET */
64#define DTC_CONTROL_REG 0x100 /* rw */
65#define D_CR_ACCESS 0x80 /* ro set=can access 3280 registers */
66#define CSR_DIR_READ 0x40 /* rw direction, 1 = read 0 = write */
67
68#define CSR_RESET 0x80 /* wo Resets 53c400 */
69#define CSR_5380_REG 0x80 /* ro 5380 registers can be accessed */
70#define CSR_TRANS_DIR 0x40 /* rw Data transfer direction */
71#define CSR_SCSI_BUFF_INTR 0x20 /* rw Enable int on transfer ready */
72#define CSR_5380_INTR 0x10 /* rw Enable 5380 interrupts */
73#define CSR_SHARED_INTR 0x08 /* rw Interrupt sharing */
74#define CSR_HOST_BUF_NOT_RDY 0x04 /* ro Host buffer not ready */
75#define CSR_SCSI_BUF_RDY 0x02 /* ro SCSI buffer ready */
76#define CSR_GATED_5380_IRQ 0x01 /* ro Last block xferred */
77#define CSR_INT_BASE (CSR_SCSI_BUFF_INTR | CSR_5380_INTR)
78
79
80#define DTC_BLK_CNT 0x101 /* rw
81 * # of 128-byte blocks to transfer */
82
83
84#define D_CR_ACCESS 0x80 /* ro set=can access 3280 registers */
85
86#define DTC_SWITCH_REG 0x3982 /* ro - DIP switches */
87#define DTC_RESUME_XFER 0x3982 /* wo - resume data xfer
88 * after disconnect/reconnect*/
89
90#define DTC_5380_OFFSET 0x3880 /* 8 registers here, see NCR5380.h */
91
92/*!!!! for dtc, it's a 128 byte buffer at 3900 !!! */
93#define DTC_DATA_BUF 0x3900 /* rw 128 bytes long */
94
95static struct override {
96 unsigned int address;
97 int irq;
98} overrides
99#ifdef OVERRIDE
100[] __initdata = OVERRIDE;
101#else
102[4] __initdata = {
103 { 0, IRQ_AUTO }, { 0, IRQ_AUTO }, { 0, IRQ_AUTO }, { 0, IRQ_AUTO }
104};
105#endif
106
107#define NO_OVERRIDES ARRAY_SIZE(overrides)
108
109static struct base {
110 unsigned long address;
111 int noauto;
112} bases[] __initdata = {
113 { 0xcc000, 0 },
114 { 0xc8000, 0 },
115 { 0xdc000, 0 },
116 { 0xd8000, 0 }
117};
118
119#define NO_BASES ARRAY_SIZE(bases)
120
121static const struct signature {
122 const char *string;
123 int offset;
124} signatures[] = {
125 {"DATA TECHNOLOGY CORPORATION BIOS", 0x25},
126};
127
128#define NO_SIGNATURES ARRAY_SIZE(signatures)
129
130#ifndef MODULE
131/*
132 * Function : dtc_setup(char *str, int *ints)
133 *
134 * Purpose : LILO command line initialization of the overrides array,
135 *
136 * Inputs : str - unused, ints - array of integer parameters with ints[0]
137 * equal to the number of ints.
138 *
139 */
140
141static int __init dtc_setup(char *str)
142{
143 static int commandline_current;
144 int i;
145 int ints[10];
146
147 get_options(str, ARRAY_SIZE(ints), ints);
148 if (ints[0] != 2)
149 printk("dtc_setup: usage dtc=address,irq\n");
150 else if (commandline_current < NO_OVERRIDES) {
151 overrides[commandline_current].address = ints[1];
152 overrides[commandline_current].irq = ints[2];
153 for (i = 0; i < NO_BASES; ++i)
154 if (bases[i].address == ints[1]) {
155 bases[i].noauto = 1;
156 break;
157 }
158 ++commandline_current;
159 }
160 return 1;
161}
162
163__setup("dtc=", dtc_setup);
164#endif
165
166/*
167 * Function : int dtc_detect(struct scsi_host_template * tpnt)
168 *
169 * Purpose : detects and initializes DTC 3180/3280 controllers
170 * that were autoprobed, overridden on the LILO command line,
171 * or specified at compile time.
172 *
173 * Inputs : tpnt - template for this SCSI adapter.
174 *
175 * Returns : 1 if a host adapter was found, 0 if not.
176 *
177*/
178
179static int __init dtc_detect(struct scsi_host_template * tpnt)
180{
181 static int current_override, current_base;
182 struct Scsi_Host *instance;
183 unsigned int addr;
184 void __iomem *base;
185 int sig, count;
186
187 for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
188 addr = 0;
189 base = NULL;
190
191 if (overrides[current_override].address) {
192 addr = overrides[current_override].address;
193 base = ioremap(addr, 0x2000);
194 if (!base)
195 addr = 0;
196 } else
197 for (; !addr && (current_base < NO_BASES); ++current_base) {
198 dprintk(NDEBUG_INIT, "dtc: probing address 0x%08x\n",
199 (unsigned int)bases[current_base].address);
200 if (bases[current_base].noauto)
201 continue;
202 base = ioremap(bases[current_base].address, 0x2000);
203 if (!base)
204 continue;
205 for (sig = 0; sig < NO_SIGNATURES; ++sig) {
206 if (check_signature(base + signatures[sig].offset, signatures[sig].string, strlen(signatures[sig].string))) {
207 addr = bases[current_base].address;
208 dprintk(NDEBUG_INIT, "dtc: detected board\n");
209 goto found;
210 }
211 }
212 iounmap(base);
213 }
214
215 dprintk(NDEBUG_INIT, "dtc: addr = 0x%08x\n", addr);
216
217 if (!addr)
218 break;
219
220found:
221 instance = scsi_register(tpnt, sizeof(struct NCR5380_hostdata));
222 if (instance == NULL)
223 goto out_unmap;
224
225 instance->base = addr;
226 ((struct NCR5380_hostdata *)(instance)->hostdata)->base = base;
227
228 if (NCR5380_init(instance, FLAG_LATE_DMA_SETUP))
229 goto out_unregister;
230
231 NCR5380_maybe_reset_bus(instance);
232
233 NCR5380_write(DTC_CONTROL_REG, CSR_5380_INTR); /* Enable int's */
234 if (overrides[current_override].irq != IRQ_AUTO)
235 instance->irq = overrides[current_override].irq;
236 else
237 instance->irq = NCR5380_probe_irq(instance, DTC_IRQS);
238
239 /* Compatibility with documented NCR5380 kernel parameters */
240 if (instance->irq == 255)
241 instance->irq = NO_IRQ;
242
243 /* With interrupts enabled, it will sometimes hang when doing heavy
244 * reads. So better not enable them until I finger it out. */
245 instance->irq = NO_IRQ;
246
247 if (instance->irq != NO_IRQ)
248 if (request_irq(instance->irq, dtc_intr, 0,
249 "dtc", instance)) {
250 printk(KERN_ERR "scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq);
251 instance->irq = NO_IRQ;
252 }
253
254 if (instance->irq == NO_IRQ) {
255 printk(KERN_WARNING "scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
256 printk(KERN_WARNING "scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
257 }
258
259 dprintk(NDEBUG_INIT, "scsi%d : irq = %d\n",
260 instance->host_no, instance->irq);
261
262 ++current_override;
263 ++count;
264 }
265 return count;
266
267out_unregister:
268 scsi_unregister(instance);
269out_unmap:
270 iounmap(base);
271 return count;
272}
273
274/*
275 * Function : int dtc_biosparam(Disk * disk, struct block_device *dev, int *ip)
276 *
277 * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for
278 * the specified device / size.
279 *
280 * Inputs : size = size of device in sectors (512 bytes), dev = block device
281 * major / minor, ip[] = {heads, sectors, cylinders}
282 *
283 * Returns : always 0 (success), initializes ip
284 *
285*/
286
287/*
288 * XXX Most SCSI boards use this mapping, I could be incorrect. Some one
289 * using hard disks on a trantor should verify that this mapping corresponds
290 * to that used by the BIOS / ASPI driver by running the linux fdisk program
291 * and matching the H_C_S coordinates to what DOS uses.
292*/
293
294static int dtc_biosparam(struct scsi_device *sdev, struct block_device *dev,
295 sector_t capacity, int *ip)
296{
297 int size = capacity;
298
299 ip[0] = 64;
300 ip[1] = 32;
301 ip[2] = size >> 11;
302 return 0;
303}
304
305
306/****************************************************************
307 * Function : int NCR5380_pread (struct Scsi_Host *instance,
308 * unsigned char *dst, int len)
309 *
310 * Purpose : Fast 5380 pseudo-dma read function, reads len bytes to
311 * dst
312 *
313 * Inputs : dst = destination, len = length in bytes
314 *
315 * Returns : 0 on success, non zero on a failure such as a watchdog
316 * timeout.
317*/
318
319static inline int dtc_pread(struct Scsi_Host *instance,
320 unsigned char *dst, int len)
321{
322 unsigned char *d = dst;
323 int i; /* For counting time spent in the poll-loop */
324 struct NCR5380_hostdata *hostdata = shost_priv(instance);
325
326 i = 0;
327 if (instance->irq == NO_IRQ)
328 NCR5380_write(DTC_CONTROL_REG, CSR_DIR_READ);
329 else
330 NCR5380_write(DTC_CONTROL_REG, CSR_DIR_READ | CSR_INT_BASE);
331 NCR5380_write(DTC_BLK_CNT, len >> 7); /* Block count */
332 rtrc(1);
333 while (len > 0) {
334 rtrc(2);
335 while (NCR5380_read(DTC_CONTROL_REG) & CSR_HOST_BUF_NOT_RDY)
336 ++i;
337 rtrc(3);
338 memcpy_fromio(d, hostdata->base + DTC_DATA_BUF, 128);
339 d += 128;
340 len -= 128;
341 rtrc(7);
342 /*** with int's on, it sometimes hangs after here.
343 * Looks like something makes HBNR go away. */
344 }
345 rtrc(4);
346 while (!(NCR5380_read(DTC_CONTROL_REG) & D_CR_ACCESS))
347 ++i;
348 rtrc(0);
349 return (0);
350}
351
352/****************************************************************
353 * Function : int NCR5380_pwrite (struct Scsi_Host *instance,
354 * unsigned char *src, int len)
355 *
356 * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from
357 * src
358 *
359 * Inputs : src = source, len = length in bytes
360 *
361 * Returns : 0 on success, non zero on a failure such as a watchdog
362 * timeout.
363*/
364
365static inline int dtc_pwrite(struct Scsi_Host *instance,
366 unsigned char *src, int len)
367{
368 int i;
369 struct NCR5380_hostdata *hostdata = shost_priv(instance);
370
371 if (instance->irq == NO_IRQ)
372 NCR5380_write(DTC_CONTROL_REG, 0);
373 else
374 NCR5380_write(DTC_CONTROL_REG, CSR_5380_INTR);
375 NCR5380_write(DTC_BLK_CNT, len >> 7); /* Block count */
376 for (i = 0; len > 0; ++i) {
377 rtrc(5);
378 /* Poll until the host buffer can accept data. */
379 while (NCR5380_read(DTC_CONTROL_REG) & CSR_HOST_BUF_NOT_RDY)
380 ++i;
381 rtrc(3);
382 memcpy_toio(hostdata->base + DTC_DATA_BUF, src, 128);
383 src += 128;
384 len -= 128;
385 }
386 rtrc(4);
387 while (!(NCR5380_read(DTC_CONTROL_REG) & D_CR_ACCESS))
388 ++i;
389 rtrc(6);
390 /* Wait until the last byte has been sent to the disk */
391 while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT))
392 ++i;
393 rtrc(7);
394 /* Check for parity error here. fixme. */
395 rtrc(0);
396 return (0);
397}
398
399static int dtc_dma_xfer_len(struct scsi_cmnd *cmd)
400{
401 int transfersize = cmd->transfersize;
402
403 /* Limit transfers to 32K, for xx400 & xx406
404 * pseudoDMA that transfers in 128 bytes blocks.
405 */
406 if (transfersize > 32 * 1024 && cmd->SCp.this_residual &&
407 !(cmd->SCp.this_residual % transfersize))
408 transfersize = 32 * 1024;
409
410 return transfersize;
411}
412
413MODULE_LICENSE("GPL");
414
415#include "NCR5380.c"
416
417static int dtc_release(struct Scsi_Host *shost)
418{
419 struct NCR5380_hostdata *hostdata = shost_priv(shost);
420
421 if (shost->irq != NO_IRQ)
422 free_irq(shost->irq, shost);
423 NCR5380_exit(shost);
424 scsi_unregister(shost);
425 iounmap(hostdata->base);
426 return 0;
427}
428
429static struct scsi_host_template driver_template = {
430 .name = "DTC 3180/3280",
431 .detect = dtc_detect,
432 .release = dtc_release,
433 .proc_name = "dtc3x80",
434 .info = dtc_info,
435 .queuecommand = dtc_queue_command,
436 .eh_abort_handler = dtc_abort,
437 .eh_bus_reset_handler = dtc_bus_reset,
438 .bios_param = dtc_biosparam,
439 .can_queue = 32,
440 .this_id = 7,
441 .sg_tablesize = SG_ALL,
442 .cmd_per_lun = 2,
443 .use_clustering = DISABLE_CLUSTERING,
444 .cmd_size = NCR5380_CMD_SIZE,
445 .max_sectors = 128,
446};
447#include "scsi_module.c"
diff --git a/drivers/scsi/dtc.h b/drivers/scsi/dtc.h
deleted file mode 100644
index fcb0a8ea7bda..000000000000
--- a/drivers/scsi/dtc.h
+++ /dev/null
@@ -1,42 +0,0 @@
1/*
2 * DTC controller, taken from T128 driver by...
3 * Copyright 1993, Drew Eckhardt
4 * Visionary Computing
5 * (Unix and Linux consulting and custom programming)
6 * drew@colorado.edu
7 * +1 (303) 440-4894
8 */
9
10#ifndef DTC3280_H
11#define DTC3280_H
12
13#define NCR5380_implementation_fields \
14 void __iomem *base
15
16#define DTC_address(reg) \
17 (((struct NCR5380_hostdata *)shost_priv(instance))->base + DTC_5380_OFFSET + reg)
18
19#define NCR5380_read(reg) (readb(DTC_address(reg)))
20#define NCR5380_write(reg, value) (writeb(value, DTC_address(reg)))
21
22#define NCR5380_dma_xfer_len(instance, cmd, phase) \
23 dtc_dma_xfer_len(cmd)
24#define NCR5380_dma_recv_setup dtc_pread
25#define NCR5380_dma_send_setup dtc_pwrite
26#define NCR5380_dma_residual(instance) (0)
27
28#define NCR5380_intr dtc_intr
29#define NCR5380_queue_command dtc_queue_command
30#define NCR5380_abort dtc_abort
31#define NCR5380_bus_reset dtc_bus_reset
32#define NCR5380_info dtc_info
33
34#define NCR5380_io_delay(x) udelay(x)
35
36/* 15 12 11 10
37 1001 1100 0000 0000 */
38
39#define DTC_IRQS 0x9c00
40
41
42#endif /* DTC3280_H */
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
index 78ce4d61a69b..d6e53aee2295 100644
--- a/drivers/scsi/esas2r/esas2r_init.c
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -963,10 +963,6 @@ bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
963 963
964 /* initialize the allocated memory */ 964 /* initialize the allocated memory */
965 if (test_bit(AF_FIRST_INIT, &a->flags)) { 965 if (test_bit(AF_FIRST_INIT, &a->flags)) {
966 memset(a->req_table, 0,
967 (num_requests + num_ae_requests +
968 1) * sizeof(struct esas2r_request *));
969
970 esas2r_targ_db_initialize(a); 966 esas2r_targ_db_initialize(a);
971 967
972 /* prime parts of the inbound list */ 968 /* prime parts of the inbound list */
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
index 2aca4d16f39e..5092c821d088 100644
--- a/drivers/scsi/esas2r/esas2r_main.c
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -194,7 +194,7 @@ static ssize_t write_hw(struct file *file, struct kobject *kobj,
194 int length = min(sizeof(struct atto_ioctl), count); 194 int length = min(sizeof(struct atto_ioctl), count);
195 195
196 if (!a->local_atto_ioctl) { 196 if (!a->local_atto_ioctl) {
197 a->local_atto_ioctl = kzalloc(sizeof(struct atto_ioctl), 197 a->local_atto_ioctl = kmalloc(sizeof(struct atto_ioctl),
198 GFP_KERNEL); 198 GFP_KERNEL);
199 if (a->local_atto_ioctl == NULL) { 199 if (a->local_atto_ioctl == NULL) {
200 esas2r_log(ESAS2R_LOG_WARN, 200 esas2r_log(ESAS2R_LOG_WARN,
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index 7028dd37e5dd..375c536cbc68 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -83,6 +83,41 @@ static struct notifier_block libfcoe_notifier = {
83 .notifier_call = libfcoe_device_notification, 83 .notifier_call = libfcoe_device_notification,
84}; 84};
85 85
86static const struct {
87 u32 fc_port_speed;
88#define SPEED_2000 2000
89#define SPEED_4000 4000
90#define SPEED_8000 8000
91#define SPEED_16000 16000
92#define SPEED_32000 32000
93 u32 eth_port_speed;
94} fcoe_port_speed_mapping[] = {
95 { FC_PORTSPEED_1GBIT, SPEED_1000 },
96 { FC_PORTSPEED_2GBIT, SPEED_2000 },
97 { FC_PORTSPEED_4GBIT, SPEED_4000 },
98 { FC_PORTSPEED_8GBIT, SPEED_8000 },
99 { FC_PORTSPEED_10GBIT, SPEED_10000 },
100 { FC_PORTSPEED_16GBIT, SPEED_16000 },
101 { FC_PORTSPEED_20GBIT, SPEED_20000 },
102 { FC_PORTSPEED_25GBIT, SPEED_25000 },
103 { FC_PORTSPEED_32GBIT, SPEED_32000 },
104 { FC_PORTSPEED_40GBIT, SPEED_40000 },
105 { FC_PORTSPEED_50GBIT, SPEED_50000 },
106 { FC_PORTSPEED_100GBIT, SPEED_100000 },
107};
108
109static inline u32 eth2fc_speed(u32 eth_port_speed)
110{
111 int i;
112
113 for (i = 0; i < ARRAY_SIZE(fcoe_port_speed_mapping); i++) {
114 if (fcoe_port_speed_mapping[i].eth_port_speed == eth_port_speed)
115 return fcoe_port_speed_mapping[i].fc_port_speed;
116 }
117
118 return FC_PORTSPEED_UNKNOWN;
119}
120
86/** 121/**
87 * fcoe_link_speed_update() - Update the supported and actual link speeds 122 * fcoe_link_speed_update() - Update the supported and actual link speeds
88 * @lport: The local port to update speeds for 123 * @lport: The local port to update speeds for
@@ -126,23 +161,7 @@ int fcoe_link_speed_update(struct fc_lport *lport)
126 SUPPORTED_40000baseLR4_Full)) 161 SUPPORTED_40000baseLR4_Full))
127 lport->link_supported_speeds |= FC_PORTSPEED_40GBIT; 162 lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
128 163
129 switch (ecmd.base.speed) { 164 lport->link_speed = eth2fc_speed(ecmd.base.speed);
130 case SPEED_1000:
131 lport->link_speed = FC_PORTSPEED_1GBIT;
132 break;
133 case SPEED_10000:
134 lport->link_speed = FC_PORTSPEED_10GBIT;
135 break;
136 case SPEED_20000:
137 lport->link_speed = FC_PORTSPEED_20GBIT;
138 break;
139 case SPEED_40000:
140 lport->link_speed = FC_PORTSPEED_40GBIT;
141 break;
142 default:
143 lport->link_speed = FC_PORTSPEED_UNKNOWN;
144 break;
145 }
146 return 0; 165 return 0;
147 } 166 }
148 return -1; 167 return -1;
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 4731d3241323..72c98522bd26 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -23,7 +23,7 @@
23#include <scsi/sas_ata.h> 23#include <scsi/sas_ata.h>
24#include <scsi/libsas.h> 24#include <scsi/libsas.h>
25 25
26#define DRV_VERSION "v1.5" 26#define DRV_VERSION "v1.6"
27 27
28#define HISI_SAS_MAX_PHYS 9 28#define HISI_SAS_MAX_PHYS 9
29#define HISI_SAS_MAX_QUEUES 32 29#define HISI_SAS_MAX_QUEUES 32
@@ -56,6 +56,11 @@ enum dev_status {
56 HISI_SAS_DEV_EH, 56 HISI_SAS_DEV_EH,
57}; 57};
58 58
59enum {
60 HISI_SAS_INT_ABT_CMD = 0,
61 HISI_SAS_INT_ABT_DEV = 1,
62};
63
59enum hisi_sas_dev_type { 64enum hisi_sas_dev_type {
60 HISI_SAS_DEV_TYPE_STP = 0, 65 HISI_SAS_DEV_TYPE_STP = 0,
61 HISI_SAS_DEV_TYPE_SSP, 66 HISI_SAS_DEV_TYPE_SSP,
@@ -89,6 +94,13 @@ struct hisi_sas_port {
89 94
90struct hisi_sas_cq { 95struct hisi_sas_cq {
91 struct hisi_hba *hisi_hba; 96 struct hisi_hba *hisi_hba;
97 int rd_point;
98 int id;
99};
100
101struct hisi_sas_dq {
102 struct hisi_hba *hisi_hba;
103 int wr_point;
92 int id; 104 int id;
93}; 105};
94 106
@@ -146,6 +158,9 @@ struct hisi_sas_hw {
146 struct hisi_sas_slot *slot); 158 struct hisi_sas_slot *slot);
147 int (*prep_stp)(struct hisi_hba *hisi_hba, 159 int (*prep_stp)(struct hisi_hba *hisi_hba,
148 struct hisi_sas_slot *slot); 160 struct hisi_sas_slot *slot);
161 int (*prep_abort)(struct hisi_hba *hisi_hba,
162 struct hisi_sas_slot *slot,
163 int device_id, int abort_flag, int tag_to_abort);
149 int (*slot_complete)(struct hisi_hba *hisi_hba, 164 int (*slot_complete)(struct hisi_hba *hisi_hba,
150 struct hisi_sas_slot *slot, int abort); 165 struct hisi_sas_slot *slot, int abort);
151 void (*phy_enable)(struct hisi_hba *hisi_hba, int phy_no); 166 void (*phy_enable)(struct hisi_hba *hisi_hba, int phy_no);
@@ -185,6 +200,7 @@ struct hisi_hba {
185 struct Scsi_Host *shost; 200 struct Scsi_Host *shost;
186 201
187 struct hisi_sas_cq cq[HISI_SAS_MAX_QUEUES]; 202 struct hisi_sas_cq cq[HISI_SAS_MAX_QUEUES];
203 struct hisi_sas_dq dq[HISI_SAS_MAX_QUEUES];
188 struct hisi_sas_phy phy[HISI_SAS_MAX_PHYS]; 204 struct hisi_sas_phy phy[HISI_SAS_MAX_PHYS];
189 struct hisi_sas_port port[HISI_SAS_MAX_PHYS]; 205 struct hisi_sas_port port[HISI_SAS_MAX_PHYS];
190 206
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 18dd5ea2c721..2f872f784e10 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -17,6 +17,10 @@
17 17
18static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device, 18static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
19 u8 *lun, struct hisi_sas_tmf_task *tmf); 19 u8 *lun, struct hisi_sas_tmf_task *tmf);
20static int
21hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
22 struct domain_device *device,
23 int abort_flag, int tag);
20 24
21static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device) 25static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
22{ 26{
@@ -93,7 +97,7 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
93 slot->task = NULL; 97 slot->task = NULL;
94 slot->port = NULL; 98 slot->port = NULL;
95 hisi_sas_slot_index_free(hisi_hba, slot->idx); 99 hisi_sas_slot_index_free(hisi_hba, slot->idx);
96 memset(slot, 0, sizeof(*slot)); 100 /* slot memory is fully zeroed when it is reused */
97} 101}
98EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free); 102EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
99 103
@@ -116,6 +120,14 @@ static int hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
116 return hisi_hba->hw->prep_stp(hisi_hba, slot); 120 return hisi_hba->hw->prep_stp(hisi_hba, slot);
117} 121}
118 122
123static int hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
124 struct hisi_sas_slot *slot,
125 int device_id, int abort_flag, int tag_to_abort)
126{
127 return hisi_hba->hw->prep_abort(hisi_hba, slot,
128 device_id, abort_flag, tag_to_abort);
129}
130
119/* 131/*
120 * This function will issue an abort TMF regardless of whether the 132 * This function will issue an abort TMF regardless of whether the
121 * task is in the sdev or not. Then it will do the task complete 133 * task is in the sdev or not. Then it will do the task complete
@@ -192,27 +204,13 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
192 return rc; 204 return rc;
193 } 205 }
194 port = device->port->lldd_port; 206 port = device->port->lldd_port;
195 if (port && !port->port_attached && !tmf) { 207 if (port && !port->port_attached) {
196 if (sas_protocol_ata(task->task_proto)) { 208 dev_info(dev, "task prep: %s port%d not attach device\n",
197 struct task_status_struct *ts = &task->task_status; 209 (sas_protocol_ata(task->task_proto)) ?
198 210 "SATA/STP" : "SAS",
199 dev_info(dev, 211 device->port->id);
200 "task prep: SATA/STP port%d not attach device\n", 212
201 device->port->id); 213 return SAS_PHY_DOWN;
202 ts->resp = SAS_TASK_COMPLETE;
203 ts->stat = SAS_PHY_DOWN;
204 task->task_done(task);
205 } else {
206 struct task_status_struct *ts = &task->task_status;
207
208 dev_info(dev,
209 "task prep: SAS port%d does not attach device\n",
210 device->port->id);
211 ts->resp = SAS_TASK_UNDELIVERED;
212 ts->stat = SAS_PHY_DOWN;
213 task->task_done(task);
214 }
215 return 0;
216 } 214 }
217 215
218 if (!sas_protocol_ata(task->task_proto)) { 216 if (!sas_protocol_ata(task->task_proto)) {
@@ -609,6 +607,9 @@ static void hisi_sas_dev_gone(struct domain_device *device)
609 dev_info(dev, "found dev[%lld:%x] is gone\n", 607 dev_info(dev, "found dev[%lld:%x] is gone\n",
610 sas_dev->device_id, sas_dev->dev_type); 608 sas_dev->device_id, sas_dev->dev_type);
611 609
610 hisi_sas_internal_task_abort(hisi_hba, device,
611 HISI_SAS_INT_ABT_DEV, 0);
612
612 hisi_hba->hw->free_device(hisi_hba, sas_dev); 613 hisi_hba->hw->free_device(hisi_hba, sas_dev);
613 device->lldd_dev = NULL; 614 device->lldd_dev = NULL;
614 memset(sas_dev, 0, sizeof(*sas_dev)); 615 memset(sas_dev, 0, sizeof(*sas_dev));
@@ -729,6 +730,12 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
729 } 730 }
730 731
731 if (task->task_status.resp == SAS_TASK_COMPLETE && 732 if (task->task_status.resp == SAS_TASK_COMPLETE &&
733 task->task_status.stat == TMF_RESP_FUNC_SUCC) {
734 res = TMF_RESP_FUNC_SUCC;
735 break;
736 }
737
738 if (task->task_status.resp == SAS_TASK_COMPLETE &&
732 task->task_status.stat == SAS_DATA_UNDERRUN) { 739 task->task_status.stat == SAS_DATA_UNDERRUN) {
733 /* no error, but return the number of bytes of 740 /* no error, but return the number of bytes of
734 * underrun 741 * underrun
@@ -826,18 +833,22 @@ static int hisi_sas_abort_task(struct sas_task *task)
826 } 833 }
827 } 834 }
828 835
836 hisi_sas_internal_task_abort(hisi_hba, device,
837 HISI_SAS_INT_ABT_CMD, tag);
829 } else if (task->task_proto & SAS_PROTOCOL_SATA || 838 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
830 task->task_proto & SAS_PROTOCOL_STP) { 839 task->task_proto & SAS_PROTOCOL_STP) {
831 if (task->dev->dev_type == SAS_SATA_DEV) { 840 if (task->dev->dev_type == SAS_SATA_DEV) {
832 struct hisi_slot_info *slot = task->lldd_task; 841 hisi_sas_internal_task_abort(hisi_hba, device,
833 842 HISI_SAS_INT_ABT_DEV, 0);
834 dev_notice(dev, "abort task: hba=%p task=%p slot=%p\n",
835 hisi_hba, task, slot);
836 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
837 rc = TMF_RESP_FUNC_COMPLETE; 843 rc = TMF_RESP_FUNC_COMPLETE;
838 goto out;
839 } 844 }
845 } else if (task->task_proto & SAS_PROTOCOL_SMP) {
846 /* SMP */
847 struct hisi_sas_slot *slot = task->lldd_task;
848 u32 tag = slot->idx;
840 849
850 hisi_sas_internal_task_abort(hisi_hba, device,
851 HISI_SAS_INT_ABT_CMD, tag);
841 } 852 }
842 853
843out: 854out:
@@ -954,6 +965,157 @@ static int hisi_sas_query_task(struct sas_task *task)
954 return rc; 965 return rc;
955} 966}
956 967
968static int
969hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
970 struct sas_task *task, int abort_flag,
971 int task_tag)
972{
973 struct domain_device *device = task->dev;
974 struct hisi_sas_device *sas_dev = device->lldd_dev;
975 struct device *dev = &hisi_hba->pdev->dev;
976 struct hisi_sas_port *port;
977 struct hisi_sas_slot *slot;
978 struct hisi_sas_cmd_hdr *cmd_hdr_base;
979 int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
980
981 if (!device->port)
982 return -1;
983
984 port = device->port->lldd_port;
985
986 /* simply get a slot and send abort command */
987 rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
988 if (rc)
989 goto err_out;
990 rc = hisi_hba->hw->get_free_slot(hisi_hba, &dlvry_queue,
991 &dlvry_queue_slot);
992 if (rc)
993 goto err_out_tag;
994
995 slot = &hisi_hba->slot_info[slot_idx];
996 memset(slot, 0, sizeof(struct hisi_sas_slot));
997
998 slot->idx = slot_idx;
999 slot->n_elem = n_elem;
1000 slot->dlvry_queue = dlvry_queue;
1001 slot->dlvry_queue_slot = dlvry_queue_slot;
1002 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
1003 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
1004 slot->task = task;
1005 slot->port = port;
1006 task->lldd_task = slot;
1007
1008 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
1009
1010 rc = hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
1011 abort_flag, task_tag);
1012 if (rc)
1013 goto err_out_tag;
1014
1015 /* Port structure is static for the HBA, so
1016 * even if the port is deformed it is ok
1017 * to reference.
1018 */
1019 list_add_tail(&slot->entry, &port->list);
1020 spin_lock(&task->task_state_lock);
1021 task->task_state_flags |= SAS_TASK_AT_INITIATOR;
1022 spin_unlock(&task->task_state_lock);
1023
1024 hisi_hba->slot_prep = slot;
1025
1026 sas_dev->running_req++;
1027 /* send abort command to our chip */
1028 hisi_hba->hw->start_delivery(hisi_hba);
1029
1030 return 0;
1031
1032err_out_tag:
1033 hisi_sas_slot_index_free(hisi_hba, slot_idx);
1034err_out:
1035 dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
1036
1037 return rc;
1038}
1039
1040/**
1041 * hisi_sas_internal_task_abort -- execute an internal
1042 * abort command for single IO command or a device
1043 * @hisi_hba: host controller struct
1044 * @device: domain device
1045 * @abort_flag: mode of operation, device or single IO
1046 * @tag: tag of IO to be aborted (only relevant to single
1047 * IO mode)
1048 */
1049static int
1050hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
1051 struct domain_device *device,
1052 int abort_flag, int tag)
1053{
1054 struct sas_task *task;
1055 struct hisi_sas_device *sas_dev = device->lldd_dev;
1056 struct device *dev = &hisi_hba->pdev->dev;
1057 int res;
1058 unsigned long flags;
1059
1060 if (!hisi_hba->hw->prep_abort)
1061 return -EOPNOTSUPP;
1062
1063 task = sas_alloc_slow_task(GFP_KERNEL);
1064 if (!task)
1065 return -ENOMEM;
1066
1067 task->dev = device;
1068 task->task_proto = device->tproto;
1069 task->task_done = hisi_sas_task_done;
1070 task->slow_task->timer.data = (unsigned long)task;
1071 task->slow_task->timer.function = hisi_sas_tmf_timedout;
1072 task->slow_task->timer.expires = jiffies + 20*HZ;
1073 add_timer(&task->slow_task->timer);
1074
1075 /* Lock as we are alloc'ing a slot, which cannot be interrupted */
1076 spin_lock_irqsave(&hisi_hba->lock, flags);
1077 res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
1078 task, abort_flag, tag);
1079 spin_unlock_irqrestore(&hisi_hba->lock, flags);
1080 if (res) {
1081 del_timer(&task->slow_task->timer);
1082 dev_err(dev, "internal task abort: executing internal task failed: %d\n",
1083 res);
1084 goto exit;
1085 }
1086 wait_for_completion(&task->slow_task->completion);
1087 res = TMF_RESP_FUNC_FAILED;
1088
1089 if (task->task_status.resp == SAS_TASK_COMPLETE &&
1090 task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
1091 res = TMF_RESP_FUNC_COMPLETE;
1092 goto exit;
1093 }
1094
1095 /* TMF timed out, return direct. */
1096 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1097 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1098 dev_err(dev, "internal task abort: timeout.\n");
1099 if (task->lldd_task) {
1100 struct hisi_sas_slot *slot = task->lldd_task;
1101
1102 hisi_sas_slot_task_free(hisi_hba, task, slot);
1103 }
1104 }
1105 }
1106
1107exit:
1108 dev_info(dev, "internal task abort: task to dev %016llx task=%p "
1109 "resp: 0x%x sts 0x%x\n",
1110 SAS_ADDR(device->sas_addr),
1111 task,
1112 task->task_status.resp, /* 0 is complete, -1 is undelivered */
1113 task->task_status.stat);
1114 sas_free_task(task);
1115
1116 return res;
1117}
1118
957static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy) 1119static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
958{ 1120{
959 hisi_sas_port_notify_formed(sas_phy); 1121 hisi_sas_port_notify_formed(sas_phy);
@@ -1063,11 +1225,16 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
1063 1225
1064 for (i = 0; i < hisi_hba->queue_count; i++) { 1226 for (i = 0; i < hisi_hba->queue_count; i++) {
1065 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; 1227 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
1228 struct hisi_sas_dq *dq = &hisi_hba->dq[i];
1066 1229
1067 /* Completion queue structure */ 1230 /* Completion queue structure */
1068 cq->id = i; 1231 cq->id = i;
1069 cq->hisi_hba = hisi_hba; 1232 cq->hisi_hba = hisi_hba;
1070 1233
1234 /* Delivery queue structure */
1235 dq->id = i;
1236 dq->hisi_hba = hisi_hba;
1237
1071 /* Delivery queue */ 1238 /* Delivery queue */
1072 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; 1239 s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
1073 hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s, 1240 hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
@@ -1128,7 +1295,7 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
1128 memset(hisi_hba->breakpoint, 0, s); 1295 memset(hisi_hba->breakpoint, 0, s);
1129 1296
1130 hisi_hba->slot_index_count = max_command_entries; 1297 hisi_hba->slot_index_count = max_command_entries;
1131 s = hisi_hba->slot_index_count / sizeof(unsigned long); 1298 s = hisi_hba->slot_index_count / BITS_PER_BYTE;
1132 hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL); 1299 hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
1133 if (!hisi_hba->slot_index_tags) 1300 if (!hisi_hba->slot_index_tags)
1134 goto err_out; 1301 goto err_out;
@@ -1272,6 +1439,12 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
1272 &hisi_hba->queue_count)) 1439 &hisi_hba->queue_count))
1273 goto err_out; 1440 goto err_out;
1274 1441
1442 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
1443 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
1444 dev_err(dev, "No usable DMA addressing method\n");
1445 goto err_out;
1446 }
1447
1275 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1448 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1276 hisi_hba->regs = devm_ioremap_resource(dev, res); 1449 hisi_hba->regs = devm_ioremap_resource(dev, res);
1277 if (IS_ERR(hisi_hba->regs)) 1450 if (IS_ERR(hisi_hba->regs))
@@ -1319,13 +1492,6 @@ int hisi_sas_probe(struct platform_device *pdev,
1319 hisi_hba = shost_priv(shost); 1492 hisi_hba = shost_priv(shost);
1320 platform_set_drvdata(pdev, sha); 1493 platform_set_drvdata(pdev, sha);
1321 1494
1322 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
1323 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
1324 dev_err(dev, "No usable DMA addressing method\n");
1325 rc = -EIO;
1326 goto err_out_ha;
1327 }
1328
1329 phy_nr = port_nr = hisi_hba->n_phy; 1495 phy_nr = port_nr = hisi_hba->n_phy;
1330 1496
1331 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL); 1497 arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 1abbc2e162df..c0ac49d8bc8d 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -490,25 +490,17 @@ static void config_id_frame_v1_hw(struct hisi_hba *hisi_hba, int phy_no)
490 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0, 490 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
491 __swab32(identify_buffer[0])); 491 __swab32(identify_buffer[0]));
492 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1, 492 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
493 identify_buffer[2]); 493 __swab32(identify_buffer[1]));
494 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2, 494 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
495 identify_buffer[1]); 495 __swab32(identify_buffer[2]));
496 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3, 496 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
497 identify_buffer[4]); 497 __swab32(identify_buffer[3]));
498 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4, 498 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
499 identify_buffer[3]); 499 __swab32(identify_buffer[4]));
500 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5, 500 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
501 __swab32(identify_buffer[5])); 501 __swab32(identify_buffer[5]));
502} 502}
503 503
504static void init_id_frame_v1_hw(struct hisi_hba *hisi_hba)
505{
506 int i;
507
508 for (i = 0; i < hisi_hba->n_phy; i++)
509 config_id_frame_v1_hw(hisi_hba, i);
510}
511
512static void setup_itct_v1_hw(struct hisi_hba *hisi_hba, 504static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
513 struct hisi_sas_device *sas_dev) 505 struct hisi_sas_device *sas_dev)
514{ 506{
@@ -774,8 +766,6 @@ static int hw_init_v1_hw(struct hisi_hba *hisi_hba)
774 msleep(100); 766 msleep(100);
775 init_reg_v1_hw(hisi_hba); 767 init_reg_v1_hw(hisi_hba);
776 768
777 init_id_frame_v1_hw(hisi_hba);
778
779 return 0; 769 return 0;
780} 770}
781 771
@@ -875,12 +865,13 @@ static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id)
875static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, int *q, int *s) 865static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, int *q, int *s)
876{ 866{
877 struct device *dev = &hisi_hba->pdev->dev; 867 struct device *dev = &hisi_hba->pdev->dev;
868 struct hisi_sas_dq *dq;
878 u32 r, w; 869 u32 r, w;
879 int queue = hisi_hba->queue; 870 int queue = hisi_hba->queue;
880 871
881 while (1) { 872 while (1) {
882 w = hisi_sas_read32_relaxed(hisi_hba, 873 dq = &hisi_hba->dq[queue];
883 DLVRY_Q_0_WR_PTR + (queue * 0x14)); 874 w = dq->wr_point;
884 r = hisi_sas_read32_relaxed(hisi_hba, 875 r = hisi_sas_read32_relaxed(hisi_hba,
885 DLVRY_Q_0_RD_PTR + (queue * 0x14)); 876 DLVRY_Q_0_RD_PTR + (queue * 0x14));
886 if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) { 877 if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
@@ -903,10 +894,11 @@ static void start_delivery_v1_hw(struct hisi_hba *hisi_hba)
903{ 894{
904 int dlvry_queue = hisi_hba->slot_prep->dlvry_queue; 895 int dlvry_queue = hisi_hba->slot_prep->dlvry_queue;
905 int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot; 896 int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot;
897 struct hisi_sas_dq *dq = &hisi_hba->dq[dlvry_queue];
906 898
907 hisi_sas_write32(hisi_hba, 899 dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
908 DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), 900 hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
909 ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS); 901 dq->wr_point);
910} 902}
911 903
912static int prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba, 904static int prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba,
@@ -1565,14 +1557,11 @@ static irqreturn_t cq_interrupt_v1_hw(int irq, void *p)
1565 struct hisi_sas_complete_v1_hdr *complete_queue = 1557 struct hisi_sas_complete_v1_hdr *complete_queue =
1566 (struct hisi_sas_complete_v1_hdr *) 1558 (struct hisi_sas_complete_v1_hdr *)
1567 hisi_hba->complete_hdr[queue]; 1559 hisi_hba->complete_hdr[queue];
1568 u32 irq_value, rd_point, wr_point; 1560 u32 irq_value, rd_point = cq->rd_point, wr_point;
1569 1561
1570 irq_value = hisi_sas_read32(hisi_hba, OQ_INT_SRC); 1562 irq_value = hisi_sas_read32(hisi_hba, OQ_INT_SRC);
1571 1563
1572 hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue); 1564 hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
1573
1574 rd_point = hisi_sas_read32(hisi_hba,
1575 COMPL_Q_0_RD_PTR + (0x14 * queue));
1576 wr_point = hisi_sas_read32(hisi_hba, 1565 wr_point = hisi_sas_read32(hisi_hba,
1577 COMPL_Q_0_WR_PTR + (0x14 * queue)); 1566 COMPL_Q_0_WR_PTR + (0x14 * queue));
1578 1567
@@ -1600,6 +1589,7 @@ static irqreturn_t cq_interrupt_v1_hw(int irq, void *p)
1600 } 1589 }
1601 1590
1602 /* update rd_point */ 1591 /* update rd_point */
1592 cq->rd_point = rd_point;
1603 hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); 1593 hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
1604 1594
1605 return IRQ_HANDLED; 1595 return IRQ_HANDLED;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index f96560431cf1..9825a3f49f53 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -117,6 +117,8 @@
117#define SL_CONTROL (PORT_BASE + 0x94) 117#define SL_CONTROL (PORT_BASE + 0x94)
118#define SL_CONTROL_NOTIFY_EN_OFF 0 118#define SL_CONTROL_NOTIFY_EN_OFF 0
119#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF) 119#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
120#define SL_CONTROL_CTA_OFF 17
121#define SL_CONTROL_CTA_MSK (0x1 << SL_CONTROL_CTA_OFF)
120#define TX_ID_DWORD0 (PORT_BASE + 0x9c) 122#define TX_ID_DWORD0 (PORT_BASE + 0x9c)
121#define TX_ID_DWORD1 (PORT_BASE + 0xa0) 123#define TX_ID_DWORD1 (PORT_BASE + 0xa0)
122#define TX_ID_DWORD2 (PORT_BASE + 0xa4) 124#define TX_ID_DWORD2 (PORT_BASE + 0xa4)
@@ -124,6 +126,9 @@
124#define TX_ID_DWORD4 (PORT_BASE + 0xaC) 126#define TX_ID_DWORD4 (PORT_BASE + 0xaC)
125#define TX_ID_DWORD5 (PORT_BASE + 0xb0) 127#define TX_ID_DWORD5 (PORT_BASE + 0xb0)
126#define TX_ID_DWORD6 (PORT_BASE + 0xb4) 128#define TX_ID_DWORD6 (PORT_BASE + 0xb4)
129#define TXID_AUTO (PORT_BASE + 0xb8)
130#define TXID_AUTO_CT3_OFF 1
131#define TXID_AUTO_CT3_MSK (0x1 << TXID_AUTO_CT3_OFF)
127#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4) 132#define RX_IDAF_DWORD0 (PORT_BASE + 0xc4)
128#define RX_IDAF_DWORD1 (PORT_BASE + 0xc8) 133#define RX_IDAF_DWORD1 (PORT_BASE + 0xc8)
129#define RX_IDAF_DWORD2 (PORT_BASE + 0xcc) 134#define RX_IDAF_DWORD2 (PORT_BASE + 0xcc)
@@ -174,6 +179,10 @@
174/* HW dma structures */ 179/* HW dma structures */
175/* Delivery queue header */ 180/* Delivery queue header */
176/* dw0 */ 181/* dw0 */
182#define CMD_HDR_ABORT_FLAG_OFF 0
183#define CMD_HDR_ABORT_FLAG_MSK (0x3 << CMD_HDR_ABORT_FLAG_OFF)
184#define CMD_HDR_ABORT_DEVICE_TYPE_OFF 2
185#define CMD_HDR_ABORT_DEVICE_TYPE_MSK (0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF)
177#define CMD_HDR_RESP_REPORT_OFF 5 186#define CMD_HDR_RESP_REPORT_OFF 5
178#define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF) 187#define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF)
179#define CMD_HDR_TLR_CTRL_OFF 6 188#define CMD_HDR_TLR_CTRL_OFF 6
@@ -214,6 +223,8 @@
214#define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF) 223#define CMD_HDR_DIF_SGL_LEN_MSK (0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
215#define CMD_HDR_DATA_SGL_LEN_OFF 16 224#define CMD_HDR_DATA_SGL_LEN_OFF 16
216#define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF) 225#define CMD_HDR_DATA_SGL_LEN_MSK (0xffff << CMD_HDR_DATA_SGL_LEN_OFF)
226#define CMD_HDR_ABORT_IPTT_OFF 16
227#define CMD_HDR_ABORT_IPTT_MSK (0xffff << CMD_HDR_ABORT_IPTT_OFF)
217 228
218/* Completion header */ 229/* Completion header */
219/* dw0 */ 230/* dw0 */
@@ -221,6 +232,13 @@
221#define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF) 232#define CMPLT_HDR_RSPNS_XFRD_MSK (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
222#define CMPLT_HDR_ERX_OFF 12 233#define CMPLT_HDR_ERX_OFF 12
223#define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF) 234#define CMPLT_HDR_ERX_MSK (0x1 << CMPLT_HDR_ERX_OFF)
235#define CMPLT_HDR_ABORT_STAT_OFF 13
236#define CMPLT_HDR_ABORT_STAT_MSK (0x7 << CMPLT_HDR_ABORT_STAT_OFF)
237/* abort_stat */
238#define STAT_IO_NOT_VALID 0x1
239#define STAT_IO_NO_DEVICE 0x2
240#define STAT_IO_COMPLETE 0x3
241#define STAT_IO_ABORTED 0x4
224/* dw1 */ 242/* dw1 */
225#define CMPLT_HDR_IPTT_OFF 0 243#define CMPLT_HDR_IPTT_OFF 0
226#define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF) 244#define CMPLT_HDR_IPTT_MSK (0xffff << CMPLT_HDR_IPTT_OFF)
@@ -549,25 +567,17 @@ static void config_id_frame_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
549 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0, 567 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
550 __swab32(identify_buffer[0])); 568 __swab32(identify_buffer[0]));
551 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1, 569 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
552 identify_buffer[2]); 570 __swab32(identify_buffer[1]));
553 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2, 571 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
554 identify_buffer[1]); 572 __swab32(identify_buffer[2]));
555 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3, 573 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
556 identify_buffer[4]); 574 __swab32(identify_buffer[3]));
557 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4, 575 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
558 identify_buffer[3]); 576 __swab32(identify_buffer[4]));
559 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5, 577 hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
560 __swab32(identify_buffer[5])); 578 __swab32(identify_buffer[5]));
561} 579}
562 580
563static void init_id_frame_v2_hw(struct hisi_hba *hisi_hba)
564{
565 int i;
566
567 for (i = 0; i < hisi_hba->n_phy; i++)
568 config_id_frame_v2_hw(hisi_hba, i);
569}
570
571static void setup_itct_v2_hw(struct hisi_hba *hisi_hba, 581static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
572 struct hisi_sas_device *sas_dev) 582 struct hisi_sas_device *sas_dev)
573{ 583{
@@ -589,6 +599,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
589 qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF; 599 qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF;
590 break; 600 break;
591 case SAS_SATA_DEV: 601 case SAS_SATA_DEV:
602 case SAS_SATA_PENDING:
592 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) 603 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
593 qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF; 604 qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
594 else 605 else
@@ -672,9 +683,7 @@ static int reset_hw_v2_hw(struct hisi_hba *hisi_hba)
672 else 683 else
673 reset_val = 0x7ffff; 684 reset_val = 0x7ffff;
674 685
675 /* Disable all of the DQ */ 686 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
676 for (i = 0; i < HISI_SAS_MAX_QUEUES; i++)
677 hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
678 687
679 /* Disable all of the PHYs */ 688 /* Disable all of the PHYs */
680 for (i = 0; i < hisi_hba->n_phy; i++) { 689 for (i = 0; i < hisi_hba->n_phy; i++) {
@@ -810,6 +819,8 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
810 hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x855); 819 hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 0x855);
811 hisi_sas_phy_write32(hisi_hba, i, SAS_PHY_CTRL, 0x30b9908); 820 hisi_sas_phy_write32(hisi_hba, i, SAS_PHY_CTRL, 0x30b9908);
812 hisi_sas_phy_write32(hisi_hba, i, SL_TOUT_CFG, 0x7d7d7d7d); 821 hisi_sas_phy_write32(hisi_hba, i, SL_TOUT_CFG, 0x7d7d7d7d);
822 hisi_sas_phy_write32(hisi_hba, i, SL_CONTROL, 0x0);
823 hisi_sas_phy_write32(hisi_hba, i, TXID_AUTO, 0x2);
813 hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x10); 824 hisi_sas_phy_write32(hisi_hba, i, DONE_RECEIVED_TIME, 0x10);
814 hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff); 825 hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
815 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff); 826 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
@@ -901,8 +912,6 @@ static int hw_init_v2_hw(struct hisi_hba *hisi_hba)
901 msleep(100); 912 msleep(100);
902 init_reg_v2_hw(hisi_hba); 913 init_reg_v2_hw(hisi_hba);
903 914
904 init_id_frame_v2_hw(hisi_hba);
905
906 return 0; 915 return 0;
907} 916}
908 917
@@ -952,14 +961,8 @@ static void start_phys_v2_hw(unsigned long data)
952 961
953static void phys_init_v2_hw(struct hisi_hba *hisi_hba) 962static void phys_init_v2_hw(struct hisi_hba *hisi_hba)
954{ 963{
955 int i;
956 struct timer_list *timer = &hisi_hba->timer; 964 struct timer_list *timer = &hisi_hba->timer;
957 965
958 for (i = 0; i < hisi_hba->n_phy; i++) {
959 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0x6a);
960 hisi_sas_phy_read32(hisi_hba, i, CHL_INT2_MSK);
961 }
962
963 setup_timer(timer, start_phys_v2_hw, (unsigned long)hisi_hba); 966 setup_timer(timer, start_phys_v2_hw, (unsigned long)hisi_hba);
964 mod_timer(timer, jiffies + HZ); 967 mod_timer(timer, jiffies + HZ);
965} 968}
@@ -1010,12 +1013,13 @@ static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
1010static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, int *q, int *s) 1013static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, int *q, int *s)
1011{ 1014{
1012 struct device *dev = &hisi_hba->pdev->dev; 1015 struct device *dev = &hisi_hba->pdev->dev;
1016 struct hisi_sas_dq *dq;
1013 u32 r, w; 1017 u32 r, w;
1014 int queue = hisi_hba->queue; 1018 int queue = hisi_hba->queue;
1015 1019
1016 while (1) { 1020 while (1) {
1017 w = hisi_sas_read32_relaxed(hisi_hba, 1021 dq = &hisi_hba->dq[queue];
1018 DLVRY_Q_0_WR_PTR + (queue * 0x14)); 1022 w = dq->wr_point;
1019 r = hisi_sas_read32_relaxed(hisi_hba, 1023 r = hisi_sas_read32_relaxed(hisi_hba,
1020 DLVRY_Q_0_RD_PTR + (queue * 0x14)); 1024 DLVRY_Q_0_RD_PTR + (queue * 0x14));
1021 if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) { 1025 if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
@@ -1038,9 +1042,11 @@ static void start_delivery_v2_hw(struct hisi_hba *hisi_hba)
1038{ 1042{
1039 int dlvry_queue = hisi_hba->slot_prep->dlvry_queue; 1043 int dlvry_queue = hisi_hba->slot_prep->dlvry_queue;
1040 int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot; 1044 int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot;
1045 struct hisi_sas_dq *dq = &hisi_hba->dq[dlvry_queue];
1041 1046
1047 dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
1042 hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), 1048 hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
1043 ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS); 1049 dq->wr_point);
1044} 1050}
1045 1051
1046static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba, 1052static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
@@ -1563,6 +1569,30 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot,
1563 goto out; 1569 goto out;
1564 } 1570 }
1565 1571
1572 /* Use SAS+TMF status codes */
1573 switch ((complete_hdr->dw0 & CMPLT_HDR_ABORT_STAT_MSK)
1574 >> CMPLT_HDR_ABORT_STAT_OFF) {
1575 case STAT_IO_ABORTED:
1576 /* this io has been aborted by abort command */
1577 ts->stat = SAS_ABORTED_TASK;
1578 goto out;
1579 case STAT_IO_COMPLETE:
1580 /* internal abort command complete */
1581 ts->stat = TMF_RESP_FUNC_COMPLETE;
1582 goto out;
1583 case STAT_IO_NO_DEVICE:
1584 ts->stat = TMF_RESP_FUNC_COMPLETE;
1585 goto out;
1586 case STAT_IO_NOT_VALID:
1587 /* abort single io, controller don't find
1588 * the io need to abort
1589 */
1590 ts->stat = TMF_RESP_FUNC_FAILED;
1591 goto out;
1592 default:
1593 break;
1594 }
1595
1566 if ((complete_hdr->dw0 & CMPLT_HDR_ERX_MSK) && 1596 if ((complete_hdr->dw0 & CMPLT_HDR_ERX_MSK) &&
1567 (!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) { 1597 (!(complete_hdr->dw0 & CMPLT_HDR_RSPNS_XFRD_MSK))) {
1568 1598
@@ -1775,6 +1805,32 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
1775 return 0; 1805 return 0;
1776} 1806}
1777 1807
1808static int prep_abort_v2_hw(struct hisi_hba *hisi_hba,
1809 struct hisi_sas_slot *slot,
1810 int device_id, int abort_flag, int tag_to_abort)
1811{
1812 struct sas_task *task = slot->task;
1813 struct domain_device *dev = task->dev;
1814 struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
1815 struct hisi_sas_port *port = slot->port;
1816
1817 /* dw0 */
1818 hdr->dw0 = cpu_to_le32((5 << CMD_HDR_CMD_OFF) | /*abort*/
1819 (port->id << CMD_HDR_PORT_OFF) |
1820 ((dev_is_sata(dev) ? 1:0) <<
1821 CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
1822 (abort_flag << CMD_HDR_ABORT_FLAG_OFF));
1823
1824 /* dw1 */
1825 hdr->dw1 = cpu_to_le32(device_id << CMD_HDR_DEV_ID_OFF);
1826
1827 /* dw7 */
1828 hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
1829 hdr->transfer_tags = cpu_to_le32(slot->idx);
1830
1831 return 0;
1832}
1833
1778static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba) 1834static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
1779{ 1835{
1780 int i, res = 0; 1836 int i, res = 0;
@@ -1818,9 +1874,6 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
1818 frame_rcvd[i] = __swab32(idaf); 1874 frame_rcvd[i] = __swab32(idaf);
1819 } 1875 }
1820 1876
1821 /* Get the linkrates */
1822 link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
1823 link_rate = (link_rate >> (phy_no * 4)) & 0xf;
1824 sas_phy->linkrate = link_rate; 1877 sas_phy->linkrate = link_rate;
1825 hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no, 1878 hard_phy_linkrate = hisi_sas_phy_read32(hisi_hba, phy_no,
1826 HARD_PHY_LINKRATE); 1879 HARD_PHY_LINKRATE);
@@ -1855,16 +1908,21 @@ end:
1855static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba) 1908static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
1856{ 1909{
1857 int res = 0; 1910 int res = 0;
1858 u32 phy_cfg, phy_state; 1911 u32 phy_state, sl_ctrl, txid_auto;
1859 1912
1860 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1); 1913 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
1861 1914
1862 phy_cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
1863
1864 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE); 1915 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
1865
1866 hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0); 1916 hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);
1867 1917
1918 sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
1919 hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL,
1920 sl_ctrl & ~SL_CONTROL_CTA_MSK);
1921
1922 txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
1923 hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
1924 txid_auto | TXID_AUTO_CT3_MSK);
1925
1868 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK); 1926 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
1869 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0); 1927 hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);
1870 1928
@@ -1986,7 +2044,7 @@ static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
1986 struct hisi_sas_slot *slot; 2044 struct hisi_sas_slot *slot;
1987 struct hisi_sas_itct *itct; 2045 struct hisi_sas_itct *itct;
1988 struct hisi_sas_complete_v2_hdr *complete_queue; 2046 struct hisi_sas_complete_v2_hdr *complete_queue;
1989 u32 irq_value, rd_point, wr_point, dev_id; 2047 u32 irq_value, rd_point = cq->rd_point, wr_point, dev_id;
1990 int queue = cq->id; 2048 int queue = cq->id;
1991 2049
1992 complete_queue = hisi_hba->complete_hdr[queue]; 2050 complete_queue = hisi_hba->complete_hdr[queue];
@@ -1994,8 +2052,6 @@ static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
1994 2052
1995 hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue); 2053 hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
1996 2054
1997 rd_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_RD_PTR +
1998 (0x14 * queue));
1999 wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR + 2055 wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
2000 (0x14 * queue)); 2056 (0x14 * queue));
2001 2057
@@ -2043,6 +2099,7 @@ static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
2043 } 2099 }
2044 2100
2045 /* update rd_point */ 2101 /* update rd_point */
2102 cq->rd_point = rd_point;
2046 hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); 2103 hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
2047 return IRQ_HANDLED; 2104 return IRQ_HANDLED;
2048} 2105}
@@ -2239,6 +2296,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
2239 .prep_smp = prep_smp_v2_hw, 2296 .prep_smp = prep_smp_v2_hw,
2240 .prep_ssp = prep_ssp_v2_hw, 2297 .prep_ssp = prep_ssp_v2_hw,
2241 .prep_stp = prep_ata_v2_hw, 2298 .prep_stp = prep_ata_v2_hw,
2299 .prep_abort = prep_abort_v2_hw,
2242 .get_free_slot = get_free_slot_v2_hw, 2300 .get_free_slot = get_free_slot_v2_hw,
2243 .start_delivery = start_delivery_v2_hw, 2301 .start_delivery = start_delivery_v2_hw,
2244 .slot_complete = slot_complete_v2_hw, 2302 .slot_complete = slot_complete_v2_hw,
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index ec6381e57eb7..258a3f9a2519 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -246,10 +246,6 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
246 246
247 shost->dma_dev = dma_dev; 247 shost->dma_dev = dma_dev;
248 248
249 error = device_add(&shost->shost_gendev);
250 if (error)
251 goto out_destroy_freelist;
252
253 /* 249 /*
254 * Increase usage count temporarily here so that calling 250 * Increase usage count temporarily here so that calling
255 * scsi_autopm_put_host() will trigger runtime idle if there is 251 * scsi_autopm_put_host() will trigger runtime idle if there is
@@ -260,6 +256,10 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
260 pm_runtime_enable(&shost->shost_gendev); 256 pm_runtime_enable(&shost->shost_gendev);
261 device_enable_async_suspend(&shost->shost_gendev); 257 device_enable_async_suspend(&shost->shost_gendev);
262 258
259 error = device_add(&shost->shost_gendev);
260 if (error)
261 goto out_destroy_freelist;
262
263 scsi_host_set_state(shost, SHOST_RUNNING); 263 scsi_host_set_state(shost, SHOST_RUNNING);
264 get_device(shost->shost_gendev.parent); 264 get_device(shost->shost_gendev.parent);
265 265
@@ -309,6 +309,10 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
309 out_del_gendev: 309 out_del_gendev:
310 device_del(&shost->shost_gendev); 310 device_del(&shost->shost_gendev);
311 out_destroy_freelist: 311 out_destroy_freelist:
312 device_disable_async_suspend(&shost->shost_gendev);
313 pm_runtime_disable(&shost->shost_gendev);
314 pm_runtime_set_suspended(&shost->shost_gendev);
315 pm_runtime_put_noidle(&shost->shost_gendev);
312 scsi_destroy_command_freelist(shost); 316 scsi_destroy_command_freelist(shost);
313 out_destroy_tags: 317 out_destroy_tags:
314 if (shost_use_blk_mq(shost)) 318 if (shost_use_blk_mq(shost))
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 030d0023e1d2..d007ec18179a 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -293,6 +293,8 @@ static int detect_controller_lockup(struct ctlr_info *h);
293static void hpsa_disable_rld_caching(struct ctlr_info *h); 293static void hpsa_disable_rld_caching(struct ctlr_info *h);
294static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, 294static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
295 struct ReportExtendedLUNdata *buf, int bufsize); 295 struct ReportExtendedLUNdata *buf, int bufsize);
296static bool hpsa_vpd_page_supported(struct ctlr_info *h,
297 unsigned char scsi3addr[], u8 page);
296static int hpsa_luns_changed(struct ctlr_info *h); 298static int hpsa_luns_changed(struct ctlr_info *h);
297static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c, 299static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
298 struct hpsa_scsi_dev_t *dev, 300 struct hpsa_scsi_dev_t *dev,
@@ -2388,7 +2390,8 @@ static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2388 struct CommandList *c, struct scsi_cmnd *cmd) 2390 struct CommandList *c, struct scsi_cmnd *cmd)
2389{ 2391{
2390 hpsa_cmd_resolve_and_free(h, c); 2392 hpsa_cmd_resolve_and_free(h, c);
2391 cmd->scsi_done(cmd); 2393 if (cmd && cmd->scsi_done)
2394 cmd->scsi_done(cmd);
2392} 2395}
2393 2396
2394static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c) 2397static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
@@ -2489,7 +2492,17 @@ static void complete_scsi_command(struct CommandList *cp)
2489 ei = cp->err_info; 2492 ei = cp->err_info;
2490 cmd = cp->scsi_cmd; 2493 cmd = cp->scsi_cmd;
2491 h = cp->h; 2494 h = cp->h;
2495
2496 if (!cmd->device) {
2497 cmd->result = DID_NO_CONNECT << 16;
2498 return hpsa_cmd_free_and_done(h, cp, cmd);
2499 }
2500
2492 dev = cmd->device->hostdata; 2501 dev = cmd->device->hostdata;
2502 if (!dev) {
2503 cmd->result = DID_NO_CONNECT << 16;
2504 return hpsa_cmd_free_and_done(h, cp, cmd);
2505 }
2493 c2 = &h->ioaccel2_cmd_pool[cp->cmdindex]; 2506 c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
2494 2507
2495 scsi_dma_unmap(cmd); /* undo the DMA mappings */ 2508 scsi_dma_unmap(cmd); /* undo the DMA mappings */
@@ -2504,8 +2517,15 @@ static void complete_scsi_command(struct CommandList *cp)
2504 cmd->result = (DID_OK << 16); /* host byte */ 2517 cmd->result = (DID_OK << 16); /* host byte */
2505 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ 2518 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2506 2519
2507 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) 2520 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
2508 atomic_dec(&cp->phys_disk->ioaccel_cmds_out); 2521 if (dev->physical_device && dev->expose_device &&
2522 dev->removed) {
2523 cmd->result = DID_NO_CONNECT << 16;
2524 return hpsa_cmd_free_and_done(h, cp, cmd);
2525 }
2526 if (likely(cp->phys_disk != NULL))
2527 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2528 }
2509 2529
2510 /* 2530 /*
2511 * We check for lockup status here as it may be set for 2531 * We check for lockup status here as it may be set for
@@ -3074,11 +3094,19 @@ static void hpsa_get_raid_level(struct ctlr_info *h,
3074 buf = kzalloc(64, GFP_KERNEL); 3094 buf = kzalloc(64, GFP_KERNEL);
3075 if (!buf) 3095 if (!buf)
3076 return; 3096 return;
3077 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64); 3097
3098 if (!hpsa_vpd_page_supported(h, scsi3addr,
3099 HPSA_VPD_LV_DEVICE_GEOMETRY))
3100 goto exit;
3101
3102 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3103 HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64);
3104
3078 if (rc == 0) 3105 if (rc == 0)
3079 *raid_level = buf[8]; 3106 *raid_level = buf[8];
3080 if (*raid_level > RAID_UNKNOWN) 3107 if (*raid_level > RAID_UNKNOWN)
3081 *raid_level = RAID_UNKNOWN; 3108 *raid_level = RAID_UNKNOWN;
3109exit:
3082 kfree(buf); 3110 kfree(buf);
3083 return; 3111 return;
3084} 3112}
@@ -3436,7 +3464,7 @@ static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
3436} 3464}
3437 3465
3438/* Get a device id from inquiry page 0x83 */ 3466/* Get a device id from inquiry page 0x83 */
3439static int hpsa_vpd_page_supported(struct ctlr_info *h, 3467static bool hpsa_vpd_page_supported(struct ctlr_info *h,
3440 unsigned char scsi3addr[], u8 page) 3468 unsigned char scsi3addr[], u8 page)
3441{ 3469{
3442 int rc; 3470 int rc;
@@ -3446,7 +3474,7 @@ static int hpsa_vpd_page_supported(struct ctlr_info *h,
3446 3474
3447 buf = kzalloc(256, GFP_KERNEL); 3475 buf = kzalloc(256, GFP_KERNEL);
3448 if (!buf) 3476 if (!buf)
3449 return 0; 3477 return false;
3450 3478
3451 /* Get the size of the page list first */ 3479 /* Get the size of the page list first */
3452 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 3480 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
@@ -3473,10 +3501,10 @@ static int hpsa_vpd_page_supported(struct ctlr_info *h,
3473 goto exit_supported; 3501 goto exit_supported;
3474exit_unsupported: 3502exit_unsupported:
3475 kfree(buf); 3503 kfree(buf);
3476 return 0; 3504 return false;
3477exit_supported: 3505exit_supported:
3478 kfree(buf); 3506 kfree(buf);
3479 return 1; 3507 return true;
3480} 3508}
3481 3509
3482static void hpsa_get_ioaccel_status(struct ctlr_info *h, 3510static void hpsa_get_ioaccel_status(struct ctlr_info *h,
@@ -3525,18 +3553,25 @@ static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
3525 int rc; 3553 int rc;
3526 unsigned char *buf; 3554 unsigned char *buf;
3527 3555
3528 if (buflen > 16) 3556 /* Does controller have VPD for device id? */
3529 buflen = 16; 3557 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID))
3558 return 1; /* not supported */
3559
3530 buf = kzalloc(64, GFP_KERNEL); 3560 buf = kzalloc(64, GFP_KERNEL);
3531 if (!buf) 3561 if (!buf)
3532 return -ENOMEM; 3562 return -ENOMEM;
3533 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64); 3563
3534 if (rc == 0) 3564 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
3535 memcpy(device_id, &buf[index], buflen); 3565 HPSA_VPD_LV_DEVICE_ID, buf, 64);
3566 if (rc == 0) {
3567 if (buflen > 16)
3568 buflen = 16;
3569 memcpy(device_id, &buf[8], buflen);
3570 }
3536 3571
3537 kfree(buf); 3572 kfree(buf);
3538 3573
3539 return rc != 0; 3574 return rc; /*0 - got id, otherwise, didn't */
3540} 3575}
3541 3576
3542static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, 3577static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
@@ -3807,8 +3842,15 @@ static int hpsa_update_device_info(struct ctlr_info *h,
3807 sizeof(this_device->model)); 3842 sizeof(this_device->model));
3808 memset(this_device->device_id, 0, 3843 memset(this_device->device_id, 0,
3809 sizeof(this_device->device_id)); 3844 sizeof(this_device->device_id));
3810 hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8, 3845 if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
3811 sizeof(this_device->device_id)); 3846 sizeof(this_device->device_id)))
3847 dev_err(&h->pdev->dev,
3848 "hpsa%d: %s: can't get device id for host %d:C0:T%d:L%d\t%s\t%.16s\n",
3849 h->ctlr, __func__,
3850 h->scsi_host->host_no,
3851 this_device->target, this_device->lun,
3852 scsi_device_type(this_device->devtype),
3853 this_device->model);
3812 3854
3813 if ((this_device->devtype == TYPE_DISK || 3855 if ((this_device->devtype == TYPE_DISK ||
3814 this_device->devtype == TYPE_ZBC) && 3856 this_device->devtype == TYPE_ZBC) &&
@@ -4034,7 +4076,17 @@ static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
4034 struct bmic_identify_physical_device *id_phys) 4076 struct bmic_identify_physical_device *id_phys)
4035{ 4077{
4036 int rc; 4078 int rc;
4037 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index]; 4079 struct ext_report_lun_entry *rle;
4080
4081 /*
4082 * external targets don't support BMIC
4083 */
4084 if (dev->external) {
4085 dev->queue_depth = 7;
4086 return;
4087 }
4088
4089 rle = &rlep->LUN[rle_index];
4038 4090
4039 dev->ioaccel_handle = rle->ioaccel_handle; 4091 dev->ioaccel_handle = rle->ioaccel_handle;
4040 if ((rle->device_flags & 0x08) && dev->ioaccel_handle) 4092 if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
@@ -4270,6 +4322,11 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
4270 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, 4322 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
4271 i, nphysicals, nlogicals, physdev_list, logdev_list); 4323 i, nphysicals, nlogicals, physdev_list, logdev_list);
4272 4324
4325 /* Determine if this is a lun from an external target array */
4326 tmpdevice->external =
4327 figure_external_status(h, raid_ctlr_position, i,
4328 nphysicals, nlocal_logicals);
4329
4273 /* 4330 /*
4274 * Skip over some devices such as a spare. 4331 * Skip over some devices such as a spare.
4275 */ 4332 */
@@ -4295,11 +4352,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
4295 continue; 4352 continue;
4296 } 4353 }
4297 4354
4298 /* Determine if this is a lun from an external target array */
4299 tmpdevice->external =
4300 figure_external_status(h, raid_ctlr_position, i,
4301 nphysicals, nlocal_logicals);
4302
4303 figure_bus_target_lun(h, lunaddrbytes, tmpdevice); 4355 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
4304 hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes); 4356 hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
4305 this_device = currentsd[ncurrent]; 4357 this_device = currentsd[ncurrent];
@@ -4513,7 +4565,9 @@ static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
4513 case READ_6: 4565 case READ_6:
4514 case READ_12: 4566 case READ_12:
4515 if (*cdb_len == 6) { 4567 if (*cdb_len == 6) {
4516 block = get_unaligned_be16(&cdb[2]); 4568 block = (((cdb[1] & 0x1F) << 16) |
4569 (cdb[2] << 8) |
4570 cdb[3]);
4517 block_cnt = cdb[4]; 4571 block_cnt = cdb[4];
4518 if (block_cnt == 0) 4572 if (block_cnt == 0)
4519 block_cnt = 256; 4573 block_cnt = 256;
@@ -4638,6 +4692,9 @@ static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
4638 struct scsi_cmnd *cmd = c->scsi_cmd; 4692 struct scsi_cmnd *cmd = c->scsi_cmd;
4639 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; 4693 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4640 4694
4695 if (!dev)
4696 return -1;
4697
4641 c->phys_disk = dev; 4698 c->phys_disk = dev;
4642 4699
4643 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle, 4700 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
@@ -4670,9 +4727,11 @@ static void set_encrypt_ioaccel2(struct ctlr_info *h,
4670 */ 4727 */
4671 switch (cmd->cmnd[0]) { 4728 switch (cmd->cmnd[0]) {
4672 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */ 4729 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
4673 case WRITE_6:
4674 case READ_6: 4730 case READ_6:
4675 first_block = get_unaligned_be16(&cmd->cmnd[2]); 4731 case WRITE_6:
4732 first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
4733 (cmd->cmnd[2] << 8) |
4734 cmd->cmnd[3]);
4676 break; 4735 break;
4677 case WRITE_10: 4736 case WRITE_10:
4678 case READ_10: 4737 case READ_10:
@@ -4714,6 +4773,12 @@ static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
4714 u32 len; 4773 u32 len;
4715 u32 total_len = 0; 4774 u32 total_len = 0;
4716 4775
4776 if (!cmd->device)
4777 return -1;
4778
4779 if (!cmd->device->hostdata)
4780 return -1;
4781
4717 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); 4782 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4718 4783
4719 if (fixup_ioaccel_cdb(cdb, &cdb_len)) { 4784 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
@@ -4822,6 +4887,12 @@ static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
4822 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, 4887 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4823 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk) 4888 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4824{ 4889{
4890 if (!c->scsi_cmd->device)
4891 return -1;
4892
4893 if (!c->scsi_cmd->device->hostdata)
4894 return -1;
4895
4825 /* Try to honor the device's queue depth */ 4896 /* Try to honor the device's queue depth */
4826 if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) > 4897 if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
4827 phys_disk->queue_depth) { 4898 phys_disk->queue_depth) {
@@ -4902,12 +4973,17 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
4902#endif 4973#endif
4903 int offload_to_mirror; 4974 int offload_to_mirror;
4904 4975
4976 if (!dev)
4977 return -1;
4978
4905 /* check for valid opcode, get LBA and block count */ 4979 /* check for valid opcode, get LBA and block count */
4906 switch (cmd->cmnd[0]) { 4980 switch (cmd->cmnd[0]) {
4907 case WRITE_6: 4981 case WRITE_6:
4908 is_write = 1; 4982 is_write = 1;
4909 case READ_6: 4983 case READ_6:
4910 first_block = get_unaligned_be16(&cmd->cmnd[2]); 4984 first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
4985 (cmd->cmnd[2] << 8) |
4986 cmd->cmnd[3]);
4911 block_cnt = cmd->cmnd[4]; 4987 block_cnt = cmd->cmnd[4];
4912 if (block_cnt == 0) 4988 if (block_cnt == 0)
4913 block_cnt = 256; 4989 block_cnt = 256;
@@ -5314,6 +5390,9 @@ static int hpsa_ioaccel_submit(struct ctlr_info *h,
5314 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; 5390 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
5315 int rc = IO_ACCEL_INELIGIBLE; 5391 int rc = IO_ACCEL_INELIGIBLE;
5316 5392
5393 if (!dev)
5394 return SCSI_MLQUEUE_HOST_BUSY;
5395
5317 cmd->host_scribble = (unsigned char *) c; 5396 cmd->host_scribble = (unsigned char *) c;
5318 5397
5319 if (dev->offload_enabled) { 5398 if (dev->offload_enabled) {
@@ -5852,6 +5931,9 @@ static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
5852 struct scsi_cmnd *scmd = command_to_abort->scsi_cmd; 5931 struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
5853 struct hpsa_scsi_dev_t *dev = scmd->device->hostdata; 5932 struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;
5854 5933
5934 if (!dev)
5935 return;
5936
5855 /* 5937 /*
5856 * We're overlaying struct hpsa_tmf_struct on top of something which 5938 * We're overlaying struct hpsa_tmf_struct on top of something which
5857 * was allocated as a struct io_accel2_cmd, so we better be sure it 5939 * was allocated as a struct io_accel2_cmd, so we better be sure it
@@ -5935,7 +6017,7 @@ static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
5935 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 6017 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5936 psa[0], psa[1], psa[2], psa[3], 6018 psa[0], psa[1], psa[2], psa[3],
5937 psa[4], psa[5], psa[6], psa[7]); 6019 psa[4], psa[5], psa[6], psa[7]);
5938 rc = hpsa_do_reset(h, dev, psa, HPSA_RESET_TYPE_TARGET, reply_queue); 6020 rc = hpsa_do_reset(h, dev, psa, HPSA_PHYS_TARGET_RESET, reply_queue);
5939 if (rc != 0) { 6021 if (rc != 0) {
5940 dev_warn(&h->pdev->dev, 6022 dev_warn(&h->pdev->dev,
5941 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 6023 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
@@ -5972,6 +6054,9 @@ static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
5972 struct io_accel2_cmd *c2; 6054 struct io_accel2_cmd *c2;
5973 6055
5974 dev = abort->scsi_cmd->device->hostdata; 6056 dev = abort->scsi_cmd->device->hostdata;
6057 if (!dev)
6058 return -1;
6059
5975 if (!dev->offload_enabled && !dev->hba_ioaccel_enabled) 6060 if (!dev->offload_enabled && !dev->hba_ioaccel_enabled)
5976 return -1; 6061 return -1;
5977 6062
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index a1487e67f7a1..82cdfad874f3 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -312,7 +312,6 @@ struct offline_device_entry {
312#define HPSA_DEVICE_RESET_MSG 1 312#define HPSA_DEVICE_RESET_MSG 1
313#define HPSA_RESET_TYPE_CONTROLLER 0x00 313#define HPSA_RESET_TYPE_CONTROLLER 0x00
314#define HPSA_RESET_TYPE_BUS 0x01 314#define HPSA_RESET_TYPE_BUS 0x01
315#define HPSA_RESET_TYPE_TARGET 0x03
316#define HPSA_RESET_TYPE_LUN 0x04 315#define HPSA_RESET_TYPE_LUN 0x04
317#define HPSA_PHYS_TARGET_RESET 0x99 /* not defined by cciss spec */ 316#define HPSA_PHYS_TARGET_RESET 0x99 /* not defined by cciss spec */
318#define HPSA_MSG_SEND_RETRY_LIMIT 10 317#define HPSA_MSG_SEND_RETRY_LIMIT 10
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index a5be153d92d4..a584cdf07058 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -157,6 +157,7 @@
157 157
158/* VPD Inquiry types */ 158/* VPD Inquiry types */
159#define HPSA_VPD_SUPPORTED_PAGES 0x00 159#define HPSA_VPD_SUPPORTED_PAGES 0x00
160#define HPSA_VPD_LV_DEVICE_ID 0x83
160#define HPSA_VPD_LV_DEVICE_GEOMETRY 0xC1 161#define HPSA_VPD_LV_DEVICE_GEOMETRY 0xC1
161#define HPSA_VPD_LV_IOACCEL_STATUS 0xC2 162#define HPSA_VPD_LV_IOACCEL_STATUS 0xC2
162#define HPSA_VPD_LV_STATUS 0xC3 163#define HPSA_VPD_LV_STATUS 0xC3
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index ab67ec4b6bd6..7e487c78279c 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -52,6 +52,7 @@ static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
52static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS; 52static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
53static unsigned int ibmvfc_debug = IBMVFC_DEBUG; 53static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
54static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL; 54static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
55static unsigned int cls3_error = IBMVFC_CLS3_ERROR;
55static LIST_HEAD(ibmvfc_head); 56static LIST_HEAD(ibmvfc_head);
56static DEFINE_SPINLOCK(ibmvfc_driver_lock); 57static DEFINE_SPINLOCK(ibmvfc_driver_lock);
57static struct scsi_transport_template *ibmvfc_transport_template; 58static struct scsi_transport_template *ibmvfc_transport_template;
@@ -86,6 +87,9 @@ MODULE_PARM_DESC(debug, "Enable driver debug information. "
86module_param_named(log_level, log_level, uint, 0); 87module_param_named(log_level, log_level, uint, 0);
87MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. " 88MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
88 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]"); 89 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
90module_param_named(cls3_error, cls3_error, uint, 0);
91MODULE_PARM_DESC(cls3_error, "Enable FC Class 3 Error Recovery. "
92 "[Default=" __stringify(IBMVFC_CLS3_ERROR) "]");
89 93
90static const struct { 94static const struct {
91 u16 status; 95 u16 status;
@@ -717,7 +721,6 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
717 spin_lock_irqsave(vhost->host->host_lock, flags); 721 spin_lock_irqsave(vhost->host->host_lock, flags);
718 vhost->state = IBMVFC_NO_CRQ; 722 vhost->state = IBMVFC_NO_CRQ;
719 vhost->logged_in = 0; 723 vhost->logged_in = 0;
720 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
721 724
722 /* Clean out the queue */ 725 /* Clean out the queue */
723 memset(crq->msgs, 0, PAGE_SIZE); 726 memset(crq->msgs, 0, PAGE_SIZE);
@@ -1335,6 +1338,9 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
1335 struct srp_direct_buf *data = &vfc_cmd->ioba; 1338 struct srp_direct_buf *data = &vfc_cmd->ioba;
1336 struct ibmvfc_host *vhost = dev_get_drvdata(dev); 1339 struct ibmvfc_host *vhost = dev_get_drvdata(dev);
1337 1340
1341 if (cls3_error)
1342 vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR);
1343
1338 sg_mapped = scsi_dma_map(scmd); 1344 sg_mapped = scsi_dma_map(scmd);
1339 if (!sg_mapped) { 1345 if (!sg_mapped) {
1340 vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC); 1346 vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC);
@@ -3381,6 +3387,10 @@ static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
3381 prli->parms.type = IBMVFC_SCSI_FCP_TYPE; 3387 prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
3382 prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR); 3388 prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR);
3383 prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC); 3389 prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC);
3390 prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED);
3391
3392 if (cls3_error)
3393 prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_RETRY);
3384 3394
3385 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT); 3395 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
3386 if (ibmvfc_send_event(evt, vhost, default_timeout)) { 3396 if (ibmvfc_send_event(evt, vhost, default_timeout)) {
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 5c70a52ad346..9a0696f68f37 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -54,6 +54,7 @@
54#define IBMVFC_DEV_LOSS_TMO (5 * 60) 54#define IBMVFC_DEV_LOSS_TMO (5 * 60)
55#define IBMVFC_DEFAULT_LOG_LEVEL 2 55#define IBMVFC_DEFAULT_LOG_LEVEL 2
56#define IBMVFC_MAX_CDB_LEN 16 56#define IBMVFC_MAX_CDB_LEN 16
57#define IBMVFC_CLS3_ERROR 0
57 58
58/* 59/*
59 * Ensure we have resources for ERP and initialization: 60 * Ensure we have resources for ERP and initialization:
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index b29fef9d0f27..642b739ad0da 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -1606,8 +1606,6 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi)
1606 1606
1607 if (!(vscsi->flags & RESPONSE_Q_DOWN)) { 1607 if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
1608 list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) { 1608 list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
1609 pr_debug("send_messages cmd %p\n", cmd);
1610
1611 iue = cmd->iue; 1609 iue = cmd->iue;
1612 1610
1613 crq->valid = VALID_CMD_RESP_EL; 1611 crq->valid = VALID_CMD_RESP_EL;
@@ -1934,6 +1932,8 @@ static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
1934 /* 1932 /*
1935 * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port 1933 * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
1936 */ 1934 */
1935 target_wait_for_sess_cmds(se_sess);
1936 transport_deregister_session_configfs(se_sess);
1937 transport_deregister_session(se_sess); 1937 transport_deregister_session(se_sess);
1938 tport->ibmv_nexus = NULL; 1938 tport->ibmv_nexus = NULL;
1939 kfree(nexus); 1939 kfree(nexus);
@@ -1978,7 +1978,7 @@ static long ibmvscsis_srp_login(struct scsi_info *vscsi,
1978 reason = SRP_LOGIN_REJ_MULTI_CHANNEL_UNSUPPORTED; 1978 reason = SRP_LOGIN_REJ_MULTI_CHANNEL_UNSUPPORTED;
1979 else if (fmt->buffers & (~SUPPORTED_FORMATS)) 1979 else if (fmt->buffers & (~SUPPORTED_FORMATS))
1980 reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT; 1980 reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
1981 else if ((fmt->buffers | SUPPORTED_FORMATS) == 0) 1981 else if ((fmt->buffers & SUPPORTED_FORMATS) == 0)
1982 reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT; 1982 reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
1983 1983
1984 if (vscsi->state == SRP_PROCESSING) 1984 if (vscsi->state == SRP_PROCESSING)
@@ -2554,10 +2554,6 @@ static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
2554 2554
2555 srp->lun.scsi_lun[0] &= 0x3f; 2555 srp->lun.scsi_lun[0] &= 0x3f;
2556 2556
2557 pr_debug("calling submit_cmd, se_cmd %p, lun 0x%llx, cdb 0x%x, attr:%d\n",
2558 &cmd->se_cmd, scsilun_to_int(&srp->lun), (int)srp->cdb[0],
2559 attr);
2560
2561 rc = target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb, 2557 rc = target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
2562 cmd->sense_buf, scsilun_to_int(&srp->lun), 2558 cmd->sense_buf, scsilun_to_int(&srp->lun),
2563 data_len, attr, dir, 0); 2559 data_len, attr, dir, 0);
@@ -3142,8 +3138,6 @@ static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg,
3142 long tx_len; 3138 long tx_len;
3143 long rc = 0; 3139 long rc = 0;
3144 3140
3145 pr_debug("rdma: dir %d, bytes 0x%x\n", dir, bytes);
3146
3147 if (bytes == 0) 3141 if (bytes == 0)
3148 return 0; 3142 return 0;
3149 3143
@@ -3192,12 +3186,6 @@ static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg,
3192 vscsi->dds.window[LOCAL].liobn, 3186 vscsi->dds.window[LOCAL].liobn,
3193 server_ioba); 3187 server_ioba);
3194 } else { 3188 } else {
3195 /* write to client */
3196 struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
3197
3198 if (!READ_CMD(srp->cdb))
3199 print_hex_dump_bytes(" data:", DUMP_PREFIX_NONE,
3200 sg_virt(sgp), buf_len);
3201 /* The h_copy_rdma will cause phyp, running in another 3189 /* The h_copy_rdma will cause phyp, running in another
3202 * partition, to read memory, so we need to make sure 3190 * partition, to read memory, so we need to make sure
3203 * the data has been written out, hence these syncs. 3191 * the data has been written out, hence these syncs.
@@ -3322,12 +3310,9 @@ cmd_work:
3322 rc = ibmvscsis_trans_event(vscsi, crq); 3310 rc = ibmvscsis_trans_event(vscsi, crq);
3323 } else if (vscsi->flags & TRANS_EVENT) { 3311 } else if (vscsi->flags & TRANS_EVENT) {
3324 /* 3312 /*
3325 * if a tranport event has occurred leave 3313 * if a transport event has occurred leave
3326 * everything but transport events on the queue 3314 * everything but transport events on the queue
3327 */ 3315 *
3328 pr_debug("handle_crq, ignoring\n");
3329
3330 /*
3331 * need to decrement the queue index so we can 3316 * need to decrement the queue index so we can
3332 * look at the elment again 3317 * look at the elment again
3333 */ 3318 */
@@ -3461,6 +3446,7 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
3461 vscsi->map_ioba = dma_map_single(&vdev->dev, vscsi->map_buf, PAGE_SIZE, 3446 vscsi->map_ioba = dma_map_single(&vdev->dev, vscsi->map_buf, PAGE_SIZE,
3462 DMA_BIDIRECTIONAL); 3447 DMA_BIDIRECTIONAL);
3463 if (dma_mapping_error(&vdev->dev, vscsi->map_ioba)) { 3448 if (dma_mapping_error(&vdev->dev, vscsi->map_ioba)) {
3449 rc = -ENOMEM;
3464 dev_err(&vscsi->dev, "probe: error mapping command buffer\n"); 3450 dev_err(&vscsi->dev, "probe: error mapping command buffer\n");
3465 goto free_buf; 3451 goto free_buf;
3466 } 3452 }
@@ -3693,12 +3679,9 @@ static void ibmvscsis_release_cmd(struct se_cmd *se_cmd)
3693 se_cmd); 3679 se_cmd);
3694 struct scsi_info *vscsi = cmd->adapter; 3680 struct scsi_info *vscsi = cmd->adapter;
3695 3681
3696 pr_debug("release_cmd %p, flags %d\n", se_cmd, cmd->flags);
3697
3698 spin_lock_bh(&vscsi->intr_lock); 3682 spin_lock_bh(&vscsi->intr_lock);
3699 /* Remove from active_q */ 3683 /* Remove from active_q */
3700 list_del(&cmd->list); 3684 list_move_tail(&cmd->list, &vscsi->waiting_rsp);
3701 list_add_tail(&cmd->list, &vscsi->waiting_rsp);
3702 ibmvscsis_send_messages(vscsi); 3685 ibmvscsis_send_messages(vscsi);
3703 spin_unlock_bh(&vscsi->intr_lock); 3686 spin_unlock_bh(&vscsi->intr_lock);
3704} 3687}
@@ -3715,9 +3698,6 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
3715 struct iu_entry *iue = cmd->iue; 3698 struct iu_entry *iue = cmd->iue;
3716 int rc; 3699 int rc;
3717 3700
3718 pr_debug("write_pending, se_cmd %p, length 0x%x\n",
3719 se_cmd, se_cmd->data_length);
3720
3721 rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 3701 rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
3722 1, 1); 3702 1, 1);
3723 if (rc) { 3703 if (rc) {
@@ -3756,9 +3736,6 @@ static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
3756 uint len = 0; 3736 uint len = 0;
3757 int rc; 3737 int rc;
3758 3738
3759 pr_debug("queue_data_in, se_cmd %p, length 0x%x\n",
3760 se_cmd, se_cmd->data_length);
3761
3762 rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1, 3739 rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1,
3763 1); 3740 1);
3764 if (rc) { 3741 if (rc) {
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c
deleted file mode 100644
index 3882d9f519c8..000000000000
--- a/drivers/scsi/in2000.c
+++ /dev/null
@@ -1,2302 +0,0 @@
1/*
2 * in2000.c - Linux device driver for the
3 * Always IN2000 ISA SCSI card.
4 *
5 * Copyright (c) 1996 John Shifflett, GeoLog Consulting
6 * john@geolog.com
7 * jshiffle@netcom.com
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * For the avoidance of doubt the "preferred form" of this code is one which
20 * is in an open non patent encumbered format. Where cryptographic key signing
21 * forms part of the process of creating an executable the information
22 * including keys needed to generate an equivalently functional executable
23 * are deemed to be part of the source code.
24 *
25 * Drew Eckhardt's excellent 'Generic NCR5380' sources provided
26 * much of the inspiration and some of the code for this driver.
27 * The Linux IN2000 driver distributed in the Linux kernels through
28 * version 1.2.13 was an extremely valuable reference on the arcane
29 * (and still mysterious) workings of the IN2000's fifo. It also
30 * is where I lifted in2000_biosparam(), the gist of the card
31 * detection scheme, and other bits of code. Many thanks to the
32 * talented and courageous people who wrote, contributed to, and
33 * maintained that driver (including Brad McLean, Shaun Savage,
34 * Bill Earnest, Larry Doolittle, Roger Sunshine, John Luckey,
35 * Matt Postiff, Peter Lu, zerucha@shell.portal.com, and Eric
36 * Youngdale). I should also mention the driver written by
37 * Hamish Macdonald for the (GASP!) Amiga A2091 card, included
38 * in the Linux-m68k distribution; it gave me a good initial
39 * understanding of the proper way to run a WD33c93 chip, and I
40 * ended up stealing lots of code from it.
41 *
42 * _This_ driver is (I feel) an improvement over the old one in
43 * several respects:
44 * - All problems relating to the data size of a SCSI request are
45 * gone (as far as I know). The old driver couldn't handle
46 * swapping to partitions because that involved 4k blocks, nor
47 * could it deal with the st.c tape driver unmodified, because
48 * that usually involved 4k - 32k blocks. The old driver never
49 * quite got away from a morbid dependence on 2k block sizes -
50 * which of course is the size of the card's fifo.
51 *
52 * - Target Disconnection/Reconnection is now supported. Any
53 * system with more than one device active on the SCSI bus
54 * will benefit from this. The driver defaults to what I'm
55 * calling 'adaptive disconnect' - meaning that each command
56 * is evaluated individually as to whether or not it should
57 * be run with the option to disconnect/reselect (if the
58 * device chooses), or as a "SCSI-bus-hog".
59 *
60 * - Synchronous data transfers are now supported. Because there
61 * are a few devices (and many improperly terminated systems)
62 * that choke when doing sync, the default is sync DISABLED
63 * for all devices. This faster protocol can (and should!)
64 * be enabled on selected devices via the command-line.
65 *
66 * - Runtime operating parameters can now be specified through
67 * either the LILO or the 'insmod' command line. For LILO do:
68 * "in2000=blah,blah,blah"
69 * and with insmod go like:
70 * "insmod /usr/src/linux/modules/in2000.o setup_strings=blah,blah"
71 * The defaults should be good for most people. See the comment
72 * for 'setup_strings' below for more details.
73 *
74 * - The old driver relied exclusively on what the Western Digital
75 * docs call "Combination Level 2 Commands", which are a great
76 * idea in that the CPU is relieved of a lot of interrupt
77 * overhead. However, by accepting a certain (user-settable)
78 * amount of additional interrupts, this driver achieves
79 * better control over the SCSI bus, and data transfers are
80 * almost as fast while being much easier to define, track,
81 * and debug.
82 *
83 * - You can force detection of a card whose BIOS has been disabled.
84 *
85 * - Multiple IN2000 cards might almost be supported. I've tried to
86 * keep it in mind, but have no way to test...
87 *
88 *
89 * TODO:
90 * tagged queuing. multiple cards.
91 *
92 *
93 * NOTE:
94 * When using this or any other SCSI driver as a module, you'll
95 * find that with the stock kernel, at most _two_ SCSI hard
96 * drives will be linked into the device list (ie, usable).
97 * If your IN2000 card has more than 2 disks on its bus, you
98 * might want to change the define of 'SD_EXTRA_DEVS' in the
99 * 'hosts.h' file from 2 to whatever is appropriate. It took
100 * me a while to track down this surprisingly obscure and
101 * undocumented little "feature".
102 *
103 *
104 * People with bug reports, wish-lists, complaints, comments,
105 * or improvements are asked to pah-leeez email me (John Shifflett)
106 * at john@geolog.com or jshiffle@netcom.com! I'm anxious to get
107 * this thing into as good a shape as possible, and I'm positive
108 * there are lots of lurking bugs and "Stupid Places".
109 *
110 * Updated for Linux 2.5 by Alan Cox <alan@lxorguk.ukuu.org.uk>
111 * - Using new_eh handler
112 * - Hopefully got all the locking right again
113 * See "FIXME" notes for items that could do with more work
114 */
115
116#include <linux/module.h>
117#include <linux/blkdev.h>
118#include <linux/interrupt.h>
119#include <linux/string.h>
120#include <linux/delay.h>
121#include <linux/proc_fs.h>
122#include <linux/ioport.h>
123#include <linux/stat.h>
124
125#include <asm/io.h>
126
127#include "scsi.h"
128#include <scsi/scsi_host.h>
129
130#define IN2000_VERSION "1.33-2.5"
131#define IN2000_DATE "2002/11/03"
132
133#include "in2000.h"
134
135
136/*
137 * 'setup_strings' is a single string used to pass operating parameters and
138 * settings from the kernel/module command-line to the driver. 'setup_args[]'
139 * is an array of strings that define the compile-time default values for
140 * these settings. If Linux boots with a LILO or insmod command-line, those
141 * settings are combined with 'setup_args[]'. Note that LILO command-lines
142 * are prefixed with "in2000=" while insmod uses a "setup_strings=" prefix.
143 * The driver recognizes the following keywords (lower case required) and
144 * arguments:
145 *
146 * - ioport:addr -Where addr is IO address of a (usually ROM-less) card.
147 * - noreset -No optional args. Prevents SCSI bus reset at boot time.
148 * - nosync:x -x is a bitmask where the 1st 7 bits correspond with
149 * the 7 possible SCSI devices (bit 0 for device #0, etc).
150 * Set a bit to PREVENT sync negotiation on that device.
151 * The driver default is sync DISABLED on all devices.
152 * - period:ns -ns is the minimum # of nanoseconds in a SCSI data transfer
153 * period. Default is 500; acceptable values are 250 - 1000.
154 * - disconnect:x -x = 0 to never allow disconnects, 2 to always allow them.
155 * x = 1 does 'adaptive' disconnects, which is the default
156 * and generally the best choice.
157 * - debug:x -If 'DEBUGGING_ON' is defined, x is a bitmask that causes
158 * various types of debug output to printed - see the DB_xxx
159 * defines in in2000.h
160 * - proc:x -If 'PROC_INTERFACE' is defined, x is a bitmask that
161 * determines how the /proc interface works and what it
162 * does - see the PR_xxx defines in in2000.h
163 *
164 * Syntax Notes:
165 * - Numeric arguments can be decimal or the '0x' form of hex notation. There
166 * _must_ be a colon between a keyword and its numeric argument, with no
167 * spaces.
168 * - Keywords are separated by commas, no spaces, in the standard kernel
169 * command-line manner.
170 * - A keyword in the 'nth' comma-separated command-line member will overwrite
171 * the 'nth' element of setup_args[]. A blank command-line member (in
172 * other words, a comma with no preceding keyword) will _not_ overwrite
173 * the corresponding setup_args[] element.
174 *
175 * A few LILO examples (for insmod, use 'setup_strings' instead of 'in2000'):
176 * - in2000=ioport:0x220,noreset
177 * - in2000=period:250,disconnect:2,nosync:0x03
178 * - in2000=debug:0x1e
179 * - in2000=proc:3
180 */
181
182/* Normally, no defaults are specified... */
183static char *setup_args[] = { "", "", "", "", "", "", "", "", "" };
184
185/* filled in by 'insmod' */
186static char *setup_strings;
187
188module_param(setup_strings, charp, 0);
189
190static inline uchar read_3393(struct IN2000_hostdata *hostdata, uchar reg_num)
191{
192 write1_io(reg_num, IO_WD_ADDR);
193 return read1_io(IO_WD_DATA);
194}
195
196
197#define READ_AUX_STAT() read1_io(IO_WD_ASR)
198
199
200static inline void write_3393(struct IN2000_hostdata *hostdata, uchar reg_num, uchar value)
201{
202 write1_io(reg_num, IO_WD_ADDR);
203 write1_io(value, IO_WD_DATA);
204}
205
206
207static inline void write_3393_cmd(struct IN2000_hostdata *hostdata, uchar cmd)
208{
209/* while (READ_AUX_STAT() & ASR_CIP)
210 printk("|");*/
211 write1_io(WD_COMMAND, IO_WD_ADDR);
212 write1_io(cmd, IO_WD_DATA);
213}
214
215
216static uchar read_1_byte(struct IN2000_hostdata *hostdata)
217{
218 uchar asr, x = 0;
219
220 write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
221 write_3393_cmd(hostdata, WD_CMD_TRANS_INFO | 0x80);
222 do {
223 asr = READ_AUX_STAT();
224 if (asr & ASR_DBR)
225 x = read_3393(hostdata, WD_DATA);
226 } while (!(asr & ASR_INT));
227 return x;
228}
229
230
231static void write_3393_count(struct IN2000_hostdata *hostdata, unsigned long value)
232{
233 write1_io(WD_TRANSFER_COUNT_MSB, IO_WD_ADDR);
234 write1_io((value >> 16), IO_WD_DATA);
235 write1_io((value >> 8), IO_WD_DATA);
236 write1_io(value, IO_WD_DATA);
237}
238
239
240static unsigned long read_3393_count(struct IN2000_hostdata *hostdata)
241{
242 unsigned long value;
243
244 write1_io(WD_TRANSFER_COUNT_MSB, IO_WD_ADDR);
245 value = read1_io(IO_WD_DATA) << 16;
246 value |= read1_io(IO_WD_DATA) << 8;
247 value |= read1_io(IO_WD_DATA);
248 return value;
249}
250
251
252/* The 33c93 needs to be told which direction a command transfers its
253 * data; we use this function to figure it out. Returns true if there
254 * will be a DATA_OUT phase with this command, false otherwise.
255 * (Thanks to Joerg Dorchain for the research and suggestion.)
256 */
257static int is_dir_out(Scsi_Cmnd * cmd)
258{
259 switch (cmd->cmnd[0]) {
260 case WRITE_6:
261 case WRITE_10:
262 case WRITE_12:
263 case WRITE_LONG:
264 case WRITE_SAME:
265 case WRITE_BUFFER:
266 case WRITE_VERIFY:
267 case WRITE_VERIFY_12:
268 case COMPARE:
269 case COPY:
270 case COPY_VERIFY:
271 case SEARCH_EQUAL:
272 case SEARCH_HIGH:
273 case SEARCH_LOW:
274 case SEARCH_EQUAL_12:
275 case SEARCH_HIGH_12:
276 case SEARCH_LOW_12:
277 case FORMAT_UNIT:
278 case REASSIGN_BLOCKS:
279 case RESERVE:
280 case MODE_SELECT:
281 case MODE_SELECT_10:
282 case LOG_SELECT:
283 case SEND_DIAGNOSTIC:
284 case CHANGE_DEFINITION:
285 case UPDATE_BLOCK:
286 case SET_WINDOW:
287 case MEDIUM_SCAN:
288 case SEND_VOLUME_TAG:
289 case 0xea:
290 return 1;
291 default:
292 return 0;
293 }
294}
295
296
297
298static struct sx_period sx_table[] = {
299 {1, 0x20},
300 {252, 0x20},
301 {376, 0x30},
302 {500, 0x40},
303 {624, 0x50},
304 {752, 0x60},
305 {876, 0x70},
306 {1000, 0x00},
307 {0, 0}
308};
309
310static int round_period(unsigned int period)
311{
312 int x;
313
314 for (x = 1; sx_table[x].period_ns; x++) {
315 if ((period <= sx_table[x - 0].period_ns) && (period > sx_table[x - 1].period_ns)) {
316 return x;
317 }
318 }
319 return 7;
320}
321
322static uchar calc_sync_xfer(unsigned int period, unsigned int offset)
323{
324 uchar result;
325
326 period *= 4; /* convert SDTR code to ns */
327 result = sx_table[round_period(period)].reg_value;
328 result |= (offset < OPTIMUM_SX_OFF) ? offset : OPTIMUM_SX_OFF;
329 return result;
330}
331
332
333
334static void in2000_execute(struct Scsi_Host *instance);
335
336static int in2000_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
337{
338 struct Scsi_Host *instance;
339 struct IN2000_hostdata *hostdata;
340 Scsi_Cmnd *tmp;
341
342 instance = cmd->device->host;
343 hostdata = (struct IN2000_hostdata *) instance->hostdata;
344
345 DB(DB_QUEUE_COMMAND, scmd_printk(KERN_DEBUG, cmd, "Q-%02x(", cmd->cmnd[0]))
346
347/* Set up a few fields in the Scsi_Cmnd structure for our own use:
348 * - host_scribble is the pointer to the next cmd in the input queue
349 * - scsi_done points to the routine we call when a cmd is finished
350 * - result is what you'd expect
351 */
352 cmd->host_scribble = NULL;
353 cmd->scsi_done = done;
354 cmd->result = 0;
355
356/* We use the Scsi_Pointer structure that's included with each command
357 * as a scratchpad (as it's intended to be used!). The handy thing about
358 * the SCp.xxx fields is that they're always associated with a given
359 * cmd, and are preserved across disconnect-reselect. This means we
360 * can pretty much ignore SAVE_POINTERS and RESTORE_POINTERS messages
361 * if we keep all the critical pointers and counters in SCp:
362 * - SCp.ptr is the pointer into the RAM buffer
363 * - SCp.this_residual is the size of that buffer
364 * - SCp.buffer points to the current scatter-gather buffer
365 * - SCp.buffers_residual tells us how many S.G. buffers there are
366 * - SCp.have_data_in helps keep track of >2048 byte transfers
367 * - SCp.sent_command is not used
368 * - SCp.phase records this command's SRCID_ER bit setting
369 */
370
371 if (scsi_bufflen(cmd)) {
372 cmd->SCp.buffer = scsi_sglist(cmd);
373 cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
374 cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
375 cmd->SCp.this_residual = cmd->SCp.buffer->length;
376 } else {
377 cmd->SCp.buffer = NULL;
378 cmd->SCp.buffers_residual = 0;
379 cmd->SCp.ptr = NULL;
380 cmd->SCp.this_residual = 0;
381 }
382 cmd->SCp.have_data_in = 0;
383
384/* We don't set SCp.phase here - that's done in in2000_execute() */
385
386/* WD docs state that at the conclusion of a "LEVEL2" command, the
387 * status byte can be retrieved from the LUN register. Apparently,
388 * this is the case only for *uninterrupted* LEVEL2 commands! If
389 * there are any unexpected phases entered, even if they are 100%
390 * legal (different devices may choose to do things differently),
391 * the LEVEL2 command sequence is exited. This often occurs prior
392 * to receiving the status byte, in which case the driver does a
393 * status phase interrupt and gets the status byte on its own.
394 * While such a command can then be "resumed" (ie restarted to
395 * finish up as a LEVEL2 command), the LUN register will NOT be
396 * a valid status byte at the command's conclusion, and we must
397 * use the byte obtained during the earlier interrupt. Here, we
398 * preset SCp.Status to an illegal value (0xff) so that when
399 * this command finally completes, we can tell where the actual
400 * status byte is stored.
401 */
402
403 cmd->SCp.Status = ILLEGAL_STATUS_BYTE;
404
405/* We need to disable interrupts before messing with the input
406 * queue and calling in2000_execute().
407 */
408
409 /*
410 * Add the cmd to the end of 'input_Q'. Note that REQUEST_SENSE
411 * commands are added to the head of the queue so that the desired
412 * sense data is not lost before REQUEST_SENSE executes.
413 */
414
415 if (!(hostdata->input_Q) || (cmd->cmnd[0] == REQUEST_SENSE)) {
416 cmd->host_scribble = (uchar *) hostdata->input_Q;
417 hostdata->input_Q = cmd;
418 } else { /* find the end of the queue */
419 for (tmp = (Scsi_Cmnd *) hostdata->input_Q; tmp->host_scribble; tmp = (Scsi_Cmnd *) tmp->host_scribble);
420 tmp->host_scribble = (uchar *) cmd;
421 }
422
423/* We know that there's at least one command in 'input_Q' now.
424 * Go see if any of them are runnable!
425 */
426
427 in2000_execute(cmd->device->host);
428
429 DB(DB_QUEUE_COMMAND, printk(")Q "))
430 return 0;
431}
432
433static DEF_SCSI_QCMD(in2000_queuecommand)
434
435
436
437/*
438 * This routine attempts to start a scsi command. If the host_card is
439 * already connected, we give up immediately. Otherwise, look through
440 * the input_Q, using the first command we find that's intended
441 * for a currently non-busy target/lun.
442 * Note that this function is always called with interrupts already
443 * disabled (either from in2000_queuecommand() or in2000_intr()).
444 */
445static void in2000_execute(struct Scsi_Host *instance)
446{
447 struct IN2000_hostdata *hostdata;
448 Scsi_Cmnd *cmd, *prev;
449 int i;
450 unsigned short *sp;
451 unsigned short f;
452 unsigned short flushbuf[16];
453
454
455 hostdata = (struct IN2000_hostdata *) instance->hostdata;
456
457 DB(DB_EXECUTE, printk("EX("))
458
459 if (hostdata->selecting || hostdata->connected) {
460
461 DB(DB_EXECUTE, printk(")EX-0 "))
462
463 return;
464 }
465
466 /*
467 * Search through the input_Q for a command destined
468 * for an idle target/lun.
469 */
470
471 cmd = (Scsi_Cmnd *) hostdata->input_Q;
472 prev = NULL;
473 while (cmd) {
474 if (!(hostdata->busy[cmd->device->id] & (1 << cmd->device->lun)))
475 break;
476 prev = cmd;
477 cmd = (Scsi_Cmnd *) cmd->host_scribble;
478 }
479
480 /* quit if queue empty or all possible targets are busy */
481
482 if (!cmd) {
483
484 DB(DB_EXECUTE, printk(")EX-1 "))
485
486 return;
487 }
488
489 /* remove command from queue */
490
491 if (prev)
492 prev->host_scribble = cmd->host_scribble;
493 else
494 hostdata->input_Q = (Scsi_Cmnd *) cmd->host_scribble;
495
496#ifdef PROC_STATISTICS
497 hostdata->cmd_cnt[cmd->device->id]++;
498#endif
499
500/*
501 * Start the selection process
502 */
503
504 if (is_dir_out(cmd))
505 write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id);
506 else
507 write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id | DSTID_DPD);
508
509/* Now we need to figure out whether or not this command is a good
510 * candidate for disconnect/reselect. We guess to the best of our
511 * ability, based on a set of hierarchical rules. When several
512 * devices are operating simultaneously, disconnects are usually
513 * an advantage. In a single device system, or if only 1 device
514 * is being accessed, transfers usually go faster if disconnects
515 * are not allowed:
516 *
517 * + Commands should NEVER disconnect if hostdata->disconnect =
518 * DIS_NEVER (this holds for tape drives also), and ALWAYS
519 * disconnect if hostdata->disconnect = DIS_ALWAYS.
520 * + Tape drive commands should always be allowed to disconnect.
521 * + Disconnect should be allowed if disconnected_Q isn't empty.
522 * + Commands should NOT disconnect if input_Q is empty.
523 * + Disconnect should be allowed if there are commands in input_Q
524 * for a different target/lun. In this case, the other commands
525 * should be made disconnect-able, if not already.
526 *
527 * I know, I know - this code would flunk me out of any
528 * "C Programming 101" class ever offered. But it's easy
529 * to change around and experiment with for now.
530 */
531
532 cmd->SCp.phase = 0; /* assume no disconnect */
533 if (hostdata->disconnect == DIS_NEVER)
534 goto no;
535 if (hostdata->disconnect == DIS_ALWAYS)
536 goto yes;
537 if (cmd->device->type == 1) /* tape drive? */
538 goto yes;
539 if (hostdata->disconnected_Q) /* other commands disconnected? */
540 goto yes;
541 if (!(hostdata->input_Q)) /* input_Q empty? */
542 goto no;
543 for (prev = (Scsi_Cmnd *) hostdata->input_Q; prev; prev = (Scsi_Cmnd *) prev->host_scribble) {
544 if ((prev->device->id != cmd->device->id) || (prev->device->lun != cmd->device->lun)) {
545 for (prev = (Scsi_Cmnd *) hostdata->input_Q; prev; prev = (Scsi_Cmnd *) prev->host_scribble)
546 prev->SCp.phase = 1;
547 goto yes;
548 }
549 }
550 goto no;
551
552 yes:
553 cmd->SCp.phase = 1;
554
555#ifdef PROC_STATISTICS
556 hostdata->disc_allowed_cnt[cmd->device->id]++;
557#endif
558
559 no:
560 write_3393(hostdata, WD_SOURCE_ID, ((cmd->SCp.phase) ? SRCID_ER : 0));
561
562 write_3393(hostdata, WD_TARGET_LUN, cmd->device->lun);
563 write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, hostdata->sync_xfer[cmd->device->id]);
564 hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
565
566 if ((hostdata->level2 <= L2_NONE) || (hostdata->sync_stat[cmd->device->id] == SS_UNSET)) {
567
568 /*
569 * Do a 'Select-With-ATN' command. This will end with
570 * one of the following interrupts:
571 * CSR_RESEL_AM: failure - can try again later.
572 * CSR_TIMEOUT: failure - give up.
573 * CSR_SELECT: success - proceed.
574 */
575
576 hostdata->selecting = cmd;
577
578/* Every target has its own synchronous transfer setting, kept in
579 * the sync_xfer array, and a corresponding status byte in sync_stat[].
580 * Each target's sync_stat[] entry is initialized to SS_UNSET, and its
581 * sync_xfer[] entry is initialized to the default/safe value. SS_UNSET
582 * means that the parameters are undetermined as yet, and that we
583 * need to send an SDTR message to this device after selection is
584 * complete. We set SS_FIRST to tell the interrupt routine to do so,
585 * unless we don't want to even _try_ synchronous transfers: In this
586 * case we set SS_SET to make the defaults final.
587 */
588 if (hostdata->sync_stat[cmd->device->id] == SS_UNSET) {
589 if (hostdata->sync_off & (1 << cmd->device->id))
590 hostdata->sync_stat[cmd->device->id] = SS_SET;
591 else
592 hostdata->sync_stat[cmd->device->id] = SS_FIRST;
593 }
594 hostdata->state = S_SELECTING;
595 write_3393_count(hostdata, 0); /* this guarantees a DATA_PHASE interrupt */
596 write_3393_cmd(hostdata, WD_CMD_SEL_ATN);
597 }
598
599 else {
600
601 /*
602 * Do a 'Select-With-ATN-Xfer' command. This will end with
603 * one of the following interrupts:
604 * CSR_RESEL_AM: failure - can try again later.
605 * CSR_TIMEOUT: failure - give up.
606 * anything else: success - proceed.
607 */
608
609 hostdata->connected = cmd;
610 write_3393(hostdata, WD_COMMAND_PHASE, 0);
611
612 /* copy command_descriptor_block into WD chip
613 * (take advantage of auto-incrementing)
614 */
615
616 write1_io(WD_CDB_1, IO_WD_ADDR);
617 for (i = 0; i < cmd->cmd_len; i++)
618 write1_io(cmd->cmnd[i], IO_WD_DATA);
619
620 /* The wd33c93 only knows about Group 0, 1, and 5 commands when
621 * it's doing a 'select-and-transfer'. To be safe, we write the
622 * size of the CDB into the OWN_ID register for every case. This
623 * way there won't be problems with vendor-unique, audio, etc.
624 */
625
626 write_3393(hostdata, WD_OWN_ID, cmd->cmd_len);
627
628 /* When doing a non-disconnect command, we can save ourselves a DATA
629 * phase interrupt later by setting everything up now. With writes we
630 * need to pre-fill the fifo; if there's room for the 32 flush bytes,
631 * put them in there too - that'll avoid a fifo interrupt. Reads are
632 * somewhat simpler.
633 * KLUDGE NOTE: It seems that you can't completely fill the fifo here:
634 * This results in the IO_FIFO_COUNT register rolling over to zero,
635 * and apparently the gate array logic sees this as empty, not full,
636 * so the 3393 chip is never signalled to start reading from the
637 * fifo. Or maybe it's seen as a permanent fifo interrupt condition.
638 * Regardless, we fix this by temporarily pretending that the fifo
639 * is 16 bytes smaller. (I see now that the old driver has a comment
640 * about "don't fill completely" in an analogous place - must be the
641 * same deal.) This results in CDROM, swap partitions, and tape drives
642 * needing an extra interrupt per write command - I think we can live
643 * with that!
644 */
645
646 if (!(cmd->SCp.phase)) {
647 write_3393_count(hostdata, cmd->SCp.this_residual);
648 write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_BUS);
649 write1_io(0, IO_FIFO_WRITE); /* clear fifo counter, write mode */
650
651 if (is_dir_out(cmd)) {
652 hostdata->fifo = FI_FIFO_WRITING;
653 if ((i = cmd->SCp.this_residual) > (IN2000_FIFO_SIZE - 16))
654 i = IN2000_FIFO_SIZE - 16;
655 cmd->SCp.have_data_in = i; /* this much data in fifo */
656 i >>= 1; /* Gulp. Assuming modulo 2. */
657 sp = (unsigned short *) cmd->SCp.ptr;
658 f = hostdata->io_base + IO_FIFO;
659
660#ifdef FAST_WRITE_IO
661
662 FAST_WRITE2_IO();
663#else
664 while (i--)
665 write2_io(*sp++, IO_FIFO);
666
667#endif
668
669 /* Is there room for the flush bytes? */
670
671 if (cmd->SCp.have_data_in <= ((IN2000_FIFO_SIZE - 16) - 32)) {
672 sp = flushbuf;
673 i = 16;
674
675#ifdef FAST_WRITE_IO
676
677 FAST_WRITE2_IO();
678#else
679 while (i--)
680 write2_io(0, IO_FIFO);
681
682#endif
683
684 }
685 }
686
687 else {
688 write1_io(0, IO_FIFO_READ); /* put fifo in read mode */
689 hostdata->fifo = FI_FIFO_READING;
690 cmd->SCp.have_data_in = 0; /* nothing transferred yet */
691 }
692
693 } else {
694 write_3393_count(hostdata, 0); /* this guarantees a DATA_PHASE interrupt */
695 }
696 hostdata->state = S_RUNNING_LEVEL2;
697 write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
698 }
699
700 /*
701 * Since the SCSI bus can handle only 1 connection at a time,
702 * we get out of here now. If the selection fails, or when
703 * the command disconnects, we'll come back to this routine
704 * to search the input_Q again...
705 */
706
707 DB(DB_EXECUTE, printk("%s)EX-2 ", (cmd->SCp.phase) ? "d:" : ""))
708
709}
710
711
712
713static void transfer_pio(uchar * buf, int cnt, int data_in_dir, struct IN2000_hostdata *hostdata)
714{
715 uchar asr;
716
717 DB(DB_TRANSFER, printk("(%p,%d,%s)", buf, cnt, data_in_dir ? "in" : "out"))
718
719 write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
720 write_3393_count(hostdata, cnt);
721 write_3393_cmd(hostdata, WD_CMD_TRANS_INFO);
722 if (data_in_dir) {
723 do {
724 asr = READ_AUX_STAT();
725 if (asr & ASR_DBR)
726 *buf++ = read_3393(hostdata, WD_DATA);
727 } while (!(asr & ASR_INT));
728 } else {
729 do {
730 asr = READ_AUX_STAT();
731 if (asr & ASR_DBR)
732 write_3393(hostdata, WD_DATA, *buf++);
733 } while (!(asr & ASR_INT));
734 }
735
736 /* Note: we are returning with the interrupt UN-cleared.
737 * Since (presumably) an entire I/O operation has
738 * completed, the bus phase is probably different, and
739 * the interrupt routine will discover this when it
740 * responds to the uncleared int.
741 */
742
743}
744
745
746
/*
 * transfer_bytes - start a (possibly large) data transfer for 'cmd'
 * through the card's 2k fifo, in the direction given by 'data_in_dir'.
 * Reads simply arm the chip and return; writes additionally pre-fill
 * the fifo with up to IN2000_FIFO_SIZE bytes so the transfer can start
 * immediately.  Later fifo/chip interrupts move the remaining data.
 */
static void transfer_bytes(Scsi_Cmnd * cmd, int data_in_dir)
{
	struct IN2000_hostdata *hostdata;
	unsigned short *sp;
	unsigned short f;	/* io port address - consumed by FAST_WRITE2_IO() */
	int i;

	hostdata = (struct IN2000_hostdata *) cmd->device->host->hostdata;

/* Normally, you'd expect 'this_residual' to be non-zero here.
 * In a series of scatter-gather transfers, however, this
 * routine will usually be called with 'this_residual' equal
 * to 0 and 'buffers_residual' non-zero. This means that a
 * previous transfer completed, clearing 'this_residual', and
 * now we need to setup the next scatter-gather buffer as the
 * source or destination for THIS transfer.
 */
	if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) {
		++cmd->SCp.buffer;
		--cmd->SCp.buffers_residual;
		cmd->SCp.this_residual = cmd->SCp.buffer->length;
		cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
	}

/* Set up hardware registers: negotiated sync parameters for this target,
 * the byte count for the chip, and the fifo in (assumed) write mode.
 */

	write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, hostdata->sync_xfer[cmd->device->id]);
	write_3393_count(hostdata, cmd->SCp.this_residual);
	write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_BUS);
	write1_io(0, IO_FIFO_WRITE);	/* zero counter, assume write */

/* Reading is easy. Just issue the command and return - we'll
 * get an interrupt later when we have actual data to worry about.
 */

	if (data_in_dir) {
		write1_io(0, IO_FIFO_READ);
		/* Use a level2 'select-atn-xfer' sequence when permitted;
		 * otherwise fall back to a plain transfer-info command.
		 */
		if ((hostdata->level2 >= L2_DATA) || (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) {
			write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
			write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
			hostdata->state = S_RUNNING_LEVEL2;
		} else
			write_3393_cmd(hostdata, WD_CMD_TRANS_INFO);
		hostdata->fifo = FI_FIFO_READING;
		cmd->SCp.have_data_in = 0;	/* nothing transferred yet */
		return;
	}

/* Writing is more involved - we'll start the WD chip and write as
 * much data to the fifo as we can right now. Later interrupts will
 * write any bytes that don't make it at this stage.
 */

	if ((hostdata->level2 >= L2_DATA) || (hostdata->level2 == L2_BASIC && cmd->SCp.phase == 0)) {
		write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
		write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
		hostdata->state = S_RUNNING_LEVEL2;
	} else
		write_3393_cmd(hostdata, WD_CMD_TRANS_INFO);
	hostdata->fifo = FI_FIFO_WRITING;
	sp = (unsigned short *) cmd->SCp.ptr;

	/* Pre-fill the fifo with at most one fifo's worth of data. */
	if ((i = cmd->SCp.this_residual) > IN2000_FIFO_SIZE)
		i = IN2000_FIFO_SIZE;
	cmd->SCp.have_data_in = i;	/* this much will be in the fifo */
	i >>= 1;		/* Gulp. We assume this_residual is modulo 2 */
	f = hostdata->io_base + IO_FIFO;

#ifdef FAST_WRITE_IO

	FAST_WRITE2_IO();
#else
	while (i--)
		write2_io(*sp++, IO_FIFO);

#endif

}
825
826
827/* We need to use spin_lock_irqsave() & spin_unlock_irqrestore() in this
828 * function in order to work in an SMP environment. (I'd be surprised
829 * if the driver is ever used by anyone on a real multi-CPU motherboard,
830 * but it _does_ need to be able to compile and run in an SMP kernel.)
831 */
832
/*
 * in2000_intr - interrupt handler for the IN2000 card.
 *
 * The card OR's two interrupt sources onto one IRQ line: the WD33c93
 * SCSI chip and the on-board 2k fifo.  If the chip is not asserting an
 * interrupt (ASR_INT clear) the event is treated as fifo-only and is
 * serviced by draining/refilling the fifo; otherwise the WD33c93 status
 * register is read (which clears the chip interrupt) and a large switch
 * dispatches on the resulting phase/status code, driving the per-command
 * state machine in 'hostdata->state'.
 *
 * Always returns IRQ_HANDLED.  Runs with instance->host_lock held for
 * the duration (taken here, released on every exit path).
 */
static irqreturn_t in2000_intr(int irqnum, void *dev_id)
{
	struct Scsi_Host *instance = dev_id;
	struct IN2000_hostdata *hostdata;
	Scsi_Cmnd *patch, *cmd;
	uchar asr, sr, phs, id, lun, *ucp, msg;
	int i, j;
	unsigned long length;
	unsigned short *sp;
	unsigned short f;	/* io port address - consumed by FAST_*2_IO() */
	unsigned long flags;

	hostdata = (struct IN2000_hostdata *) instance->hostdata;

/* Get the spin_lock and disable further ints, for SMP */

	spin_lock_irqsave(instance->host_lock, flags);

#ifdef PROC_STATISTICS
	hostdata->int_cnt++;
#endif

/* The IN2000 card has 2 interrupt sources OR'ed onto its IRQ line - the
 * WD3393 chip and the 2k fifo (which is actually a dual-port RAM combined
 * with a big logic array, so it's a little different than what you might
 * expect). As far as I know, there's no reason that BOTH can't be active
 * at the same time, but there's a problem: while we can read the 3393
 * to tell if _it_ wants an interrupt, I don't know of a way to ask the
 * fifo the same question. The best we can do is check the 3393 and if
 * it _isn't_ the source of the interrupt, then we can be pretty sure
 * that the fifo is the culprit.
 *  UPDATE: I have it on good authority (Bill Earnest) that bit 0 of the
 *          IO_FIFO_COUNT register mirrors the fifo interrupt state. I
 *          assume that bit clear means interrupt active. As it turns
 *          out, the driver really doesn't need to check for this after
 *          all, so my remarks above about a 'problem' can safely be
 *          ignored. The way the logic is set up, there's no advantage
 *          (that I can see) to worrying about it.
 *
 * It seems that the fifo interrupt signal is negated when we extract
 * bytes during read or write bytes during write.
 *  - fifo will interrupt when data is moving from it to the 3393, and
 *    there are 31 (or less?) bytes left to go. This is sort of short-
 *    sighted: what if you don't WANT to do more? In any case, our
 *    response is to push more into the fifo - either actual data or
 *    dummy bytes if need be. Note that we apparently have to write at
 *    least 32 additional bytes to the fifo after an interrupt in order
 *    to get it to release the ones it was holding on to - writing fewer
 *    than 32 will result in another fifo int.
 *  UPDATE: Again, info from Bill Earnest makes this more understandable:
 *          32 bytes = two counts of the fifo counter register. He tells
 *          me that the fifo interrupt is a non-latching signal derived
 *          from a straightforward boolean interpretation of the 7
 *          highest bits of the fifo counter and the fifo-read/fifo-write
 *          state. Who'd a thought?
 */

	write1_io(0, IO_LED_ON);
	asr = READ_AUX_STAT();
	if (!(asr & ASR_INT)) {	/* no WD33c93 interrupt? */

/* Ok. This is definitely a FIFO-only interrupt.
 *
 * If FI_FIFO_READING is set, there are up to 2048 bytes waiting to be read,
 * maybe more to come from the SCSI bus. Read as many as we can out of the
 * fifo and into memory at the location of SCp.ptr[SCp.have_data_in], and
 * update have_data_in afterwards.
 *
 * If we have FI_FIFO_WRITING, the FIFO has almost run out of bytes to move
 * into the WD3393 chip (I think the interrupt happens when there are 31
 * bytes left, but it may be fewer...). The 3393 is still waiting, so we
 * shove some more into the fifo, which gets things moving again. If the
 * original SCSI command specified more than 2048 bytes, there may still
 * be some of that data left: fine - use it (from SCp.ptr[SCp.have_data_in]).
 * Don't forget to update have_data_in. If we've already written out the
 * entire buffer, feed 32 dummy bytes to the fifo - they're needed to
 * push out the remaining real data.
 *    (Big thanks to Bill Earnest for getting me out of the mud in here.)
 */

		cmd = (Scsi_Cmnd *) hostdata->connected;	/* assume we're connected */
		CHECK_NULL(cmd, "fifo_int")

		if (hostdata->fifo == FI_FIFO_READING) {

			DB(DB_FIFO, printk("{R:%02x} ", read1_io(IO_FIFO_COUNT)))

			sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
			/* Bit 0 of IO_FIFO_COUNT is the interrupt-state mirror
			 * (see above), so mask it off before scaling the count.
			 */
			i = read1_io(IO_FIFO_COUNT) & 0xfe;
			i <<= 2;	/* # of words waiting in the fifo */
			f = hostdata->io_base + IO_FIFO;

#ifdef FAST_READ_IO

			FAST_READ2_IO();
#else
			while (i--)
				*sp++ = read2_io(IO_FIFO);

#endif

			/* Work out how many bytes 'sp' actually advanced
			 * (FAST_READ2_IO() may consume 'i' differently).
			 */
			i = sp - (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
			i <<= 1;
			cmd->SCp.have_data_in += i;
		}

		else if (hostdata->fifo == FI_FIFO_WRITING) {

			DB(DB_FIFO, printk("{W:%02x} ", read1_io(IO_FIFO_COUNT)))

/* If all bytes have been written to the fifo, flush out the stragglers.
 * Note that while writing 16 dummy words seems arbitrary, we don't
 * have another choice that I can see. What we really want is to read
 * the 3393 transfer count register (that would tell us how many bytes
 * needed flushing), but the TRANSFER_INFO command hasn't completed
 * yet (not enough bytes!) and that register won't be accessible. So,
 * we use 16 words - a number obtained through trial and error.
 *  UPDATE: Bill says this is exactly what Always does, so there.
 *          More thanks due him for help in this section.
 */
			if (cmd->SCp.this_residual == cmd->SCp.have_data_in) {
				i = 16;
				while (i--)	/* write 32 dummy bytes */
					write2_io(0, IO_FIFO);
			}

/* If there are still bytes left in the SCSI buffer, write as many as we
 * can out to the fifo.
 */

			else {
				sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
				i = cmd->SCp.this_residual - cmd->SCp.have_data_in;	/* bytes yet to go */
				j = read1_io(IO_FIFO_COUNT) & 0xfe;
				j <<= 2;	/* how many words the fifo has room for */
				if ((j << 1) > i)
					j = (i >> 1);
				while (j--)
					write2_io(*sp++, IO_FIFO);

				i = sp - (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);
				i <<= 1;
				cmd->SCp.have_data_in += i;
			}
		}

		else {
			printk("*** Spurious FIFO interrupt ***");
		}

		write1_io(0, IO_LED_OFF);

/* release the SMP spin_lock and restore irq state */
		spin_unlock_irqrestore(instance->host_lock, flags);
		return IRQ_HANDLED;
	}

/* This interrupt was triggered by the WD33c93 chip. The fifo interrupt
 * may also be asserted, but we don't bother to check it: we get more
 * detailed info from FIFO_READING and FIFO_WRITING (see below).
 */

	cmd = (Scsi_Cmnd *) hostdata->connected;	/* assume we're connected */
	sr = read_3393(hostdata, WD_SCSI_STATUS);	/* clear the interrupt */
	phs = read_3393(hostdata, WD_COMMAND_PHASE);

	/* Bail out on an interrupt with no command, except for the three
	 * status codes that legitimately arrive while unconnected.
	 */
	if (!cmd && (sr != CSR_RESEL_AM && sr != CSR_TIMEOUT && sr != CSR_SELECT)) {
		printk("\nNR:wd-intr-1\n");
		write1_io(0, IO_LED_OFF);

/* release the SMP spin_lock and restore irq state */
		spin_unlock_irqrestore(instance->host_lock, flags);
		return IRQ_HANDLED;
	}

	DB(DB_INTR, printk("{%02x:%02x-", asr, sr))

/* After starting a FIFO-based transfer, the next _WD3393_ interrupt is
 * guaranteed to be in response to the completion of the transfer.
 * If we were reading, there's probably data in the fifo that needs
 * to be copied into RAM - do that here. Also, we have to update
 * 'this_residual' and 'ptr' based on the contents of the
 * TRANSFER_COUNT register, in case the device decided to do an
 * intermediate disconnect (a device may do this if it has to
 * do a seek,  or just to be nice and let other devices have
 * some bus time during long transfers).
 * After doing whatever is necessary with the fifo, we go on and
 * service the WD3393 interrupt normally.
 */
	if (hostdata->fifo == FI_FIFO_READING) {

/* buffer index = start-of-buffer + #-of-bytes-already-read */

		sp = (unsigned short *) (cmd->SCp.ptr + cmd->SCp.have_data_in);

/* bytes remaining in fifo = (total-wanted - #-not-got) - #-already-read */

		i = (cmd->SCp.this_residual - read_3393_count(hostdata)) - cmd->SCp.have_data_in;
		i >>= 1;	/* Gulp. We assume this will always be modulo 2 */
		f = hostdata->io_base + IO_FIFO;

#ifdef FAST_READ_IO

		FAST_READ2_IO();
#else
		while (i--)
			*sp++ = read2_io(IO_FIFO);

#endif

		hostdata->fifo = FI_FIFO_UNUSED;
		length = cmd->SCp.this_residual;
		cmd->SCp.this_residual = read_3393_count(hostdata);
		cmd->SCp.ptr += (length - cmd->SCp.this_residual);

		DB(DB_TRANSFER, printk("(%p,%d)", cmd->SCp.ptr, cmd->SCp.this_residual))

	}

	else if (hostdata->fifo == FI_FIFO_WRITING) {
		hostdata->fifo = FI_FIFO_UNUSED;
		length = cmd->SCp.this_residual;
		cmd->SCp.this_residual = read_3393_count(hostdata);
		cmd->SCp.ptr += (length - cmd->SCp.this_residual);

		DB(DB_TRANSFER, printk("(%p,%d)", cmd->SCp.ptr, cmd->SCp.this_residual))

	}

/* Respond to the specific WD3393 interrupt - there are quite a few! */

	switch (sr) {

	case CSR_TIMEOUT:
		DB(DB_INTR, printk("TIMEOUT"))

		/* NOTE(review): if state == S_RUNNING_LEVEL2 and 'connected'
		 * was NULL, 'cmd' is NULL here and the dereferences below
		 * would oops.  The guard above deliberately lets CSR_TIMEOUT
		 * through with cmd == NULL; presumably a level2 timeout only
		 * occurs while connected - confirm before relying on it.
		 */
		if (hostdata->state == S_RUNNING_LEVEL2)
			hostdata->connected = NULL;
		else {
			cmd = (Scsi_Cmnd *) hostdata->selecting;	/* get a valid cmd */
			CHECK_NULL(cmd, "csr_timeout")
			hostdata->selecting = NULL;
		}

		cmd->result = DID_NO_CONNECT << 16;
		hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
		hostdata->state = S_UNCONNECTED;
		cmd->scsi_done(cmd);

/* We are not connected to a target - check to see if there
 * are commands waiting to be executed.
 */

		in2000_execute(instance);
		break;


/* Note: this interrupt should not occur in a LEVEL2 command */

	case CSR_SELECT:
		DB(DB_INTR, printk("SELECT"))
		    hostdata->connected = cmd = (Scsi_Cmnd *) hostdata->selecting;
		CHECK_NULL(cmd, "csr_select")
		    hostdata->selecting = NULL;

		/* construct an IDENTIFY message with correct disconnect bit */

		hostdata->outgoing_msg[0] = (0x80 | 0x00 | cmd->device->lun);
		if (cmd->SCp.phase)
			hostdata->outgoing_msg[0] |= 0x40;

		if (hostdata->sync_stat[cmd->device->id] == SS_FIRST) {
#ifdef SYNC_DEBUG
			printk(" sending SDTR ");
#endif

			hostdata->sync_stat[cmd->device->id] = SS_WAITING;

			/* tack on a 2nd message to ask about synchronous transfers */

			hostdata->outgoing_msg[1] = EXTENDED_MESSAGE;
			hostdata->outgoing_msg[2] = 3;
			hostdata->outgoing_msg[3] = EXTENDED_SDTR;
			hostdata->outgoing_msg[4] = OPTIMUM_SX_PER / 4;
			hostdata->outgoing_msg[5] = OPTIMUM_SX_OFF;
			hostdata->outgoing_len = 6;
		} else
			hostdata->outgoing_len = 1;

		hostdata->state = S_CONNECTED;
		break;


	case CSR_XFER_DONE | PHS_DATA_IN:
	case CSR_UNEXP | PHS_DATA_IN:
	case CSR_SRV_REQ | PHS_DATA_IN:
		DB(DB_INTR, printk("IN-%d.%d", cmd->SCp.this_residual, cmd->SCp.buffers_residual))
		    transfer_bytes(cmd, DATA_IN_DIR);
		if (hostdata->state != S_RUNNING_LEVEL2)
			hostdata->state = S_CONNECTED;
		break;


	case CSR_XFER_DONE | PHS_DATA_OUT:
	case CSR_UNEXP | PHS_DATA_OUT:
	case CSR_SRV_REQ | PHS_DATA_OUT:
		DB(DB_INTR, printk("OUT-%d.%d", cmd->SCp.this_residual, cmd->SCp.buffers_residual))
		    transfer_bytes(cmd, DATA_OUT_DIR);
		if (hostdata->state != S_RUNNING_LEVEL2)
			hostdata->state = S_CONNECTED;
		break;


/* Note: this interrupt should not occur in a LEVEL2 command */

	case CSR_XFER_DONE | PHS_COMMAND:
	case CSR_UNEXP | PHS_COMMAND:
	case CSR_SRV_REQ | PHS_COMMAND:
		DB(DB_INTR, printk("CMND-%02x", cmd->cmnd[0]))
		    transfer_pio(cmd->cmnd, cmd->cmd_len, DATA_OUT_DIR, hostdata);
		hostdata->state = S_CONNECTED;
		break;


	case CSR_XFER_DONE | PHS_STATUS:
	case CSR_UNEXP | PHS_STATUS:
	case CSR_SRV_REQ | PHS_STATUS:
		DB(DB_INTR, printk("STATUS="))

		cmd->SCp.Status = read_1_byte(hostdata);
		DB(DB_INTR, printk("%02x", cmd->SCp.Status))
		if (hostdata->level2 >= L2_BASIC) {
			sr = read_3393(hostdata, WD_SCSI_STATUS);	/* clear interrupt */
			hostdata->state = S_RUNNING_LEVEL2;
			write_3393(hostdata, WD_COMMAND_PHASE, 0x50);
			write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
		} else {
			hostdata->state = S_CONNECTED;
		}
		break;


	case CSR_XFER_DONE | PHS_MESS_IN:
	case CSR_UNEXP | PHS_MESS_IN:
	case CSR_SRV_REQ | PHS_MESS_IN:
		DB(DB_INTR, printk("MSG_IN="))

		msg = read_1_byte(hostdata);
		sr = read_3393(hostdata, WD_SCSI_STATUS);	/* clear interrupt */

		/* Accumulate extended messages in incoming_msg[]; single-byte
		 * messages reset the accumulator index.
		 */
		hostdata->incoming_msg[hostdata->incoming_ptr] = msg;
		if (hostdata->incoming_msg[0] == EXTENDED_MESSAGE)
			msg = EXTENDED_MESSAGE;
		else
			hostdata->incoming_ptr = 0;

		cmd->SCp.Message = msg;
		switch (msg) {

		case COMMAND_COMPLETE:
			DB(DB_INTR, printk("CCMP"))
			    write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
			hostdata->state = S_PRE_CMP_DISC;
			break;

		case SAVE_POINTERS:
			DB(DB_INTR, printk("SDP"))
			    write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
			hostdata->state = S_CONNECTED;
			break;

		case RESTORE_POINTERS:
			DB(DB_INTR, printk("RDP"))
			if (hostdata->level2 >= L2_BASIC) {
				write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
				write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
				hostdata->state = S_RUNNING_LEVEL2;
			} else {
				write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
				hostdata->state = S_CONNECTED;
			}
			break;

		case DISCONNECT:
			DB(DB_INTR, printk("DIS"))
			    cmd->device->disconnect = 1;
			write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
			hostdata->state = S_PRE_TMP_DISC;
			break;

		case MESSAGE_REJECT:
			DB(DB_INTR, printk("REJ"))
#ifdef SYNC_DEBUG
			    printk("-REJ-");
#endif
			/* A rejected SDTR means the target wants asynchronous
			 * transfers - mark negotiation as settled.
			 */
			if (hostdata->sync_stat[cmd->device->id] == SS_WAITING)
				hostdata->sync_stat[cmd->device->id] = SS_SET;
			write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
			hostdata->state = S_CONNECTED;
			break;

		case EXTENDED_MESSAGE:
			DB(DB_INTR, printk("EXT"))

			ucp = hostdata->incoming_msg;

#ifdef SYNC_DEBUG
			printk("%02x", ucp[hostdata->incoming_ptr]);
#endif
			/* Is this the last byte of the extended message? */
			/* (ucp[1] is the length byte of the extended message) */

			if ((hostdata->incoming_ptr >= 2) && (hostdata->incoming_ptr == (ucp[1] + 1))) {

				switch (ucp[2]) {	/* what's the EXTENDED code? */
				case EXTENDED_SDTR:
					id = calc_sync_xfer(ucp[3], ucp[4]);
					if (hostdata->sync_stat[cmd->device->id] != SS_WAITING) {

/* A device has sent an unsolicited SDTR message; rather than go
 * through the effort of decoding it and then figuring out what
 * our reply should be, we're just gonna say that we have a
 * synchronous fifo depth of 0. This will result in asynchronous
 * transfers - not ideal but so much easier.
 * Actually, this is OK because it assures us that if we don't
 * specifically ask for sync transfers, we won't do any.
 */

						write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN);	/* want MESS_OUT */
						hostdata->outgoing_msg[0] = EXTENDED_MESSAGE;
						hostdata->outgoing_msg[1] = 3;
						hostdata->outgoing_msg[2] = EXTENDED_SDTR;
						hostdata->outgoing_msg[3] = hostdata->default_sx_per / 4;
						hostdata->outgoing_msg[4] = 0;
						hostdata->outgoing_len = 5;
						hostdata->sync_xfer[cmd->device->id] = calc_sync_xfer(hostdata->default_sx_per / 4, 0);
					} else {
						hostdata->sync_xfer[cmd->device->id] = id;
					}
#ifdef SYNC_DEBUG
					printk("sync_xfer=%02x", hostdata->sync_xfer[cmd->device->id]);
#endif
					hostdata->sync_stat[cmd->device->id] = SS_SET;
					write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
					hostdata->state = S_CONNECTED;
					break;
				case EXTENDED_WDTR:
					write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN);	/* want MESS_OUT */
					printk("sending WDTR ");
					hostdata->outgoing_msg[0] = EXTENDED_MESSAGE;
					hostdata->outgoing_msg[1] = 2;
					hostdata->outgoing_msg[2] = EXTENDED_WDTR;
					hostdata->outgoing_msg[3] = 0;	/* 8 bit transfer width */
					hostdata->outgoing_len = 4;
					write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
					hostdata->state = S_CONNECTED;
					break;
				default:
					write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN);	/* want MESS_OUT */
					printk("Rejecting Unknown Extended Message(%02x). ", ucp[2]);
					hostdata->outgoing_msg[0] = MESSAGE_REJECT;
					hostdata->outgoing_len = 1;
					write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
					hostdata->state = S_CONNECTED;
					break;
				}
				hostdata->incoming_ptr = 0;
			}

			/* We need to read more MESS_IN bytes for the extended message */

			else {
				hostdata->incoming_ptr++;
				write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
				hostdata->state = S_CONNECTED;
			}
			break;

		default:
			printk("Rejecting Unknown Message(%02x) ", msg);
			write_3393_cmd(hostdata, WD_CMD_ASSERT_ATN);	/* want MESS_OUT */
			hostdata->outgoing_msg[0] = MESSAGE_REJECT;
			hostdata->outgoing_len = 1;
			write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
			hostdata->state = S_CONNECTED;
		}
		break;


/* Note: this interrupt will occur only after a LEVEL2 command */

	case CSR_SEL_XFER_DONE:

/* Make sure that reselection is enabled at this point - it may
 * have been turned off for the command that just completed.
 */

		write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
		if (phs == 0x60) {
			DB(DB_INTR, printk("SX-DONE"))
			    cmd->SCp.Message = COMMAND_COMPLETE;
			lun = read_3393(hostdata, WD_TARGET_LUN);
			DB(DB_INTR, printk(":%d.%d", cmd->SCp.Status, lun))
			    hostdata->connected = NULL;
			hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
			hostdata->state = S_UNCONNECTED;
			if (cmd->SCp.Status == ILLEGAL_STATUS_BYTE)
				cmd->SCp.Status = lun;
			if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
				cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
			else
				cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
			cmd->scsi_done(cmd);

/* We are no longer connected to a target - check to see if
 * there are commands waiting to be executed.
 */

			in2000_execute(instance);
		} else {
			printk("%02x:%02x:%02x: Unknown SEL_XFER_DONE phase!!---", asr, sr, phs);
		}
		break;


/* Note: this interrupt will occur only after a LEVEL2 command */

	case CSR_SDP:
		DB(DB_INTR, printk("SDP"))
		    hostdata->state = S_RUNNING_LEVEL2;
		write_3393(hostdata, WD_COMMAND_PHASE, 0x41);
		write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
		break;


	case CSR_XFER_DONE | PHS_MESS_OUT:
	case CSR_UNEXP | PHS_MESS_OUT:
	case CSR_SRV_REQ | PHS_MESS_OUT:
		DB(DB_INTR, printk("MSG_OUT="))

/* To get here, we've probably requested MESSAGE_OUT and have
 * already put the correct bytes in outgoing_msg[] and filled
 * in outgoing_len. We simply send them out to the SCSI bus.
 * Sometimes we get MESSAGE_OUT phase when we're not expecting
 * it - like when our SDTR message is rejected by a target. Some
 * targets send the REJECT before receiving all of the extended
 * message, and then seem to go back to MESSAGE_OUT for a byte
 * or two. Not sure why, or if I'm doing something wrong to
 * cause this to happen. Regardless, it seems that sending
 * NOP messages in these situations results in no harm and
 * makes everyone happy.
 */
		if (hostdata->outgoing_len == 0) {
			hostdata->outgoing_len = 1;
			hostdata->outgoing_msg[0] = NOP;
		}
		transfer_pio(hostdata->outgoing_msg, hostdata->outgoing_len, DATA_OUT_DIR, hostdata);
		DB(DB_INTR, printk("%02x", hostdata->outgoing_msg[0]))
		    hostdata->outgoing_len = 0;
		hostdata->state = S_CONNECTED;
		break;


	case CSR_UNEXP_DISC:

/* I think I've seen this after a request-sense that was in response
 * to an error condition, but not sure. We certainly need to do
 * something when we get this interrupt - the question is 'what?'.
 * Let's think positively, and assume some command has finished
 * in a legal manner (like a command that provokes a request-sense),
 * so we treat it as a normal command-complete-disconnect.
 */


/* Make sure that reselection is enabled at this point - it may
 * have been turned off for the command that just completed.
 */

		write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
		if (cmd == NULL) {
			printk(" - Already disconnected! ");
			hostdata->state = S_UNCONNECTED;

/* release the SMP spin_lock and restore irq state */
			spin_unlock_irqrestore(instance->host_lock, flags);
			return IRQ_HANDLED;
		}
		DB(DB_INTR, printk("UNEXP_DISC"))
		    hostdata->connected = NULL;
		hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
		hostdata->state = S_UNCONNECTED;
		if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
			cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
		else
			cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
		cmd->scsi_done(cmd);

/* We are no longer connected to a target - check to see if
 * there are commands waiting to be executed.
 */

		in2000_execute(instance);
		break;


	case CSR_DISC:

/* Make sure that reselection is enabled at this point - it may
 * have been turned off for the command that just completed.
 */

		write_3393(hostdata, WD_SOURCE_ID, SRCID_ER);
		DB(DB_INTR, printk("DISC"))
		/* NOTE(review): unlike CSR_UNEXP_DISC there is no early
		 * return here; with cmd == NULL we fall into the switch
		 * below with state just forced to S_UNCONNECTED, which
		 * lands in the (cmd-free) default arm - so no NULL deref,
		 * but the "Unexpected DISCONNECT" message is also printed.
		 */
		if (cmd == NULL) {
			printk(" - Already disconnected! ");
			hostdata->state = S_UNCONNECTED;
		}
		switch (hostdata->state) {
		case S_PRE_CMP_DISC:
			hostdata->connected = NULL;
			hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
			hostdata->state = S_UNCONNECTED;
			DB(DB_INTR, printk(":%d", cmd->SCp.Status))
			if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
				cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16);
			else
				cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
			cmd->scsi_done(cmd);
			break;
		case S_PRE_TMP_DISC:
		case S_RUNNING_LEVEL2:
			/* Temporary disconnect: park the command on the
			 * disconnected queue until the target reselects us.
			 */
			cmd->host_scribble = (uchar *) hostdata->disconnected_Q;
			hostdata->disconnected_Q = cmd;
			hostdata->connected = NULL;
			hostdata->state = S_UNCONNECTED;

#ifdef PROC_STATISTICS
			hostdata->disc_done_cnt[cmd->device->id]++;
#endif

			break;
		default:
			printk("*** Unexpected DISCONNECT interrupt! ***");
			hostdata->state = S_UNCONNECTED;
		}

/* We are no longer connected to a target - check to see if
 * there are commands waiting to be executed.
 */

		in2000_execute(instance);
		break;


	case CSR_RESEL_AM:
		DB(DB_INTR, printk("RESEL"))

		    /* First we have to make sure this reselection didn't */
		    /* happen during Arbitration/Selection of some other device. */
		    /* If yes, put losing command back on top of input_Q. */
		if (hostdata->level2 <= L2_NONE) {

			if (hostdata->selecting) {
				cmd = (Scsi_Cmnd *) hostdata->selecting;
				hostdata->selecting = NULL;
				hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
				cmd->host_scribble = (uchar *) hostdata->input_Q;
				hostdata->input_Q = cmd;
			}
		}

		else {

			if (cmd) {
				if (phs == 0x00) {
					hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
					cmd->host_scribble = (uchar *) hostdata->input_Q;
					hostdata->input_Q = cmd;
				} else {
					printk("---%02x:%02x:%02x-TROUBLE: Intrusive ReSelect!---", asr, sr, phs);
					/* Deliberate hang: this condition was
					 * considered unrecoverable by the author.
					 */
					while (1)
						printk("\r");
				}
			}

		}

		/* OK - find out which device reselected us. */

		id = read_3393(hostdata, WD_SOURCE_ID);
		id &= SRCID_MASK;

		/* and extract the lun from the ID message. (Note that we don't
		 * bother to check for a valid message here - I guess this is
		 * not the right way to go, but....)
		 */

		lun = read_3393(hostdata, WD_DATA);
		if (hostdata->level2 < L2_RESELECT)
			write_3393_cmd(hostdata, WD_CMD_NEGATE_ACK);
		lun &= 7;

		/* Now we look for the command that's reconnecting. */

		cmd = (Scsi_Cmnd *) hostdata->disconnected_Q;
		patch = NULL;
		while (cmd) {
			if (id == cmd->device->id && lun == cmd->device->lun)
				break;
			patch = cmd;
			cmd = (Scsi_Cmnd *) cmd->host_scribble;
		}

		/* Hmm. Couldn't find a valid command.... What to do? */

		if (!cmd) {
			printk("---TROUBLE: target %d.%d not in disconnect queue---", id, lun);
			break;
		}

		/* Ok, found the command - now start it up again. */

		if (patch)
			patch->host_scribble = cmd->host_scribble;
		else
			hostdata->disconnected_Q = (Scsi_Cmnd *) cmd->host_scribble;
		hostdata->connected = cmd;

		/* We don't need to worry about 'initialize_SCp()' or 'hostdata->busy[]'
		 * because these things are preserved over a disconnect.
		 * But we DO need to fix the DPD bit so it's correct for this command.
		 */

		if (is_dir_out(cmd))
			write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id);
		else
			write_3393(hostdata, WD_DESTINATION_ID, cmd->device->id | DSTID_DPD);
		if (hostdata->level2 >= L2_RESELECT) {
			write_3393_count(hostdata, 0);	/* we want a DATA_PHASE interrupt */
			write_3393(hostdata, WD_COMMAND_PHASE, 0x45);
			write_3393_cmd(hostdata, WD_CMD_SEL_ATN_XFER);
			hostdata->state = S_RUNNING_LEVEL2;
		} else
			hostdata->state = S_CONNECTED;

		break;

	default:
		printk("--UNKNOWN INTERRUPT:%02x:%02x:%02x--", asr, sr, phs);
	}

	write1_io(0, IO_LED_OFF);

	DB(DB_INTR, printk("} "))

/* release the SMP spin_lock and restore irq state */
	spin_unlock_irqrestore(instance->host_lock, flags);
	return IRQ_HANDLED;
}
1591
1592
1593
/* 'type' argument for reset_hardware(): chip-only vs. chip + SCSI bus. */
#define RESET_CARD 0
#define RESET_CARD_AND_BUS 1
/* OR'ed into reset_hardware()'s return value when the WD_QUEUE_TAG
 * register is writable, i.e. the chip is a WD33C93B part. */
#define B_FLAG 0x80
1597
1598/*
1599 * Caller must hold instance lock!
1600 */
1601
/*
 * reset_hardware() - reset the WD33c93 chip, and optionally the whole
 * card + SCSI bus (type == RESET_CARD_AND_BUS).
 *
 * Returns the last WD_SCSI_STATUS value read, with B_FLAG OR'ed in when
 * the WD_QUEUE_TAG register holds a written value (a 'B'-revision chip).
 * Register accesses are order-dependent; caller must hold the instance
 * lock (see comment above).
 */
static int reset_hardware(struct Scsi_Host *instance, int type)
{
	struct IN2000_hostdata *hostdata;
	int qt, x;

	hostdata = (struct IN2000_hostdata *) instance->hostdata;

	write1_io(0, IO_LED_ON);
	if (type == RESET_CARD_AND_BUS) {
		write1_io(0, IO_CARD_RESET);	/* start the card's master reset... */
		x = read1_io(IO_HARDWARE);	/* ...reading IO_HARDWARE stops it */
	}
	x = read_3393(hostdata, WD_SCSI_STATUS);	/* clear any WD intrpt */
	write_3393(hostdata, WD_OWN_ID, instance->this_id | OWNID_EAF | OWNID_RAF | OWNID_FS_8);
	write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
	write_3393(hostdata, WD_SYNCHRONOUS_TRANSFER, calc_sync_xfer(hostdata->default_sx_per / 4, DEFAULT_SX_OFF));

	write1_io(0, IO_FIFO_WRITE);	/* clear fifo counter */
	write1_io(0, IO_FIFO_READ);	/* start fifo out in read mode */
	write_3393(hostdata, WD_COMMAND, WD_CMD_RESET);
	/* FIXME: timeout ?? */
	while (!(READ_AUX_STAT() & ASR_INT))
		cpu_relax();	/* wait for RESET to complete */

	x = read_3393(hostdata, WD_SCSI_STATUS);	/* clear interrupt */

	/* Probe for a 'B' chip: only that revision latches WD_QUEUE_TAG. */
	write_3393(hostdata, WD_QUEUE_TAG, 0xa5);	/* any random number */
	qt = read_3393(hostdata, WD_QUEUE_TAG);
	if (qt == 0xa5) {
		x |= B_FLAG;
		write_3393(hostdata, WD_QUEUE_TAG, 0);
	}
	write_3393(hostdata, WD_TIMEOUT_PERIOD, TIMEOUT_PERIOD_VALUE);
	write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
	write1_io(0, IO_LED_OFF);
	return x;
}
1639
1640
1641
/*
 * in2000_bus_reset() - eh_bus_reset_handler: hard-reset the card and the
 * SCSI bus, then discard all per-target and queue state so the mid-layer
 * can reissue outstanding commands.  Always returns SUCCESS.
 */
static int in2000_bus_reset(Scsi_Cmnd * cmd)
{
	struct Scsi_Host *instance;
	struct IN2000_hostdata *hostdata;
	int x;
	unsigned long flags;

	instance = cmd->device->host;
	hostdata = (struct IN2000_hostdata *) instance->hostdata;

	printk(KERN_WARNING "scsi%d: Reset. ", instance->host_no);

	spin_lock_irqsave(instance->host_lock, flags);

	/* do scsi-reset here */
	reset_hardware(instance, RESET_CARD_AND_BUS);
	for (x = 0; x < 8; x++) {
		hostdata->busy[x] = 0;
		hostdata->sync_xfer[x] = calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF);
		hostdata->sync_stat[x] = SS_UNSET;	/* using default sync values */
	}
	/* Forget every queued/active command; the EH layer owns them now. */
	hostdata->input_Q = NULL;
	hostdata->selecting = NULL;
	hostdata->connected = NULL;
	hostdata->disconnected_Q = NULL;
	hostdata->state = S_UNCONNECTED;
	hostdata->fifo = FI_FIFO_UNUSED;
	hostdata->incoming_ptr = 0;
	hostdata->outgoing_len = 0;

	cmd->result = DID_RESET << 16;

	spin_unlock_irqrestore(instance->host_lock, flags);
	return SUCCESS;
}
1677
1678static int __in2000_abort(Scsi_Cmnd * cmd)
1679{
1680 struct Scsi_Host *instance;
1681 struct IN2000_hostdata *hostdata;
1682 Scsi_Cmnd *tmp, *prev;
1683 uchar sr, asr;
1684 unsigned long timeout;
1685
1686 instance = cmd->device->host;
1687 hostdata = (struct IN2000_hostdata *) instance->hostdata;
1688
1689 printk(KERN_DEBUG "scsi%d: Abort-", instance->host_no);
1690 printk("(asr=%02x,count=%ld,resid=%d,buf_resid=%d,have_data=%d,FC=%02x)- ", READ_AUX_STAT(), read_3393_count(hostdata), cmd->SCp.this_residual, cmd->SCp.buffers_residual, cmd->SCp.have_data_in, read1_io(IO_FIFO_COUNT));
1691
1692/*
1693 * Case 1 : If the command hasn't been issued yet, we simply remove it
1694 * from the inout_Q.
1695 */
1696
1697 tmp = (Scsi_Cmnd *) hostdata->input_Q;
1698 prev = NULL;
1699 while (tmp) {
1700 if (tmp == cmd) {
1701 if (prev)
1702 prev->host_scribble = cmd->host_scribble;
1703 cmd->host_scribble = NULL;
1704 cmd->result = DID_ABORT << 16;
1705 printk(KERN_WARNING "scsi%d: Abort - removing command from input_Q. ", instance->host_no);
1706 cmd->scsi_done(cmd);
1707 return SUCCESS;
1708 }
1709 prev = tmp;
1710 tmp = (Scsi_Cmnd *) tmp->host_scribble;
1711 }
1712
1713/*
1714 * Case 2 : If the command is connected, we're going to fail the abort
1715 * and let the high level SCSI driver retry at a later time or
1716 * issue a reset.
1717 *
1718 * Timeouts, and therefore aborted commands, will be highly unlikely
1719 * and handling them cleanly in this situation would make the common
1720 * case of noresets less efficient, and would pollute our code. So,
1721 * we fail.
1722 */
1723
1724 if (hostdata->connected == cmd) {
1725
1726 printk(KERN_WARNING "scsi%d: Aborting connected command - ", instance->host_no);
1727
1728 printk("sending wd33c93 ABORT command - ");
1729 write_3393(hostdata, WD_CONTROL, CTRL_IDI | CTRL_EDI | CTRL_POLLED);
1730 write_3393_cmd(hostdata, WD_CMD_ABORT);
1731
1732/* Now we have to attempt to flush out the FIFO... */
1733
1734 printk("flushing fifo - ");
1735 timeout = 1000000;
1736 do {
1737 asr = READ_AUX_STAT();
1738 if (asr & ASR_DBR)
1739 read_3393(hostdata, WD_DATA);
1740 } while (!(asr & ASR_INT) && timeout-- > 0);
1741 sr = read_3393(hostdata, WD_SCSI_STATUS);
1742 printk("asr=%02x, sr=%02x, %ld bytes un-transferred (timeout=%ld) - ", asr, sr, read_3393_count(hostdata), timeout);
1743
1744 /*
1745 * Abort command processed.
1746 * Still connected.
1747 * We must disconnect.
1748 */
1749
1750 printk("sending wd33c93 DISCONNECT command - ");
1751 write_3393_cmd(hostdata, WD_CMD_DISCONNECT);
1752
1753 timeout = 1000000;
1754 asr = READ_AUX_STAT();
1755 while ((asr & ASR_CIP) && timeout-- > 0)
1756 asr = READ_AUX_STAT();
1757 sr = read_3393(hostdata, WD_SCSI_STATUS);
1758 printk("asr=%02x, sr=%02x.", asr, sr);
1759
1760 hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
1761 hostdata->connected = NULL;
1762 hostdata->state = S_UNCONNECTED;
1763 cmd->result = DID_ABORT << 16;
1764 cmd->scsi_done(cmd);
1765
1766 in2000_execute(instance);
1767
1768 return SUCCESS;
1769 }
1770
1771/*
1772 * Case 3: If the command is currently disconnected from the bus,
1773 * we're not going to expend much effort here: Let's just return
1774 * an ABORT_SNOOZE and hope for the best...
1775 */
1776
1777 for (tmp = (Scsi_Cmnd *) hostdata->disconnected_Q; tmp; tmp = (Scsi_Cmnd *) tmp->host_scribble)
1778 if (cmd == tmp) {
1779 printk(KERN_DEBUG "scsi%d: unable to abort disconnected command.\n", instance->host_no);
1780 return FAILED;
1781 }
1782
1783/*
1784 * Case 4 : If we reached this point, the command was not found in any of
1785 * the queues.
1786 *
1787 * We probably reached this point because of an unlikely race condition
1788 * between the command completing successfully and the abortion code,
1789 * so we won't panic, but we will notify the user in case something really
1790 * broke.
1791 */
1792
1793 in2000_execute(instance);
1794
1795 printk("scsi%d: warning : SCSI command probably completed successfully" " before abortion. ", instance->host_no);
1796 return SUCCESS;
1797}
1798
1799static int in2000_abort(Scsi_Cmnd * cmd)
1800{
1801 int rc;
1802
1803 spin_lock_irq(cmd->device->host->host_lock);
1804 rc = __in2000_abort(cmd);
1805 spin_unlock_irq(cmd->device->host->host_lock);
1806
1807 return rc;
1808}
1809
1810
#define MAX_IN2000_HOSTS 3
#define MAX_SETUP_ARGS ARRAY_SIZE(setup_args)
#define SETUP_BUFFER_SIZE 200
static char setup_buffer[SETUP_BUFFER_SIZE];	/* private, mutable copy of the option string */
static char setup_used[MAX_SETUP_ARGS];	/* per-arg flag: consumed by check_setup_args() */
static int done_setup = 0;	/* set once in2000_setup() has run */
1817
1818static void __init in2000_setup(char *str, int *ints)
1819{
1820 int i;
1821 char *p1, *p2;
1822
1823 strlcpy(setup_buffer, str, SETUP_BUFFER_SIZE);
1824 p1 = setup_buffer;
1825 i = 0;
1826 while (*p1 && (i < MAX_SETUP_ARGS)) {
1827 p2 = strchr(p1, ',');
1828 if (p2) {
1829 *p2 = '\0';
1830 if (p1 != p2)
1831 setup_args[i] = p1;
1832 p1 = p2 + 1;
1833 i++;
1834 } else {
1835 setup_args[i] = p1;
1836 break;
1837 }
1838 }
1839 for (i = 0; i < MAX_SETUP_ARGS; i++)
1840 setup_used[i] = 0;
1841 done_setup = 1;
1842}
1843
1844
1845/* check_setup_args() returns index if key found, 0 if not
1846 */
1847
1848static int __init check_setup_args(char *key, int *val, char *buf)
1849{
1850 int x;
1851 char *cp;
1852
1853 for (x = 0; x < MAX_SETUP_ARGS; x++) {
1854 if (setup_used[x])
1855 continue;
1856 if (!strncmp(setup_args[x], key, strlen(key)))
1857 break;
1858 }
1859 if (x == MAX_SETUP_ARGS)
1860 return 0;
1861 setup_used[x] = 1;
1862 cp = setup_args[x] + strlen(key);
1863 *val = -1;
1864 if (*cp != ':')
1865 return ++x;
1866 cp++;
1867 if ((*cp >= '0') && (*cp <= '9')) {
1868 *val = simple_strtoul(cp, NULL, 0);
1869 }
1870 return ++x;
1871}
1872
1873
1874
/* The "correct" (ie portable) way to access memory-mapped hardware
 * such as the IN2000 EPROM and dip switch is through the use of
 * special macros declared in 'asm/io.h'. We use readb() and readl()
 * when reading from the card's BIOS area in in2000_detect().
 */
/* Candidate BIOS EPROM base addresses; the list is 0-terminated. */
static u32 bios_tab[] in2000__INITDATA = {
	0xc8000,
	0xd0000,
	0xd8000,
	0
};

/* IO port base, indexed by the dip switch's 2-bit address field
 * (SW_ADDR0 | SW_ADDR1). */
static unsigned short base_tab[] in2000__INITDATA = {
	0x220,
	0x200,
	0x110,
	0x100,
};

/* IRQ line, indexed by the dip switch's 2-bit interrupt field
 * ((SW_INT0 | SW_INT1) >> SW_INT_SHIFT). */
static int int_tab[] in2000__INITDATA = {
	15,
	14,
	11,
	10
};
1900
1901static int probe_bios(u32 addr, u32 *s1, uchar *switches)
1902{
1903 void __iomem *p = ioremap(addr, 0x34);
1904 if (!p)
1905 return 0;
1906 *s1 = readl(p + 0x10);
1907 if (*s1 == 0x41564f4e || readl(p + 0x30) == 0x61776c41) {
1908 /* Read the switch image that's mapped into EPROM space */
1909 *switches = ~readb(p + 0x20);
1910 iounmap(p);
1911 return 1;
1912 }
1913 iounmap(p);
1914 return 0;
1915}
1916
/*
 * in2000_detect() - legacy-style probe: find every IN2000 card, register
 * a Scsi_Host for each and initialize its hardware/driver state.
 * Returns the number of hosts successfully detected.
 */
static int __init in2000_detect(struct scsi_host_template * tpnt)
{
	struct Scsi_Host *instance;
	struct IN2000_hostdata *hostdata;
	int detect_count;
	int bios;
	int x;
	unsigned short base;
	uchar switches;
	uchar hrev;
	unsigned long flags;
	int val;
	char buf[32];

/* Thanks to help from Bill Earnest, probing for IN2000 cards is a
 * pretty straightforward and fool-proof operation. There are 3
 * possible locations for the IN2000 EPROM in memory space - if we
 * find a BIOS signature, we can read the dip switch settings from
 * the byte at BIOS+32 (shadowed in by logic on the card). From 2
 * of the switch bits we get the card's address in IO space. There's
 * an image of the dip switch there, also, so we have a way to back-
 * check that this really is an IN2000 card. Very nifty. Use the
 * 'ioport:xx' command-line parameter if your BIOS EPROM is absent
 * or disabled.
 */

	if (!done_setup && setup_strings)
		in2000_setup(setup_strings, NULL);

	detect_count = 0;
	for (bios = 0; bios_tab[bios]; bios++) {
		u32 s1 = 0;
		if (check_setup_args("ioport", &val, buf)) {
			base = val;
			switches = ~inb(base + IO_SWITCHES) & 0xff;
			printk("Forcing IN2000 detection at IOport 0x%x ", base);
			/* bios_tab[3] == 0, so the loop ends after this pass */
			bios = 2;
		}
/*
 * There have been a couple of BIOS versions with different layouts
 * for the obvious ID strings. We look for the 2 most common ones and
 * hope that they cover all the cases...
 */
		else if (probe_bios(bios_tab[bios], &s1, &switches)) {
			printk("Found IN2000 BIOS at 0x%x ", (unsigned int) bios_tab[bios]);

/* Find out where the IO space is */

			x = switches & (SW_ADDR0 | SW_ADDR1);
			base = base_tab[x];

/* Check for the IN2000 signature in IO space. */

			x = ~inb(base + IO_SWITCHES) & 0xff;
			if (x != switches) {
				printk("Bad IO signature: %02x vs %02x.\n", x, switches);
				continue;
			}
		} else
			continue;

/* OK. We have a base address for the IO ports - run a few safety checks */

		if (!(switches & SW_BIT7)) {	/* I _think_ all cards do this */
			printk("There is no IN-2000 SCSI card at IOport 0x%03x!\n", base);
			continue;
		}

/* Let's assume any hardware version will work, although the driver
 * has only been tested on 0x21, 0x22, 0x25, 0x26, and 0x27. We'll
 * print out the rev number for reference later, but accept them all.
 */

		hrev = inb(base + IO_HARDWARE);

		/* Bit 2 tells us if interrupts are disabled */
		if (switches & SW_DISINT) {
			printk("The IN-2000 SCSI card at IOport 0x%03x ", base);
			printk("is not configured for interrupt operation!\n");
			printk("This driver requires an interrupt: cancelling detection.\n");
			continue;
		}

/* Ok. We accept that there's an IN2000 at ioaddr 'base'. Now
 * initialize it.
 */

		tpnt->proc_name = "in2000";
		instance = scsi_register(tpnt, sizeof(struct IN2000_hostdata));
		if (instance == NULL)
			continue;
		detect_count++;
		hostdata = (struct IN2000_hostdata *) instance->hostdata;
		instance->io_port = hostdata->io_base = base;
		hostdata->dip_switch = switches;
		hostdata->hrev = hrev;

		write1_io(0, IO_FIFO_WRITE);	/* clear fifo counter */
		write1_io(0, IO_FIFO_READ);	/* start fifo out in read mode */
		write1_io(0, IO_INTR_MASK);	/* allow all ints */
		x = int_tab[(switches & (SW_INT0 | SW_INT1)) >> SW_INT_SHIFT];
		if (request_irq(x, in2000_intr, 0, "in2000", instance)) {
			printk("in2000_detect: Unable to allocate IRQ.\n");
			/* NOTE(review): the host registered above is not
			 * scsi_unregister()'ed on this failure path. */
			detect_count--;
			continue;
		}
		instance->irq = x;
		instance->n_io_port = 13;
		/* NOTE(review): request_region() return value is ignored. */
		request_region(base, 13, "in2000");	/* lock in this IO space for our use */

		for (x = 0; x < 8; x++) {
			hostdata->busy[x] = 0;
			hostdata->sync_xfer[x] = calc_sync_xfer(DEFAULT_SX_PER / 4, DEFAULT_SX_OFF);
			hostdata->sync_stat[x] = SS_UNSET;	/* using default sync values */
#ifdef PROC_STATISTICS
			hostdata->cmd_cnt[x] = 0;
			hostdata->disc_allowed_cnt[x] = 0;
			hostdata->disc_done_cnt[x] = 0;
#endif
		}
		hostdata->input_Q = NULL;
		hostdata->selecting = NULL;
		hostdata->connected = NULL;
		hostdata->disconnected_Q = NULL;
		hostdata->state = S_UNCONNECTED;
		hostdata->fifo = FI_FIFO_UNUSED;
		hostdata->level2 = L2_BASIC;
		hostdata->disconnect = DIS_ADAPTIVE;
		hostdata->args = DEBUG_DEFAULTS;
		hostdata->incoming_ptr = 0;
		hostdata->outgoing_len = 0;
		hostdata->default_sx_per = DEFAULT_SX_PER;

/* Older BIOS's had a 'sync on/off' switch - use its setting */

		if (s1 == 0x41564f4e && (switches & SW_SYNC_DOS5))
			hostdata->sync_off = 0x00;	/* sync defaults to on */
		else
			hostdata->sync_off = 0xff;	/* sync defaults to off */

#ifdef PROC_INTERFACE
		hostdata->proc = PR_VERSION | PR_INFO | PR_STATISTICS | PR_CONNECTED | PR_INPUTQ | PR_DISCQ | PR_STOP;
#ifdef PROC_STATISTICS
		hostdata->int_cnt = 0;
#endif
#endif

		/* Apply command-line overrides for the defaults set above. */
		if (check_setup_args("nosync", &val, buf))
			hostdata->sync_off = val;

		if (check_setup_args("period", &val, buf))
			hostdata->default_sx_per = sx_table[round_period((unsigned int) val)].period_ns;

		if (check_setup_args("disconnect", &val, buf)) {
			if ((val >= DIS_NEVER) && (val <= DIS_ALWAYS))
				hostdata->disconnect = val;
			else
				hostdata->disconnect = DIS_ADAPTIVE;
		}

		if (check_setup_args("noreset", &val, buf))
			hostdata->args ^= A_NO_SCSI_RESET;

		if (check_setup_args("level2", &val, buf))
			hostdata->level2 = val;

		if (check_setup_args("debug", &val, buf))
			hostdata->args = (val & DB_MASK);

#ifdef PROC_INTERFACE
		if (check_setup_args("proc", &val, buf))
			hostdata->proc = val;
#endif


		/* FIXME: not strictly needed I think but the called code expects
		   to be locked */
		spin_lock_irqsave(instance->host_lock, flags);
		x = reset_hardware(instance, (hostdata->args & A_NO_SCSI_RESET) ? RESET_CARD : RESET_CARD_AND_BUS);
		spin_unlock_irqrestore(instance->host_lock, flags);

		/* Decode the chip revision from reset_hardware()'s result. */
		hostdata->microcode = read_3393(hostdata, WD_CDB_1);
		if (x & 0x01) {
			if (x & B_FLAG)
				hostdata->chip = C_WD33C93B;
			else
				hostdata->chip = C_WD33C93A;
		} else
			hostdata->chip = C_WD33C93;

		printk("dip_switch=%02x irq=%d ioport=%02x floppy=%s sync/DOS5=%s ", (switches & 0x7f), instance->irq, hostdata->io_base, (switches & SW_FLOPPY) ? "Yes" : "No", (switches & SW_SYNC_DOS5) ? "Yes" : "No");
		printk("hardware_ver=%02x chip=%s microcode=%02x\n", hrev, (hostdata->chip == C_WD33C93) ? "WD33c93" : (hostdata->chip == C_WD33C93A) ? "WD33c93A" : (hostdata->chip == C_WD33C93B) ? "WD33c93B" : "unknown", hostdata->microcode);
#ifdef DEBUGGING_ON
		printk("setup_args = ");
		for (x = 0; x < MAX_SETUP_ARGS; x++)
			printk("%s,", setup_args[x]);
		printk("\n");
#endif
		if (hostdata->sync_off == 0xff)
			printk("Sync-transfer DISABLED on all devices: ENABLE from command-line\n");
		printk("IN2000 driver version %s - %s\n", IN2000_VERSION, IN2000_DATE);
	}

	return detect_count;
}
2122
2123static int in2000_release(struct Scsi_Host *shost)
2124{
2125 if (shost->irq)
2126 free_irq(shost->irq, shost);
2127 if (shost->io_port && shost->n_io_port)
2128 release_region(shost->io_port, shost->n_io_port);
2129 return 0;
2130}
2131
2132/* NOTE: I lifted this function straight out of the old driver,
2133 * and have not tested it. Presumably it does what it's
2134 * supposed to do...
2135 */
2136
2137static int in2000_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int *iinfo)
2138{
2139 int size;
2140
2141 size = capacity;
2142 iinfo[0] = 64;
2143 iinfo[1] = 32;
2144 iinfo[2] = size >> 11;
2145
2146/* This should approximate the large drive handling that the DOS ASPI manager
2147 uses. Drives very near the boundaries may not be handled correctly (i.e.
2148 near 2.0 Gb and 4.0 Gb) */
2149
2150 if (iinfo[2] > 1024) {
2151 iinfo[0] = 64;
2152 iinfo[1] = 63;
2153 iinfo[2] = (unsigned long) capacity / (iinfo[0] * iinfo[1]);
2154 }
2155 if (iinfo[2] > 1024) {
2156 iinfo[0] = 128;
2157 iinfo[1] = 63;
2158 iinfo[2] = (unsigned long) capacity / (iinfo[0] * iinfo[1]);
2159 }
2160 if (iinfo[2] > 1024) {
2161 iinfo[0] = 255;
2162 iinfo[1] = 63;
2163 iinfo[2] = (unsigned long) capacity / (iinfo[0] * iinfo[1]);
2164 }
2165 return 0;
2166}
2167
2168
/*
 * in2000_write_info() - /proc write handler: accept one "key:value"
 * directive per write and poke the matching runtime tunable.
 * Compiled out (a no-op returning len) without PROC_INTERFACE.
 */
static int in2000_write_info(struct Scsi_Host *instance, char *buf, int len)
{

#ifdef PROC_INTERFACE

	struct IN2000_hostdata *hd = (struct IN2000_hostdata *) instance->hostdata;
	char *p = buf;
	int v, i;

	/* terminate the input so the string parsers below stay in bounds */
	buf[len] = '\0';

	if (!strncmp(p, "debug:", 6)) {
		hd->args = simple_strtoul(p + 6, NULL, 0) & DB_MASK;
	} else if (!strncmp(p, "disconnect:", 11)) {
		v = simple_strtoul(p + 11, NULL, 0);
		if (v < DIS_NEVER || v > DIS_ALWAYS)
			v = DIS_ADAPTIVE;
		hd->disconnect = v;
	} else if (!strncmp(p, "period:", 7)) {
		v = simple_strtoul(p + 7, NULL, 0);
		hd->default_sx_per = sx_table[round_period((unsigned int) v)].period_ns;
	} else if (!strncmp(p, "resync:", 7)) {
		/* value is a target bitmask: restart sync negotiation for each */
		v = simple_strtoul(p + 7, NULL, 0);
		for (i = 0; i < 7; i++)
			if (v & (1 << i))
				hd->sync_stat[i] = SS_UNSET;
	} else if (!strncmp(p, "proc:", 5)) {
		hd->proc = simple_strtoul(p + 5, NULL, 0);
	} else if (!strncmp(p, "level2:", 7)) {
		hd->level2 = simple_strtoul(p + 7, NULL, 0);
	}
#endif
	return len;
}
2211
/*
 * in2000_show_info() - /proc read handler: dump the sections selected by
 * the hd->proc bitmask (version, config, statistics, command queues).
 * Compiled to an empty stub without PROC_INTERFACE.
 */
static int in2000_show_info(struct seq_file *m, struct Scsi_Host *instance)
{

#ifdef PROC_INTERFACE
	unsigned long flags;
	struct IN2000_hostdata *hd;
	Scsi_Cmnd *cmd;
	int x;

	hd = (struct IN2000_hostdata *) instance->hostdata;

	/* hold the host lock so the queues can't change while we walk them */
	spin_lock_irqsave(instance->host_lock, flags);
	if (hd->proc & PR_VERSION)
		seq_printf(m, "\nVersion %s - %s.", IN2000_VERSION, IN2000_DATE);

	if (hd->proc & PR_INFO) {
		seq_printf(m, "\ndip_switch=%02x: irq=%d io=%02x floppy=%s sync/DOS5=%s", (hd->dip_switch & 0x7f), instance->irq, hd->io_base, (hd->dip_switch & 0x40) ? "Yes" : "No", (hd->dip_switch & 0x20) ? "Yes" : "No");
		seq_puts(m, "\nsync_xfer[] =  ");
		/* NOTE(review): loops below print targets 0..6 only, although
		 * the per-target arrays have 8 entries - confirm intended. */
		for (x = 0; x < 7; x++)
			seq_printf(m, "\t%02x", hd->sync_xfer[x]);
		seq_puts(m, "\nsync_stat[] =  ");
		for (x = 0; x < 7; x++)
			seq_printf(m, "\t%02x", hd->sync_stat[x]);
	}
#ifdef PROC_STATISTICS
	if (hd->proc & PR_STATISTICS) {
		seq_puts(m, "\ncommands issued:    ");
		for (x = 0; x < 7; x++)
			seq_printf(m, "\t%ld", hd->cmd_cnt[x]);
		seq_puts(m, "\ndisconnects allowed:");
		for (x = 0; x < 7; x++)
			seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]);
		seq_puts(m, "\ndisconnects done:   ");
		for (x = 0; x < 7; x++)
			seq_printf(m, "\t%ld", hd->disc_done_cnt[x]);
		seq_printf(m, "\ninterrupts:      \t%ld", hd->int_cnt);
	}
#endif
	if (hd->proc & PR_CONNECTED) {
		seq_puts(m, "\nconnected:     ");
		if (hd->connected) {
			cmd = (Scsi_Cmnd *) hd->connected;
			seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
		}
	}
	if (hd->proc & PR_INPUTQ) {
		seq_puts(m, "\ninput_Q:       ");
		cmd = (Scsi_Cmnd *) hd->input_Q;
		while (cmd) {
			seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
			cmd = (Scsi_Cmnd *) cmd->host_scribble;
		}
	}
	if (hd->proc & PR_DISCQ) {
		seq_puts(m, "\ndisconnected_Q:");
		cmd = (Scsi_Cmnd *) hd->disconnected_Q;
		while (cmd) {
			seq_printf(m, " %d:%llu(%02x)", cmd->device->id, cmd->device->lun, cmd->cmnd[0]);
			cmd = (Scsi_Cmnd *) cmd->host_scribble;
		}
	}
	if (hd->proc & PR_TEST) {
		;		/* insert your own custom function here */
	}
	seq_putc(m, '\n');
	spin_unlock_irqrestore(instance->host_lock, flags);
#endif				/* PROC_INTERFACE */
	return 0;
}
2281
2282MODULE_LICENSE("GPL");
2283
2284
/* Legacy (scsi_module.c-style) host template for the Always IN2000. */
static struct scsi_host_template driver_template = {
	.proc_name = "in2000",
	.write_info = in2000_write_info,
	.show_info = in2000_show_info,
	.name = "Always IN2000",
	.detect = in2000_detect,
	.release = in2000_release,
	.queuecommand = in2000_queuecommand,
	.eh_abort_handler = in2000_abort,
	.eh_bus_reset_handler = in2000_bus_reset,
	.bios_param = in2000_biosparam,
	.can_queue = IN2000_CAN_Q,
	.this_id = IN2000_HOST_ID,
	.sg_tablesize = IN2000_SG,
	.cmd_per_lun = IN2000_CPL,
	.use_clustering = DISABLE_CLUSTERING,
};
2302#include "scsi_module.c"
diff --git a/drivers/scsi/in2000.h b/drivers/scsi/in2000.h
deleted file mode 100644
index 5821e1fbce08..000000000000
--- a/drivers/scsi/in2000.h
+++ /dev/null
@@ -1,412 +0,0 @@
1/*
2 * in2000.h - Linux device driver definitions for the
3 * Always IN2000 ISA SCSI card.
4 *
5 * IMPORTANT: This file is for version 1.33 - 26/Aug/1998
6 *
7 * Copyright (c) 1996 John Shifflett, GeoLog Consulting
8 * john@geolog.com
9 * jshiffle@netcom.com
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 */
22
23#ifndef IN2000_H
24#define IN2000_H
25
26#include <asm/io.h>
27
28#define PROC_INTERFACE /* add code for /proc/scsi/in2000/xxx interface */
29#ifdef PROC_INTERFACE
30#define PROC_STATISTICS /* add code for keeping various real time stats */
31#endif
32
33#define SYNC_DEBUG /* extra info on sync negotiation printed */
34#define DEBUGGING_ON /* enable command-line debugging bitmask */
35#define DEBUG_DEFAULTS 0 /* default bitmask - change from command-line */
36
37#ifdef __i386__
38#define FAST_READ_IO /* No problems with these on my machine */
39#define FAST_WRITE_IO
40#endif
41
#ifdef DEBUGGING_ON
/* DB(f,a): run statement 'a' when debug bit 'f' is set; expects a local
 * named 'hostdata' at the expansion site.  Expands unbraced - beware the
 * dangling-else hazard when used as the sole body of an if/else. */
#define DB(f,a) if (hostdata->args & (f)) a;
#define CHECK_NULL(p,s) /* if (!(p)) {printk("\n"); while (1) printk("NP:%s\r",(s));} */
#else
#define DB(f,a)
#define CHECK_NULL(p,s)
#endif
49
#define uchar unsigned char

/* Card register accessors, offset 'a' from the card's IO base; the 1/2
 * suffix selects byte/word width.  Each expects a local named 'hostdata'
 * (providing io_base) at the expansion site. */
#define read1_io(a)     (inb(hostdata->io_base+(a)))
#define read2_io(a)     (inw(hostdata->io_base+(a)))
#define write1_io(b,a)  (outb((b),hostdata->io_base+(a)))
#define write2_io(w,a)  (outw((w),hostdata->io_base+(a)))
56
57#ifdef __i386__
58/* These inline assembly defines are derived from a patch
59 * sent to me by Bill Earnest. He's done a lot of very
60 * valuable thinking, testing, and coding during his effort
61 * to squeeze more speed out of this driver. I really think
62 * that we are doing IO at close to the maximum now with
63 * the fifo. (And yes, insw uses 'edi' while outsw uses
64 * 'esi'. Thanks Bill!)
65 */
66
/* NOTE(review): both macros read and update the caller's locals 'sp',
 * 'f' and 'i' by name (buffer pointer, port, and word count, judging by
 * the rep insw/outsw operands) - confirm at the call sites before
 * touching.  i386-only inline asm; bulk word transfer via rep insw. */
#define FAST_READ2_IO()    \
({ \
int __dummy_1,__dummy_2; \
   __asm__ __volatile__ ("\n \
   cld                    \n \
   orl %%ecx, %%ecx       \n \
   jz 1f                  \n \
   rep                    \n \
   insw (%%dx),%%es:(%%edi) \n \
1: "                       \
   : "=D" (sp) ,"=c" (__dummy_1) ,"=d" (__dummy_2)  /* output */   \
   : "2" (f), "0" (sp), "1" (i)  /* input */    \
   );       /* trashed */ \
})

/* Bulk word transfer to the card via rep outsw; same caller-local
 * contract as FAST_READ2_IO() above. */
#define FAST_WRITE2_IO()   \
({ \
int __dummy_1,__dummy_2; \
   __asm__ __volatile__ ("\n \
   cld                    \n \
   orl %%ecx, %%ecx       \n \
   jz 1f                  \n \
   rep                    \n \
   outsw %%ds:(%%esi),(%%dx) \n \
1: "                       \
   : "=S" (sp) ,"=c" (__dummy_1) ,"=d" (__dummy_2)/* output */ \
   : "2" (f), "0" (sp), "1" (i)  /* input */  \
   );       /* trashed */ \
})
96#endif
97
98/* IN2000 io_port offsets */
99#define IO_WD_ASR 0x00 /* R - 3393 auxstat reg */
100#define ASR_INT 0x80
101#define ASR_LCI 0x40
102#define ASR_BSY 0x20
103#define ASR_CIP 0x10
104#define ASR_PE 0x02
105#define ASR_DBR 0x01
106#define IO_WD_ADDR 0x00 /* W - 3393 address reg */
107#define IO_WD_DATA 0x01 /* R/W - rest of 3393 regs */
108#define IO_FIFO 0x02 /* R/W - in2000 dual-port fifo (16 bits) */
109#define IN2000_FIFO_SIZE 2048 /* fifo capacity in bytes */
110#define IO_CARD_RESET 0x03 /* W - in2000 start master reset */
111#define IO_FIFO_COUNT 0x04 /* R - in2000 fifo counter */
112#define IO_FIFO_WRITE 0x05 /* W - clear fifo counter, start write */
113#define IO_FIFO_READ 0x07 /* W - start fifo read */
114#define IO_LED_OFF 0x08 /* W - turn off in2000 activity LED */
115#define IO_SWITCHES 0x08 /* R - read in2000 dip switch */
116#define SW_ADDR0 0x01 /* bit 0 = bit 0 of index to io addr */
117#define SW_ADDR1 0x02 /* bit 1 = bit 1 of index io addr */
118#define SW_DISINT 0x04 /* bit 2 true if ints disabled */
119#define SW_INT0 0x08 /* bit 3 = bit 0 of index to interrupt */
120#define SW_INT1 0x10 /* bit 4 = bit 1 of index to interrupt */
121#define SW_INT_SHIFT 3 /* shift right this amount to right justify int bits */
122#define SW_SYNC_DOS5 0x20 /* bit 5 used by Always BIOS */
123#define SW_FLOPPY 0x40 /* bit 6 true if floppy enabled */
124#define SW_BIT7 0x80 /* bit 7 hardwired true (ground) */
125#define IO_LED_ON 0x09 /* W - turn on in2000 activity LED */
126#define IO_HARDWARE 0x0a /* R - read in2000 hardware rev, stop reset */
127#define IO_INTR_MASK 0x0c /* W - in2000 interrupt mask reg */
128#define IMASK_WD 0x01 /* WD33c93 interrupt mask */
129#define IMASK_FIFO 0x02 /* FIFO interrupt mask */
130
131/* wd register names */
132#define WD_OWN_ID 0x00
133#define WD_CONTROL 0x01
134#define WD_TIMEOUT_PERIOD 0x02
135#define WD_CDB_1 0x03
136#define WD_CDB_2 0x04
137#define WD_CDB_3 0x05
138#define WD_CDB_4 0x06
139#define WD_CDB_5 0x07
140#define WD_CDB_6 0x08
141#define WD_CDB_7 0x09
142#define WD_CDB_8 0x0a
143#define WD_CDB_9 0x0b
144#define WD_CDB_10 0x0c
145#define WD_CDB_11 0x0d
146#define WD_CDB_12 0x0e
147#define WD_TARGET_LUN 0x0f
148#define WD_COMMAND_PHASE 0x10
149#define WD_SYNCHRONOUS_TRANSFER 0x11
150#define WD_TRANSFER_COUNT_MSB 0x12
151#define WD_TRANSFER_COUNT 0x13
152#define WD_TRANSFER_COUNT_LSB 0x14
153#define WD_DESTINATION_ID 0x15
154#define WD_SOURCE_ID 0x16
155#define WD_SCSI_STATUS 0x17
156#define WD_COMMAND 0x18
157#define WD_DATA 0x19
158#define WD_QUEUE_TAG 0x1a
159#define WD_AUXILIARY_STATUS 0x1f
160
161/* WD commands */
162#define WD_CMD_RESET 0x00
163#define WD_CMD_ABORT 0x01
164#define WD_CMD_ASSERT_ATN 0x02
165#define WD_CMD_NEGATE_ACK 0x03
166#define WD_CMD_DISCONNECT 0x04
167#define WD_CMD_RESELECT 0x05
168#define WD_CMD_SEL_ATN 0x06
169#define WD_CMD_SEL 0x07
170#define WD_CMD_SEL_ATN_XFER 0x08
171#define WD_CMD_SEL_XFER 0x09
172#define WD_CMD_RESEL_RECEIVE 0x0a
173#define WD_CMD_RESEL_SEND 0x0b
174#define WD_CMD_WAIT_SEL_RECEIVE 0x0c
175#define WD_CMD_TRANS_ADDR 0x18
176#define WD_CMD_TRANS_INFO 0x20
177#define WD_CMD_TRANSFER_PAD 0x21
178#define WD_CMD_SBT_MODE 0x80
179
180/* SCSI Bus Phases */
181#define PHS_DATA_OUT 0x00
182#define PHS_DATA_IN 0x01
183#define PHS_COMMAND 0x02
184#define PHS_STATUS 0x03
185#define PHS_MESS_OUT 0x06
186#define PHS_MESS_IN 0x07
187
188/* Command Status Register definitions */
189
190 /* reset state interrupts */
191#define CSR_RESET 0x00
192#define CSR_RESET_AF 0x01
193
194 /* successful completion interrupts */
195#define CSR_RESELECT 0x10
196#define CSR_SELECT 0x11
197#define CSR_SEL_XFER_DONE 0x16
198#define CSR_XFER_DONE 0x18
199
200 /* paused or aborted interrupts */
201#define CSR_MSGIN 0x20
202#define CSR_SDP 0x21
203#define CSR_SEL_ABORT 0x22
204#define CSR_RESEL_ABORT 0x25
205#define CSR_RESEL_ABORT_AM 0x27
206#define CSR_ABORT 0x28
207
208 /* terminated interrupts */
209#define CSR_INVALID 0x40
210#define CSR_UNEXP_DISC 0x41
211#define CSR_TIMEOUT 0x42
212#define CSR_PARITY 0x43
213#define CSR_PARITY_ATN 0x44
214#define CSR_BAD_STATUS 0x45
215#define CSR_UNEXP 0x48
216
217 /* service required interrupts */
218#define CSR_RESEL 0x80
219#define CSR_RESEL_AM 0x81
220#define CSR_DISC 0x85
221#define CSR_SRV_REQ 0x88
222
223 /* Own ID/CDB Size register */
224#define OWNID_EAF 0x08
225#define OWNID_EHP 0x10
226#define OWNID_RAF 0x20
227#define OWNID_FS_8 0x00
228#define OWNID_FS_12 0x40
229#define OWNID_FS_16 0x80
230
231 /* Control register */
232#define CTRL_HSP 0x01
233#define CTRL_HA 0x02
234#define CTRL_IDI 0x04
235#define CTRL_EDI 0x08
236#define CTRL_HHP 0x10
237#define CTRL_POLLED 0x00
238#define CTRL_BURST 0x20
239#define CTRL_BUS 0x40
240#define CTRL_DMA 0x80
241
242 /* Timeout Period register */
243#define TIMEOUT_PERIOD_VALUE 20 /* results in 200 ms. */
244
245 /* Synchronous Transfer Register */
246#define STR_FSS 0x80
247
248 /* Destination ID register */
249#define DSTID_DPD 0x40
250#define DATA_OUT_DIR 0
251#define DATA_IN_DIR 1
252#define DSTID_SCC 0x80
253
254 /* Source ID register */
255#define SRCID_MASK 0x07
256#define SRCID_SIV 0x08
257#define SRCID_DSP 0x20
258#define SRCID_ES 0x40
259#define SRCID_ER 0x80
260
261
262
263#define ILLEGAL_STATUS_BYTE 0xff
264
265
266#define DEFAULT_SX_PER 500 /* (ns) fairly safe */
267#define DEFAULT_SX_OFF 0 /* aka async */
268
269#define OPTIMUM_SX_PER 252 /* (ns) best we can do (mult-of-4) */
270#define OPTIMUM_SX_OFF 12 /* size of in2000 fifo */
271
/* One entry of the synchronous-transfer lookup table: nanosecond period
 * and the matching value - presumably for the WD_SYNCHRONOUS_TRANSFER
 * register; confirm against calc_sync_xfer()/round_period(). */
struct sx_period {
	unsigned int period_ns;
	uchar reg_value;
	};
276
277
/*
 * Per-host driver state, allocated inside Scsi_Host::hostdata by
 * scsi_register() in in2000_detect().  The command queues (input_Q,
 * disconnected_Q) are singly linked through Scsi_Cmnd::host_scribble.
 */
struct IN2000_hostdata {
    struct Scsi_Host *next;
    uchar            chip;             /* what kind of wd33c93 chip? */
    uchar            microcode;        /* microcode rev if 'B' */
    unsigned short   io_base;          /* IO port base */
    unsigned int     dip_switch;       /* dip switch settings */
    unsigned int     hrev;             /* hardware revision of card */
    volatile uchar   busy[8];          /* index = target, bit = lun */
    volatile Scsi_Cmnd *input_Q;       /* commands waiting to be started */
    volatile Scsi_Cmnd *selecting;     /* trying to select this command */
    volatile Scsi_Cmnd *connected;     /* currently connected command */
    volatile Scsi_Cmnd *disconnected_Q;/* commands waiting for reconnect */
    uchar            state;            /* what we are currently doing */
    uchar            fifo;             /* what the FIFO is up to */
    uchar            level2;           /* extent to which Level-2 commands are used */
    uchar            disconnect;       /* disconnect/reselect policy */
    unsigned int     args;             /* set from command-line argument */
    uchar            incoming_msg[8];  /* filled during message_in phase */
    int              incoming_ptr;     /* mainly used with EXTENDED messages */
    uchar            outgoing_msg[8];  /* send this during next message_out */
    int              outgoing_len;     /* length of outgoing message */
    unsigned int     default_sx_per;   /* default transfer period for SCSI bus */
    uchar            sync_xfer[8];     /* sync_xfer reg settings per target */
    uchar            sync_stat[8];     /* status of sync negotiation per target */
    uchar            sync_off;         /* bit mask: don't use sync with these targets */
#ifdef PROC_INTERFACE
    uchar            proc;             /* bit mask: what's in proc output */
#ifdef PROC_STATISTICS
    unsigned long    cmd_cnt[8];       /* # of commands issued per target */
    unsigned long    int_cnt;          /* # of interrupts serviced */
    unsigned long    disc_allowed_cnt[8]; /* # of disconnects allowed per target */
    unsigned long    disc_done_cnt[8]; /* # of disconnects done per target*/
#endif
#endif
    };
313
314
315/* defines for hostdata->chip */
316
317#define C_WD33C93 0
318#define C_WD33C93A 1
319#define C_WD33C93B 2
320#define C_UNKNOWN_CHIP 100
321
322/* defines for hostdata->state */
323
324#define S_UNCONNECTED 0
325#define S_SELECTING 1
326#define S_RUNNING_LEVEL2 2
327#define S_CONNECTED 3
328#define S_PRE_TMP_DISC 4
329#define S_PRE_CMP_DISC 5
330
331/* defines for hostdata->fifo */
332
333#define FI_FIFO_UNUSED 0
334#define FI_FIFO_READING 1
335#define FI_FIFO_WRITING 2
336
337/* defines for hostdata->level2 */
338/* NOTE: only the first 3 are trustworthy at this point -
339 * having trouble when more than 1 device is reading/writing
340 * at the same time...
341 */
342
343#define L2_NONE 0 /* no combination commands - we get lots of ints */
344#define L2_SELECT 1 /* start with SEL_ATN_XFER, but never resume it */
345#define L2_BASIC 2 /* resume after STATUS ints & RDP messages */
346#define L2_DATA 3 /* resume after DATA_IN/OUT ints */
347#define L2_MOST 4 /* resume after anything except a RESELECT int */
348#define L2_RESELECT 5 /* resume after everything, including RESELECT ints */
349#define L2_ALL 6 /* always resume */
350
351/* defines for hostdata->disconnect */
352
353#define DIS_NEVER 0
354#define DIS_ADAPTIVE 1
355#define DIS_ALWAYS 2
356
357/* defines for hostdata->args */
358
359#define DB_TEST 1<<0
360#define DB_FIFO 1<<1
361#define DB_QUEUE_COMMAND 1<<2
362#define DB_EXECUTE 1<<3
363#define DB_INTR 1<<4
364#define DB_TRANSFER 1<<5
365#define DB_MASK 0x3f
366
367#define A_NO_SCSI_RESET 1<<15
368
369
370/* defines for hostdata->sync_xfer[] */
371
372#define SS_UNSET 0
373#define SS_FIRST 1
374#define SS_WAITING 2
375#define SS_SET 3
376
377/* defines for hostdata->proc */
378
379#define PR_VERSION 1<<0
380#define PR_INFO 1<<1
381#define PR_STATISTICS 1<<2
382#define PR_CONNECTED 1<<3
383#define PR_INPUTQ 1<<4
384#define PR_DISCQ 1<<5
385#define PR_TEST 1<<6
386#define PR_STOP 1<<7
387
388
389# include <linux/init.h>
390# include <linux/spinlock.h>
391# define in2000__INITFUNC(function) __initfunc(function)
392# define in2000__INIT __init
393# define in2000__INITDATA __initdata
394# define CLISPIN_LOCK(host,flags) spin_lock_irqsave(host->host_lock, flags)
395# define CLISPIN_UNLOCK(host,flags) spin_unlock_irqrestore(host->host_lock, \
396 flags)
397
398static int in2000_detect(struct scsi_host_template *) in2000__INIT;
399static int in2000_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
400static int in2000_abort(Scsi_Cmnd *);
401static void in2000_setup(char *, int *) in2000__INIT;
402static int in2000_biosparam(struct scsi_device *, struct block_device *,
403 sector_t, int *);
404static int in2000_bus_reset(Scsi_Cmnd *);
405
406
407#define IN2000_CAN_Q 16
408#define IN2000_SG SG_ALL
409#define IN2000_CPL 2
410#define IN2000_HOST_ID 7
411
412#endif /* IN2000_H */
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 17d04c702e1b..a8762a3efeef 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -493,15 +493,15 @@ struct ipr_error_table_t ipr_error_table[] = {
493 "9072: Link not operational transition"}, 493 "9072: Link not operational transition"},
494 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL, 494 {0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
495 "9032: Array exposed but still protected"}, 495 "9032: Array exposed but still protected"},
496 {0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1, 496 {0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
497 "70DD: Device forced failed by disrupt device command"}, 497 "70DD: Device forced failed by disrupt device command"},
498 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL, 498 {0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
499 "4061: Multipath redundancy level got better"}, 499 "4061: Multipath redundancy level got better"},
500 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL, 500 {0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
501 "4060: Multipath redundancy level got worse"}, 501 "4060: Multipath redundancy level got worse"},
502 {0x06808100, 0, IPR_DEFAULT_LOG_LEVEL, 502 {0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
503 "9083: Device raw mode enabled"}, 503 "9083: Device raw mode enabled"},
504 {0x06808200, 0, IPR_DEFAULT_LOG_LEVEL, 504 {0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
505 "9084: Device raw mode disabled"}, 505 "9084: Device raw mode disabled"},
506 {0x07270000, 0, 0, 506 {0x07270000, 0, 0,
507 "Failure due to other device"}, 507 "Failure due to other device"},
@@ -1473,7 +1473,7 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1473 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; 1473 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1474 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 1474 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1475 1475
1476 list_del(&hostrcb->queue); 1476 list_del_init(&hostrcb->queue);
1477 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 1477 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1478 1478
1479 if (ioasc) { 1479 if (ioasc) {
@@ -2552,6 +2552,23 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2552 } 2552 }
2553} 2553}
2554 2554
2555static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2556{
2557 struct ipr_hostrcb *hostrcb;
2558
2559 hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2560 struct ipr_hostrcb, queue);
2561
2562 if (unlikely(!hostrcb)) {
2563 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.");
2564 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2565 struct ipr_hostrcb, queue);
2566 }
2567
2568 list_del_init(&hostrcb->queue);
2569 return hostrcb;
2570}
2571
2555/** 2572/**
2556 * ipr_process_error - Op done function for an adapter error log. 2573 * ipr_process_error - Op done function for an adapter error log.
2557 * @ipr_cmd: ipr command struct 2574 * @ipr_cmd: ipr command struct
@@ -2569,13 +2586,14 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2569 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb; 2586 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2570 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 2587 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2571 u32 fd_ioasc; 2588 u32 fd_ioasc;
2589 char *envp[] = { "ASYNC_ERR_LOG=1", NULL };
2572 2590
2573 if (ioa_cfg->sis64) 2591 if (ioa_cfg->sis64)
2574 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc); 2592 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2575 else 2593 else
2576 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc); 2594 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2577 2595
2578 list_del(&hostrcb->queue); 2596 list_del_init(&hostrcb->queue);
2579 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 2597 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2580 2598
2581 if (!ioasc) { 2599 if (!ioasc) {
@@ -2588,6 +2606,10 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2588 "Host RCB failed with IOASC: 0x%08X\n", ioasc); 2606 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2589 } 2607 }
2590 2608
2609 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2610 hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2611 kobject_uevent_env(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE, envp);
2612
2591 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb); 2613 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2592} 2614}
2593 2615
@@ -4095,6 +4117,64 @@ static struct device_attribute ipr_ioa_fw_type_attr = {
4095 .show = ipr_show_fw_type 4117 .show = ipr_show_fw_type
4096}; 4118};
4097 4119
4120static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4121 struct bin_attribute *bin_attr, char *buf,
4122 loff_t off, size_t count)
4123{
4124 struct device *cdev = container_of(kobj, struct device, kobj);
4125 struct Scsi_Host *shost = class_to_shost(cdev);
4126 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4127 struct ipr_hostrcb *hostrcb;
4128 unsigned long lock_flags = 0;
4129 int ret;
4130
4131 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4132 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4133 struct ipr_hostrcb, queue);
4134 if (!hostrcb) {
4135 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4136 return 0;
4137 }
4138 ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4139 sizeof(hostrcb->hcam));
4140 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4141 return ret;
4142}
4143
4144static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4145 struct bin_attribute *bin_attr, char *buf,
4146 loff_t off, size_t count)
4147{
4148 struct device *cdev = container_of(kobj, struct device, kobj);
4149 struct Scsi_Host *shost = class_to_shost(cdev);
4150 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4151 struct ipr_hostrcb *hostrcb;
4152 unsigned long lock_flags = 0;
4153
4154 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4155 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4156 struct ipr_hostrcb, queue);
4157 if (!hostrcb) {
4158 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4159 return count;
4160 }
4161
4162 /* Reclaim hostrcb before exit */
4163 list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4164 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4165 return count;
4166}
4167
4168static struct bin_attribute ipr_ioa_async_err_log = {
4169 .attr = {
4170 .name = "async_err_log",
4171 .mode = S_IRUGO | S_IWUSR,
4172 },
4173 .size = 0,
4174 .read = ipr_read_async_err_log,
4175 .write = ipr_next_async_err_log
4176};
4177
4098static struct device_attribute *ipr_ioa_attrs[] = { 4178static struct device_attribute *ipr_ioa_attrs[] = {
4099 &ipr_fw_version_attr, 4179 &ipr_fw_version_attr,
4100 &ipr_log_level_attr, 4180 &ipr_log_level_attr,
@@ -7026,8 +7106,7 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7026{ 7106{
7027 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 7107 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7028 struct ipr_resource_entry *res; 7108 struct ipr_resource_entry *res;
7029 struct ipr_hostrcb *hostrcb, *temp; 7109 int j;
7030 int i = 0, j;
7031 7110
7032 ENTER; 7111 ENTER;
7033 ioa_cfg->in_reset_reload = 0; 7112 ioa_cfg->in_reset_reload = 0;
@@ -7048,12 +7127,16 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7048 } 7127 }
7049 schedule_work(&ioa_cfg->work_q); 7128 schedule_work(&ioa_cfg->work_q);
7050 7129
7051 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) { 7130 for (j = 0; j < IPR_NUM_HCAMS; j++) {
7052 list_del(&hostrcb->queue); 7131 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7053 if (i++ < IPR_NUM_LOG_HCAMS) 7132 if (j < IPR_NUM_LOG_HCAMS)
7054 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb); 7133 ipr_send_hcam(ioa_cfg,
7134 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7135 ioa_cfg->hostrcb[j]);
7055 else 7136 else
7056 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb); 7137 ipr_send_hcam(ioa_cfg,
7138 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7139 ioa_cfg->hostrcb[j]);
7057 } 7140 }
7058 7141
7059 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS); 7142 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
@@ -7966,7 +8049,8 @@ static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7966 8049
7967 ENTER; 8050 ENTER;
7968 ipr_cmd->job_step = ipr_ioafp_std_inquiry; 8051 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7969 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n"); 8052 if (ioa_cfg->identify_hrrq_index == 0)
8053 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7970 8054
7971 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) { 8055 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7972 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index]; 8056 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
@@ -8335,7 +8419,7 @@ static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8335 8419
8336 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next, 8420 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8337 struct ipr_hostrcb, queue); 8421 struct ipr_hostrcb, queue);
8338 list_del(&hostrcb->queue); 8422 list_del_init(&hostrcb->queue);
8339 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam)); 8423 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8340 8424
8341 rc = ipr_get_ldump_data_section(ioa_cfg, 8425 rc = ipr_get_ldump_data_section(ioa_cfg,
@@ -9332,7 +9416,7 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9332 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size, 9416 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9333 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma); 9417 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9334 9418
9335 for (i = 0; i < IPR_NUM_HCAMS; i++) { 9419 for (i = 0; i < IPR_MAX_HCAMS; i++) {
9336 dma_free_coherent(&ioa_cfg->pdev->dev, 9420 dma_free_coherent(&ioa_cfg->pdev->dev,
9337 sizeof(struct ipr_hostrcb), 9421 sizeof(struct ipr_hostrcb),
9338 ioa_cfg->hostrcb[i], 9422 ioa_cfg->hostrcb[i],
@@ -9572,7 +9656,7 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
9572 if (!ioa_cfg->u.cfg_table) 9656 if (!ioa_cfg->u.cfg_table)
9573 goto out_free_host_rrq; 9657 goto out_free_host_rrq;
9574 9658
9575 for (i = 0; i < IPR_NUM_HCAMS; i++) { 9659 for (i = 0; i < IPR_MAX_HCAMS; i++) {
9576 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev, 9660 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9577 sizeof(struct ipr_hostrcb), 9661 sizeof(struct ipr_hostrcb),
9578 &ioa_cfg->hostrcb_dma[i], 9662 &ioa_cfg->hostrcb_dma[i],
@@ -9714,6 +9798,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9714 9798
9715 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q); 9799 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9716 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q); 9800 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9801 INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9717 INIT_LIST_HEAD(&ioa_cfg->free_res_q); 9802 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9718 INIT_LIST_HEAD(&ioa_cfg->used_res_q); 9803 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9719 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); 9804 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
@@ -10352,6 +10437,8 @@ static void ipr_remove(struct pci_dev *pdev)
10352 &ipr_trace_attr); 10437 &ipr_trace_attr);
10353 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj, 10438 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10354 &ipr_dump_attr); 10439 &ipr_dump_attr);
10440 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10441 &ipr_ioa_async_err_log);
10355 scsi_remove_host(ioa_cfg->host); 10442 scsi_remove_host(ioa_cfg->host);
10356 10443
10357 __ipr_remove(pdev); 10444 __ipr_remove(pdev);
@@ -10400,10 +10487,25 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
10400 return rc; 10487 return rc;
10401 } 10488 }
10402 10489
10490 rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10491 &ipr_ioa_async_err_log);
10492
10493 if (rc) {
10494 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10495 &ipr_dump_attr);
10496 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10497 &ipr_trace_attr);
10498 scsi_remove_host(ioa_cfg->host);
10499 __ipr_remove(pdev);
10500 return rc;
10501 }
10502
10403 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj, 10503 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
10404 &ipr_dump_attr); 10504 &ipr_dump_attr);
10405 10505
10406 if (rc) { 10506 if (rc) {
10507 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10508 &ipr_ioa_async_err_log);
10407 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj, 10509 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10408 &ipr_trace_attr); 10510 &ipr_trace_attr);
10409 scsi_remove_host(ioa_cfg->host); 10511 scsi_remove_host(ioa_cfg->host);
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index cdb51960b53c..8995053d01b3 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -154,7 +154,9 @@
154#define IPR_DEFAULT_MAX_ERROR_DUMP 984 154#define IPR_DEFAULT_MAX_ERROR_DUMP 984
155#define IPR_NUM_LOG_HCAMS 2 155#define IPR_NUM_LOG_HCAMS 2
156#define IPR_NUM_CFG_CHG_HCAMS 2 156#define IPR_NUM_CFG_CHG_HCAMS 2
157#define IPR_NUM_HCAM_QUEUE 12
157#define IPR_NUM_HCAMS (IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS) 158#define IPR_NUM_HCAMS (IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS)
159#define IPR_MAX_HCAMS (IPR_NUM_HCAMS + IPR_NUM_HCAM_QUEUE)
158 160
159#define IPR_MAX_SIS64_TARGETS_PER_BUS 1024 161#define IPR_MAX_SIS64_TARGETS_PER_BUS 1024
160#define IPR_MAX_SIS64_LUNS_PER_TARGET 0xffffffff 162#define IPR_MAX_SIS64_LUNS_PER_TARGET 0xffffffff
@@ -1504,6 +1506,7 @@ struct ipr_ioa_cfg {
1504 u8 log_level; 1506 u8 log_level;
1505#define IPR_MAX_LOG_LEVEL 4 1507#define IPR_MAX_LOG_LEVEL 4
1506#define IPR_DEFAULT_LOG_LEVEL 2 1508#define IPR_DEFAULT_LOG_LEVEL 2
1509#define IPR_DEBUG_LOG_LEVEL 3
1507 1510
1508#define IPR_NUM_TRACE_INDEX_BITS 8 1511#define IPR_NUM_TRACE_INDEX_BITS 8
1509#define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS) 1512#define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS)
@@ -1532,10 +1535,11 @@ struct ipr_ioa_cfg {
1532 1535
1533 char ipr_hcam_label[8]; 1536 char ipr_hcam_label[8];
1534#define IPR_HCAM_LABEL "hcams" 1537#define IPR_HCAM_LABEL "hcams"
1535 struct ipr_hostrcb *hostrcb[IPR_NUM_HCAMS]; 1538 struct ipr_hostrcb *hostrcb[IPR_MAX_HCAMS];
1536 dma_addr_t hostrcb_dma[IPR_NUM_HCAMS]; 1539 dma_addr_t hostrcb_dma[IPR_MAX_HCAMS];
1537 struct list_head hostrcb_free_q; 1540 struct list_head hostrcb_free_q;
1538 struct list_head hostrcb_pending_q; 1541 struct list_head hostrcb_pending_q;
1542 struct list_head hostrcb_report_q;
1539 1543
1540 struct ipr_hrr_queue hrrq[IPR_MAX_HRRQ_NUM]; 1544 struct ipr_hrr_queue hrrq[IPR_MAX_HRRQ_NUM];
1541 u32 hrrq_num; 1545 u32 hrrq_num;
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index e72673b0a8fb..16ca31ad5ec0 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1837,7 +1837,6 @@ static void fc_exch_reset(struct fc_exch *ep)
1837 int rc = 1; 1837 int rc = 1;
1838 1838
1839 spin_lock_bh(&ep->ex_lock); 1839 spin_lock_bh(&ep->ex_lock);
1840 fc_exch_abort_locked(ep, 0);
1841 ep->state |= FC_EX_RST_CLEANUP; 1840 ep->state |= FC_EX_RST_CLEANUP;
1842 fc_exch_timer_cancel(ep); 1841 fc_exch_timer_cancel(ep);
1843 if (ep->esb_stat & ESB_ST_REC_QUAL) 1842 if (ep->esb_stat & ESB_ST_REC_QUAL)
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 93f596182145..97aeaddd600d 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -457,6 +457,9 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
457 */ 457 */
458static int fc_rport_logoff(struct fc_rport_priv *rdata) 458static int fc_rport_logoff(struct fc_rport_priv *rdata)
459{ 459{
460 struct fc_lport *lport = rdata->local_port;
461 u32 port_id = rdata->ids.port_id;
462
460 mutex_lock(&rdata->rp_mutex); 463 mutex_lock(&rdata->rp_mutex);
461 464
462 FC_RPORT_DBG(rdata, "Remove port\n"); 465 FC_RPORT_DBG(rdata, "Remove port\n");
@@ -466,6 +469,15 @@ static int fc_rport_logoff(struct fc_rport_priv *rdata)
466 FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n"); 469 FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
467 goto out; 470 goto out;
468 } 471 }
472 /*
473 * FC-LS states:
474 * To explicitly Logout, the initiating Nx_Port shall terminate
475 * other open Sequences that it initiated with the destination
476 * Nx_Port prior to performing Logout.
477 */
478 lport->tt.exch_mgr_reset(lport, 0, port_id);
479 lport->tt.exch_mgr_reset(lport, port_id, 0);
480
469 fc_rport_enter_logo(rdata); 481 fc_rport_enter_logo(rdata);
470 482
471 /* 483 /*
@@ -547,16 +559,24 @@ static void fc_rport_timeout(struct work_struct *work)
547 */ 559 */
548static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp) 560static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
549{ 561{
562 struct fc_lport *lport = rdata->local_port;
563
550 FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n", 564 FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
551 IS_ERR(fp) ? -PTR_ERR(fp) : 0, 565 IS_ERR(fp) ? -PTR_ERR(fp) : 0,
552 fc_rport_state(rdata), rdata->retries); 566 fc_rport_state(rdata), rdata->retries);
553 567
554 switch (rdata->rp_state) { 568 switch (rdata->rp_state) {
555 case RPORT_ST_FLOGI: 569 case RPORT_ST_FLOGI:
556 case RPORT_ST_PLOGI:
557 rdata->flags &= ~FC_RP_STARTED; 570 rdata->flags &= ~FC_RP_STARTED;
558 fc_rport_enter_delete(rdata, RPORT_EV_FAILED); 571 fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
559 break; 572 break;
573 case RPORT_ST_PLOGI:
574 if (lport->point_to_multipoint) {
575 rdata->flags &= ~FC_RP_STARTED;
576 fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
577 } else
578 fc_rport_enter_logo(rdata);
579 break;
560 case RPORT_ST_RTV: 580 case RPORT_ST_RTV:
561 fc_rport_enter_ready(rdata); 581 fc_rport_enter_ready(rdata);
562 break; 582 break;
@@ -1877,7 +1897,7 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
1877 spp->spp_type_ext = rspp->spp_type_ext; 1897 spp->spp_type_ext = rspp->spp_type_ext;
1878 spp->spp_flags = FC_SPP_RESP_ACK; 1898 spp->spp_flags = FC_SPP_RESP_ACK;
1879 1899
1880 fc_rport_enter_delete(rdata, RPORT_EV_LOGO); 1900 fc_rport_enter_prli(rdata);
1881 1901
1882 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0); 1902 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
1883 lport->tt.frame_send(lport, fp); 1903 lport->tt.frame_send(lport, fp);
@@ -1915,7 +1935,7 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
1915 FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n", 1935 FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
1916 fc_rport_state(rdata)); 1936 fc_rport_state(rdata));
1917 1937
1918 fc_rport_enter_delete(rdata, RPORT_EV_LOGO); 1938 fc_rport_enter_delete(rdata, RPORT_EV_STOP);
1919 mutex_unlock(&rdata->rp_mutex); 1939 mutex_unlock(&rdata->rp_mutex);
1920 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy); 1940 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
1921 } else 1941 } else
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 63e48d4277b0..4ac03b16d17f 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1535,7 +1535,7 @@ lpfc_fdmi_num_disc_check(struct lpfc_vport *vport)
1535} 1535}
1536 1536
1537/* Routines for all individual HBA attributes */ 1537/* Routines for all individual HBA attributes */
1538int 1538static int
1539lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) 1539lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
1540{ 1540{
1541 struct lpfc_fdmi_attr_entry *ae; 1541 struct lpfc_fdmi_attr_entry *ae;
@@ -1551,7 +1551,7 @@ lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
1551 ad->AttrType = cpu_to_be16(RHBA_NODENAME); 1551 ad->AttrType = cpu_to_be16(RHBA_NODENAME);
1552 return size; 1552 return size;
1553} 1553}
1554int 1554static int
1555lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport, 1555lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
1556 struct lpfc_fdmi_attr_def *ad) 1556 struct lpfc_fdmi_attr_def *ad)
1557{ 1557{
@@ -1573,7 +1573,7 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
1573 return size; 1573 return size;
1574} 1574}
1575 1575
1576int 1576static int
1577lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad) 1577lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
1578{ 1578{
1579 struct lpfc_hba *phba = vport->phba; 1579 struct lpfc_hba *phba = vport->phba;
@@ -1594,7 +1594,7 @@ lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
1594 return size; 1594 return size;
1595} 1595}
1596 1596
1597int 1597static int
1598lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport, 1598lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport,
1599 struct lpfc_fdmi_attr_def *ad) 1599 struct lpfc_fdmi_attr_def *ad)
1600{ 1600{
@@ -1615,7 +1615,7 @@ lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport,
1615 return size; 1615 return size;
1616} 1616}
1617 1617
1618int 1618static int
1619lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport, 1619lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport,
1620 struct lpfc_fdmi_attr_def *ad) 1620 struct lpfc_fdmi_attr_def *ad)
1621{ 1621{
@@ -1637,7 +1637,7 @@ lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport,
1637 return size; 1637 return size;
1638} 1638}
1639 1639
1640int 1640static int
1641lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport, 1641lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport,
1642 struct lpfc_fdmi_attr_def *ad) 1642 struct lpfc_fdmi_attr_def *ad)
1643{ 1643{
@@ -1669,7 +1669,7 @@ lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport,
1669 return size; 1669 return size;
1670} 1670}
1671 1671
1672int 1672static int
1673lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport, 1673lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport,
1674 struct lpfc_fdmi_attr_def *ad) 1674 struct lpfc_fdmi_attr_def *ad)
1675{ 1675{
@@ -1690,7 +1690,7 @@ lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport,
1690 return size; 1690 return size;
1691} 1691}
1692 1692
1693int 1693static int
1694lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport, 1694lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport,
1695 struct lpfc_fdmi_attr_def *ad) 1695 struct lpfc_fdmi_attr_def *ad)
1696{ 1696{
@@ -1715,7 +1715,7 @@ lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport,
1715 return size; 1715 return size;
1716} 1716}
1717 1717
1718int 1718static int
1719lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport, 1719lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport,
1720 struct lpfc_fdmi_attr_def *ad) 1720 struct lpfc_fdmi_attr_def *ad)
1721{ 1721{
@@ -1736,7 +1736,7 @@ lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport,
1736 return size; 1736 return size;
1737} 1737}
1738 1738
1739int 1739static int
1740lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport, 1740lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport,
1741 struct lpfc_fdmi_attr_def *ad) 1741 struct lpfc_fdmi_attr_def *ad)
1742{ 1742{
@@ -1759,7 +1759,7 @@ lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport,
1759 return size; 1759 return size;
1760} 1760}
1761 1761
1762int 1762static int
1763lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport, 1763lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport,
1764 struct lpfc_fdmi_attr_def *ad) 1764 struct lpfc_fdmi_attr_def *ad)
1765{ 1765{
@@ -1775,7 +1775,7 @@ lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport,
1775 return size; 1775 return size;
1776} 1776}
1777 1777
1778int 1778static int
1779lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport, 1779lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport,
1780 struct lpfc_fdmi_attr_def *ad) 1780 struct lpfc_fdmi_attr_def *ad)
1781{ 1781{
@@ -1794,7 +1794,7 @@ lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport,
1794 return size; 1794 return size;
1795} 1795}
1796 1796
1797int 1797static int
1798lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport, 1798lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport,
1799 struct lpfc_fdmi_attr_def *ad) 1799 struct lpfc_fdmi_attr_def *ad)
1800{ 1800{
@@ -1811,7 +1811,7 @@ lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport,
1811 return size; 1811 return size;
1812} 1812}
1813 1813
1814int 1814static int
1815lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport, 1815lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport,
1816 struct lpfc_fdmi_attr_def *ad) 1816 struct lpfc_fdmi_attr_def *ad)
1817{ 1817{
@@ -1828,7 +1828,7 @@ lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport,
1828 return size; 1828 return size;
1829} 1829}
1830 1830
1831int 1831static int
1832lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport, 1832lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport,
1833 struct lpfc_fdmi_attr_def *ad) 1833 struct lpfc_fdmi_attr_def *ad)
1834{ 1834{
@@ -1846,7 +1846,7 @@ lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport,
1846 return size; 1846 return size;
1847} 1847}
1848 1848
1849int 1849static int
1850lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport, 1850lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport,
1851 struct lpfc_fdmi_attr_def *ad) 1851 struct lpfc_fdmi_attr_def *ad)
1852{ 1852{
@@ -1867,7 +1867,7 @@ lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport,
1867 return size; 1867 return size;
1868} 1868}
1869 1869
1870int 1870static int
1871lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport, 1871lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport,
1872 struct lpfc_fdmi_attr_def *ad) 1872 struct lpfc_fdmi_attr_def *ad)
1873{ 1873{
@@ -1884,7 +1884,7 @@ lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport,
1884 return size; 1884 return size;
1885} 1885}
1886 1886
1887int 1887static int
1888lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport, 1888lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport,
1889 struct lpfc_fdmi_attr_def *ad) 1889 struct lpfc_fdmi_attr_def *ad)
1890{ 1890{
@@ -1906,7 +1906,7 @@ lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport,
1906} 1906}
1907 1907
1908/* Routines for all individual PORT attributes */ 1908/* Routines for all individual PORT attributes */
1909int 1909static int
1910lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, 1910lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
1911 struct lpfc_fdmi_attr_def *ad) 1911 struct lpfc_fdmi_attr_def *ad)
1912{ 1912{
@@ -1925,7 +1925,7 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
1925 return size; 1925 return size;
1926} 1926}
1927 1927
1928int 1928static int
1929lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, 1929lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
1930 struct lpfc_fdmi_attr_def *ad) 1930 struct lpfc_fdmi_attr_def *ad)
1931{ 1931{
@@ -1975,7 +1975,7 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
1975 return size; 1975 return size;
1976} 1976}
1977 1977
1978int 1978static int
1979lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, 1979lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport,
1980 struct lpfc_fdmi_attr_def *ad) 1980 struct lpfc_fdmi_attr_def *ad)
1981{ 1981{
@@ -2039,7 +2039,7 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport,
2039 return size; 2039 return size;
2040} 2040}
2041 2041
2042int 2042static int
2043lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport, 2043lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport,
2044 struct lpfc_fdmi_attr_def *ad) 2044 struct lpfc_fdmi_attr_def *ad)
2045{ 2045{
@@ -2059,7 +2059,7 @@ lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport,
2059 return size; 2059 return size;
2060} 2060}
2061 2061
2062int 2062static int
2063lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport, 2063lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport,
2064 struct lpfc_fdmi_attr_def *ad) 2064 struct lpfc_fdmi_attr_def *ad)
2065{ 2065{
@@ -2081,7 +2081,7 @@ lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport,
2081 return size; 2081 return size;
2082} 2082}
2083 2083
2084int 2084static int
2085lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport, 2085lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
2086 struct lpfc_fdmi_attr_def *ad) 2086 struct lpfc_fdmi_attr_def *ad)
2087{ 2087{
@@ -2102,7 +2102,7 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
2102 return size; 2102 return size;
2103} 2103}
2104 2104
2105int 2105static int
2106lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport, 2106lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport,
2107 struct lpfc_fdmi_attr_def *ad) 2107 struct lpfc_fdmi_attr_def *ad)
2108{ 2108{
@@ -2120,7 +2120,7 @@ lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport,
2120 return size; 2120 return size;
2121} 2121}
2122 2122
2123int 2123static int
2124lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport, 2124lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport,
2125 struct lpfc_fdmi_attr_def *ad) 2125 struct lpfc_fdmi_attr_def *ad)
2126{ 2126{
@@ -2138,7 +2138,7 @@ lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport,
2138 return size; 2138 return size;
2139} 2139}
2140 2140
2141int 2141static int
2142lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport, 2142lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport,
2143 struct lpfc_fdmi_attr_def *ad) 2143 struct lpfc_fdmi_attr_def *ad)
2144{ 2144{
@@ -2156,7 +2156,7 @@ lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport,
2156 return size; 2156 return size;
2157} 2157}
2158 2158
2159int 2159static int
2160lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport, 2160lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport,
2161 struct lpfc_fdmi_attr_def *ad) 2161 struct lpfc_fdmi_attr_def *ad)
2162{ 2162{
@@ -2175,7 +2175,7 @@ lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport,
2175 return size; 2175 return size;
2176} 2176}
2177 2177
2178int 2178static int
2179lpfc_fdmi_port_attr_class(struct lpfc_vport *vport, 2179lpfc_fdmi_port_attr_class(struct lpfc_vport *vport,
2180 struct lpfc_fdmi_attr_def *ad) 2180 struct lpfc_fdmi_attr_def *ad)
2181{ 2181{
@@ -2190,7 +2190,7 @@ lpfc_fdmi_port_attr_class(struct lpfc_vport *vport,
2190 return size; 2190 return size;
2191} 2191}
2192 2192
2193int 2193static int
2194lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport, 2194lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport,
2195 struct lpfc_fdmi_attr_def *ad) 2195 struct lpfc_fdmi_attr_def *ad)
2196{ 2196{
@@ -2208,7 +2208,7 @@ lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport,
2208 return size; 2208 return size;
2209} 2209}
2210 2210
2211int 2211static int
2212lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport, 2212lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
2213 struct lpfc_fdmi_attr_def *ad) 2213 struct lpfc_fdmi_attr_def *ad)
2214{ 2214{
@@ -2227,7 +2227,7 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
2227 return size; 2227 return size;
2228} 2228}
2229 2229
2230int 2230static int
2231lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport, 2231lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport,
2232 struct lpfc_fdmi_attr_def *ad) 2232 struct lpfc_fdmi_attr_def *ad)
2233{ 2233{
@@ -2243,7 +2243,7 @@ lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport,
2243 return size; 2243 return size;
2244} 2244}
2245 2245
2246int 2246static int
2247lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport, 2247lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport,
2248 struct lpfc_fdmi_attr_def *ad) 2248 struct lpfc_fdmi_attr_def *ad)
2249{ 2249{
@@ -2259,7 +2259,7 @@ lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport,
2259 return size; 2259 return size;
2260} 2260}
2261 2261
2262int 2262static int
2263lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport, 2263lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport,
2264 struct lpfc_fdmi_attr_def *ad) 2264 struct lpfc_fdmi_attr_def *ad)
2265{ 2265{
@@ -2274,7 +2274,7 @@ lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport,
2274 return size; 2274 return size;
2275} 2275}
2276 2276
2277int 2277static int
2278lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport, 2278lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport,
2279 struct lpfc_fdmi_attr_def *ad) 2279 struct lpfc_fdmi_attr_def *ad)
2280{ 2280{
@@ -2295,7 +2295,7 @@ lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport,
2295 return size; 2295 return size;
2296} 2296}
2297 2297
2298int 2298static int
2299lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport, 2299lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport,
2300 struct lpfc_fdmi_attr_def *ad) 2300 struct lpfc_fdmi_attr_def *ad)
2301{ 2301{
@@ -2316,7 +2316,7 @@ lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport,
2316 return size; 2316 return size;
2317} 2317}
2318 2318
2319int 2319static int
2320lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport, 2320lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport,
2321 struct lpfc_fdmi_attr_def *ad) 2321 struct lpfc_fdmi_attr_def *ad)
2322{ 2322{
@@ -2337,7 +2337,7 @@ lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport,
2337 return size; 2337 return size;
2338} 2338}
2339 2339
2340int 2340static int
2341lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport, 2341lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport,
2342 struct lpfc_fdmi_attr_def *ad) 2342 struct lpfc_fdmi_attr_def *ad)
2343{ 2343{
@@ -2358,7 +2358,7 @@ lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport,
2358 return size; 2358 return size;
2359} 2359}
2360 2360
2361int 2361static int
2362lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport, 2362lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport,
2363 struct lpfc_fdmi_attr_def *ad) 2363 struct lpfc_fdmi_attr_def *ad)
2364{ 2364{
@@ -2378,7 +2378,7 @@ lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport,
2378 return size; 2378 return size;
2379} 2379}
2380 2380
2381int 2381static int
2382lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport, 2382lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport,
2383 struct lpfc_fdmi_attr_def *ad) 2383 struct lpfc_fdmi_attr_def *ad)
2384{ 2384{
@@ -2393,7 +2393,7 @@ lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport,
2393 return size; 2393 return size;
2394} 2394}
2395 2395
2396int 2396static int
2397lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport, 2397lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport,
2398 struct lpfc_fdmi_attr_def *ad) 2398 struct lpfc_fdmi_attr_def *ad)
2399{ 2399{
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index c0af32f24954..b7d54bfb1df9 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -4617,7 +4617,7 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport)
4617 return sentplogi; 4617 return sentplogi;
4618} 4618}
4619 4619
4620uint32_t 4620static uint32_t
4621lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc, 4621lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc,
4622 uint32_t word0) 4622 uint32_t word0)
4623{ 4623{
@@ -4629,7 +4629,7 @@ lpfc_rdp_res_link_service(struct fc_rdp_link_service_desc *desc,
4629 return sizeof(struct fc_rdp_link_service_desc); 4629 return sizeof(struct fc_rdp_link_service_desc);
4630} 4630}
4631 4631
4632uint32_t 4632static uint32_t
4633lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc, 4633lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc,
4634 uint8_t *page_a0, uint8_t *page_a2) 4634 uint8_t *page_a0, uint8_t *page_a2)
4635{ 4635{
@@ -4694,7 +4694,7 @@ lpfc_rdp_res_sfp_desc(struct fc_rdp_sfp_desc *desc,
4694 return sizeof(struct fc_rdp_sfp_desc); 4694 return sizeof(struct fc_rdp_sfp_desc);
4695} 4695}
4696 4696
4697uint32_t 4697static uint32_t
4698lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc, 4698lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc,
4699 READ_LNK_VAR *stat) 4699 READ_LNK_VAR *stat)
4700{ 4700{
@@ -4723,7 +4723,7 @@ lpfc_rdp_res_link_error(struct fc_rdp_link_error_status_desc *desc,
4723 return sizeof(struct fc_rdp_link_error_status_desc); 4723 return sizeof(struct fc_rdp_link_error_status_desc);
4724} 4724}
4725 4725
4726uint32_t 4726static uint32_t
4727lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat, 4727lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat,
4728 struct lpfc_vport *vport) 4728 struct lpfc_vport *vport)
4729{ 4729{
@@ -4748,7 +4748,7 @@ lpfc_rdp_res_bbc_desc(struct fc_rdp_bbc_desc *desc, READ_LNK_VAR *stat,
4748 return sizeof(struct fc_rdp_bbc_desc); 4748 return sizeof(struct fc_rdp_bbc_desc);
4749} 4749}
4750 4750
4751uint32_t 4751static uint32_t
4752lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba, 4752lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba,
4753 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2) 4753 struct fc_rdp_oed_sfp_desc *desc, uint8_t *page_a2)
4754{ 4754{
@@ -4776,7 +4776,7 @@ lpfc_rdp_res_oed_temp_desc(struct lpfc_hba *phba,
4776 return sizeof(struct fc_rdp_oed_sfp_desc); 4776 return sizeof(struct fc_rdp_oed_sfp_desc);
4777} 4777}
4778 4778
4779uint32_t 4779static uint32_t
4780lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba, 4780lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba,
4781 struct fc_rdp_oed_sfp_desc *desc, 4781 struct fc_rdp_oed_sfp_desc *desc,
4782 uint8_t *page_a2) 4782 uint8_t *page_a2)
@@ -4805,7 +4805,7 @@ lpfc_rdp_res_oed_voltage_desc(struct lpfc_hba *phba,
4805 return sizeof(struct fc_rdp_oed_sfp_desc); 4805 return sizeof(struct fc_rdp_oed_sfp_desc);
4806} 4806}
4807 4807
4808uint32_t 4808static uint32_t
4809lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba, 4809lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba,
4810 struct fc_rdp_oed_sfp_desc *desc, 4810 struct fc_rdp_oed_sfp_desc *desc,
4811 uint8_t *page_a2) 4811 uint8_t *page_a2)
@@ -4834,7 +4834,7 @@ lpfc_rdp_res_oed_txbias_desc(struct lpfc_hba *phba,
4834 return sizeof(struct fc_rdp_oed_sfp_desc); 4834 return sizeof(struct fc_rdp_oed_sfp_desc);
4835} 4835}
4836 4836
4837uint32_t 4837static uint32_t
4838lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba, 4838lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba,
4839 struct fc_rdp_oed_sfp_desc *desc, 4839 struct fc_rdp_oed_sfp_desc *desc,
4840 uint8_t *page_a2) 4840 uint8_t *page_a2)
@@ -4864,7 +4864,7 @@ lpfc_rdp_res_oed_txpower_desc(struct lpfc_hba *phba,
4864} 4864}
4865 4865
4866 4866
4867uint32_t 4867static uint32_t
4868lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba, 4868lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba,
4869 struct fc_rdp_oed_sfp_desc *desc, 4869 struct fc_rdp_oed_sfp_desc *desc,
4870 uint8_t *page_a2) 4870 uint8_t *page_a2)
@@ -4893,7 +4893,7 @@ lpfc_rdp_res_oed_rxpower_desc(struct lpfc_hba *phba,
4893 return sizeof(struct fc_rdp_oed_sfp_desc); 4893 return sizeof(struct fc_rdp_oed_sfp_desc);
4894} 4894}
4895 4895
4896uint32_t 4896static uint32_t
4897lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc, 4897lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc,
4898 uint8_t *page_a0, struct lpfc_vport *vport) 4898 uint8_t *page_a0, struct lpfc_vport *vport)
4899{ 4899{
@@ -4907,7 +4907,7 @@ lpfc_rdp_res_opd_desc(struct fc_rdp_opd_sfp_desc *desc,
4907 return sizeof(struct fc_rdp_opd_sfp_desc); 4907 return sizeof(struct fc_rdp_opd_sfp_desc);
4908} 4908}
4909 4909
4910uint32_t 4910static uint32_t
4911lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat) 4911lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat)
4912{ 4912{
4913 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0) 4913 if (bf_get(lpfc_read_link_stat_gec2, stat) == 0)
@@ -4924,7 +4924,7 @@ lpfc_rdp_res_fec_desc(struct fc_fec_rdp_desc *desc, READ_LNK_VAR *stat)
4924 return sizeof(struct fc_fec_rdp_desc); 4924 return sizeof(struct fc_fec_rdp_desc);
4925} 4925}
4926 4926
4927uint32_t 4927static uint32_t
4928lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba) 4928lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
4929{ 4929{
4930 uint16_t rdp_cap = 0; 4930 uint16_t rdp_cap = 0;
@@ -4986,7 +4986,7 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
4986 return sizeof(struct fc_rdp_port_speed_desc); 4986 return sizeof(struct fc_rdp_port_speed_desc);
4987} 4987}
4988 4988
4989uint32_t 4989static uint32_t
4990lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc, 4990lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
4991 struct lpfc_hba *phba) 4991 struct lpfc_hba *phba)
4992{ 4992{
@@ -5003,7 +5003,7 @@ lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
5003 return sizeof(struct fc_rdp_port_name_desc); 5003 return sizeof(struct fc_rdp_port_name_desc);
5004} 5004}
5005 5005
5006uint32_t 5006static uint32_t
5007lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, 5007lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
5008 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 5008 struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
5009{ 5009{
@@ -5027,7 +5027,7 @@ lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc,
5027 return sizeof(struct fc_rdp_port_name_desc); 5027 return sizeof(struct fc_rdp_port_name_desc);
5028} 5028}
5029 5029
5030void 5030static void
5031lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, 5031lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
5032 int status) 5032 int status)
5033{ 5033{
@@ -5165,7 +5165,7 @@ free_rdp_context:
5165 kfree(rdp_context); 5165 kfree(rdp_context);
5166} 5166}
5167 5167
5168int 5168static int
5169lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context) 5169lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
5170{ 5170{
5171 LPFC_MBOXQ_t *mbox = NULL; 5171 LPFC_MBOXQ_t *mbox = NULL;
@@ -7995,7 +7995,7 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
7995 } 7995 }
7996} 7996}
7997 7997
7998void 7998static void
7999lpfc_start_fdmi(struct lpfc_vport *vport) 7999lpfc_start_fdmi(struct lpfc_vport *vport)
8000{ 8000{
8001 struct lpfc_hba *phba = vport->phba; 8001 struct lpfc_hba *phba = vport->phba;
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 12dbe99ccc50..b234c50c255f 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2260,7 +2260,7 @@ lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
2260 return 0; 2260 return 0;
2261} 2261}
2262 2262
2263void 2263static void
2264lpfc_mbx_cmpl_rdp_link_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 2264lpfc_mbx_cmpl_rdp_link_stat(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2265{ 2265{
2266 MAILBOX_t *mb; 2266 MAILBOX_t *mb;
@@ -2281,7 +2281,7 @@ mbx_failed:
2281 rdp_context->cmpl(phba, rdp_context, rc); 2281 rdp_context->cmpl(phba, rdp_context, rc);
2282} 2282}
2283 2283
2284void 2284static void
2285lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) 2285lpfc_mbx_cmpl_rdp_page_a2(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
2286{ 2286{
2287 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) mbox->context1; 2287 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) mbox->context1;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 7080ce2920fd..c5326055beee 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -5689,7 +5689,7 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5689 return rc; 5689 return rc;
5690} 5690}
5691 5691
5692void 5692static void
5693lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, 5693lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
5694 uint32_t feature) 5694 uint32_t feature)
5695{ 5695{
@@ -8968,7 +8968,7 @@ lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8968 * Since ABORTS must go on the same WQ of the command they are 8968 * Since ABORTS must go on the same WQ of the command they are
8969 * aborting, we use command's fcp_wqidx. 8969 * aborting, we use command's fcp_wqidx.
8970 */ 8970 */
8971int 8971static int
8972lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number, 8972lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number,
8973 struct lpfc_iocbq *piocb) 8973 struct lpfc_iocbq *piocb)
8974{ 8974{
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index c1ed25adb17e..9ff57dee72d7 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -189,25 +189,12 @@ u32
189megasas_build_and_issue_cmd(struct megasas_instance *instance, 189megasas_build_and_issue_cmd(struct megasas_instance *instance,
190 struct scsi_cmnd *scmd); 190 struct scsi_cmnd *scmd);
191static void megasas_complete_cmd_dpc(unsigned long instance_addr); 191static void megasas_complete_cmd_dpc(unsigned long instance_addr);
192void
193megasas_release_fusion(struct megasas_instance *instance);
194int
195megasas_ioc_init_fusion(struct megasas_instance *instance);
196void
197megasas_free_cmds_fusion(struct megasas_instance *instance);
198u8
199megasas_get_map_info(struct megasas_instance *instance);
200int
201megasas_sync_map_info(struct megasas_instance *instance);
202int 192int
203wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd, 193wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
204 int seconds); 194 int seconds);
205void megasas_reset_reply_desc(struct megasas_instance *instance);
206void megasas_fusion_ocr_wq(struct work_struct *work); 195void megasas_fusion_ocr_wq(struct work_struct *work);
207static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance, 196static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
208 int initial); 197 int initial);
209int megasas_check_mpio_paths(struct megasas_instance *instance,
210 struct scsi_cmnd *scmd);
211 198
212int 199int
213megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd) 200megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
@@ -5036,7 +5023,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
5036 5023
5037 /* Find first memory bar */ 5024 /* Find first memory bar */
5038 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); 5025 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
5039 instance->bar = find_first_bit(&bar_list, sizeof(unsigned long)); 5026 instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
5040 if (pci_request_selected_regions(instance->pdev, 1<<instance->bar, 5027 if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
5041 "megasas: LSI")) { 5028 "megasas: LSI")) {
5042 dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n"); 5029 dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
@@ -5782,7 +5769,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
5782 &instance->consumer_h); 5769 &instance->consumer_h);
5783 5770
5784 if (!instance->producer || !instance->consumer) { 5771 if (!instance->producer || !instance->consumer) {
5785 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate" 5772 dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate "
5786 "memory for producer, consumer\n"); 5773 "memory for producer, consumer\n");
5787 goto fail_alloc_dma_buf; 5774 goto fail_alloc_dma_buf;
5788 } 5775 }
@@ -6711,14 +6698,9 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
6711 unsigned long flags; 6698 unsigned long flags;
6712 u32 wait_time = MEGASAS_RESET_WAIT_TIME; 6699 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
6713 6700
6714 ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); 6701 ioc = memdup_user(user_ioc, sizeof(*ioc));
6715 if (!ioc) 6702 if (IS_ERR(ioc))
6716 return -ENOMEM; 6703 return PTR_ERR(ioc);
6717
6718 if (copy_from_user(ioc, user_ioc, sizeof(*ioc))) {
6719 error = -EFAULT;
6720 goto out_kfree_ioc;
6721 }
6722 6704
6723 instance = megasas_lookup_instance(ioc->host_no); 6705 instance = megasas_lookup_instance(ioc->host_no);
6724 if (!instance) { 6706 if (!instance) {
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 80eaee22f5bc..e3bee04c1eb1 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -991,5 +991,14 @@ union desc_value {
991 } u; 991 } u;
992}; 992};
993 993
994void megasas_free_cmds_fusion(struct megasas_instance *instance);
995int megasas_ioc_init_fusion(struct megasas_instance *instance);
996u8 megasas_get_map_info(struct megasas_instance *instance);
997int megasas_sync_map_info(struct megasas_instance *instance);
998void megasas_release_fusion(struct megasas_instance *instance);
999void megasas_reset_reply_desc(struct megasas_instance *instance);
1000int megasas_check_mpio_paths(struct megasas_instance *instance,
1001 struct scsi_cmnd *scmd);
1002void megasas_fusion_ocr_wq(struct work_struct *work);
994 1003
995#endif /* _MEGARAID_SAS_FUSION_H_ */ 1004#endif /* _MEGARAID_SAS_FUSION_H_ */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 750f82c339d4..a1a5ceb42ce6 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -98,7 +98,7 @@ MODULE_PARM_DESC(mpt3sas_fwfault_debug,
98 " enable detection of firmware fault and halt firmware - (default=0)"); 98 " enable detection of firmware fault and halt firmware - (default=0)");
99 99
100static int 100static int
101_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag); 101_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
102 102
103/** 103/**
104 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug. 104 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
@@ -218,8 +218,7 @@ _base_fault_reset_work(struct work_struct *work)
218 ioc->non_operational_loop = 0; 218 ioc->non_operational_loop = 0;
219 219
220 if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) { 220 if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
221 rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 221 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
222 FORCE_BIG_HAMMER);
223 pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name, 222 pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
224 __func__, (rc == 0) ? "success" : "failed"); 223 __func__, (rc == 0) ? "success" : "failed");
225 doorbell = mpt3sas_base_get_iocstate(ioc, 0); 224 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
@@ -2040,7 +2039,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
2040 * mpt3sas_base_unmap_resources - free controller resources 2039 * mpt3sas_base_unmap_resources - free controller resources
2041 * @ioc: per adapter object 2040 * @ioc: per adapter object
2042 */ 2041 */
2043void 2042static void
2044mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc) 2043mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
2045{ 2044{
2046 struct pci_dev *pdev = ioc->pdev; 2045 struct pci_dev *pdev = ioc->pdev;
@@ -2145,7 +2144,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
2145 2144
2146 _base_mask_interrupts(ioc); 2145 _base_mask_interrupts(ioc);
2147 2146
2148 r = _base_get_ioc_facts(ioc, CAN_SLEEP); 2147 r = _base_get_ioc_facts(ioc);
2149 if (r) 2148 if (r)
2150 goto out_fail; 2149 goto out_fail;
2151 2150
@@ -3183,12 +3182,11 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
3183/** 3182/**
3184 * _base_allocate_memory_pools - allocate start of day memory pools 3183 * _base_allocate_memory_pools - allocate start of day memory pools
3185 * @ioc: per adapter object 3184 * @ioc: per adapter object
3186 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3187 * 3185 *
3188 * Returns 0 success, anything else error 3186 * Returns 0 success, anything else error
3189 */ 3187 */
3190static int 3188static int
3191_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) 3189_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
3192{ 3190{
3193 struct mpt3sas_facts *facts; 3191 struct mpt3sas_facts *facts;
3194 u16 max_sge_elements; 3192 u16 max_sge_elements;
@@ -3658,29 +3656,25 @@ mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
3658 * _base_wait_on_iocstate - waiting on a particular ioc state 3656 * _base_wait_on_iocstate - waiting on a particular ioc state
3659 * @ioc_state: controller state { READY, OPERATIONAL, or RESET } 3657 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
3660 * @timeout: timeout in second 3658 * @timeout: timeout in second
3661 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3662 * 3659 *
3663 * Returns 0 for success, non-zero for failure. 3660 * Returns 0 for success, non-zero for failure.
3664 */ 3661 */
3665static int 3662static int
3666_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout, 3663_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
3667 int sleep_flag)
3668{ 3664{
3669 u32 count, cntdn; 3665 u32 count, cntdn;
3670 u32 current_state; 3666 u32 current_state;
3671 3667
3672 count = 0; 3668 count = 0;
3673 cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout; 3669 cntdn = 1000 * timeout;
3674 do { 3670 do {
3675 current_state = mpt3sas_base_get_iocstate(ioc, 1); 3671 current_state = mpt3sas_base_get_iocstate(ioc, 1);
3676 if (current_state == ioc_state) 3672 if (current_state == ioc_state)
3677 return 0; 3673 return 0;
3678 if (count && current_state == MPI2_IOC_STATE_FAULT) 3674 if (count && current_state == MPI2_IOC_STATE_FAULT)
3679 break; 3675 break;
3680 if (sleep_flag == CAN_SLEEP) 3676
3681 usleep_range(1000, 1500); 3677 usleep_range(1000, 1500);
3682 else
3683 udelay(500);
3684 count++; 3678 count++;
3685 } while (--cntdn); 3679 } while (--cntdn);
3686 3680
@@ -3692,24 +3686,22 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
3692 * a write to the doorbell) 3686 * a write to the doorbell)
3693 * @ioc: per adapter object 3687 * @ioc: per adapter object
3694 * @timeout: timeout in second 3688 * @timeout: timeout in second
3695 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3696 * 3689 *
3697 * Returns 0 for success, non-zero for failure. 3690 * Returns 0 for success, non-zero for failure.
3698 * 3691 *
3699 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell. 3692 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
3700 */ 3693 */
3701static int 3694static int
3702_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag); 3695_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
3703 3696
3704static int 3697static int
3705_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout, 3698_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
3706 int sleep_flag)
3707{ 3699{
3708 u32 cntdn, count; 3700 u32 cntdn, count;
3709 u32 int_status; 3701 u32 int_status;
3710 3702
3711 count = 0; 3703 count = 0;
3712 cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout; 3704 cntdn = 1000 * timeout;
3713 do { 3705 do {
3714 int_status = readl(&ioc->chip->HostInterruptStatus); 3706 int_status = readl(&ioc->chip->HostInterruptStatus);
3715 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { 3707 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
@@ -3718,10 +3710,35 @@ _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
3718 ioc->name, __func__, count, timeout)); 3710 ioc->name, __func__, count, timeout));
3719 return 0; 3711 return 0;
3720 } 3712 }
3721 if (sleep_flag == CAN_SLEEP) 3713
3722 usleep_range(1000, 1500); 3714 usleep_range(1000, 1500);
3723 else 3715 count++;
3724 udelay(500); 3716 } while (--cntdn);
3717
3718 pr_err(MPT3SAS_FMT
3719 "%s: failed due to timeout count(%d), int_status(%x)!\n",
3720 ioc->name, __func__, count, int_status);
3721 return -EFAULT;
3722}
3723
3724static int
3725_base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
3726{
3727 u32 cntdn, count;
3728 u32 int_status;
3729
3730 count = 0;
3731 cntdn = 2000 * timeout;
3732 do {
3733 int_status = readl(&ioc->chip->HostInterruptStatus);
3734 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
3735 dhsprintk(ioc, pr_info(MPT3SAS_FMT
3736 "%s: successful count(%d), timeout(%d)\n",
3737 ioc->name, __func__, count, timeout));
3738 return 0;
3739 }
3740
3741 udelay(500);
3725 count++; 3742 count++;
3726 } while (--cntdn); 3743 } while (--cntdn);
3727 3744
@@ -3729,13 +3746,13 @@ _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
3729 "%s: failed due to timeout count(%d), int_status(%x)!\n", 3746 "%s: failed due to timeout count(%d), int_status(%x)!\n",
3730 ioc->name, __func__, count, int_status); 3747 ioc->name, __func__, count, int_status);
3731 return -EFAULT; 3748 return -EFAULT;
3749
3732} 3750}
3733 3751
3734/** 3752/**
3735 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell. 3753 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
3736 * @ioc: per adapter object 3754 * @ioc: per adapter object
3737 * @timeout: timeout in second 3755 * @timeout: timeout in second
3738 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3739 * 3756 *
3740 * Returns 0 for success, non-zero for failure. 3757 * Returns 0 for success, non-zero for failure.
3741 * 3758 *
@@ -3743,15 +3760,14 @@ _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
3743 * doorbell. 3760 * doorbell.
3744 */ 3761 */
3745static int 3762static int
3746_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout, 3763_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
3747 int sleep_flag)
3748{ 3764{
3749 u32 cntdn, count; 3765 u32 cntdn, count;
3750 u32 int_status; 3766 u32 int_status;
3751 u32 doorbell; 3767 u32 doorbell;
3752 3768
3753 count = 0; 3769 count = 0;
3754 cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout; 3770 cntdn = 1000 * timeout;
3755 do { 3771 do {
3756 int_status = readl(&ioc->chip->HostInterruptStatus); 3772 int_status = readl(&ioc->chip->HostInterruptStatus);
3757 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) { 3773 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
@@ -3769,10 +3785,7 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout,
3769 } else if (int_status == 0xFFFFFFFF) 3785 } else if (int_status == 0xFFFFFFFF)
3770 goto out; 3786 goto out;
3771 3787
3772 if (sleep_flag == CAN_SLEEP) 3788 usleep_range(1000, 1500);
3773 usleep_range(1000, 1500);
3774 else
3775 udelay(500);
3776 count++; 3789 count++;
3777 } while (--cntdn); 3790 } while (--cntdn);
3778 3791
@@ -3787,20 +3800,18 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout,
3787 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use 3800 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
3788 * @ioc: per adapter object 3801 * @ioc: per adapter object
3789 * @timeout: timeout in second 3802 * @timeout: timeout in second
3790 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3791 * 3803 *
3792 * Returns 0 for success, non-zero for failure. 3804 * Returns 0 for success, non-zero for failure.
3793 * 3805 *
3794 */ 3806 */
3795static int 3807static int
3796_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout, 3808_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
3797 int sleep_flag)
3798{ 3809{
3799 u32 cntdn, count; 3810 u32 cntdn, count;
3800 u32 doorbell_reg; 3811 u32 doorbell_reg;
3801 3812
3802 count = 0; 3813 count = 0;
3803 cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout; 3814 cntdn = 1000 * timeout;
3804 do { 3815 do {
3805 doorbell_reg = readl(&ioc->chip->Doorbell); 3816 doorbell_reg = readl(&ioc->chip->Doorbell);
3806 if (!(doorbell_reg & MPI2_DOORBELL_USED)) { 3817 if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
@@ -3809,10 +3820,8 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout,
3809 ioc->name, __func__, count, timeout)); 3820 ioc->name, __func__, count, timeout));
3810 return 0; 3821 return 0;
3811 } 3822 }
3812 if (sleep_flag == CAN_SLEEP) 3823
3813 usleep_range(1000, 1500); 3824 usleep_range(1000, 1500);
3814 else
3815 udelay(500);
3816 count++; 3825 count++;
3817 } while (--cntdn); 3826 } while (--cntdn);
3818 3827
@@ -3827,13 +3836,11 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout,
3827 * @ioc: per adapter object 3836 * @ioc: per adapter object
3828 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET 3837 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
3829 * @timeout: timeout in second 3838 * @timeout: timeout in second
3830 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3831 * 3839 *
3832 * Returns 0 for success, non-zero for failure. 3840 * Returns 0 for success, non-zero for failure.
3833 */ 3841 */
3834static int 3842static int
3835_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout, 3843_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
3836 int sleep_flag)
3837{ 3844{
3838 u32 ioc_state; 3845 u32 ioc_state;
3839 int r = 0; 3846 int r = 0;
@@ -3852,12 +3859,11 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout,
3852 3859
3853 writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT, 3860 writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
3854 &ioc->chip->Doorbell); 3861 &ioc->chip->Doorbell);
3855 if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) { 3862 if ((_base_wait_for_doorbell_ack(ioc, 15))) {
3856 r = -EFAULT; 3863 r = -EFAULT;
3857 goto out; 3864 goto out;
3858 } 3865 }
3859 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 3866 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
3860 timeout, sleep_flag);
3861 if (ioc_state) { 3867 if (ioc_state) {
3862 pr_err(MPT3SAS_FMT 3868 pr_err(MPT3SAS_FMT
3863 "%s: failed going to ready state (ioc_state=0x%x)\n", 3869 "%s: failed going to ready state (ioc_state=0x%x)\n",
@@ -3879,18 +3885,16 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout,
3879 * @reply_bytes: reply length 3885 * @reply_bytes: reply length
3880 * @reply: pointer to reply payload 3886 * @reply: pointer to reply payload
3881 * @timeout: timeout in second 3887 * @timeout: timeout in second
3882 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3883 * 3888 *
3884 * Returns 0 for success, non-zero for failure. 3889 * Returns 0 for success, non-zero for failure.
3885 */ 3890 */
3886static int 3891static int
3887_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, 3892_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
3888 u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag) 3893 u32 *request, int reply_bytes, u16 *reply, int timeout)
3889{ 3894{
3890 MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply; 3895 MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
3891 int i; 3896 int i;
3892 u8 failed; 3897 u8 failed;
3893 u16 dummy;
3894 __le32 *mfp; 3898 __le32 *mfp;
3895 3899
3896 /* make sure doorbell is not in use */ 3900 /* make sure doorbell is not in use */
@@ -3911,7 +3915,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
3911 ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)), 3915 ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
3912 &ioc->chip->Doorbell); 3916 &ioc->chip->Doorbell);
3913 3917
3914 if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) { 3918 if ((_base_spin_on_doorbell_int(ioc, 5))) {
3915 pr_err(MPT3SAS_FMT 3919 pr_err(MPT3SAS_FMT
3916 "doorbell handshake int failed (line=%d)\n", 3920 "doorbell handshake int failed (line=%d)\n",
3917 ioc->name, __LINE__); 3921 ioc->name, __LINE__);
@@ -3919,7 +3923,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
3919 } 3923 }
3920 writel(0, &ioc->chip->HostInterruptStatus); 3924 writel(0, &ioc->chip->HostInterruptStatus);
3921 3925
3922 if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) { 3926 if ((_base_wait_for_doorbell_ack(ioc, 5))) {
3923 pr_err(MPT3SAS_FMT 3927 pr_err(MPT3SAS_FMT
3924 "doorbell handshake ack failed (line=%d)\n", 3928 "doorbell handshake ack failed (line=%d)\n",
3925 ioc->name, __LINE__); 3929 ioc->name, __LINE__);
@@ -3929,7 +3933,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
3929 /* send message 32-bits at a time */ 3933 /* send message 32-bits at a time */
3930 for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) { 3934 for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
3931 writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell); 3935 writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
3932 if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) 3936 if ((_base_wait_for_doorbell_ack(ioc, 5)))
3933 failed = 1; 3937 failed = 1;
3934 } 3938 }
3935 3939
@@ -3941,7 +3945,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
3941 } 3945 }
3942 3946
3943 /* now wait for the reply */ 3947 /* now wait for the reply */
3944 if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) { 3948 if ((_base_wait_for_doorbell_int(ioc, timeout))) {
3945 pr_err(MPT3SAS_FMT 3949 pr_err(MPT3SAS_FMT
3946 "doorbell handshake int failed (line=%d)\n", 3950 "doorbell handshake int failed (line=%d)\n",
3947 ioc->name, __LINE__); 3951 ioc->name, __LINE__);
@@ -3952,7 +3956,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
3952 reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell) 3956 reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3953 & MPI2_DOORBELL_DATA_MASK); 3957 & MPI2_DOORBELL_DATA_MASK);
3954 writel(0, &ioc->chip->HostInterruptStatus); 3958 writel(0, &ioc->chip->HostInterruptStatus);
3955 if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) { 3959 if ((_base_wait_for_doorbell_int(ioc, 5))) {
3956 pr_err(MPT3SAS_FMT 3960 pr_err(MPT3SAS_FMT
3957 "doorbell handshake int failed (line=%d)\n", 3961 "doorbell handshake int failed (line=%d)\n",
3958 ioc->name, __LINE__); 3962 ioc->name, __LINE__);
@@ -3963,22 +3967,22 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
3963 writel(0, &ioc->chip->HostInterruptStatus); 3967 writel(0, &ioc->chip->HostInterruptStatus);
3964 3968
3965 for (i = 2; i < default_reply->MsgLength * 2; i++) { 3969 for (i = 2; i < default_reply->MsgLength * 2; i++) {
3966 if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) { 3970 if ((_base_wait_for_doorbell_int(ioc, 5))) {
3967 pr_err(MPT3SAS_FMT 3971 pr_err(MPT3SAS_FMT
3968 "doorbell handshake int failed (line=%d)\n", 3972 "doorbell handshake int failed (line=%d)\n",
3969 ioc->name, __LINE__); 3973 ioc->name, __LINE__);
3970 return -EFAULT; 3974 return -EFAULT;
3971 } 3975 }
3972 if (i >= reply_bytes/2) /* overflow case */ 3976 if (i >= reply_bytes/2) /* overflow case */
3973 dummy = readl(&ioc->chip->Doorbell); 3977 readl(&ioc->chip->Doorbell);
3974 else 3978 else
3975 reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell) 3979 reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
3976 & MPI2_DOORBELL_DATA_MASK); 3980 & MPI2_DOORBELL_DATA_MASK);
3977 writel(0, &ioc->chip->HostInterruptStatus); 3981 writel(0, &ioc->chip->HostInterruptStatus);
3978 } 3982 }
3979 3983
3980 _base_wait_for_doorbell_int(ioc, 5, sleep_flag); 3984 _base_wait_for_doorbell_int(ioc, 5);
3981 if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) { 3985 if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
3982 dhsprintk(ioc, pr_info(MPT3SAS_FMT 3986 dhsprintk(ioc, pr_info(MPT3SAS_FMT
3983 "doorbell is in use (line=%d)\n", ioc->name, __LINE__)); 3987 "doorbell is in use (line=%d)\n", ioc->name, __LINE__));
3984 } 3988 }
@@ -4015,7 +4019,6 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
4015{ 4019{
4016 u16 smid; 4020 u16 smid;
4017 u32 ioc_state; 4021 u32 ioc_state;
4018 unsigned long timeleft;
4019 bool issue_reset = false; 4022 bool issue_reset = false;
4020 int rc; 4023 int rc;
4021 void *request; 4024 void *request;
@@ -4068,7 +4071,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
4068 ioc->ioc_link_reset_in_progress = 1; 4071 ioc->ioc_link_reset_in_progress = 1;
4069 init_completion(&ioc->base_cmds.done); 4072 init_completion(&ioc->base_cmds.done);
4070 mpt3sas_base_put_smid_default(ioc, smid); 4073 mpt3sas_base_put_smid_default(ioc, smid);
4071 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 4074 wait_for_completion_timeout(&ioc->base_cmds.done,
4072 msecs_to_jiffies(10000)); 4075 msecs_to_jiffies(10000));
4073 if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || 4076 if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
4074 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) && 4077 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
@@ -4093,8 +4096,7 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
4093 4096
4094 issue_host_reset: 4097 issue_host_reset:
4095 if (issue_reset) 4098 if (issue_reset)
4096 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 4099 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
4097 FORCE_BIG_HAMMER);
4098 ioc->base_cmds.status = MPT3_CMD_NOT_USED; 4100 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4099 rc = -EFAULT; 4101 rc = -EFAULT;
4100 out: 4102 out:
@@ -4119,7 +4121,6 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
4119{ 4121{
4120 u16 smid; 4122 u16 smid;
4121 u32 ioc_state; 4123 u32 ioc_state;
4122 unsigned long timeleft;
4123 bool issue_reset = false; 4124 bool issue_reset = false;
4124 int rc; 4125 int rc;
4125 void *request; 4126 void *request;
@@ -4170,7 +4171,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
4170 memcpy(request, mpi_request, sizeof(Mpi2SepReply_t)); 4171 memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
4171 init_completion(&ioc->base_cmds.done); 4172 init_completion(&ioc->base_cmds.done);
4172 mpt3sas_base_put_smid_default(ioc, smid); 4173 mpt3sas_base_put_smid_default(ioc, smid);
4173 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 4174 wait_for_completion_timeout(&ioc->base_cmds.done,
4174 msecs_to_jiffies(10000)); 4175 msecs_to_jiffies(10000));
4175 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { 4176 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4176 pr_err(MPT3SAS_FMT "%s: timeout\n", 4177 pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -4191,8 +4192,7 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
4191 4192
4192 issue_host_reset: 4193 issue_host_reset:
4193 if (issue_reset) 4194 if (issue_reset)
4194 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 4195 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
4195 FORCE_BIG_HAMMER);
4196 ioc->base_cmds.status = MPT3_CMD_NOT_USED; 4196 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4197 rc = -EFAULT; 4197 rc = -EFAULT;
4198 out: 4198 out:
@@ -4203,12 +4203,11 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
4203/** 4203/**
4204 * _base_get_port_facts - obtain port facts reply and save in ioc 4204 * _base_get_port_facts - obtain port facts reply and save in ioc
4205 * @ioc: per adapter object 4205 * @ioc: per adapter object
4206 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4207 * 4206 *
4208 * Returns 0 for success, non-zero for failure. 4207 * Returns 0 for success, non-zero for failure.
4209 */ 4208 */
4210static int 4209static int
4211_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag) 4210_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
4212{ 4211{
4213 Mpi2PortFactsRequest_t mpi_request; 4212 Mpi2PortFactsRequest_t mpi_request;
4214 Mpi2PortFactsReply_t mpi_reply; 4213 Mpi2PortFactsReply_t mpi_reply;
@@ -4224,7 +4223,7 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag)
4224 mpi_request.Function = MPI2_FUNCTION_PORT_FACTS; 4223 mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
4225 mpi_request.PortNumber = port; 4224 mpi_request.PortNumber = port;
4226 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz, 4225 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
4227 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP); 4226 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
4228 4227
4229 if (r != 0) { 4228 if (r != 0) {
4230 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n", 4229 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
@@ -4247,13 +4246,11 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag)
4247 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL 4246 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
4248 * @ioc: per adapter object 4247 * @ioc: per adapter object
4249 * @timeout: 4248 * @timeout:
4250 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4251 * 4249 *
4252 * Returns 0 for success, non-zero for failure. 4250 * Returns 0 for success, non-zero for failure.
4253 */ 4251 */
4254static int 4252static int
4255_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout, 4253_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
4256 int sleep_flag)
4257{ 4254{
4258 u32 ioc_state; 4255 u32 ioc_state;
4259 int rc; 4256 int rc;
@@ -4287,8 +4284,7 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout,
4287 goto issue_diag_reset; 4284 goto issue_diag_reset;
4288 } 4285 }
4289 4286
4290 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 4287 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
4291 timeout, sleep_flag);
4292 if (ioc_state) { 4288 if (ioc_state) {
4293 dfailprintk(ioc, printk(MPT3SAS_FMT 4289 dfailprintk(ioc, printk(MPT3SAS_FMT
4294 "%s: failed going to ready state (ioc_state=0x%x)\n", 4290 "%s: failed going to ready state (ioc_state=0x%x)\n",
@@ -4297,19 +4293,18 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout,
4297 } 4293 }
4298 4294
4299 issue_diag_reset: 4295 issue_diag_reset:
4300 rc = _base_diag_reset(ioc, sleep_flag); 4296 rc = _base_diag_reset(ioc);
4301 return rc; 4297 return rc;
4302} 4298}
4303 4299
4304/** 4300/**
4305 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc 4301 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
4306 * @ioc: per adapter object 4302 * @ioc: per adapter object
4307 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4308 * 4303 *
4309 * Returns 0 for success, non-zero for failure. 4304 * Returns 0 for success, non-zero for failure.
4310 */ 4305 */
4311static int 4306static int
4312_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) 4307_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
4313{ 4308{
4314 Mpi2IOCFactsRequest_t mpi_request; 4309 Mpi2IOCFactsRequest_t mpi_request;
4315 Mpi2IOCFactsReply_t mpi_reply; 4310 Mpi2IOCFactsReply_t mpi_reply;
@@ -4319,7 +4314,7 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4319 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 4314 dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
4320 __func__)); 4315 __func__));
4321 4316
4322 r = _base_wait_for_iocstate(ioc, 10, sleep_flag); 4317 r = _base_wait_for_iocstate(ioc, 10);
4323 if (r) { 4318 if (r) {
4324 dfailprintk(ioc, printk(MPT3SAS_FMT 4319 dfailprintk(ioc, printk(MPT3SAS_FMT
4325 "%s: failed getting to correct state\n", 4320 "%s: failed getting to correct state\n",
@@ -4331,7 +4326,7 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4331 memset(&mpi_request, 0, mpi_request_sz); 4326 memset(&mpi_request, 0, mpi_request_sz);
4332 mpi_request.Function = MPI2_FUNCTION_IOC_FACTS; 4327 mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
4333 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz, 4328 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
4334 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP); 4329 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
4335 4330
4336 if (r != 0) { 4331 if (r != 0) {
4337 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n", 4332 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
@@ -4391,12 +4386,11 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4391/** 4386/**
4392 * _base_send_ioc_init - send ioc_init to firmware 4387 * _base_send_ioc_init - send ioc_init to firmware
4393 * @ioc: per adapter object 4388 * @ioc: per adapter object
4394 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4395 * 4389 *
4396 * Returns 0 for success, non-zero for failure. 4390 * Returns 0 for success, non-zero for failure.
4397 */ 4391 */
4398static int 4392static int
4399_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) 4393_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
4400{ 4394{
4401 Mpi2IOCInitRequest_t mpi_request; 4395 Mpi2IOCInitRequest_t mpi_request;
4402 Mpi2IOCInitReply_t mpi_reply; 4396 Mpi2IOCInitReply_t mpi_reply;
@@ -4479,8 +4473,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4479 4473
4480 r = _base_handshake_req_reply_wait(ioc, 4474 r = _base_handshake_req_reply_wait(ioc,
4481 sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request, 4475 sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
4482 sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10, 4476 sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);
4483 sleep_flag);
4484 4477
4485 if (r != 0) { 4478 if (r != 0) {
4486 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n", 4479 pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
@@ -4555,16 +4548,14 @@ mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
4555/** 4548/**
4556 * _base_send_port_enable - send port_enable(discovery stuff) to firmware 4549 * _base_send_port_enable - send port_enable(discovery stuff) to firmware
4557 * @ioc: per adapter object 4550 * @ioc: per adapter object
4558 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4559 * 4551 *
4560 * Returns 0 for success, non-zero for failure. 4552 * Returns 0 for success, non-zero for failure.
4561 */ 4553 */
4562static int 4554static int
4563_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) 4555_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
4564{ 4556{
4565 Mpi2PortEnableRequest_t *mpi_request; 4557 Mpi2PortEnableRequest_t *mpi_request;
4566 Mpi2PortEnableReply_t *mpi_reply; 4558 Mpi2PortEnableReply_t *mpi_reply;
4567 unsigned long timeleft;
4568 int r = 0; 4559 int r = 0;
4569 u16 smid; 4560 u16 smid;
4570 u16 ioc_status; 4561 u16 ioc_status;
@@ -4592,8 +4583,7 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4592 4583
4593 init_completion(&ioc->port_enable_cmds.done); 4584 init_completion(&ioc->port_enable_cmds.done);
4594 mpt3sas_base_put_smid_default(ioc, smid); 4585 mpt3sas_base_put_smid_default(ioc, smid);
4595 timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done, 4586 wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
4596 300*HZ);
4597 if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) { 4587 if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
4598 pr_err(MPT3SAS_FMT "%s: timeout\n", 4588 pr_err(MPT3SAS_FMT "%s: timeout\n",
4599 ioc->name, __func__); 4589 ioc->name, __func__);
@@ -4737,15 +4727,13 @@ _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
4737/** 4727/**
4738 * _base_event_notification - send event notification 4728 * _base_event_notification - send event notification
4739 * @ioc: per adapter object 4729 * @ioc: per adapter object
4740 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4741 * 4730 *
4742 * Returns 0 for success, non-zero for failure. 4731 * Returns 0 for success, non-zero for failure.
4743 */ 4732 */
4744static int 4733static int
4745_base_event_notification(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) 4734_base_event_notification(struct MPT3SAS_ADAPTER *ioc)
4746{ 4735{
4747 Mpi2EventNotificationRequest_t *mpi_request; 4736 Mpi2EventNotificationRequest_t *mpi_request;
4748 unsigned long timeleft;
4749 u16 smid; 4737 u16 smid;
4750 int r = 0; 4738 int r = 0;
4751 int i; 4739 int i;
@@ -4777,7 +4765,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4777 cpu_to_le32(ioc->event_masks[i]); 4765 cpu_to_le32(ioc->event_masks[i]);
4778 init_completion(&ioc->base_cmds.done); 4766 init_completion(&ioc->base_cmds.done);
4779 mpt3sas_base_put_smid_default(ioc, smid); 4767 mpt3sas_base_put_smid_default(ioc, smid);
4780 timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ); 4768 wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
4781 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { 4769 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4782 pr_err(MPT3SAS_FMT "%s: timeout\n", 4770 pr_err(MPT3SAS_FMT "%s: timeout\n",
4783 ioc->name, __func__); 4771 ioc->name, __func__);
@@ -4827,19 +4815,18 @@ mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
4827 return; 4815 return;
4828 4816
4829 mutex_lock(&ioc->base_cmds.mutex); 4817 mutex_lock(&ioc->base_cmds.mutex);
4830 _base_event_notification(ioc, CAN_SLEEP); 4818 _base_event_notification(ioc);
4831 mutex_unlock(&ioc->base_cmds.mutex); 4819 mutex_unlock(&ioc->base_cmds.mutex);
4832} 4820}
4833 4821
4834/** 4822/**
4835 * _base_diag_reset - the "big hammer" start of day reset 4823 * _base_diag_reset - the "big hammer" start of day reset
4836 * @ioc: per adapter object 4824 * @ioc: per adapter object
4837 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4838 * 4825 *
4839 * Returns 0 for success, non-zero for failure. 4826 * Returns 0 for success, non-zero for failure.
4840 */ 4827 */
4841static int 4828static int
4842_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) 4829_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
4843{ 4830{
4844 u32 host_diagnostic; 4831 u32 host_diagnostic;
4845 u32 ioc_state; 4832 u32 ioc_state;
@@ -4867,10 +4854,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4867 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence); 4854 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
4868 4855
4869 /* wait 100 msec */ 4856 /* wait 100 msec */
4870 if (sleep_flag == CAN_SLEEP) 4857 msleep(100);
4871 msleep(100);
4872 else
4873 mdelay(100);
4874 4858
4875 if (count++ > 20) 4859 if (count++ > 20)
4876 goto out; 4860 goto out;
@@ -4890,10 +4874,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4890 &ioc->chip->HostDiagnostic); 4874 &ioc->chip->HostDiagnostic);
4891 4875
4892 /*This delay allows the chip PCIe hardware time to finish reset tasks*/ 4876 /*This delay allows the chip PCIe hardware time to finish reset tasks*/
4893 if (sleep_flag == CAN_SLEEP) 4877 msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
4894 msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
4895 else
4896 mdelay(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
4897 4878
4898 /* Approximately 300 second max wait */ 4879 /* Approximately 300 second max wait */
4899 for (count = 0; count < (300000000 / 4880 for (count = 0; count < (300000000 /
@@ -4906,13 +4887,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4906 if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER)) 4887 if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
4907 break; 4888 break;
4908 4889
4909 /* Wait to pass the second read delay window */ 4890 msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
4910 if (sleep_flag == CAN_SLEEP)
4911 msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
4912 / 1000);
4913 else
4914 mdelay(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
4915 / 1000);
4916 } 4891 }
4917 4892
4918 if (host_diagnostic & MPI2_DIAG_HCB_MODE) { 4893 if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
@@ -4941,8 +4916,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4941 4916
4942 drsprintk(ioc, pr_info(MPT3SAS_FMT 4917 drsprintk(ioc, pr_info(MPT3SAS_FMT
4943 "Wait for FW to go to the READY state\n", ioc->name)); 4918 "Wait for FW to go to the READY state\n", ioc->name));
4944 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20, 4919 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
4945 sleep_flag);
4946 if (ioc_state) { 4920 if (ioc_state) {
4947 pr_err(MPT3SAS_FMT 4921 pr_err(MPT3SAS_FMT
4948 "%s: failed going to ready state (ioc_state=0x%x)\n", 4922 "%s: failed going to ready state (ioc_state=0x%x)\n",
@@ -4961,14 +4935,12 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
4961/** 4935/**
4962 * _base_make_ioc_ready - put controller in READY state 4936 * _base_make_ioc_ready - put controller in READY state
4963 * @ioc: per adapter object 4937 * @ioc: per adapter object
4964 * @sleep_flag: CAN_SLEEP or NO_SLEEP
4965 * @type: FORCE_BIG_HAMMER or SOFT_RESET 4938 * @type: FORCE_BIG_HAMMER or SOFT_RESET
4966 * 4939 *
4967 * Returns 0 for success, non-zero for failure. 4940 * Returns 0 for success, non-zero for failure.
4968 */ 4941 */
4969static int 4942static int
4970_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag, 4943_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
4971 enum reset_type type)
4972{ 4944{
4973 u32 ioc_state; 4945 u32 ioc_state;
4974 int rc; 4946 int rc;
@@ -4995,10 +4967,7 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
4995 ioc->name, __func__, ioc_state); 4967 ioc->name, __func__, ioc_state);
4996 return -EFAULT; 4968 return -EFAULT;
4997 } 4969 }
4998 if (sleep_flag == CAN_SLEEP) 4970 ssleep(1);
4999 ssleep(1);
5000 else
5001 mdelay(1000);
5002 ioc_state = mpt3sas_base_get_iocstate(ioc, 0); 4971 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
5003 } 4972 }
5004 } 4973 }
@@ -5024,24 +4993,23 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
5024 4993
5025 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL) 4994 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
5026 if (!(_base_send_ioc_reset(ioc, 4995 if (!(_base_send_ioc_reset(ioc,
5027 MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) { 4996 MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) {
5028 return 0; 4997 return 0;
5029 } 4998 }
5030 4999
5031 issue_diag_reset: 5000 issue_diag_reset:
5032 rc = _base_diag_reset(ioc, CAN_SLEEP); 5001 rc = _base_diag_reset(ioc);
5033 return rc; 5002 return rc;
5034} 5003}
5035 5004
5036/** 5005/**
5037 * _base_make_ioc_operational - put controller in OPERATIONAL state 5006 * _base_make_ioc_operational - put controller in OPERATIONAL state
5038 * @ioc: per adapter object 5007 * @ioc: per adapter object
5039 * @sleep_flag: CAN_SLEEP or NO_SLEEP
5040 * 5008 *
5041 * Returns 0 for success, non-zero for failure. 5009 * Returns 0 for success, non-zero for failure.
5042 */ 5010 */
5043static int 5011static int
5044_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) 5012_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
5045{ 5013{
5046 int r, i, index; 5014 int r, i, index;
5047 unsigned long flags; 5015 unsigned long flags;
@@ -5160,7 +5128,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
5160 } 5128 }
5161 skip_init_reply_post_free_queue: 5129 skip_init_reply_post_free_queue:
5162 5130
5163 r = _base_send_ioc_init(ioc, sleep_flag); 5131 r = _base_send_ioc_init(ioc);
5164 if (r) 5132 if (r)
5165 return r; 5133 return r;
5166 5134
@@ -5186,13 +5154,11 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
5186 skip_init_reply_post_host_index: 5154 skip_init_reply_post_host_index:
5187 5155
5188 _base_unmask_interrupts(ioc); 5156 _base_unmask_interrupts(ioc);
5189 r = _base_event_notification(ioc, sleep_flag); 5157 r = _base_event_notification(ioc);
5190 if (r) 5158 if (r)
5191 return r; 5159 return r;
5192 5160
5193 if (sleep_flag == CAN_SLEEP) 5161 _base_static_config_pages(ioc);
5194 _base_static_config_pages(ioc);
5195
5196 5162
5197 if (ioc->is_driver_loading) { 5163 if (ioc->is_driver_loading) {
5198 5164
@@ -5211,7 +5177,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
5211 return r; /* scan_start and scan_finished support */ 5177 return r; /* scan_start and scan_finished support */
5212 } 5178 }
5213 5179
5214 r = _base_send_port_enable(ioc, sleep_flag); 5180 r = _base_send_port_enable(ioc);
5215 if (r) 5181 if (r)
5216 return r; 5182 return r;
5217 5183
@@ -5235,7 +5201,7 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
5235 if (ioc->chip_phys && ioc->chip) { 5201 if (ioc->chip_phys && ioc->chip) {
5236 _base_mask_interrupts(ioc); 5202 _base_mask_interrupts(ioc);
5237 ioc->shost_recovery = 1; 5203 ioc->shost_recovery = 1;
5238 _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); 5204 _base_make_ioc_ready(ioc, SOFT_RESET);
5239 ioc->shost_recovery = 0; 5205 ioc->shost_recovery = 0;
5240 } 5206 }
5241 5207
@@ -5292,7 +5258,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
5292 goto out_free_resources; 5258 goto out_free_resources;
5293 5259
5294 pci_set_drvdata(ioc->pdev, ioc->shost); 5260 pci_set_drvdata(ioc->pdev, ioc->shost);
5295 r = _base_get_ioc_facts(ioc, CAN_SLEEP); 5261 r = _base_get_ioc_facts(ioc);
5296 if (r) 5262 if (r)
5297 goto out_free_resources; 5263 goto out_free_resources;
5298 5264
@@ -5326,7 +5292,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
5326 ioc->build_sg_mpi = &_base_build_sg; 5292 ioc->build_sg_mpi = &_base_build_sg;
5327 ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge; 5293 ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
5328 5294
5329 r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); 5295 r = _base_make_ioc_ready(ioc, SOFT_RESET);
5330 if (r) 5296 if (r)
5331 goto out_free_resources; 5297 goto out_free_resources;
5332 5298
@@ -5338,12 +5304,12 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
5338 } 5304 }
5339 5305
5340 for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) { 5306 for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
5341 r = _base_get_port_facts(ioc, i, CAN_SLEEP); 5307 r = _base_get_port_facts(ioc, i);
5342 if (r) 5308 if (r)
5343 goto out_free_resources; 5309 goto out_free_resources;
5344 } 5310 }
5345 5311
5346 r = _base_allocate_memory_pools(ioc, CAN_SLEEP); 5312 r = _base_allocate_memory_pools(ioc);
5347 if (r) 5313 if (r)
5348 goto out_free_resources; 5314 goto out_free_resources;
5349 5315
@@ -5429,7 +5395,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
5429 if (ioc->hba_mpi_version_belonged == MPI26_VERSION) 5395 if (ioc->hba_mpi_version_belonged == MPI26_VERSION)
5430 _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION); 5396 _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
5431 5397
5432 r = _base_make_ioc_operational(ioc, CAN_SLEEP); 5398 r = _base_make_ioc_operational(ioc);
5433 if (r) 5399 if (r)
5434 goto out_free_resources; 5400 goto out_free_resources;
5435 5401
@@ -5565,21 +5531,18 @@ _base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
5565/** 5531/**
5566 * _wait_for_commands_to_complete - reset controller 5532 * _wait_for_commands_to_complete - reset controller
5567 * @ioc: Pointer to MPT_ADAPTER structure 5533 * @ioc: Pointer to MPT_ADAPTER structure
5568 * @sleep_flag: CAN_SLEEP or NO_SLEEP
5569 * 5534 *
5570 * This function waiting(3s) for all pending commands to complete 5535 * This function waiting(3s) for all pending commands to complete
5571 * prior to putting controller in reset. 5536 * prior to putting controller in reset.
5572 */ 5537 */
5573static void 5538static void
5574_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) 5539_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
5575{ 5540{
5576 u32 ioc_state; 5541 u32 ioc_state;
5577 unsigned long flags; 5542 unsigned long flags;
5578 u16 i; 5543 u16 i;
5579 5544
5580 ioc->pending_io_count = 0; 5545 ioc->pending_io_count = 0;
5581 if (sleep_flag != CAN_SLEEP)
5582 return;
5583 5546
5584 ioc_state = mpt3sas_base_get_iocstate(ioc, 0); 5547 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
5585 if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) 5548 if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
@@ -5602,13 +5565,12 @@ _wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
5602/** 5565/**
5603 * mpt3sas_base_hard_reset_handler - reset controller 5566 * mpt3sas_base_hard_reset_handler - reset controller
5604 * @ioc: Pointer to MPT_ADAPTER structure 5567 * @ioc: Pointer to MPT_ADAPTER structure
5605 * @sleep_flag: CAN_SLEEP or NO_SLEEP
5606 * @type: FORCE_BIG_HAMMER or SOFT_RESET 5568 * @type: FORCE_BIG_HAMMER or SOFT_RESET
5607 * 5569 *
5608 * Returns 0 for success, non-zero for failure. 5570 * Returns 0 for success, non-zero for failure.
5609 */ 5571 */
5610int 5572int
5611mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag, 5573mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
5612 enum reset_type type) 5574 enum reset_type type)
5613{ 5575{
5614 int r; 5576 int r;
@@ -5629,13 +5591,6 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
5629 if (mpt3sas_fwfault_debug) 5591 if (mpt3sas_fwfault_debug)
5630 mpt3sas_halt_firmware(ioc); 5592 mpt3sas_halt_firmware(ioc);
5631 5593
5632 /* TODO - What we really should be doing is pulling
5633 * out all the code associated with NO_SLEEP; its never used.
5634 * That is legacy code from mpt fusion driver, ported over.
5635 * I will leave this BUG_ON here for now till its been resolved.
5636 */
5637 BUG_ON(sleep_flag == NO_SLEEP);
5638
5639 /* wait for an active reset in progress to complete */ 5594 /* wait for an active reset in progress to complete */
5640 if (!mutex_trylock(&ioc->reset_in_progress_mutex)) { 5595 if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
5641 do { 5596 do {
@@ -5660,9 +5615,9 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
5660 is_fault = 1; 5615 is_fault = 1;
5661 } 5616 }
5662 _base_reset_handler(ioc, MPT3_IOC_PRE_RESET); 5617 _base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
5663 _wait_for_commands_to_complete(ioc, sleep_flag); 5618 _wait_for_commands_to_complete(ioc);
5664 _base_mask_interrupts(ioc); 5619 _base_mask_interrupts(ioc);
5665 r = _base_make_ioc_ready(ioc, sleep_flag, type); 5620 r = _base_make_ioc_ready(ioc, type);
5666 if (r) 5621 if (r)
5667 goto out; 5622 goto out;
5668 _base_reset_handler(ioc, MPT3_IOC_AFTER_RESET); 5623 _base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);
@@ -5675,7 +5630,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
5675 r = -EFAULT; 5630 r = -EFAULT;
5676 goto out; 5631 goto out;
5677 } 5632 }
5678 r = _base_get_ioc_facts(ioc, CAN_SLEEP); 5633 r = _base_get_ioc_facts(ioc);
5679 if (r) 5634 if (r)
5680 goto out; 5635 goto out;
5681 5636
@@ -5684,7 +5639,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
5684 "Please reboot the system and ensure that the correct" 5639 "Please reboot the system and ensure that the correct"
5685 " firmware version is running\n", ioc->name); 5640 " firmware version is running\n", ioc->name);
5686 5641
5687 r = _base_make_ioc_operational(ioc, sleep_flag); 5642 r = _base_make_ioc_operational(ioc);
5688 if (!r) 5643 if (!r)
5689 _base_reset_handler(ioc, MPT3_IOC_DONE_RESET); 5644 _base_reset_handler(ioc, MPT3_IOC_DONE_RESET);
5690 5645
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 892c9be008b5..3e71bc1b4a80 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -119,10 +119,6 @@
119 119
120#define MPT_MAX_CALLBACKS 32 120#define MPT_MAX_CALLBACKS 32
121 121
122
123#define CAN_SLEEP 1
124#define NO_SLEEP 0
125
126#define INTERNAL_CMDS_COUNT 10 /* reserved cmds */ 122#define INTERNAL_CMDS_COUNT 10 /* reserved cmds */
127/* reserved for issuing internally framed scsi io cmds */ 123/* reserved for issuing internally framed scsi io cmds */
128#define INTERNAL_SCSIIO_CMDS_COUNT 3 124#define INTERNAL_SCSIIO_CMDS_COUNT 3
@@ -478,7 +474,7 @@ struct _sas_device {
478 u8 pfa_led_on; 474 u8 pfa_led_on;
479 u8 pend_sas_rphy_add; 475 u8 pend_sas_rphy_add;
480 u8 enclosure_level; 476 u8 enclosure_level;
481 u8 connector_name[4]; 477 u8 connector_name[5];
482 struct kref refcount; 478 struct kref refcount;
483}; 479};
484 480
@@ -794,16 +790,6 @@ struct reply_post_struct {
794 dma_addr_t reply_post_free_dma; 790 dma_addr_t reply_post_free_dma;
795}; 791};
796 792
797/**
798 * enum mutex_type - task management mutex type
799 * @TM_MUTEX_OFF: mutex is not required becuase calling function is acquiring it
800 * @TM_MUTEX_ON: mutex is required
801 */
802enum mutex_type {
803 TM_MUTEX_OFF = 0,
804 TM_MUTEX_ON = 1,
805};
806
807typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); 793typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
808/** 794/**
809 * struct MPT3SAS_ADAPTER - per adapter struct 795 * struct MPT3SAS_ADAPTER - per adapter struct
@@ -1229,7 +1215,7 @@ int mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc);
1229void mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc); 1215void mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc);
1230int mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc); 1216int mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc);
1231void mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc); 1217void mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc);
1232int mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag, 1218int mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
1233 enum reset_type type); 1219 enum reset_type type);
1234 1220
1235void *mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid); 1221void *mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid);
@@ -1291,7 +1277,11 @@ void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
1291 1277
1292int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, 1278int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
1293 uint channel, uint id, uint lun, u8 type, u16 smid_task, 1279 uint channel, uint id, uint lun, u8 type, u16 smid_task,
1294 ulong timeout, enum mutex_type m_type); 1280 ulong timeout);
1281int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
1282 uint channel, uint id, uint lun, u8 type, u16 smid_task,
1283 ulong timeout);
1284
1295void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle); 1285void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
1296void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle); 1286void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
1297void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address); 1287void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index a6914ec99cc0..cebfd734fd76 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -285,7 +285,6 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
285{ 285{
286 u16 smid; 286 u16 smid;
287 u32 ioc_state; 287 u32 ioc_state;
288 unsigned long timeleft;
289 Mpi2ConfigRequest_t *config_request; 288 Mpi2ConfigRequest_t *config_request;
290 int r; 289 int r;
291 u8 retry_count, issue_host_reset = 0; 290 u8 retry_count, issue_host_reset = 0;
@@ -386,8 +385,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
386 _config_display_some_debug(ioc, smid, "config_request", NULL); 385 _config_display_some_debug(ioc, smid, "config_request", NULL);
387 init_completion(&ioc->config_cmds.done); 386 init_completion(&ioc->config_cmds.done);
388 mpt3sas_base_put_smid_default(ioc, smid); 387 mpt3sas_base_put_smid_default(ioc, smid);
389 timeleft = wait_for_completion_timeout(&ioc->config_cmds.done, 388 wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ);
390 timeout*HZ);
391 if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) { 389 if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) {
392 pr_err(MPT3SAS_FMT "%s: timeout\n", 390 pr_err(MPT3SAS_FMT "%s: timeout\n",
393 ioc->name, __func__); 391 ioc->name, __func__);
@@ -491,8 +489,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
491 mutex_unlock(&ioc->config_cmds.mutex); 489 mutex_unlock(&ioc->config_cmds.mutex);
492 490
493 if (issue_host_reset) 491 if (issue_host_reset)
494 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 492 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
495 FORCE_BIG_HAMMER);
496 return r; 493 return r;
497} 494}
498 495
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 7d00f09666b6..26cdc127ac89 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -518,7 +518,7 @@ mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
518 * 518 *
519 * Called when application request fasyn callback handler. 519 * Called when application request fasyn callback handler.
520 */ 520 */
521int 521static int
522_ctl_fasync(int fd, struct file *filep, int mode) 522_ctl_fasync(int fd, struct file *filep, int mode)
523{ 523{
524 return fasync_helper(fd, filep, mode, &async_queue); 524 return fasync_helper(fd, filep, mode, &async_queue);
@@ -530,7 +530,7 @@ _ctl_fasync(int fd, struct file *filep, int mode)
530 * @wait - 530 * @wait -
531 * 531 *
532 */ 532 */
533unsigned int 533static unsigned int
534_ctl_poll(struct file *filep, poll_table *wait) 534_ctl_poll(struct file *filep, poll_table *wait)
535{ 535{
536 struct MPT3SAS_ADAPTER *ioc; 536 struct MPT3SAS_ADAPTER *ioc;
@@ -641,9 +641,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
641 MPI2RequestHeader_t *mpi_request = NULL, *request; 641 MPI2RequestHeader_t *mpi_request = NULL, *request;
642 MPI2DefaultReply_t *mpi_reply; 642 MPI2DefaultReply_t *mpi_reply;
643 u32 ioc_state; 643 u32 ioc_state;
644 u16 ioc_status;
645 u16 smid; 644 u16 smid;
646 unsigned long timeout, timeleft; 645 unsigned long timeout;
647 u8 issue_reset; 646 u8 issue_reset;
648 u32 sz; 647 u32 sz;
649 void *psge; 648 void *psge;
@@ -914,8 +913,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
914 timeout = MPT3_IOCTL_DEFAULT_TIMEOUT; 913 timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
915 else 914 else
916 timeout = karg.timeout; 915 timeout = karg.timeout;
917 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, 916 wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ);
918 timeout*HZ);
919 if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) { 917 if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
920 Mpi2SCSITaskManagementRequest_t *tm_request = 918 Mpi2SCSITaskManagementRequest_t *tm_request =
921 (Mpi2SCSITaskManagementRequest_t *)mpi_request; 919 (Mpi2SCSITaskManagementRequest_t *)mpi_request;
@@ -938,7 +936,6 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
938 } 936 }
939 937
940 mpi_reply = ioc->ctl_cmds.reply; 938 mpi_reply = ioc->ctl_cmds.reply;
941 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
942 939
943 if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT && 940 if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT &&
944 (ioc->logging_level & MPT_DEBUG_TM)) { 941 (ioc->logging_level & MPT_DEBUG_TM)) {
@@ -1001,13 +998,11 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
1001 ioc->name, 998 ioc->name,
1002 le16_to_cpu(mpi_request->FunctionDependent1)); 999 le16_to_cpu(mpi_request->FunctionDependent1));
1003 mpt3sas_halt_firmware(ioc); 1000 mpt3sas_halt_firmware(ioc);
1004 mpt3sas_scsih_issue_tm(ioc, 1001 mpt3sas_scsih_issue_locked_tm(ioc,
1005 le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, 1002 le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
1006 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30, 1003 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30);
1007 TM_MUTEX_ON);
1008 } else 1004 } else
1009 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 1005 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1010 FORCE_BIG_HAMMER);
1011 } 1006 }
1012 1007
1013 out: 1008 out:
@@ -1220,8 +1215,7 @@ _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1220 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name, 1215 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
1221 __func__)); 1216 __func__));
1222 1217
1223 retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 1218 retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1224 FORCE_BIG_HAMMER);
1225 pr_info(MPT3SAS_FMT "host reset: %s\n", 1219 pr_info(MPT3SAS_FMT "host reset: %s\n",
1226 ioc->name, ((!retval) ? "SUCCESS" : "FAILED")); 1220 ioc->name, ((!retval) ? "SUCCESS" : "FAILED"));
1227 return 0; 1221 return 0;
@@ -1381,7 +1375,6 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
1381 Mpi2DiagBufferPostRequest_t *mpi_request; 1375 Mpi2DiagBufferPostRequest_t *mpi_request;
1382 Mpi2DiagBufferPostReply_t *mpi_reply; 1376 Mpi2DiagBufferPostReply_t *mpi_reply;
1383 u8 buffer_type; 1377 u8 buffer_type;
1384 unsigned long timeleft;
1385 u16 smid; 1378 u16 smid;
1386 u16 ioc_status; 1379 u16 ioc_status;
1387 u32 ioc_state; 1380 u32 ioc_state;
@@ -1499,7 +1492,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
1499 1492
1500 init_completion(&ioc->ctl_cmds.done); 1493 init_completion(&ioc->ctl_cmds.done);
1501 mpt3sas_base_put_smid_default(ioc, smid); 1494 mpt3sas_base_put_smid_default(ioc, smid);
1502 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, 1495 wait_for_completion_timeout(&ioc->ctl_cmds.done,
1503 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); 1496 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
1504 1497
1505 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { 1498 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -1538,8 +1531,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
1538 1531
1539 issue_host_reset: 1532 issue_host_reset:
1540 if (issue_reset) 1533 if (issue_reset)
1541 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 1534 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1542 FORCE_BIG_HAMMER);
1543 1535
1544 out: 1536 out:
1545 1537
@@ -1800,7 +1792,6 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
1800 u16 ioc_status; 1792 u16 ioc_status;
1801 u32 ioc_state; 1793 u32 ioc_state;
1802 int rc; 1794 int rc;
1803 unsigned long timeleft;
1804 1795
1805 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, 1796 dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
1806 __func__)); 1797 __func__));
@@ -1848,7 +1839,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
1848 1839
1849 init_completion(&ioc->ctl_cmds.done); 1840 init_completion(&ioc->ctl_cmds.done);
1850 mpt3sas_base_put_smid_default(ioc, smid); 1841 mpt3sas_base_put_smid_default(ioc, smid);
1851 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, 1842 wait_for_completion_timeout(&ioc->ctl_cmds.done,
1852 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); 1843 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
1853 1844
1854 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { 1845 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -1974,8 +1965,7 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1974 rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset); 1965 rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset);
1975 1966
1976 if (issue_reset) 1967 if (issue_reset)
1977 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 1968 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1978 FORCE_BIG_HAMMER);
1979 1969
1980 return rc; 1970 return rc;
1981} 1971}
@@ -1995,7 +1985,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
1995 Mpi2DiagBufferPostReply_t *mpi_reply; 1985 Mpi2DiagBufferPostReply_t *mpi_reply;
1996 int rc, i; 1986 int rc, i;
1997 u8 buffer_type; 1987 u8 buffer_type;
1998 unsigned long timeleft, request_size, copy_size; 1988 unsigned long request_size, copy_size;
1999 u16 smid; 1989 u16 smid;
2000 u16 ioc_status; 1990 u16 ioc_status;
2001 u8 issue_reset = 0; 1991 u8 issue_reset = 0;
@@ -2116,7 +2106,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
2116 2106
2117 init_completion(&ioc->ctl_cmds.done); 2107 init_completion(&ioc->ctl_cmds.done);
2118 mpt3sas_base_put_smid_default(ioc, smid); 2108 mpt3sas_base_put_smid_default(ioc, smid);
2119 timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, 2109 wait_for_completion_timeout(&ioc->ctl_cmds.done,
2120 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); 2110 MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
2121 2111
2122 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { 2112 if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
@@ -2155,8 +2145,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
2155 2145
2156 issue_host_reset: 2146 issue_host_reset:
2157 if (issue_reset) 2147 if (issue_reset)
2158 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 2148 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2159 FORCE_BIG_HAMMER);
2160 2149
2161 out: 2150 out:
2162 2151
@@ -2352,7 +2341,7 @@ out_unlock_pciaccess:
2352 * @cmd - ioctl opcode 2341 * @cmd - ioctl opcode
2353 * @arg - 2342 * @arg -
2354 */ 2343 */
2355long 2344static long
2356_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 2345_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2357{ 2346{
2358 long ret; 2347 long ret;
@@ -2372,7 +2361,7 @@ _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2372 * @cmd - ioctl opcode 2361 * @cmd - ioctl opcode
2373 * @arg - 2362 * @arg -
2374 */ 2363 */
2375long 2364static long
2376_ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg) 2365_ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2377{ 2366{
2378 long ret; 2367 long ret;
@@ -2392,7 +2381,7 @@ _ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2392 * 2381 *
2393 * This routine handles 32 bit applications in 64bit os. 2382 * This routine handles 32 bit applications in 64bit os.
2394 */ 2383 */
2395long 2384static long
2396_ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) 2385_ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
2397{ 2386{
2398 long ret; 2387 long ret;
@@ -2410,7 +2399,7 @@ _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
2410 * 2399 *
2411 * This routine handles 32 bit applications in 64bit os. 2400 * This routine handles 32 bit applications in 64bit os.
2412 */ 2401 */
2413long 2402static long
2414_ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) 2403_ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
2415{ 2404{
2416 long ret; 2405 long ret;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index cd91a684c945..209a969a979d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1195,7 +1195,7 @@ _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
1195 * 1195 *
1196 * Returns queue depth. 1196 * Returns queue depth.
1197 */ 1197 */
1198int 1198static int
1199scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) 1199scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1200{ 1200{
1201 struct Scsi_Host *shost = sdev->host; 1201 struct Scsi_Host *shost = sdev->host;
@@ -1244,7 +1244,7 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1244 * Returns 0 if ok. Any other return is assumed to be an error and 1244 * Returns 0 if ok. Any other return is assumed to be an error and
1245 * the device is ignored. 1245 * the device is ignored.
1246 */ 1246 */
1247int 1247static int
1248scsih_target_alloc(struct scsi_target *starget) 1248scsih_target_alloc(struct scsi_target *starget)
1249{ 1249{
1250 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 1250 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
@@ -1311,7 +1311,7 @@ scsih_target_alloc(struct scsi_target *starget)
1311 * 1311 *
1312 * Returns nothing. 1312 * Returns nothing.
1313 */ 1313 */
1314void 1314static void
1315scsih_target_destroy(struct scsi_target *starget) 1315scsih_target_destroy(struct scsi_target *starget)
1316{ 1316{
1317 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 1317 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
@@ -1320,7 +1320,6 @@ scsih_target_destroy(struct scsi_target *starget)
1320 struct _sas_device *sas_device; 1320 struct _sas_device *sas_device;
1321 struct _raid_device *raid_device; 1321 struct _raid_device *raid_device;
1322 unsigned long flags; 1322 unsigned long flags;
1323 struct sas_rphy *rphy;
1324 1323
1325 sas_target_priv_data = starget->hostdata; 1324 sas_target_priv_data = starget->hostdata;
1326 if (!sas_target_priv_data) 1325 if (!sas_target_priv_data)
@@ -1339,7 +1338,6 @@ scsih_target_destroy(struct scsi_target *starget)
1339 } 1338 }
1340 1339
1341 spin_lock_irqsave(&ioc->sas_device_lock, flags); 1340 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1342 rphy = dev_to_rphy(starget->dev.parent);
1343 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data); 1341 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1344 if (sas_device && (sas_device->starget == starget) && 1342 if (sas_device && (sas_device->starget == starget) &&
1345 (sas_device->id == starget->id) && 1343 (sas_device->id == starget->id) &&
@@ -1369,7 +1367,7 @@ scsih_target_destroy(struct scsi_target *starget)
1369 * Returns 0 if ok. Any other return is assumed to be an error and 1367 * Returns 0 if ok. Any other return is assumed to be an error and
1370 * the device is ignored. 1368 * the device is ignored.
1371 */ 1369 */
1372int 1370static int
1373scsih_slave_alloc(struct scsi_device *sdev) 1371scsih_slave_alloc(struct scsi_device *sdev)
1374{ 1372{
1375 struct Scsi_Host *shost; 1373 struct Scsi_Host *shost;
@@ -1434,7 +1432,7 @@ scsih_slave_alloc(struct scsi_device *sdev)
1434 * 1432 *
1435 * Returns nothing. 1433 * Returns nothing.
1436 */ 1434 */
1437void 1435static void
1438scsih_slave_destroy(struct scsi_device *sdev) 1436scsih_slave_destroy(struct scsi_device *sdev)
1439{ 1437{
1440 struct MPT3SAS_TARGET *sas_target_priv_data; 1438 struct MPT3SAS_TARGET *sas_target_priv_data;
@@ -1527,7 +1525,7 @@ _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
1527 * scsih_is_raid - return boolean indicating device is raid volume 1525 * scsih_is_raid - return boolean indicating device is raid volume
1528 * @dev the device struct object 1526 * @dev the device struct object
1529 */ 1527 */
1530int 1528static int
1531scsih_is_raid(struct device *dev) 1529scsih_is_raid(struct device *dev)
1532{ 1530{
1533 struct scsi_device *sdev = to_scsi_device(dev); 1531 struct scsi_device *sdev = to_scsi_device(dev);
@@ -1542,7 +1540,7 @@ scsih_is_raid(struct device *dev)
1542 * scsih_get_resync - get raid volume resync percent complete 1540 * scsih_get_resync - get raid volume resync percent complete
1543 * @dev the device struct object 1541 * @dev the device struct object
1544 */ 1542 */
1545void 1543static void
1546scsih_get_resync(struct device *dev) 1544scsih_get_resync(struct device *dev)
1547{ 1545{
1548 struct scsi_device *sdev = to_scsi_device(dev); 1546 struct scsi_device *sdev = to_scsi_device(dev);
@@ -1603,7 +1601,7 @@ scsih_get_resync(struct device *dev)
1603 * scsih_get_state - get raid volume level 1601 * scsih_get_state - get raid volume level
1604 * @dev the device struct object 1602 * @dev the device struct object
1605 */ 1603 */
1606void 1604static void
1607scsih_get_state(struct device *dev) 1605scsih_get_state(struct device *dev)
1608{ 1606{
1609 struct scsi_device *sdev = to_scsi_device(dev); 1607 struct scsi_device *sdev = to_scsi_device(dev);
@@ -1805,7 +1803,7 @@ _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
1805 * Returns 0 if ok. Any other return is assumed to be an error and 1803 * Returns 0 if ok. Any other return is assumed to be an error and
1806 * the device is ignored. 1804 * the device is ignored.
1807 */ 1805 */
1808int 1806static int
1809scsih_slave_configure(struct scsi_device *sdev) 1807scsih_slave_configure(struct scsi_device *sdev)
1810{ 1808{
1811 struct Scsi_Host *shost = sdev->host; 1809 struct Scsi_Host *shost = sdev->host;
@@ -2021,7 +2019,7 @@ scsih_slave_configure(struct scsi_device *sdev)
2021 * 2019 *
2022 * Return nothing. 2020 * Return nothing.
2023 */ 2021 */
2024int 2022static int
2025scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, 2023scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2026 sector_t capacity, int params[]) 2024 sector_t capacity, int params[])
2027{ 2025{
@@ -2201,7 +2199,6 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2201 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) 2199 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2202 * @smid_task: smid assigned to the task 2200 * @smid_task: smid assigned to the task
2203 * @timeout: timeout in seconds 2201 * @timeout: timeout in seconds
2204 * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
2205 * Context: user 2202 * Context: user
2206 * 2203 *
2207 * A generic API for sending task management requests to firmware. 2204 * A generic API for sending task management requests to firmware.
@@ -2212,60 +2209,51 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2212 */ 2209 */
2213int 2210int
2214mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel, 2211mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
2215 uint id, uint lun, u8 type, u16 smid_task, ulong timeout, 2212 uint id, uint lun, u8 type, u16 smid_task, ulong timeout)
2216 enum mutex_type m_type)
2217{ 2213{
2218 Mpi2SCSITaskManagementRequest_t *mpi_request; 2214 Mpi2SCSITaskManagementRequest_t *mpi_request;
2219 Mpi2SCSITaskManagementReply_t *mpi_reply; 2215 Mpi2SCSITaskManagementReply_t *mpi_reply;
2220 u16 smid = 0; 2216 u16 smid = 0;
2221 u32 ioc_state; 2217 u32 ioc_state;
2222 unsigned long timeleft;
2223 struct scsiio_tracker *scsi_lookup = NULL; 2218 struct scsiio_tracker *scsi_lookup = NULL;
2224 int rc; 2219 int rc;
2225 u16 msix_task = 0; 2220 u16 msix_task = 0;
2226 2221
2227 if (m_type == TM_MUTEX_ON) 2222 lockdep_assert_held(&ioc->tm_cmds.mutex);
2228 mutex_lock(&ioc->tm_cmds.mutex); 2223
2229 if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) { 2224 if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
2230 pr_info(MPT3SAS_FMT "%s: tm_cmd busy!!!\n", 2225 pr_info(MPT3SAS_FMT "%s: tm_cmd busy!!!\n",
2231 __func__, ioc->name); 2226 __func__, ioc->name);
2232 rc = FAILED; 2227 return FAILED;
2233 goto err_out;
2234 } 2228 }
2235 2229
2236 if (ioc->shost_recovery || ioc->remove_host || 2230 if (ioc->shost_recovery || ioc->remove_host ||
2237 ioc->pci_error_recovery) { 2231 ioc->pci_error_recovery) {
2238 pr_info(MPT3SAS_FMT "%s: host reset in progress!\n", 2232 pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
2239 __func__, ioc->name); 2233 __func__, ioc->name);
2240 rc = FAILED; 2234 return FAILED;
2241 goto err_out;
2242 } 2235 }
2243 2236
2244 ioc_state = mpt3sas_base_get_iocstate(ioc, 0); 2237 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
2245 if (ioc_state & MPI2_DOORBELL_USED) { 2238 if (ioc_state & MPI2_DOORBELL_USED) {
2246 dhsprintk(ioc, pr_info(MPT3SAS_FMT 2239 dhsprintk(ioc, pr_info(MPT3SAS_FMT
2247 "unexpected doorbell active!\n", ioc->name)); 2240 "unexpected doorbell active!\n", ioc->name));
2248 rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 2241 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2249 FORCE_BIG_HAMMER); 2242 return (!rc) ? SUCCESS : FAILED;
2250 rc = (!rc) ? SUCCESS : FAILED;
2251 goto err_out;
2252 } 2243 }
2253 2244
2254 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { 2245 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
2255 mpt3sas_base_fault_info(ioc, ioc_state & 2246 mpt3sas_base_fault_info(ioc, ioc_state &
2256 MPI2_DOORBELL_DATA_MASK); 2247 MPI2_DOORBELL_DATA_MASK);
2257 rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 2248 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2258 FORCE_BIG_HAMMER); 2249 return (!rc) ? SUCCESS : FAILED;
2259 rc = (!rc) ? SUCCESS : FAILED;
2260 goto err_out;
2261 } 2250 }
2262 2251
2263 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx); 2252 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
2264 if (!smid) { 2253 if (!smid) {
2265 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", 2254 pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
2266 ioc->name, __func__); 2255 ioc->name, __func__);
2267 rc = FAILED; 2256 return FAILED;
2268 goto err_out;
2269 } 2257 }
2270 2258
2271 if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) 2259 if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
@@ -2292,19 +2280,17 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
2292 else 2280 else
2293 msix_task = 0; 2281 msix_task = 0;
2294 mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task); 2282 mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
2295 timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ); 2283 wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
2296 if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) { 2284 if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
2297 pr_err(MPT3SAS_FMT "%s: timeout\n", 2285 pr_err(MPT3SAS_FMT "%s: timeout\n",
2298 ioc->name, __func__); 2286 ioc->name, __func__);
2299 _debug_dump_mf(mpi_request, 2287 _debug_dump_mf(mpi_request,
2300 sizeof(Mpi2SCSITaskManagementRequest_t)/4); 2288 sizeof(Mpi2SCSITaskManagementRequest_t)/4);
2301 if (!(ioc->tm_cmds.status & MPT3_CMD_RESET)) { 2289 if (!(ioc->tm_cmds.status & MPT3_CMD_RESET)) {
2302 rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 2290 rc = mpt3sas_base_hard_reset_handler(ioc,
2303 FORCE_BIG_HAMMER); 2291 FORCE_BIG_HAMMER);
2304 rc = (!rc) ? SUCCESS : FAILED; 2292 rc = (!rc) ? SUCCESS : FAILED;
2305 ioc->tm_cmds.status = MPT3_CMD_NOT_USED; 2293 goto out;
2306 mpt3sas_scsih_clear_tm_flag(ioc, handle);
2307 goto err_out;
2308 } 2294 }
2309 } 2295 }
2310 2296
@@ -2356,17 +2342,23 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
2356 break; 2342 break;
2357 } 2343 }
2358 2344
2345out:
2359 mpt3sas_scsih_clear_tm_flag(ioc, handle); 2346 mpt3sas_scsih_clear_tm_flag(ioc, handle);
2360 ioc->tm_cmds.status = MPT3_CMD_NOT_USED; 2347 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
2361 if (m_type == TM_MUTEX_ON)
2362 mutex_unlock(&ioc->tm_cmds.mutex);
2363
2364 return rc; 2348 return rc;
2349}
2365 2350
2366 err_out: 2351int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2367 if (m_type == TM_MUTEX_ON) 2352 uint channel, uint id, uint lun, u8 type, u16 smid_task, ulong timeout)
2368 mutex_unlock(&ioc->tm_cmds.mutex); 2353{
2369 return rc; 2354 int ret;
2355
2356 mutex_lock(&ioc->tm_cmds.mutex);
2357 ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
2358 smid_task, timeout);
2359 mutex_unlock(&ioc->tm_cmds.mutex);
2360
2361 return ret;
2370} 2362}
2371 2363
2372/** 2364/**
@@ -2439,7 +2431,7 @@ _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
2439 * 2431 *
2440 * Returns SUCCESS if command aborted else FAILED 2432 * Returns SUCCESS if command aborted else FAILED
2441 */ 2433 */
2442int 2434static int
2443scsih_abort(struct scsi_cmnd *scmd) 2435scsih_abort(struct scsi_cmnd *scmd)
2444{ 2436{
2445 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 2437 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
@@ -2482,9 +2474,9 @@ scsih_abort(struct scsi_cmnd *scmd)
2482 mpt3sas_halt_firmware(ioc); 2474 mpt3sas_halt_firmware(ioc);
2483 2475
2484 handle = sas_device_priv_data->sas_target->handle; 2476 handle = sas_device_priv_data->sas_target->handle;
2485 r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel, 2477 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
2486 scmd->device->id, scmd->device->lun, 2478 scmd->device->id, scmd->device->lun,
2487 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, TM_MUTEX_ON); 2479 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30);
2488 2480
2489 out: 2481 out:
2490 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n", 2482 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
@@ -2498,7 +2490,7 @@ scsih_abort(struct scsi_cmnd *scmd)
2498 * 2490 *
2499 * Returns SUCCESS if command aborted else FAILED 2491 * Returns SUCCESS if command aborted else FAILED
2500 */ 2492 */
2501int 2493static int
2502scsih_dev_reset(struct scsi_cmnd *scmd) 2494scsih_dev_reset(struct scsi_cmnd *scmd)
2503{ 2495{
2504 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 2496 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
@@ -2541,9 +2533,9 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
2541 goto out; 2533 goto out;
2542 } 2534 }
2543 2535
2544 r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel, 2536 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
2545 scmd->device->id, scmd->device->lun, 2537 scmd->device->id, scmd->device->lun,
2546 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, TM_MUTEX_ON); 2538 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30);
2547 2539
2548 out: 2540 out:
2549 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n", 2541 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
@@ -2561,7 +2553,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
2561 * 2553 *
2562 * Returns SUCCESS if command aborted else FAILED 2554 * Returns SUCCESS if command aborted else FAILED
2563 */ 2555 */
2564int 2556static int
2565scsih_target_reset(struct scsi_cmnd *scmd) 2557scsih_target_reset(struct scsi_cmnd *scmd)
2566{ 2558{
2567 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 2559 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
@@ -2603,9 +2595,9 @@ scsih_target_reset(struct scsi_cmnd *scmd)
2603 goto out; 2595 goto out;
2604 } 2596 }
2605 2597
2606 r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel, 2598 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
2607 scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 2599 scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
2608 30, TM_MUTEX_ON); 2600 30);
2609 2601
2610 out: 2602 out:
2611 starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", 2603 starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
@@ -2624,7 +2616,7 @@ scsih_target_reset(struct scsi_cmnd *scmd)
2624 * 2616 *
2625 * Returns SUCCESS if command aborted else FAILED 2617 * Returns SUCCESS if command aborted else FAILED
2626 */ 2618 */
2627int 2619static int
2628scsih_host_reset(struct scsi_cmnd *scmd) 2620scsih_host_reset(struct scsi_cmnd *scmd)
2629{ 2621{
2630 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); 2622 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
@@ -2641,8 +2633,7 @@ scsih_host_reset(struct scsi_cmnd *scmd)
2641 goto out; 2633 goto out;
2642 } 2634 }
2643 2635
2644 retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 2636 retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2645 FORCE_BIG_HAMMER);
2646 r = (retval < 0) ? FAILED : SUCCESS; 2637 r = (retval < 0) ? FAILED : SUCCESS;
2647out: 2638out:
2648 pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n", 2639 pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n",
@@ -3455,7 +3446,7 @@ _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3455 * 3446 *
3456 * Context - processed in interrupt context. 3447 * Context - processed in interrupt context.
3457 */ 3448 */
3458void 3449static void
3459_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 event, 3450_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 event,
3460 u32 event_context) 3451 u32 event_context)
3461{ 3452{
@@ -3494,7 +3485,7 @@ _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 event,
3494 * 3485 *
3495 * Context - processed in interrupt context. 3486 * Context - processed in interrupt context.
3496 */ 3487 */
3497void 3488static void
3498_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc, 3489_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
3499 u16 smid, u16 handle) 3490 u16 smid, u16 handle)
3500 { 3491 {
@@ -4032,7 +4023,7 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
4032 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or 4023 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
4033 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full 4024 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
4034 */ 4025 */
4035int 4026static int
4036scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) 4027scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4037{ 4028{
4038 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 4029 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
@@ -4701,7 +4692,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
4701 le16_to_cpu(mpi_reply->DevHandle)); 4692 le16_to_cpu(mpi_reply->DevHandle));
4702 mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq); 4693 mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
4703 4694
4704 if (!(ioc->logging_level & MPT_DEBUG_REPLY) && 4695 if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
4705 ((scmd->sense_buffer[2] == UNIT_ATTENTION) || 4696 ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
4706 (scmd->sense_buffer[2] == MEDIUM_ERROR) || 4697 (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
4707 (scmd->sense_buffer[2] == HARDWARE_ERROR))) 4698 (scmd->sense_buffer[2] == HARDWARE_ERROR)))
@@ -5380,8 +5371,9 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
5380 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { 5371 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
5381 sas_device->enclosure_level = 5372 sas_device->enclosure_level =
5382 le16_to_cpu(sas_device_pg0.EnclosureLevel); 5373 le16_to_cpu(sas_device_pg0.EnclosureLevel);
5383 memcpy(&sas_device->connector_name[0], 5374 memcpy(sas_device->connector_name,
5384 &sas_device_pg0.ConnectorName[0], 4); 5375 sas_device_pg0.ConnectorName, 4);
5376 sas_device->connector_name[4] = '\0';
5385 } else { 5377 } else {
5386 sas_device->enclosure_level = 0; 5378 sas_device->enclosure_level = 0;
5387 sas_device->connector_name[0] = '\0'; 5379 sas_device->connector_name[0] = '\0';
@@ -5508,8 +5500,9 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
5508 if (sas_device_pg0.Flags & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { 5500 if (sas_device_pg0.Flags & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
5509 sas_device->enclosure_level = 5501 sas_device->enclosure_level =
5510 le16_to_cpu(sas_device_pg0.EnclosureLevel); 5502 le16_to_cpu(sas_device_pg0.EnclosureLevel);
5511 memcpy(&sas_device->connector_name[0], 5503 memcpy(sas_device->connector_name,
5512 &sas_device_pg0.ConnectorName[0], 4); 5504 sas_device_pg0.ConnectorName, 4);
5505 sas_device->connector_name[4] = '\0';
5513 } else { 5506 } else {
5514 sas_device->enclosure_level = 0; 5507 sas_device->enclosure_level = 0;
5515 sas_device->connector_name[0] = '\0'; 5508 sas_device->connector_name[0] = '\0';
@@ -6087,8 +6080,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
6087 6080
6088 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); 6081 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
6089 r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun, 6082 r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
6090 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 6083 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30);
6091 TM_MUTEX_OFF);
6092 if (r == FAILED) { 6084 if (r == FAILED) {
6093 sdev_printk(KERN_WARNING, sdev, 6085 sdev_printk(KERN_WARNING, sdev,
6094 "mpt3sas_scsih_issue_tm: FAILED when sending " 6086 "mpt3sas_scsih_issue_tm: FAILED when sending "
@@ -6128,8 +6120,8 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
6128 goto out_no_lock; 6120 goto out_no_lock;
6129 6121
6130 r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, 6122 r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
6131 sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, 6123 sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid,
6132 TM_MUTEX_OFF); 6124 30);
6133 if (r == FAILED) { 6125 if (r == FAILED) {
6134 sdev_printk(KERN_WARNING, sdev, 6126 sdev_printk(KERN_WARNING, sdev,
6135 "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : " 6127 "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
@@ -6297,8 +6289,7 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
6297 mutex_unlock(&ioc->scsih_cmds.mutex); 6289 mutex_unlock(&ioc->scsih_cmds.mutex);
6298 6290
6299 if (issue_reset) 6291 if (issue_reset)
6300 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 6292 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
6301 FORCE_BIG_HAMMER);
6302 return rc; 6293 return rc;
6303} 6294}
6304 6295
@@ -6311,11 +6302,10 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
6311static void 6302static void
6312_scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach) 6303_scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
6313{ 6304{
6314 int rc;
6315 sdev->no_uld_attach = no_uld_attach ? 1 : 0; 6305 sdev->no_uld_attach = no_uld_attach ? 1 : 0;
6316 sdev_printk(KERN_INFO, sdev, "%s raid component\n", 6306 sdev_printk(KERN_INFO, sdev, "%s raid component\n",
6317 sdev->no_uld_attach ? "hidding" : "exposing"); 6307 sdev->no_uld_attach ? "hidding" : "exposing");
6318 rc = scsi_device_reprobe(sdev); 6308 WARN_ON(scsi_device_reprobe(sdev));
6319} 6309}
6320 6310
6321/** 6311/**
@@ -8137,7 +8127,7 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
8137 * Routine called when unloading the driver. 8127 * Routine called when unloading the driver.
8138 * Return nothing. 8128 * Return nothing.
8139 */ 8129 */
8140void scsih_remove(struct pci_dev *pdev) 8130static void scsih_remove(struct pci_dev *pdev)
8141{ 8131{
8142 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8132 struct Scsi_Host *shost = pci_get_drvdata(pdev);
8143 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 8133 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
@@ -8210,7 +8200,7 @@ void scsih_remove(struct pci_dev *pdev)
8210 * 8200 *
8211 * Return nothing. 8201 * Return nothing.
8212 */ 8202 */
8213void 8203static void
8214scsih_shutdown(struct pci_dev *pdev) 8204scsih_shutdown(struct pci_dev *pdev)
8215{ 8205{
8216 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8206 struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -8451,7 +8441,7 @@ _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
8451 * of scanning the entire bus. In our implemention, we will kick off 8441 * of scanning the entire bus. In our implemention, we will kick off
8452 * firmware discovery. 8442 * firmware discovery.
8453 */ 8443 */
8454void 8444static void
8455scsih_scan_start(struct Scsi_Host *shost) 8445scsih_scan_start(struct Scsi_Host *shost)
8456{ 8446{
8457 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 8447 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
@@ -8478,7 +8468,7 @@ scsih_scan_start(struct Scsi_Host *shost)
8478 * scsi_host and the elapsed time of the scan in jiffies. In our implemention, 8468 * scsi_host and the elapsed time of the scan in jiffies. In our implemention,
8479 * we wait for firmware discovery to complete, then return 1. 8469 * we wait for firmware discovery to complete, then return 1.
8480 */ 8470 */
8481int 8471static int
8482scsih_scan_finished(struct Scsi_Host *shost, unsigned long time) 8472scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
8483{ 8473{
8484 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 8474 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
@@ -8608,7 +8598,7 @@ static struct raid_function_template mpt3sas_raid_functions = {
8608 * MPI25_VERSION for SAS 3.0 HBA devices, and 8598 * MPI25_VERSION for SAS 3.0 HBA devices, and
8609 * MPI26 VERSION for Cutlass & Invader SAS 3.0 HBA devices 8599 * MPI26 VERSION for Cutlass & Invader SAS 3.0 HBA devices
8610 */ 8600 */
8611u16 8601static u16
8612_scsih_determine_hba_mpi_version(struct pci_dev *pdev) 8602_scsih_determine_hba_mpi_version(struct pci_dev *pdev)
8613{ 8603{
8614 8604
@@ -8660,7 +8650,7 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
8660 * 8650 *
8661 * Returns 0 success, anything else error. 8651 * Returns 0 success, anything else error.
8662 */ 8652 */
8663int 8653static int
8664_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) 8654_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
8665{ 8655{
8666 struct MPT3SAS_ADAPTER *ioc; 8656 struct MPT3SAS_ADAPTER *ioc;
@@ -8869,7 +8859,7 @@ out_add_shost_fail:
8869 * 8859 *
8870 * Returns 0 success, anything else error. 8860 * Returns 0 success, anything else error.
8871 */ 8861 */
8872int 8862static int
8873scsih_suspend(struct pci_dev *pdev, pm_message_t state) 8863scsih_suspend(struct pci_dev *pdev, pm_message_t state)
8874{ 8864{
8875 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8865 struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -8896,7 +8886,7 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
8896 * 8886 *
8897 * Returns 0 success, anything else error. 8887 * Returns 0 success, anything else error.
8898 */ 8888 */
8899int 8889static int
8900scsih_resume(struct pci_dev *pdev) 8890scsih_resume(struct pci_dev *pdev)
8901{ 8891{
8902 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8892 struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -8916,7 +8906,7 @@ scsih_resume(struct pci_dev *pdev)
8916 if (r) 8906 if (r)
8917 return r; 8907 return r;
8918 8908
8919 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, SOFT_RESET); 8909 mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
8920 scsi_unblock_requests(shost); 8910 scsi_unblock_requests(shost);
8921 mpt3sas_base_start_watchdog(ioc); 8911 mpt3sas_base_start_watchdog(ioc);
8922 return 0; 8912 return 0;
@@ -8933,7 +8923,7 @@ scsih_resume(struct pci_dev *pdev)
8933 * Return value: 8923 * Return value:
8934 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT 8924 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8935 */ 8925 */
8936pci_ers_result_t 8926static pci_ers_result_t
8937scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) 8927scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
8938{ 8928{
8939 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8929 struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -8970,7 +8960,7 @@ scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
8970 * code after the PCI slot has been reset, just before we 8960 * code after the PCI slot has been reset, just before we
8971 * should resume normal operations. 8961 * should resume normal operations.
8972 */ 8962 */
8973pci_ers_result_t 8963static pci_ers_result_t
8974scsih_pci_slot_reset(struct pci_dev *pdev) 8964scsih_pci_slot_reset(struct pci_dev *pdev)
8975{ 8965{
8976 struct Scsi_Host *shost = pci_get_drvdata(pdev); 8966 struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -8987,8 +8977,7 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
8987 if (rc) 8977 if (rc)
8988 return PCI_ERS_RESULT_DISCONNECT; 8978 return PCI_ERS_RESULT_DISCONNECT;
8989 8979
8990 rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 8980 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
8991 FORCE_BIG_HAMMER);
8992 8981
8993 pr_warn(MPT3SAS_FMT "hard reset: %s\n", ioc->name, 8982 pr_warn(MPT3SAS_FMT "hard reset: %s\n", ioc->name,
8994 (rc == 0) ? "success" : "failed"); 8983 (rc == 0) ? "success" : "failed");
@@ -9007,7 +8996,7 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
9007 * OK to resume normal operation. Use completion to allow 8996 * OK to resume normal operation. Use completion to allow
9008 * halted scsi ops to resume. 8997 * halted scsi ops to resume.
9009 */ 8998 */
9010void 8999static void
9011scsih_pci_resume(struct pci_dev *pdev) 9000scsih_pci_resume(struct pci_dev *pdev)
9012{ 9001{
9013 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9002 struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -9024,7 +9013,7 @@ scsih_pci_resume(struct pci_dev *pdev)
9024 * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers 9013 * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
9025 * @pdev: pointer to PCI device 9014 * @pdev: pointer to PCI device
9026 */ 9015 */
9027pci_ers_result_t 9016static pci_ers_result_t
9028scsih_pci_mmio_enabled(struct pci_dev *pdev) 9017scsih_pci_mmio_enabled(struct pci_dev *pdev)
9029{ 9018{
9030 struct Scsi_Host *shost = pci_get_drvdata(pdev); 9019 struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -9152,7 +9141,7 @@ static struct pci_driver mpt3sas_driver = {
9152 * 9141 *
9153 * Returns 0 success, anything else error. 9142 * Returns 0 success, anything else error.
9154 */ 9143 */
9155int 9144static int
9156scsih_init(void) 9145scsih_init(void)
9157{ 9146{
9158 mpt2_ids = 0; 9147 mpt2_ids = 0;
@@ -9202,7 +9191,7 @@ scsih_init(void)
9202 * 9191 *
9203 * Returns 0 success, anything else error. 9192 * Returns 0 success, anything else error.
9204 */ 9193 */
9205void 9194static void
9206scsih_exit(void) 9195scsih_exit(void)
9207{ 9196{
9208 9197
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index ff93286bc32f..b74faf1a69b2 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -300,7 +300,6 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
300 int rc; 300 int rc;
301 u16 smid; 301 u16 smid;
302 u32 ioc_state; 302 u32 ioc_state;
303 unsigned long timeleft;
304 void *psge; 303 void *psge;
305 u8 issue_reset = 0; 304 u8 issue_reset = 0;
306 void *data_out = NULL; 305 void *data_out = NULL;
@@ -394,8 +393,7 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
394 ioc->name, (unsigned long long)sas_address)); 393 ioc->name, (unsigned long long)sas_address));
395 init_completion(&ioc->transport_cmds.done); 394 init_completion(&ioc->transport_cmds.done);
396 mpt3sas_base_put_smid_default(ioc, smid); 395 mpt3sas_base_put_smid_default(ioc, smid);
397 timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done, 396 wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
398 10*HZ);
399 397
400 if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { 398 if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
401 pr_err(MPT3SAS_FMT "%s: timeout\n", 399 pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -446,8 +444,7 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
446 444
447 issue_host_reset: 445 issue_host_reset:
448 if (issue_reset) 446 if (issue_reset)
449 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 447 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
450 FORCE_BIG_HAMMER);
451 out: 448 out:
452 ioc->transport_cmds.status = MPT3_CMD_NOT_USED; 449 ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
453 if (data_out) 450 if (data_out)
@@ -1107,7 +1104,6 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
1107 int rc; 1104 int rc;
1108 u16 smid; 1105 u16 smid;
1109 u32 ioc_state; 1106 u32 ioc_state;
1110 unsigned long timeleft;
1111 void *psge; 1107 void *psge;
1112 u8 issue_reset = 0; 1108 u8 issue_reset = 0;
1113 void *data_out = NULL; 1109 void *data_out = NULL;
@@ -1203,8 +1199,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
1203 phy->number)); 1199 phy->number));
1204 init_completion(&ioc->transport_cmds.done); 1200 init_completion(&ioc->transport_cmds.done);
1205 mpt3sas_base_put_smid_default(ioc, smid); 1201 mpt3sas_base_put_smid_default(ioc, smid);
1206 timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done, 1202 wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
1207 10*HZ);
1208 1203
1209 if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { 1204 if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
1210 pr_err(MPT3SAS_FMT "%s: timeout\n", 1205 pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -1253,8 +1248,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
1253 1248
1254 issue_host_reset: 1249 issue_host_reset:
1255 if (issue_reset) 1250 if (issue_reset)
1256 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 1251 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1257 FORCE_BIG_HAMMER);
1258 out: 1252 out:
1259 ioc->transport_cmds.status = MPT3_CMD_NOT_USED; 1253 ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
1260 if (data_out) 1254 if (data_out)
@@ -1421,7 +1415,6 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
1421 int rc; 1415 int rc;
1422 u16 smid; 1416 u16 smid;
1423 u32 ioc_state; 1417 u32 ioc_state;
1424 unsigned long timeleft;
1425 void *psge; 1418 void *psge;
1426 u8 issue_reset = 0; 1419 u8 issue_reset = 0;
1427 void *data_out = NULL; 1420 void *data_out = NULL;
@@ -1522,8 +1515,7 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
1522 phy->number, phy_operation)); 1515 phy->number, phy_operation));
1523 init_completion(&ioc->transport_cmds.done); 1516 init_completion(&ioc->transport_cmds.done);
1524 mpt3sas_base_put_smid_default(ioc, smid); 1517 mpt3sas_base_put_smid_default(ioc, smid);
1525 timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done, 1518 wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
1526 10*HZ);
1527 1519
1528 if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { 1520 if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
1529 pr_err(MPT3SAS_FMT "%s: timeout\n", 1521 pr_err(MPT3SAS_FMT "%s: timeout\n",
@@ -1564,8 +1556,7 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
1564 1556
1565 issue_host_reset: 1557 issue_host_reset:
1566 if (issue_reset) 1558 if (issue_reset)
1567 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 1559 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
1568 FORCE_BIG_HAMMER);
1569 out: 1560 out:
1570 ioc->transport_cmds.status = MPT3_CMD_NOT_USED; 1561 ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
1571 if (data_out) 1562 if (data_out)
@@ -1899,7 +1890,6 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1899 int rc; 1890 int rc;
1900 u16 smid; 1891 u16 smid;
1901 u32 ioc_state; 1892 u32 ioc_state;
1902 unsigned long timeleft;
1903 void *psge; 1893 void *psge;
1904 u8 issue_reset = 0; 1894 u8 issue_reset = 0;
1905 dma_addr_t dma_addr_in = 0; 1895 dma_addr_t dma_addr_in = 0;
@@ -2043,8 +2033,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2043 2033
2044 init_completion(&ioc->transport_cmds.done); 2034 init_completion(&ioc->transport_cmds.done);
2045 mpt3sas_base_put_smid_default(ioc, smid); 2035 mpt3sas_base_put_smid_default(ioc, smid);
2046 timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done, 2036 wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
2047 10*HZ);
2048 2037
2049 if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { 2038 if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
2050 pr_err(MPT3SAS_FMT "%s : timeout\n", 2039 pr_err(MPT3SAS_FMT "%s : timeout\n",
@@ -2103,8 +2092,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2103 2092
2104 issue_host_reset: 2093 issue_host_reset:
2105 if (issue_reset) { 2094 if (issue_reset) {
2106 mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, 2095 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2107 FORCE_BIG_HAMMER);
2108 rc = -ETIMEDOUT; 2096 rc = -ETIMEDOUT;
2109 } 2097 }
2110 2098
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
index 8bb06995adfb..b757d389e32f 100644
--- a/drivers/scsi/mvsas/mv_64xx.c
+++ b/drivers/scsi/mvsas/mv_64xx.c
@@ -136,7 +136,8 @@ static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
136 } 136 }
137} 137}
138 138
139void mvs_64xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all) 139static void
140mvs_64xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
140{ 141{
141 void __iomem *regs = mvi->regs; 142 void __iomem *regs = mvi->regs;
142 u32 tmp; 143 u32 tmp;
@@ -563,7 +564,7 @@ static u8 mvs_64xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
563 return MVS_ID_NOT_MAPPED; 564 return MVS_ID_NOT_MAPPED;
564} 565}
565 566
566void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd) 567static void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
567{ 568{
568 int i; 569 int i;
569 struct scatterlist *sg; 570 struct scatterlist *sg;
@@ -633,7 +634,7 @@ static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i)
633 mvs_write_port_vsr_data(mvi, i, tmp); 634 mvs_write_port_vsr_data(mvi, i, tmp);
634} 635}
635 636
636void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id, 637static void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
637 struct sas_phy_linkrates *rates) 638 struct sas_phy_linkrates *rates)
638{ 639{
639 u32 lrmin = 0, lrmax = 0; 640 u32 lrmin = 0, lrmax = 0;
@@ -668,20 +669,20 @@ static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi)
668} 669}
669 670
670 671
671u32 mvs_64xx_spi_read_data(struct mvs_info *mvi) 672static u32 mvs_64xx_spi_read_data(struct mvs_info *mvi)
672{ 673{
673 void __iomem *regs = mvi->regs_ex; 674 void __iomem *regs = mvi->regs_ex;
674 return ior32(SPI_DATA_REG_64XX); 675 return ior32(SPI_DATA_REG_64XX);
675} 676}
676 677
677void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data) 678static void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data)
678{ 679{
679 void __iomem *regs = mvi->regs_ex; 680 void __iomem *regs = mvi->regs_ex;
680 iow32(SPI_DATA_REG_64XX, data); 681 iow32(SPI_DATA_REG_64XX, data);
681} 682}
682 683
683 684
684int mvs_64xx_spi_buildcmd(struct mvs_info *mvi, 685static int mvs_64xx_spi_buildcmd(struct mvs_info *mvi,
685 u32 *dwCmd, 686 u32 *dwCmd,
686 u8 cmd, 687 u8 cmd,
687 u8 read, 688 u8 read,
@@ -705,7 +706,7 @@ int mvs_64xx_spi_buildcmd(struct mvs_info *mvi,
705} 706}
706 707
707 708
708int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd) 709static int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
709{ 710{
710 void __iomem *regs = mvi->regs_ex; 711 void __iomem *regs = mvi->regs_ex;
711 int retry; 712 int retry;
@@ -720,7 +721,7 @@ int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
720 return 0; 721 return 0;
721} 722}
722 723
723int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout) 724static int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
724{ 725{
725 void __iomem *regs = mvi->regs_ex; 726 void __iomem *regs = mvi->regs_ex;
726 u32 i, dwTmp; 727 u32 i, dwTmp;
@@ -735,7 +736,7 @@ int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
735 return -1; 736 return -1;
736} 737}
737 738
738void mvs_64xx_fix_dma(struct mvs_info *mvi, u32 phy_mask, 739static void mvs_64xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
739 int buf_len, int from, void *prd) 740 int buf_len, int from, void *prd)
740{ 741{
741 int i; 742 int i;
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index f6fc4a705924..4c57d9abce7b 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -48,8 +48,8 @@ static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
48 } 48 }
49} 49}
50 50
51void set_phy_tuning(struct mvs_info *mvi, int phy_id, 51static void set_phy_tuning(struct mvs_info *mvi, int phy_id,
52 struct phy_tuning phy_tuning) 52 struct phy_tuning phy_tuning)
53{ 53{
54 u32 tmp, setting_0 = 0, setting_1 = 0; 54 u32 tmp, setting_0 = 0, setting_1 = 0;
55 u8 i; 55 u8 i;
@@ -110,8 +110,8 @@ void set_phy_tuning(struct mvs_info *mvi, int phy_id,
110 } 110 }
111} 111}
112 112
113void set_phy_ffe_tuning(struct mvs_info *mvi, int phy_id, 113static void set_phy_ffe_tuning(struct mvs_info *mvi, int phy_id,
114 struct ffe_control ffe) 114 struct ffe_control ffe)
115{ 115{
116 u32 tmp; 116 u32 tmp;
117 117
@@ -177,7 +177,7 @@ void set_phy_ffe_tuning(struct mvs_info *mvi, int phy_id,
177} 177}
178 178
179/*Notice: this function must be called when phy is disabled*/ 179/*Notice: this function must be called when phy is disabled*/
180void set_phy_rate(struct mvs_info *mvi, int phy_id, u8 rate) 180static void set_phy_rate(struct mvs_info *mvi, int phy_id, u8 rate)
181{ 181{
182 union reg_phy_cfg phy_cfg, phy_cfg_tmp; 182 union reg_phy_cfg phy_cfg, phy_cfg_tmp;
183 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2); 183 mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
@@ -679,7 +679,8 @@ static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
679 } 679 }
680} 680}
681 681
682void mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all) 682static void
683mvs_94xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
683{ 684{
684 void __iomem *regs = mvi->regs; 685 void __iomem *regs = mvi->regs;
685 u32 tmp; 686 u32 tmp;
@@ -906,8 +907,8 @@ static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
906 907
907} 908}
908 909
909void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id, 910static void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
910 struct sas_phy_linkrates *rates) 911 struct sas_phy_linkrates *rates)
911{ 912{
912 u32 lrmax = 0; 913 u32 lrmax = 0;
913 u32 tmp; 914 u32 tmp;
@@ -936,25 +937,25 @@ static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
936} 937}
937 938
938 939
939u32 mvs_94xx_spi_read_data(struct mvs_info *mvi) 940static u32 mvs_94xx_spi_read_data(struct mvs_info *mvi)
940{ 941{
941 void __iomem *regs = mvi->regs_ex - 0x10200; 942 void __iomem *regs = mvi->regs_ex - 0x10200;
942 return mr32(SPI_RD_DATA_REG_94XX); 943 return mr32(SPI_RD_DATA_REG_94XX);
943} 944}
944 945
945void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data) 946static void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data)
946{ 947{
947 void __iomem *regs = mvi->regs_ex - 0x10200; 948 void __iomem *regs = mvi->regs_ex - 0x10200;
948 mw32(SPI_RD_DATA_REG_94XX, data); 949 mw32(SPI_RD_DATA_REG_94XX, data);
949} 950}
950 951
951 952
952int mvs_94xx_spi_buildcmd(struct mvs_info *mvi, 953static int mvs_94xx_spi_buildcmd(struct mvs_info *mvi,
953 u32 *dwCmd, 954 u32 *dwCmd,
954 u8 cmd, 955 u8 cmd,
955 u8 read, 956 u8 read,
956 u8 length, 957 u8 length,
957 u32 addr 958 u32 addr
958 ) 959 )
959{ 960{
960 void __iomem *regs = mvi->regs_ex - 0x10200; 961 void __iomem *regs = mvi->regs_ex - 0x10200;
@@ -974,7 +975,7 @@ int mvs_94xx_spi_buildcmd(struct mvs_info *mvi,
974} 975}
975 976
976 977
977int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd) 978static int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
978{ 979{
979 void __iomem *regs = mvi->regs_ex - 0x10200; 980 void __iomem *regs = mvi->regs_ex - 0x10200;
980 mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX); 981 mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX);
@@ -982,7 +983,7 @@ int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
982 return 0; 983 return 0;
983} 984}
984 985
985int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout) 986static int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
986{ 987{
987 void __iomem *regs = mvi->regs_ex - 0x10200; 988 void __iomem *regs = mvi->regs_ex - 0x10200;
988 u32 i, dwTmp; 989 u32 i, dwTmp;
@@ -997,8 +998,8 @@ int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
997 return -1; 998 return -1;
998} 999}
999 1000
1000void mvs_94xx_fix_dma(struct mvs_info *mvi, u32 phy_mask, 1001static void mvs_94xx_fix_dma(struct mvs_info *mvi, u32 phy_mask,
1001 int buf_len, int from, void *prd) 1002 int buf_len, int from, void *prd)
1002{ 1003{
1003 int i; 1004 int i;
1004 struct mvs_prd *buf_prd = prd; 1005 struct mvs_prd *buf_prd = prd;
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 5b9fcff6cd94..86eb19902bac 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -74,7 +74,7 @@ void mvs_tag_init(struct mvs_info *mvi)
74 mvs_tag_clear(mvi, i); 74 mvs_tag_clear(mvi, i);
75} 75}
76 76
77struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev) 77static struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
78{ 78{
79 unsigned long i = 0, j = 0, hi = 0; 79 unsigned long i = 0, j = 0, hi = 0;
80 struct sas_ha_struct *sha = dev->port->ha; 80 struct sas_ha_struct *sha = dev->port->ha;
@@ -102,7 +102,7 @@ struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
102 102
103} 103}
104 104
105int mvs_find_dev_phyno(struct domain_device *dev, int *phyno) 105static int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
106{ 106{
107 unsigned long i = 0, j = 0, n = 0, num = 0; 107 unsigned long i = 0, j = 0, n = 0, num = 0;
108 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; 108 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
@@ -1158,7 +1158,7 @@ void mvs_port_deformed(struct asd_sas_phy *sas_phy)
1158 mvs_port_notify_deformed(sas_phy, 1); 1158 mvs_port_notify_deformed(sas_phy, 1);
1159} 1159}
1160 1160
1161struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi) 1161static struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
1162{ 1162{
1163 u32 dev; 1163 u32 dev;
1164 for (dev = 0; dev < MVS_MAX_DEVICES; dev++) { 1164 for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
@@ -1175,7 +1175,7 @@ struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
1175 return NULL; 1175 return NULL;
1176} 1176}
1177 1177
1178void mvs_free_dev(struct mvs_device *mvi_dev) 1178static void mvs_free_dev(struct mvs_device *mvi_dev)
1179{ 1179{
1180 u32 id = mvi_dev->device_id; 1180 u32 id = mvi_dev->device_id;
1181 memset(mvi_dev, 0, sizeof(*mvi_dev)); 1181 memset(mvi_dev, 0, sizeof(*mvi_dev));
@@ -1185,7 +1185,7 @@ void mvs_free_dev(struct mvs_device *mvi_dev)
1185 mvi_dev->taskfileset = MVS_ID_NOT_MAPPED; 1185 mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
1186} 1186}
1187 1187
1188int mvs_dev_found_notify(struct domain_device *dev, int lock) 1188static int mvs_dev_found_notify(struct domain_device *dev, int lock)
1189{ 1189{
1190 unsigned long flags = 0; 1190 unsigned long flags = 0;
1191 int res = 0; 1191 int res = 0;
@@ -1241,7 +1241,7 @@ int mvs_dev_found(struct domain_device *dev)
1241 return mvs_dev_found_notify(dev, 1); 1241 return mvs_dev_found_notify(dev, 1);
1242} 1242}
1243 1243
1244void mvs_dev_gone_notify(struct domain_device *dev) 1244static void mvs_dev_gone_notify(struct domain_device *dev)
1245{ 1245{
1246 unsigned long flags = 0; 1246 unsigned long flags = 0;
1247 struct mvs_device *mvi_dev = dev->lldd_dev; 1247 struct mvs_device *mvi_dev = dev->lldd_dev;
@@ -1611,7 +1611,7 @@ static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
1611 return stat; 1611 return stat;
1612} 1612}
1613 1613
1614void mvs_set_sense(u8 *buffer, int len, int d_sense, 1614static void mvs_set_sense(u8 *buffer, int len, int d_sense,
1615 int key, int asc, int ascq) 1615 int key, int asc, int ascq)
1616{ 1616{
1617 memset(buffer, 0, len); 1617 memset(buffer, 0, len);
@@ -1650,7 +1650,7 @@ void mvs_set_sense(u8 *buffer, int len, int d_sense,
1650 return; 1650 return;
1651} 1651}
1652 1652
1653void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu, 1653static void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu,
1654 u8 key, u8 asc, u8 asc_q) 1654 u8 key, u8 asc, u8 asc_q)
1655{ 1655{
1656 iu->datapres = 2; 1656 iu->datapres = 2;
diff --git a/drivers/scsi/pas16.c b/drivers/scsi/pas16.c
deleted file mode 100644
index 2f689ae7a803..000000000000
--- a/drivers/scsi/pas16.c
+++ /dev/null
@@ -1,565 +0,0 @@
1/*
2 * This driver adapted from Drew Eckhardt's Trantor T128 driver
3 *
4 * Copyright 1993, Drew Eckhardt
5 * Visionary Computing
6 * (Unix and Linux consulting and custom programming)
7 * drew@colorado.edu
8 * +1 (303) 666-5836
9 *
10 * ( Based on T128 - DISTRIBUTION RELEASE 3. )
11 *
12 * Modified to work with the Pro Audio Spectrum/Studio 16
13 * by John Weidman.
14 *
15 *
16 * For more information, please consult
17 *
18 * Media Vision
19 * (510) 770-8600
20 * (800) 348-7116
21 */
22
23/*
24 * The card is detected and initialized in one of several ways :
25 * 1. Autoprobe (default) - There are many different models of
26 * the Pro Audio Spectrum/Studio 16, and I only have one of
27 * them, so this may require a little tweaking. An interrupt
28 * is triggered to autoprobe for the interrupt line. Note:
29 * with the newer model boards, the interrupt is set via
30 * software after reset using the default_irq for the
31 * current board number.
32 *
33 * 2. With command line overrides - pas16=port,irq may be
34 * used on the LILO command line to override the defaults.
35 *
36 * 3. With the PAS16_OVERRIDE compile time define. This is
37 * specified as an array of address, irq tuples. Ie, for
38 * one board at the default 0x388 address, IRQ10, I could say
39 * -DPAS16_OVERRIDE={{0x388, 10}}
40 * NOTE: Untested.
41 *
42 * 4. When included as a module, with arguments passed on the command line:
43 * pas16_irq=xx the interrupt
44 * pas16_addr=xx the port
45 * e.g. "modprobe pas16 pas16_addr=0x388 pas16_irq=5"
46 *
47 * Note that if the override methods are used, place holders must
48 * be specified for other boards in the system.
49 *
50 *
51 * Configuration notes :
52 * The current driver does not support interrupt sharing with the
53 * sound portion of the card. If you use the same irq for the
54 * scsi port and sound you will have problems. Either use
55 * a different irq for the scsi port or don't use interrupts
56 * for the scsi port.
57 *
58 * If you have problems with your card not being recognized, use
59 * the LILO command line override. Try to get it recognized without
60 * interrupts. Ie, for a board at the default 0x388 base port,
61 * boot: linux pas16=0x388,0
62 *
63 * NO_IRQ (0) should be specified for no interrupt,
64 * IRQ_AUTO (254) to autoprobe for an IRQ line if overridden
65 * on the command line.
66 */
67
68#include <linux/module.h>
69
70#include <asm/io.h>
71#include <asm/dma.h>
72#include <linux/blkdev.h>
73#include <linux/interrupt.h>
74#include <linux/init.h>
75
76#include <scsi/scsi_host.h>
77#include "pas16.h"
78#include "NCR5380.h"
79
80
81static unsigned short pas16_addr;
82static int pas16_irq;
83
84
85static const int scsi_irq_translate[] =
86 { 0, 0, 1, 2, 3, 4, 5, 6, 0, 0, 7, 8, 9, 0, 10, 11 };
87
88/* The default_irqs array contains values used to set the irq into the
89 * board via software (as must be done on newer model boards without
90 * irq jumpers on the board). The first value in the array will be
91 * assigned to logical board 0, the next to board 1, etc.
92 */
93static int default_irqs[] __initdata =
94 { PAS16_DEFAULT_BOARD_1_IRQ,
95 PAS16_DEFAULT_BOARD_2_IRQ,
96 PAS16_DEFAULT_BOARD_3_IRQ,
97 PAS16_DEFAULT_BOARD_4_IRQ
98 };
99
100static struct override {
101 unsigned short io_port;
102 int irq;
103} overrides
104#ifdef PAS16_OVERRIDE
105 [] __initdata = PAS16_OVERRIDE;
106#else
107 [4] __initdata = {{0,IRQ_AUTO}, {0,IRQ_AUTO}, {0,IRQ_AUTO},
108 {0,IRQ_AUTO}};
109#endif
110
111#define NO_OVERRIDES ARRAY_SIZE(overrides)
112
113static struct base {
114 unsigned short io_port;
115 int noauto;
116} bases[] __initdata =
117 { {PAS16_DEFAULT_BASE_1, 0},
118 {PAS16_DEFAULT_BASE_2, 0},
119 {PAS16_DEFAULT_BASE_3, 0},
120 {PAS16_DEFAULT_BASE_4, 0}
121 };
122
123#define NO_BASES ARRAY_SIZE(bases)
124
125static const unsigned short pas16_offset[ 8 ] =
126 {
127 0x1c00, /* OUTPUT_DATA_REG */
128 0x1c01, /* INITIATOR_COMMAND_REG */
129 0x1c02, /* MODE_REG */
130 0x1c03, /* TARGET_COMMAND_REG */
131 0x3c00, /* STATUS_REG ro, SELECT_ENABLE_REG wo */
132 0x3c01, /* BUS_AND_STATUS_REG ro, START_DMA_SEND_REG wo */
133 0x3c02, /* INPUT_DATA_REGISTER ro, (N/A on PAS16 ?)
134 * START_DMA_TARGET_RECEIVE_REG wo
135 */
136 0x3c03, /* RESET_PARITY_INTERRUPT_REG ro,
137 * START_DMA_INITIATOR_RECEIVE_REG wo
138 */
139 };
140
141
142/*
143 * Function : enable_board( int board_num, unsigned short port )
144 *
145 * Purpose : set address in new model board
146 *
147 * Inputs : board_num - logical board number 0-3, port - base address
148 *
149 */
150
151static void __init
152 enable_board( int board_num, unsigned short port )
153{
154 outb( 0xbc + board_num, MASTER_ADDRESS_PTR );
155 outb( port >> 2, MASTER_ADDRESS_PTR );
156}
157
158
159
160/*
161 * Function : init_board( unsigned short port, int irq )
162 *
163 * Purpose : Set the board up to handle the SCSI interface
164 *
165 * Inputs : port - base address of the board,
166 * irq - irq to assign to the SCSI port
167 * force_irq - set it even if it conflicts with sound driver
168 *
169 */
170
171static void __init
172 init_board( unsigned short io_port, int irq, int force_irq )
173{
174 unsigned int tmp;
175 unsigned int pas_irq_code;
176
177 /* Initialize the SCSI part of the board */
178
179 outb( 0x30, io_port + P_TIMEOUT_COUNTER_REG ); /* Timeout counter */
180 outb( 0x01, io_port + P_TIMEOUT_STATUS_REG_OFFSET ); /* Reset TC */
181 outb( 0x01, io_port + WAIT_STATE ); /* 1 Wait state */
182
183 inb(io_port + pas16_offset[RESET_PARITY_INTERRUPT_REG]);
184
185 /* Set the SCSI interrupt pointer without mucking up the sound
186 * interrupt pointer in the same byte.
187 */
188 pas_irq_code = ( irq < 16 ) ? scsi_irq_translate[irq] : 0;
189 tmp = inb( io_port + IO_CONFIG_3 );
190
191 if( (( tmp & 0x0f ) == pas_irq_code) && pas_irq_code > 0
192 && !force_irq )
193 {
194 printk( "pas16: WARNING: Can't use same irq as sound "
195 "driver -- interrupts disabled\n" );
196 /* Set up the drive parameters, disable 5380 interrupts */
197 outb( 0x4d, io_port + SYS_CONFIG_4 );
198 }
199 else
200 {
201 tmp = ( tmp & 0x0f ) | ( pas_irq_code << 4 );
202 outb( tmp, io_port + IO_CONFIG_3 );
203
204 /* Set up the drive parameters and enable 5380 interrupts */
205 outb( 0x6d, io_port + SYS_CONFIG_4 );
206 }
207}
208
209
210/*
211 * Function : pas16_hw_detect( unsigned short board_num )
212 *
213 * Purpose : determine if a pas16 board is present
214 *
215 * Inputs : board_num - logical board number ( 0 - 3 )
216 *
217 * Returns : 0 if board not found, 1 if found.
218 */
219
220static int __init
221 pas16_hw_detect( unsigned short board_num )
222{
223 unsigned char board_rev, tmp;
224 unsigned short io_port = bases[ board_num ].io_port;
225
226 /* See if we can find a PAS16 board at the address associated
227 * with this logical board number.
228 */
229
230 /* First, attempt to take a newer model board out of reset and
231 * give it a base address. This shouldn't affect older boards.
232 */
233 enable_board( board_num, io_port );
234
235 /* Now see if it looks like a PAS16 board */
236 board_rev = inb( io_port + PCB_CONFIG );
237
238 if( board_rev == 0xff )
239 return 0;
240
241 tmp = board_rev ^ 0xe0;
242
243 outb( tmp, io_port + PCB_CONFIG );
244 tmp = inb( io_port + PCB_CONFIG );
245 outb( board_rev, io_port + PCB_CONFIG );
246
247 if( board_rev != tmp ) /* Not a PAS-16 */
248 return 0;
249
250 if( ( inb( io_port + OPERATION_MODE_1 ) & 0x03 ) != 0x03 )
251 return 0; /* return if no SCSI interface found */
252
253 /* Mediavision has some new model boards that return ID bits
254 * that indicate a SCSI interface, but they're not (LMS). We'll
255 * put in an additional test to try to weed them out.
256 */
257
258 outb(0x01, io_port + WAIT_STATE); /* 1 Wait state */
259 outb(0x20, io_port + pas16_offset[MODE_REG]); /* Is it really SCSI? */
260 if (inb(io_port + pas16_offset[MODE_REG]) != 0x20) /* Write to a reg. */
261 return 0; /* and try to read */
262 outb(0x00, io_port + pas16_offset[MODE_REG]); /* it back. */
263 if (inb(io_port + pas16_offset[MODE_REG]) != 0x00)
264 return 0;
265
266 return 1;
267}
268
269
270#ifndef MODULE
271/*
272 * Function : pas16_setup(char *str, int *ints)
273 *
274 * Purpose : LILO command line initialization of the overrides array,
275 *
276 * Inputs : str - unused, ints - array of integer parameters with ints[0]
277 * equal to the number of ints.
278 *
279 */
280
281static int __init pas16_setup(char *str)
282{
283 static int commandline_current;
284 int i;
285 int ints[10];
286
287 get_options(str, ARRAY_SIZE(ints), ints);
288 if (ints[0] != 2)
289 printk("pas16_setup : usage pas16=io_port,irq\n");
290 else
291 if (commandline_current < NO_OVERRIDES) {
292 overrides[commandline_current].io_port = (unsigned short) ints[1];
293 overrides[commandline_current].irq = ints[2];
294 for (i = 0; i < NO_BASES; ++i)
295 if (bases[i].io_port == (unsigned short) ints[1]) {
296 bases[i].noauto = 1;
297 break;
298 }
299 ++commandline_current;
300 }
301 return 1;
302}
303
304__setup("pas16=", pas16_setup);
305#endif
306
307/*
308 * Function : int pas16_detect(struct scsi_host_template * tpnt)
309 *
310 * Purpose : detects and initializes PAS16 controllers
311 * that were autoprobed, overridden on the LILO command line,
312 * or specified at compile time.
313 *
314 * Inputs : tpnt - template for this SCSI adapter.
315 *
316 * Returns : 1 if a host adapter was found, 0 if not.
317 *
318 */
319
320static int __init pas16_detect(struct scsi_host_template *tpnt)
321{
322 static int current_override;
323 static unsigned short current_base;
324 struct Scsi_Host *instance;
325 unsigned short io_port;
326 int count;
327
328 if (pas16_addr != 0) {
329 overrides[0].io_port = pas16_addr;
330 /*
331 * This is how we avoid seeing more than
332 * one host adapter at the same I/O port.
333 * Cribbed shamelessly from pas16_setup().
334 */
335 for (count = 0; count < NO_BASES; ++count)
336 if (bases[count].io_port == pas16_addr) {
337 bases[count].noauto = 1;
338 break;
339 }
340 }
341 if (pas16_irq != 0)
342 overrides[0].irq = pas16_irq;
343
344 for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
345 io_port = 0;
346
347 if (overrides[current_override].io_port)
348 {
349 io_port = overrides[current_override].io_port;
350 enable_board( current_override, io_port );
351 init_board( io_port, overrides[current_override].irq, 1 );
352 }
353 else
354 for (; !io_port && (current_base < NO_BASES); ++current_base) {
355 dprintk(NDEBUG_INIT, "pas16: probing io_port 0x%04x\n",
356 (unsigned int)bases[current_base].io_port);
357 if ( !bases[current_base].noauto &&
358 pas16_hw_detect( current_base ) ){
359 io_port = bases[current_base].io_port;
360 init_board( io_port, default_irqs[ current_base ], 0 );
361 dprintk(NDEBUG_INIT, "pas16: detected board\n");
362 }
363 }
364
365 dprintk(NDEBUG_INIT, "pas16: io_port = 0x%04x\n",
366 (unsigned int)io_port);
367
368 if (!io_port)
369 break;
370
371 instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
372 if(instance == NULL)
373 goto out;
374
375 instance->io_port = io_port;
376
377 if (NCR5380_init(instance, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP))
378 goto out_unregister;
379
380 NCR5380_maybe_reset_bus(instance);
381
382 if (overrides[current_override].irq != IRQ_AUTO)
383 instance->irq = overrides[current_override].irq;
384 else
385 instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS);
386
387 /* Compatibility with documented NCR5380 kernel parameters */
388 if (instance->irq == 255)
389 instance->irq = NO_IRQ;
390
391 if (instance->irq != NO_IRQ)
392 if (request_irq(instance->irq, pas16_intr, 0,
393 "pas16", instance)) {
394 printk("scsi%d : IRQ%d not free, interrupts disabled\n",
395 instance->host_no, instance->irq);
396 instance->irq = NO_IRQ;
397 }
398
399 if (instance->irq == NO_IRQ) {
400 printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
401 printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
402 /* Disable 5380 interrupts, leave drive params the same */
403 outb( 0x4d, io_port + SYS_CONFIG_4 );
404 outb( (inb(io_port + IO_CONFIG_3) & 0x0f), io_port + IO_CONFIG_3 );
405 }
406
407 dprintk(NDEBUG_INIT, "scsi%d : irq = %d\n",
408 instance->host_no, instance->irq);
409
410 ++current_override;
411 ++count;
412 }
413 return count;
414
415out_unregister:
416 scsi_unregister(instance);
417out:
418 return count;
419}
420
421/*
422 * Function : int pas16_biosparam(Disk *disk, struct block_device *dev, int *ip)
423 *
424 * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for
425 * the specified device / size.
426 *
427 * Inputs : size = size of device in sectors (512 bytes), dev = block device
428 * major / minor, ip[] = {heads, sectors, cylinders}
429 *
430 * Returns : always 0 (success), initializes ip
431 *
432 */
433
434/*
435 * XXX Most SCSI boards use this mapping, I could be incorrect. Some one
436 * using hard disks on a trantor should verify that this mapping corresponds
437 * to that used by the BIOS / ASPI driver by running the linux fdisk program
438 * and matching the H_C_S coordinates to what DOS uses.
439 */
440
441static int pas16_biosparam(struct scsi_device *sdev, struct block_device *dev,
442 sector_t capacity, int *ip)
443{
444 int size = capacity;
445 ip[0] = 64;
446 ip[1] = 32;
447 ip[2] = size >> 11; /* I think I have it as /(32*64) */
448 if( ip[2] > 1024 ) { /* yes, >, not >= */
449 ip[0]=255;
450 ip[1]=63;
451 ip[2]=size/(63*255);
452 if( ip[2] > 1023 ) /* yes >1023... */
453 ip[2] = 1023;
454 }
455
456 return 0;
457}
458
459/*
460 * Function : int pas16_pread (struct Scsi_Host *instance,
461 * unsigned char *dst, int len)
462 *
463 * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to
464 * dst
465 *
466 * Inputs : dst = destination, len = length in bytes
467 *
468 * Returns : 0 on success, non zero on a failure such as a watchdog
469 * timeout.
470 */
471
472static inline int pas16_pread(struct Scsi_Host *instance,
473 unsigned char *dst, int len)
474{
475 register unsigned char *d = dst;
476 register unsigned short reg = (unsigned short) (instance->io_port +
477 P_DATA_REG_OFFSET);
478 register int i = len;
479 int ii = 0;
480
481 while ( !(inb(instance->io_port + P_STATUS_REG_OFFSET) & P_ST_RDY) )
482 ++ii;
483
484 insb( reg, d, i );
485
486 if ( inb(instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET) & P_TS_TIM) {
487 outb( P_TS_CT, instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET);
488 printk("scsi%d : watchdog timer fired in NCR5380_pread()\n",
489 instance->host_no);
490 return -1;
491 }
492 return 0;
493}
494
495/*
496 * Function : int pas16_pwrite (struct Scsi_Host *instance,
497 * unsigned char *src, int len)
498 *
499 * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from
500 * src
501 *
502 * Inputs : src = source, len = length in bytes
503 *
504 * Returns : 0 on success, non zero on a failure such as a watchdog
505 * timeout.
506 */
507
508static inline int pas16_pwrite(struct Scsi_Host *instance,
509 unsigned char *src, int len)
510{
511 register unsigned char *s = src;
512 register unsigned short reg = (instance->io_port + P_DATA_REG_OFFSET);
513 register int i = len;
514 int ii = 0;
515
516 while ( !((inb(instance->io_port + P_STATUS_REG_OFFSET)) & P_ST_RDY) )
517 ++ii;
518
519 outsb( reg, s, i );
520
521 if (inb(instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET) & P_TS_TIM) {
522 outb( P_TS_CT, instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET);
523 printk("scsi%d : watchdog timer fired in NCR5380_pwrite()\n",
524 instance->host_no);
525 return -1;
526 }
527 return 0;
528}
529
530#include "NCR5380.c"
531
532static int pas16_release(struct Scsi_Host *shost)
533{
534 if (shost->irq != NO_IRQ)
535 free_irq(shost->irq, shost);
536 NCR5380_exit(shost);
537 scsi_unregister(shost);
538 return 0;
539}
540
541static struct scsi_host_template driver_template = {
542 .name = "Pro Audio Spectrum-16 SCSI",
543 .detect = pas16_detect,
544 .release = pas16_release,
545 .proc_name = "pas16",
546 .info = pas16_info,
547 .queuecommand = pas16_queue_command,
548 .eh_abort_handler = pas16_abort,
549 .eh_bus_reset_handler = pas16_bus_reset,
550 .bios_param = pas16_biosparam,
551 .can_queue = 32,
552 .this_id = 7,
553 .sg_tablesize = SG_ALL,
554 .cmd_per_lun = 2,
555 .use_clustering = DISABLE_CLUSTERING,
556 .cmd_size = NCR5380_CMD_SIZE,
557 .max_sectors = 128,
558};
559#include "scsi_module.c"
560
561#ifdef MODULE
562module_param(pas16_addr, ushort, 0);
563module_param(pas16_irq, int, 0);
564#endif
565MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/pas16.h b/drivers/scsi/pas16.h
deleted file mode 100644
index 9fe7f33660b4..000000000000
--- a/drivers/scsi/pas16.h
+++ /dev/null
@@ -1,121 +0,0 @@
1/*
2 * This driver adapted from Drew Eckhardt's Trantor T128 driver
3 *
4 * Copyright 1993, Drew Eckhardt
5 * Visionary Computing
6 * (Unix and Linux consulting and custom programming)
7 * drew@colorado.edu
8 * +1 (303) 666-5836
9 *
10 * ( Based on T128 - DISTRIBUTION RELEASE 3. )
11 *
12 * Modified to work with the Pro Audio Spectrum/Studio 16
13 * by John Weidman.
14 *
15 *
16 * For more information, please consult
17 *
18 * Media Vision
19 * (510) 770-8600
20 * (800) 348-7116
21 */
22
23
24#ifndef PAS16_H
25#define PAS16_H
26
27#define PAS16_DEFAULT_BASE_1 0x388
28#define PAS16_DEFAULT_BASE_2 0x384
29#define PAS16_DEFAULT_BASE_3 0x38c
30#define PAS16_DEFAULT_BASE_4 0x288
31
32#define PAS16_DEFAULT_BOARD_1_IRQ 10
33#define PAS16_DEFAULT_BOARD_2_IRQ 12
34#define PAS16_DEFAULT_BOARD_3_IRQ 14
35#define PAS16_DEFAULT_BOARD_4_IRQ 15
36
37
38/*
39 * The Pro Audio Spectrum boards are I/O mapped. They use a Zilog 5380
40 * SCSI controller, which is the equivalent of NCR's 5380. "Pseudo-DMA"
41 * architecture is used, where a PAL drives the DMA signals on the 5380
42 * allowing fast, blind transfers with proper handshaking.
43 */
44
45
46/* The Time-out Counter register is used to safe-guard against a stuck
47 * bus (in the case of RDY driven handshake) or a stuck byte (if 16-Bit
48 * DMA conversion is used). The counter uses a 28.224MHz clock
49 * divided by 14 as its clock source. In the case of a stuck byte in
50 * the holding register, an interrupt is generated (and mixed with the
51 * one with the drive) using the CD-ROM interrupt pointer.
52 */
53
54#define P_TIMEOUT_COUNTER_REG 0x4000
55#define P_TC_DISABLE 0x80 /* Set to 0 to enable timeout int. */
56 /* Bits D6-D0 contain timeout count */
57
58
59#define P_TIMEOUT_STATUS_REG_OFFSET 0x4001
60#define P_TS_TIM 0x80 /* check timeout status */
61 /* Bits D6-D4 N/U */
62#define P_TS_ARM_DRQ_INT 0x08 /* Arm DRQ Int. When set high,
63 * the next rising edge will
64 * cause a CD-ROM interrupt.
65 * When set low, the interrupt
66 * will be cleared. There is
67 * no status available for
68 * this interrupt.
69 */
70#define P_TS_ENABLE_TO_ERR_INTERRUPT /* Enable timeout error int. */
71#define P_TS_ENABLE_WAIT /* Enable Wait */
72
73#define P_TS_CT 0x01 /* clear timeout. Note: writing
74 * to this register clears the
75 * timeout error int. or status
76 */
77
78
79/*
80 * The data register reads/writes to/from the 5380 in pseudo-DMA mode
81 */
82
83#define P_DATA_REG_OFFSET 0x5c00 /* rw */
84
85#define P_STATUS_REG_OFFSET 0x5c01 /* ro */
86#define P_ST_RDY 0x80 /* 5380 DDRQ Status */
87
88#define P_IRQ_STATUS 0x5c03
89#define P_IS_IRQ 0x80 /* DIRQ status */
90
91#define PCB_CONFIG 0x803
92#define MASTER_ADDRESS_PTR 0x9a01 /* Fixed position - no relo */
93#define SYS_CONFIG_4 0x8003
94#define WAIT_STATE 0xbc00
95#define OPERATION_MODE_1 0xec03
96#define IO_CONFIG_3 0xf002
97
98#define NCR5380_implementation_fields /* none */
99
100#define PAS16_io_port(reg) (instance->io_port + pas16_offset[(reg)])
101
102#define NCR5380_read(reg) ( inb(PAS16_io_port(reg)) )
103#define NCR5380_write(reg, value) ( outb((value),PAS16_io_port(reg)) )
104
105#define NCR5380_dma_xfer_len(instance, cmd, phase) (cmd->transfersize)
106#define NCR5380_dma_recv_setup pas16_pread
107#define NCR5380_dma_send_setup pas16_pwrite
108#define NCR5380_dma_residual(instance) (0)
109
110#define NCR5380_intr pas16_intr
111#define NCR5380_queue_command pas16_queue_command
112#define NCR5380_abort pas16_abort
113#define NCR5380_bus_reset pas16_bus_reset
114#define NCR5380_info pas16_info
115
116/* 15 14 12 10 7 5 3
117 1101 0100 1010 1000 */
118
119#define PAS16_IRQS 0xd4a8
120
121#endif /* PAS16_H */
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 04e67a190652..10546faac58c 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -4492,8 +4492,8 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id)
4492 * @num: the inbound queue number 4492 * @num: the inbound queue number
4493 * @phy_id: the phy id which we wanted to start up. 4493 * @phy_id: the phy id which we wanted to start up.
4494 */ 4494 */
4495int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, 4495static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha,
4496 u8 phy_id) 4496 u8 phy_id)
4497{ 4497{
4498 struct phy_stop_req payload; 4498 struct phy_stop_req payload;
4499 struct inbound_queue_table *circularQ; 4499 struct inbound_queue_table *circularQ;
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index dc33dfa8f994..ce584c31d36e 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -527,7 +527,7 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
527 * pm8001_alloc_dev - find a empty pm8001_device 527 * pm8001_alloc_dev - find a empty pm8001_device
528 * @pm8001_ha: our hba card information 528 * @pm8001_ha: our hba card information
529 */ 529 */
530struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha) 530static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
531{ 531{
532 u32 dev; 532 u32 dev;
533 for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) { 533 for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index b2a88200fe54..68a5c347fae9 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -306,7 +306,7 @@ static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth)
306 * Return Value 306 * Return Value
307 * None 307 * None
308 */ 308 */
309void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index) 309static void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index)
310{ 310{
311 struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb); 311 struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb);
312 dma_addr_t dma_addr = cmd->ioa_cb_bus_addr; 312 dma_addr_t dma_addr = cmd->ioa_cb_bus_addr;
@@ -401,7 +401,7 @@ static struct pmcraid_cmd *pmcraid_get_free_cmd(
401 * Return Value: 401 * Return Value:
402 * nothing 402 * nothing
403 */ 403 */
404void pmcraid_return_cmd(struct pmcraid_cmd *cmd) 404static void pmcraid_return_cmd(struct pmcraid_cmd *cmd)
405{ 405{
406 struct pmcraid_instance *pinstance = cmd->drv_inst; 406 struct pmcraid_instance *pinstance = cmd->drv_inst;
407 unsigned long lock_flags; 407 unsigned long lock_flags;
@@ -1710,7 +1710,7 @@ static struct pmcraid_ioasc_error *pmcraid_get_error_info(u32 ioasc)
1710 * @ioasc: ioasc code 1710 * @ioasc: ioasc code
1711 * @cmd: pointer to command that resulted in 'ioasc' 1711 * @cmd: pointer to command that resulted in 'ioasc'
1712 */ 1712 */
1713void pmcraid_ioasc_logger(u32 ioasc, struct pmcraid_cmd *cmd) 1713static void pmcraid_ioasc_logger(u32 ioasc, struct pmcraid_cmd *cmd)
1714{ 1714{
1715 struct pmcraid_ioasc_error *error_info = pmcraid_get_error_info(ioasc); 1715 struct pmcraid_ioasc_error *error_info = pmcraid_get_error_info(ioasc);
1716 1716
@@ -3137,7 +3137,7 @@ static int pmcraid_eh_host_reset_handler(struct scsi_cmnd *scmd)
3137 * returns pointer pmcraid_ioadl_desc, initialized to point to internal 3137 * returns pointer pmcraid_ioadl_desc, initialized to point to internal
3138 * or external IOADLs 3138 * or external IOADLs
3139 */ 3139 */
3140struct pmcraid_ioadl_desc * 3140static struct pmcraid_ioadl_desc *
3141pmcraid_init_ioadls(struct pmcraid_cmd *cmd, int sgcount) 3141pmcraid_init_ioadls(struct pmcraid_cmd *cmd, int sgcount)
3142{ 3142{
3143 struct pmcraid_ioadl_desc *ioadl; 3143 struct pmcraid_ioadl_desc *ioadl;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index ae4a74756128..73b12e41d992 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -279,16 +279,6 @@ struct req_que;
279struct qla_tgt_sess; 279struct qla_tgt_sess;
280 280
281/* 281/*
282 * (sd.h is not exported, hence local inclusion)
283 * Data Integrity Field tuple.
284 */
285struct sd_dif_tuple {
286 __be16 guard_tag; /* Checksum */
287 __be16 app_tag; /* Opaque storage */
288 __be32 ref_tag; /* Target LBA or indirect LBA */
289};
290
291/*
292 * SCSI Request Block 282 * SCSI Request Block
293 */ 283 */
294struct srb_cmd { 284struct srb_cmd {
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 987f1c729e9c..068c4e47fac9 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1828,7 +1828,7 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1828 if (scsi_prot_sg_count(cmd)) { 1828 if (scsi_prot_sg_count(cmd)) {
1829 uint32_t i, j = 0, k = 0, num_ent; 1829 uint32_t i, j = 0, k = 0, num_ent;
1830 struct scatterlist *sg; 1830 struct scatterlist *sg;
1831 struct sd_dif_tuple *spt; 1831 struct t10_pi_tuple *spt;
1832 1832
1833 /* Patch the corresponding protection tags */ 1833 /* Patch the corresponding protection tags */
1834 scsi_for_each_prot_sg(cmd, sg, 1834 scsi_for_each_prot_sg(cmd, sg,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2674f4c16bc3..ace65db1d2a2 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -899,12 +899,12 @@ qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
899 struct qla_hw_data *ha = vha->hw; 899 struct qla_hw_data *ha = vha->hw;
900 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); 900 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
901 901
902 while (((qla2x00_reset_active(vha)) || ha->dpc_active || 902 while ((qla2x00_reset_active(vha) || ha->dpc_active ||
903 ha->flags.mbox_busy) || 903 ha->flags.mbox_busy) ||
904 test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) || 904 test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
905 test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) { 905 test_bit(FX00_TARGET_SCAN, &vha->dpc_flags)) {
906 if (test_bit(UNLOADING, &base_vha->dpc_flags)) 906 if (test_bit(UNLOADING, &base_vha->dpc_flags))
907 break; 907 break;
908 msleep(1000); 908 msleep(1000);
909 } 909 }
910} 910}
@@ -4694,7 +4694,7 @@ retry_unlock:
4694 qla83xx_wait_logic(); 4694 qla83xx_wait_logic();
4695 retry++; 4695 retry++;
4696 ql_dbg(ql_dbg_p3p, base_vha, 0xb064, 4696 ql_dbg(ql_dbg_p3p, base_vha, 0xb064,
4697 "Failed to release IDC lock, retyring=%d\n", retry); 4697 "Failed to release IDC lock, retrying=%d\n", retry);
4698 goto retry_unlock; 4698 goto retry_unlock;
4699 } 4699 }
4700 } else if (retry < 10) { 4700 } else if (retry < 10) {
@@ -4702,7 +4702,7 @@ retry_unlock:
4702 qla83xx_wait_logic(); 4702 qla83xx_wait_logic();
4703 retry++; 4703 retry++;
4704 ql_dbg(ql_dbg_p3p, base_vha, 0xb065, 4704 ql_dbg(ql_dbg_p3p, base_vha, 0xb065,
4705 "Failed to read drv-lockid, retyring=%d\n", retry); 4705 "Failed to read drv-lockid, retrying=%d\n", retry);
4706 goto retry_unlock; 4706 goto retry_unlock;
4707 } 4707 }
4708 4708
@@ -4718,7 +4718,7 @@ retry_unlock2:
4718 qla83xx_wait_logic(); 4718 qla83xx_wait_logic();
4719 retry++; 4719 retry++;
4720 ql_dbg(ql_dbg_p3p, base_vha, 0xb066, 4720 ql_dbg(ql_dbg_p3p, base_vha, 0xb066,
4721 "Failed to release IDC lock, retyring=%d\n", retry); 4721 "Failed to release IDC lock, retrying=%d\n", retry);
4722 goto retry_unlock2; 4722 goto retry_unlock2;
4723 } 4723 }
4724 } 4724 }
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index ae87d6c19f17..06ddd13cb7cc 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1843,7 +1843,7 @@ static uint32_t ql4_84xx_poll_wait_for_ready(struct scsi_qla_host *ha,
1843 return rval; 1843 return rval;
1844} 1844}
1845 1845
1846uint32_t ql4_84xx_ipmdio_rd_reg(struct scsi_qla_host *ha, uint32_t addr1, 1846static uint32_t ql4_84xx_ipmdio_rd_reg(struct scsi_qla_host *ha, uint32_t addr1,
1847 uint32_t addr3, uint32_t mask, uint32_t addr, 1847 uint32_t addr3, uint32_t mask, uint32_t addr,
1848 uint32_t *data_ptr) 1848 uint32_t *data_ptr)
1849{ 1849{
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 6a219a0844d3..c905709707f0 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -42,6 +42,7 @@
42#include <linux/atomic.h> 42#include <linux/atomic.h>
43#include <linux/hrtimer.h> 43#include <linux/hrtimer.h>
44#include <linux/uuid.h> 44#include <linux/uuid.h>
45#include <linux/t10-pi.h>
45 46
46#include <net/checksum.h> 47#include <net/checksum.h>
47 48
@@ -627,7 +628,7 @@ static LIST_HEAD(sdebug_host_list);
627static DEFINE_SPINLOCK(sdebug_host_list_lock); 628static DEFINE_SPINLOCK(sdebug_host_list_lock);
628 629
629static unsigned char *fake_storep; /* ramdisk storage */ 630static unsigned char *fake_storep; /* ramdisk storage */
630static struct sd_dif_tuple *dif_storep; /* protection info */ 631static struct t10_pi_tuple *dif_storep; /* protection info */
631static void *map_storep; /* provisioning map */ 632static void *map_storep; /* provisioning map */
632 633
633static unsigned long map_size; 634static unsigned long map_size;
@@ -682,7 +683,7 @@ static void *fake_store(unsigned long long lba)
682 return fake_storep + lba * sdebug_sector_size; 683 return fake_storep + lba * sdebug_sector_size;
683} 684}
684 685
685static struct sd_dif_tuple *dif_store(sector_t sector) 686static struct t10_pi_tuple *dif_store(sector_t sector)
686{ 687{
687 sector = sector_div(sector, sdebug_store_sectors); 688 sector = sector_div(sector, sdebug_store_sectors);
688 689
@@ -1349,7 +1350,7 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
1349 } else if (0x86 == cmd[2]) { /* extended inquiry */ 1350 } else if (0x86 == cmd[2]) { /* extended inquiry */
1350 arr[1] = cmd[2]; /*sanity */ 1351 arr[1] = cmd[2]; /*sanity */
1351 arr[3] = 0x3c; /* number of following entries */ 1352 arr[3] = 0x3c; /* number of following entries */
1352 if (sdebug_dif == SD_DIF_TYPE3_PROTECTION) 1353 if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
1353 arr[4] = 0x4; /* SPT: GRD_CHK:1 */ 1354 arr[4] = 0x4; /* SPT: GRD_CHK:1 */
1354 else if (have_dif_prot) 1355 else if (have_dif_prot)
1355 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */ 1356 arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
@@ -2430,7 +2431,7 @@ static __be16 dif_compute_csum(const void *buf, int len)
2430 return csum; 2431 return csum;
2431} 2432}
2432 2433
2433static int dif_verify(struct sd_dif_tuple *sdt, const void *data, 2434static int dif_verify(struct t10_pi_tuple *sdt, const void *data,
2434 sector_t sector, u32 ei_lba) 2435 sector_t sector, u32 ei_lba)
2435{ 2436{
2436 __be16 csum = dif_compute_csum(data, sdebug_sector_size); 2437 __be16 csum = dif_compute_csum(data, sdebug_sector_size);
@@ -2442,13 +2443,13 @@ static int dif_verify(struct sd_dif_tuple *sdt, const void *data,
2442 be16_to_cpu(csum)); 2443 be16_to_cpu(csum));
2443 return 0x01; 2444 return 0x01;
2444 } 2445 }
2445 if (sdebug_dif == SD_DIF_TYPE1_PROTECTION && 2446 if (sdebug_dif == T10_PI_TYPE1_PROTECTION &&
2446 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { 2447 be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) {
2447 pr_err("REF check failed on sector %lu\n", 2448 pr_err("REF check failed on sector %lu\n",
2448 (unsigned long)sector); 2449 (unsigned long)sector);
2449 return 0x03; 2450 return 0x03;
2450 } 2451 }
2451 if (sdebug_dif == SD_DIF_TYPE2_PROTECTION && 2452 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2452 be32_to_cpu(sdt->ref_tag) != ei_lba) { 2453 be32_to_cpu(sdt->ref_tag) != ei_lba) {
2453 pr_err("REF check failed on sector %lu\n", 2454 pr_err("REF check failed on sector %lu\n",
2454 (unsigned long)sector); 2455 (unsigned long)sector);
@@ -2504,7 +2505,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
2504 unsigned int sectors, u32 ei_lba) 2505 unsigned int sectors, u32 ei_lba)
2505{ 2506{
2506 unsigned int i; 2507 unsigned int i;
2507 struct sd_dif_tuple *sdt; 2508 struct t10_pi_tuple *sdt;
2508 sector_t sector; 2509 sector_t sector;
2509 2510
2510 for (i = 0; i < sectors; i++, ei_lba++) { 2511 for (i = 0; i < sectors; i++, ei_lba++) {
@@ -2580,13 +2581,13 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2580 break; 2581 break;
2581 } 2582 }
2582 if (unlikely(have_dif_prot && check_prot)) { 2583 if (unlikely(have_dif_prot && check_prot)) {
2583 if (sdebug_dif == SD_DIF_TYPE2_PROTECTION && 2584 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2584 (cmd[1] & 0xe0)) { 2585 (cmd[1] & 0xe0)) {
2585 mk_sense_invalid_opcode(scp); 2586 mk_sense_invalid_opcode(scp);
2586 return check_condition_result; 2587 return check_condition_result;
2587 } 2588 }
2588 if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION || 2589 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
2589 sdebug_dif == SD_DIF_TYPE3_PROTECTION) && 2590 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
2590 (cmd[1] & 0xe0) == 0) 2591 (cmd[1] & 0xe0) == 0)
2591 sdev_printk(KERN_ERR, scp->device, "Unprotected RD " 2592 sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
2592 "to DIF device\n"); 2593 "to DIF device\n");
@@ -2696,7 +2697,7 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2696 unsigned int sectors, u32 ei_lba) 2697 unsigned int sectors, u32 ei_lba)
2697{ 2698{
2698 int ret; 2699 int ret;
2699 struct sd_dif_tuple *sdt; 2700 struct t10_pi_tuple *sdt;
2700 void *daddr; 2701 void *daddr;
2701 sector_t sector = start_sec; 2702 sector_t sector = start_sec;
2702 int ppage_offset; 2703 int ppage_offset;
@@ -2722,7 +2723,7 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
2722 } 2723 }
2723 2724
2724 for (ppage_offset = 0; ppage_offset < piter.length; 2725 for (ppage_offset = 0; ppage_offset < piter.length;
2725 ppage_offset += sizeof(struct sd_dif_tuple)) { 2726 ppage_offset += sizeof(struct t10_pi_tuple)) {
2726 /* If we're at the end of the current 2727 /* If we're at the end of the current
2727 * data page advance to the next one 2728 * data page advance to the next one
2728 */ 2729 */
@@ -2893,13 +2894,13 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
2893 break; 2894 break;
2894 } 2895 }
2895 if (unlikely(have_dif_prot && check_prot)) { 2896 if (unlikely(have_dif_prot && check_prot)) {
2896 if (sdebug_dif == SD_DIF_TYPE2_PROTECTION && 2897 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
2897 (cmd[1] & 0xe0)) { 2898 (cmd[1] & 0xe0)) {
2898 mk_sense_invalid_opcode(scp); 2899 mk_sense_invalid_opcode(scp);
2899 return check_condition_result; 2900 return check_condition_result;
2900 } 2901 }
2901 if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION || 2902 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
2902 sdebug_dif == SD_DIF_TYPE3_PROTECTION) && 2903 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
2903 (cmd[1] & 0xe0) == 0) 2904 (cmd[1] & 0xe0) == 0)
2904 sdev_printk(KERN_ERR, scp->device, "Unprotected WR " 2905 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
2905 "to DIF device\n"); 2906 "to DIF device\n");
@@ -3135,13 +3136,13 @@ static int resp_comp_write(struct scsi_cmnd *scp,
3135 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */ 3136 num = cmd[13]; /* 1 to a maximum of 255 logical blocks */
3136 if (0 == num) 3137 if (0 == num)
3137 return 0; /* degenerate case, not an error */ 3138 return 0; /* degenerate case, not an error */
3138 if (sdebug_dif == SD_DIF_TYPE2_PROTECTION && 3139 if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
3139 (cmd[1] & 0xe0)) { 3140 (cmd[1] & 0xe0)) {
3140 mk_sense_invalid_opcode(scp); 3141 mk_sense_invalid_opcode(scp);
3141 return check_condition_result; 3142 return check_condition_result;
3142 } 3143 }
3143 if ((sdebug_dif == SD_DIF_TYPE1_PROTECTION || 3144 if ((sdebug_dif == T10_PI_TYPE1_PROTECTION ||
3144 sdebug_dif == SD_DIF_TYPE3_PROTECTION) && 3145 sdebug_dif == T10_PI_TYPE3_PROTECTION) &&
3145 (cmd[1] & 0xe0) == 0) 3146 (cmd[1] & 0xe0) == 0)
3146 sdev_printk(KERN_ERR, scp->device, "Unprotected WR " 3147 sdev_printk(KERN_ERR, scp->device, "Unprotected WR "
3147 "to DIF device\n"); 3148 "to DIF device\n");
@@ -4939,12 +4940,11 @@ static int __init scsi_debug_init(void)
4939 } 4940 }
4940 4941
4941 switch (sdebug_dif) { 4942 switch (sdebug_dif) {
4942 4943 case T10_PI_TYPE0_PROTECTION:
4943 case SD_DIF_TYPE0_PROTECTION:
4944 break; 4944 break;
4945 case SD_DIF_TYPE1_PROTECTION: 4945 case T10_PI_TYPE1_PROTECTION:
4946 case SD_DIF_TYPE2_PROTECTION: 4946 case T10_PI_TYPE2_PROTECTION:
4947 case SD_DIF_TYPE3_PROTECTION: 4947 case T10_PI_TYPE3_PROTECTION:
4948 have_dif_prot = true; 4948 have_dif_prot = true;
4949 break; 4949 break;
4950 4950
@@ -5026,7 +5026,7 @@ static int __init scsi_debug_init(void)
5026 if (sdebug_dix) { 5026 if (sdebug_dix) {
5027 int dif_size; 5027 int dif_size;
5028 5028
5029 dif_size = sdebug_store_sectors * sizeof(struct sd_dif_tuple); 5029 dif_size = sdebug_store_sectors * sizeof(struct t10_pi_tuple);
5030 dif_storep = vmalloc(dif_size); 5030 dif_storep = vmalloc(dif_size);
5031 5031
5032 pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep); 5032 pr_err("dif_storep %u bytes @ %p\n", dif_size, dif_storep);
@@ -5480,19 +5480,19 @@ static int sdebug_driver_probe(struct device * dev)
5480 5480
5481 switch (sdebug_dif) { 5481 switch (sdebug_dif) {
5482 5482
5483 case SD_DIF_TYPE1_PROTECTION: 5483 case T10_PI_TYPE1_PROTECTION:
5484 hprot = SHOST_DIF_TYPE1_PROTECTION; 5484 hprot = SHOST_DIF_TYPE1_PROTECTION;
5485 if (sdebug_dix) 5485 if (sdebug_dix)
5486 hprot |= SHOST_DIX_TYPE1_PROTECTION; 5486 hprot |= SHOST_DIX_TYPE1_PROTECTION;
5487 break; 5487 break;
5488 5488
5489 case SD_DIF_TYPE2_PROTECTION: 5489 case T10_PI_TYPE2_PROTECTION:
5490 hprot = SHOST_DIF_TYPE2_PROTECTION; 5490 hprot = SHOST_DIF_TYPE2_PROTECTION;
5491 if (sdebug_dix) 5491 if (sdebug_dix)
5492 hprot |= SHOST_DIX_TYPE2_PROTECTION; 5492 hprot |= SHOST_DIX_TYPE2_PROTECTION;
5493 break; 5493 break;
5494 5494
5495 case SD_DIF_TYPE3_PROTECTION: 5495 case T10_PI_TYPE3_PROTECTION:
5496 hprot = SHOST_DIF_TYPE3_PROTECTION; 5496 hprot = SHOST_DIF_TYPE3_PROTECTION;
5497 if (sdebug_dix) 5497 if (sdebug_dix)
5498 hprot |= SHOST_DIX_TYPE3_PROTECTION; 5498 hprot |= SHOST_DIX_TYPE3_PROTECTION;
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 85c8a51bc563..193636a59adf 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -86,12 +86,14 @@ extern void scsi_device_unbusy(struct scsi_device *sdev);
86extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason); 86extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
87extern void scsi_io_completion(struct scsi_cmnd *, unsigned int); 87extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
88extern void scsi_run_host_queues(struct Scsi_Host *shost); 88extern void scsi_run_host_queues(struct Scsi_Host *shost);
89extern void scsi_requeue_run_queue(struct work_struct *work);
89extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev); 90extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev);
90extern struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev); 91extern struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev);
91extern int scsi_mq_setup_tags(struct Scsi_Host *shost); 92extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
92extern void scsi_mq_destroy_tags(struct Scsi_Host *shost); 93extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);
93extern int scsi_init_queue(void); 94extern int scsi_init_queue(void);
94extern void scsi_exit_queue(void); 95extern void scsi_exit_queue(void);
96extern void scsi_evt_thread(struct work_struct *work);
95struct request_queue; 97struct request_queue;
96struct request; 98struct request;
97extern struct kmem_cache *scsi_sdb_cache; 99extern struct kmem_cache *scsi_sdb_cache;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index e0a78f53d809..212e98d940bc 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -217,8 +217,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
217 struct scsi_device *sdev; 217 struct scsi_device *sdev;
218 int display_failure_msg = 1, ret; 218 int display_failure_msg = 1, ret;
219 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 219 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
220 extern void scsi_evt_thread(struct work_struct *work);
221 extern void scsi_requeue_run_queue(struct work_struct *work);
222 220
223 sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size, 221 sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
224 GFP_ATOMIC); 222 GFP_ATOMIC);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index d3e852ad5aa3..51e56296f465 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -52,6 +52,7 @@
52#include <linux/slab.h> 52#include <linux/slab.h>
53#include <linux/pm_runtime.h> 53#include <linux/pm_runtime.h>
54#include <linux/pr.h> 54#include <linux/pr.h>
55#include <linux/t10-pi.h>
55#include <asm/uaccess.h> 56#include <asm/uaccess.h>
56#include <asm/unaligned.h> 57#include <asm/unaligned.h>
57 58
@@ -314,7 +315,7 @@ protection_type_store(struct device *dev, struct device_attribute *attr,
314 if (err) 315 if (err)
315 return err; 316 return err;
316 317
317 if (val >= 0 && val <= SD_DIF_TYPE3_PROTECTION) 318 if (val >= 0 && val <= T10_PI_TYPE3_PROTECTION)
318 sdkp->protection_type = val; 319 sdkp->protection_type = val;
319 320
320 return count; 321 return count;
@@ -332,7 +333,7 @@ protection_mode_show(struct device *dev, struct device_attribute *attr,
332 dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type); 333 dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
333 dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type); 334 dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);
334 335
335 if (!dix && scsi_host_dix_capable(sdp->host, SD_DIF_TYPE0_PROTECTION)) { 336 if (!dix && scsi_host_dix_capable(sdp->host, T10_PI_TYPE0_PROTECTION)) {
336 dif = 0; 337 dif = 0;
337 dix = 1; 338 dix = 1;
338 } 339 }
@@ -608,7 +609,7 @@ static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
608 scmd->prot_flags |= SCSI_PROT_GUARD_CHECK; 609 scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
609 } 610 }
610 611
611 if (dif != SD_DIF_TYPE3_PROTECTION) { /* DIX/DIF Type 0, 1, 2 */ 612 if (dif != T10_PI_TYPE3_PROTECTION) { /* DIX/DIF Type 0, 1, 2 */
612 scmd->prot_flags |= SCSI_PROT_REF_INCREMENT; 613 scmd->prot_flags |= SCSI_PROT_REF_INCREMENT;
613 614
614 if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false) 615 if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
@@ -1031,7 +1032,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt)
1031 else 1032 else
1032 protect = 0; 1033 protect = 0;
1033 1034
1034 if (protect && sdkp->protection_type == SD_DIF_TYPE2_PROTECTION) { 1035 if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
1035 SCpnt->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC); 1036 SCpnt->cmnd = mempool_alloc(sd_cdb_pool, GFP_ATOMIC);
1036 1037
1037 if (unlikely(SCpnt->cmnd == NULL)) { 1038 if (unlikely(SCpnt->cmnd == NULL)) {
@@ -1997,7 +1998,7 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer
1997 1998
1998 type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */ 1999 type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
1999 2000
2000 if (type > SD_DIF_TYPE3_PROTECTION) 2001 if (type > T10_PI_TYPE3_PROTECTION)
2001 ret = -ENODEV; 2002 ret = -ENODEV;
2002 else if (scsi_host_dif_capable(sdp->host, type)) 2003 else if (scsi_host_dif_capable(sdp->host, type))
2003 ret = 1; 2004 ret = 1;
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 765a6f1ac1b7..c8d986368da9 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -157,27 +157,6 @@ static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t b
157} 157}
158 158
159/* 159/*
160 * A DIF-capable target device can be formatted with different
161 * protection schemes. Currently 0 through 3 are defined:
162 *
163 * Type 0 is regular (unprotected) I/O
164 *
165 * Type 1 defines the contents of the guard and reference tags
166 *
167 * Type 2 defines the contents of the guard and reference tags and
168 * uses 32-byte commands to seed the latter
169 *
170 * Type 3 defines the contents of the guard tag only
171 */
172
173enum sd_dif_target_protection_types {
174 SD_DIF_TYPE0_PROTECTION = 0x0,
175 SD_DIF_TYPE1_PROTECTION = 0x1,
176 SD_DIF_TYPE2_PROTECTION = 0x2,
177 SD_DIF_TYPE3_PROTECTION = 0x3,
178};
179
180/*
181 * Look up the DIX operation based on whether the command is read or 160 * Look up the DIX operation based on whether the command is read or
182 * write and whether dix and dif are enabled. 161 * write and whether dix and dif are enabled.
183 */ 162 */
@@ -239,15 +218,6 @@ static inline unsigned int sd_prot_flag_mask(unsigned int prot_op)
239 return flag_mask[prot_op]; 218 return flag_mask[prot_op];
240} 219}
241 220
242/*
243 * Data Integrity Field tuple.
244 */
245struct sd_dif_tuple {
246 __be16 guard_tag; /* Checksum */
247 __be16 app_tag; /* Opaque storage */
248 __be32 ref_tag; /* Target LBA or indirect LBA */
249};
250
251#ifdef CONFIG_BLK_DEV_INTEGRITY 221#ifdef CONFIG_BLK_DEV_INTEGRITY
252 222
253extern void sd_dif_config_host(struct scsi_disk *); 223extern void sd_dif_config_host(struct scsi_disk *);
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 987bf392c336..9035380c0dda 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -60,14 +60,14 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
60 60
61 /* Enable DMA of protection information */ 61 /* Enable DMA of protection information */
62 if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) { 62 if (scsi_host_get_guard(sdkp->device->host) & SHOST_DIX_GUARD_IP) {
63 if (type == SD_DIF_TYPE3_PROTECTION) 63 if (type == T10_PI_TYPE3_PROTECTION)
64 bi.profile = &t10_pi_type3_ip; 64 bi.profile = &t10_pi_type3_ip;
65 else 65 else
66 bi.profile = &t10_pi_type1_ip; 66 bi.profile = &t10_pi_type1_ip;
67 67
68 bi.flags |= BLK_INTEGRITY_IP_CHECKSUM; 68 bi.flags |= BLK_INTEGRITY_IP_CHECKSUM;
69 } else 69 } else
70 if (type == SD_DIF_TYPE3_PROTECTION) 70 if (type == T10_PI_TYPE3_PROTECTION)
71 bi.profile = &t10_pi_type3_crc; 71 bi.profile = &t10_pi_type3_crc;
72 else 72 else
73 bi.profile = &t10_pi_type1_crc; 73 bi.profile = &t10_pi_type1_crc;
@@ -82,7 +82,7 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
82 if (!sdkp->ATO) 82 if (!sdkp->ATO)
83 goto out; 83 goto out;
84 84
85 if (type == SD_DIF_TYPE3_PROTECTION) 85 if (type == T10_PI_TYPE3_PROTECTION)
86 bi.tag_size = sizeof(u16) + sizeof(u32); 86 bi.tag_size = sizeof(u16) + sizeof(u32);
87 else 87 else
88 bi.tag_size = sizeof(u16); 88 bi.tag_size = sizeof(u16);
@@ -121,7 +121,7 @@ void sd_dif_prepare(struct scsi_cmnd *scmd)
121 121
122 sdkp = scsi_disk(scmd->request->rq_disk); 122 sdkp = scsi_disk(scmd->request->rq_disk);
123 123
124 if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION) 124 if (sdkp->protection_type == T10_PI_TYPE3_PROTECTION)
125 return; 125 return;
126 126
127 phys = scsi_prot_ref_tag(scmd); 127 phys = scsi_prot_ref_tag(scmd);
@@ -172,7 +172,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
172 172
173 sdkp = scsi_disk(scmd->request->rq_disk); 173 sdkp = scsi_disk(scmd->request->rq_disk);
174 174
175 if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION || good_bytes == 0) 175 if (sdkp->protection_type == T10_PI_TYPE3_PROTECTION || good_bytes == 0)
176 return; 176 return;
177 177
178 intervals = good_bytes / scsi_prot_interval(scmd); 178 intervals = good_bytes / scsi_prot_interval(scmd);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index ae7d9bdf409c..070332eb41f3 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -79,18 +79,7 @@ static void sg_proc_cleanup(void);
79 */ 79 */
80#define SG_MAX_CDB_SIZE 252 80#define SG_MAX_CDB_SIZE 252
81 81
82/* 82#define SG_DEFAULT_TIMEOUT mult_frac(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
83 * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d)
84 * Then when using 32 bit integers x * m may overflow during the calculation.
85 * Replacing muldiv(x) by muldiv(x)=((x % d) * m) / d + int(x / d) * m
86 * calculates the same, but prevents the overflow when both m and d
87 * are "small" numbers (like HZ and USER_HZ).
88 * Of course an overflow is inavoidable if the result of muldiv doesn't fit
89 * in 32 bits.
90 */
91#define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))
92
93#define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
94 83
95int sg_big_buff = SG_DEF_RESERVED_SIZE; 84int sg_big_buff = SG_DEF_RESERVED_SIZE;
96/* N.B. This variable is readable and writeable via 85/* N.B. This variable is readable and writeable via
@@ -884,10 +873,11 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
884 return result; 873 return result;
885 if (val < 0) 874 if (val < 0)
886 return -EIO; 875 return -EIO;
887 if (val >= MULDIV (INT_MAX, USER_HZ, HZ)) 876 if (val >= mult_frac((s64)INT_MAX, USER_HZ, HZ))
888 val = MULDIV (INT_MAX, USER_HZ, HZ); 877 val = min_t(s64, mult_frac((s64)INT_MAX, USER_HZ, HZ),
878 INT_MAX);
889 sfp->timeout_user = val; 879 sfp->timeout_user = val;
890 sfp->timeout = MULDIV (val, HZ, USER_HZ); 880 sfp->timeout = mult_frac(val, HZ, USER_HZ);
891 881
892 return 0; 882 return 0;
893 case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */ 883 case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */
diff --git a/drivers/scsi/smartpqi/Kconfig b/drivers/scsi/smartpqi/Kconfig
new file mode 100644
index 000000000000..97e159c2cecd
--- /dev/null
+++ b/drivers/scsi/smartpqi/Kconfig
@@ -0,0 +1,54 @@
1#
2# Kernel configuration file for the SMARTPQI
3#
4# Copyright (c) 2016 Microsemi Corporation
5# Copyright (c) 2016 PMC-Sierra, Inc.
6# (mailto:esc.storagedev@microsemi.com)
7
8# This program is free software; you can redistribute it and/or
9# modify it under the terms of the GNU General Public License
10# as published by the Free Software Foundation; version 2
11# of the License.
12
13# This program is distributed in the hope that it will be useful,
14# but WITHOUT ANY WARRANTY; without even the implied warranty of
15# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16# GNU General Public License for more details.
17
18# NO WARRANTY
19# THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21# LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23# solely responsible for determining the appropriateness of using and
24# distributing the Program and assumes all risks associated with its
25# exercise of rights under this Agreement, including but not limited to
26# the risks and costs of program errors, damage to or loss of data,
27# programs or equipment, and unavailability or interruption of operations.
28
29# DISCLAIMER OF LIABILITY
30# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37
38config SCSI_SMARTPQI
39 tristate "Microsemi PQI Driver"
40 depends on PCI && SCSI && !S390
41 select SCSI_SAS_ATTRS
42 select RAID_ATTRS
43 ---help---
44 This driver supports Microsemi PQI controllers.
45
46 <http://www.microsemi.com>
47
48 To compile this driver as a module, choose M here: the
49 module will be called smartpqi.
50
51 Note: the aacraid driver will not manage a smartpqi
52 controller. You need to enable smartpqi for smartpqi
53 controllers. For more information, please see
54 Documentation/scsi/smartpqi.txt
diff --git a/drivers/scsi/smartpqi/Makefile b/drivers/scsi/smartpqi/Makefile
new file mode 100644
index 000000000000..0f42a225a664
--- /dev/null
+++ b/drivers/scsi/smartpqi/Makefile
@@ -0,0 +1,3 @@
1ccflags-y += -I.
2obj-m += smartpqi.o
3smartpqi-objs := smartpqi_init.o smartpqi_sis.o smartpqi_sas_transport.o
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
new file mode 100644
index 000000000000..07b6444d3e0a
--- /dev/null
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -0,0 +1,1136 @@
1/*
2 * driver for Microsemi PQI-based storage controllers
3 * Copyright (c) 2016 Microsemi Corporation
4 * Copyright (c) 2016 PMC-Sierra, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
16 *
17 */
18
19#if !defined(_SMARTPQI_H)
20#define _SMARTPQI_H
21
22#pragma pack(1)
23
24#define PQI_DEVICE_SIGNATURE "PQI DREG"
25
26/* This structure is defined by the PQI specification. */
27struct pqi_device_registers {
28 __le64 signature;
29 u8 function_and_status_code;
30 u8 reserved[7];
31 u8 max_admin_iq_elements;
32 u8 max_admin_oq_elements;
33 u8 admin_iq_element_length; /* in 16-byte units */
34 u8 admin_oq_element_length; /* in 16-byte units */
35 __le16 max_reset_timeout; /* in 100-millisecond units */
36 u8 reserved1[2];
37 __le32 legacy_intx_status;
38 __le32 legacy_intx_mask_set;
39 __le32 legacy_intx_mask_clear;
40 u8 reserved2[28];
41 __le32 device_status;
42 u8 reserved3[4];
43 __le64 admin_iq_pi_offset;
44 __le64 admin_oq_ci_offset;
45 __le64 admin_iq_element_array_addr;
46 __le64 admin_oq_element_array_addr;
47 __le64 admin_iq_ci_addr;
48 __le64 admin_oq_pi_addr;
49 u8 admin_iq_num_elements;
50 u8 admin_oq_num_elements;
51 __le16 admin_queue_int_msg_num;
52 u8 reserved4[4];
53 __le32 device_error;
54 u8 reserved5[4];
55 __le64 error_details;
56 __le32 device_reset;
57 __le32 power_action;
58 u8 reserved6[104];
59};
60
61/*
62 * controller registers
63 *
64 * These are defined by the PMC implementation.
65 *
66 * Some registers (those named sis_*) are only used when in
67 * legacy SIS mode before we transition the controller into
68 * PQI mode. There are a number of other SIS mode registers,
69 * but we don't use them, so only the SIS registers that we
70 * care about are defined here. The offsets mentioned in the
71 * comments are the offsets from the PCIe BAR 0.
72 */
73struct pqi_ctrl_registers {
74 u8 reserved[0x20];
75 __le32 sis_host_to_ctrl_doorbell; /* 20h */
76 u8 reserved1[0x34 - (0x20 + sizeof(__le32))];
77 __le32 sis_interrupt_mask; /* 34h */
78 u8 reserved2[0x9c - (0x34 + sizeof(__le32))];
79 __le32 sis_ctrl_to_host_doorbell; /* 9Ch */
80 u8 reserved3[0xa0 - (0x9c + sizeof(__le32))];
81 __le32 sis_ctrl_to_host_doorbell_clear; /* A0h */
82 u8 reserved4[0xb0 - (0xa0 + sizeof(__le32))];
83 __le32 sis_driver_scratch; /* B0h */
84 u8 reserved5[0xbc - (0xb0 + sizeof(__le32))];
85 __le32 sis_firmware_status; /* BCh */
86 u8 reserved6[0x1000 - (0xbc + sizeof(__le32))];
87 __le32 sis_mailbox[8]; /* 1000h */
88 u8 reserved7[0x4000 - (0x1000 + (sizeof(__le32) * 8))];
89 /*
90 * The PQI spec states that the PQI registers should be at
91 * offset 0 from the PCIe BAR 0. However, we can't map
92 * them at offset 0 because that would break compatibility
93 * with the SIS registers. So we map them at offset 4000h.
94 */
95 struct pqi_device_registers pqi_registers; /* 4000h */
96};
97
98#define PQI_DEVICE_REGISTERS_OFFSET 0x4000
99
100enum pqi_io_path {
101 RAID_PATH = 0,
102 AIO_PATH = 1
103};
104
105struct pqi_sg_descriptor {
106 __le64 address;
107 __le32 length;
108 __le32 flags;
109};
110
111/* manifest constants for the flags field of pqi_sg_descriptor */
112#define CISS_SG_LAST 0x40000000
113#define CISS_SG_CHAIN 0x80000000
114
115struct pqi_iu_header {
116 u8 iu_type;
117 u8 reserved;
118 __le16 iu_length; /* in bytes - does not include the length */
119 /* of this header */
120 __le16 response_queue_id; /* specifies the OQ where the */
121 /* response IU is to be delivered */
122 u8 work_area[2]; /* reserved for driver use */
123};
124
125/*
126 * According to the PQI spec, the IU header is only the first 4 bytes of our
127 * pqi_iu_header structure.
128 */
129#define PQI_REQUEST_HEADER_LENGTH 4
130
131struct pqi_general_admin_request {
132 struct pqi_iu_header header;
133 __le16 request_id;
134 u8 function_code;
135 union {
136 struct {
137 u8 reserved[33];
138 __le32 buffer_length;
139 struct pqi_sg_descriptor sg_descriptor;
140 } report_device_capability;
141
142 struct {
143 u8 reserved;
144 __le16 queue_id;
145 u8 reserved1[2];
146 __le64 element_array_addr;
147 __le64 ci_addr;
148 __le16 num_elements;
149 __le16 element_length;
150 u8 queue_protocol;
151 u8 reserved2[23];
152 __le32 vendor_specific;
153 } create_operational_iq;
154
155 struct {
156 u8 reserved;
157 __le16 queue_id;
158 u8 reserved1[2];
159 __le64 element_array_addr;
160 __le64 pi_addr;
161 __le16 num_elements;
162 __le16 element_length;
163 u8 queue_protocol;
164 u8 reserved2[3];
165 __le16 int_msg_num;
166 __le16 coalescing_count;
167 __le32 min_coalescing_time;
168 __le32 max_coalescing_time;
169 u8 reserved3[8];
170 __le32 vendor_specific;
171 } create_operational_oq;
172
173 struct {
174 u8 reserved;
175 __le16 queue_id;
176 u8 reserved1[50];
177 } delete_operational_queue;
178
179 struct {
180 u8 reserved;
181 __le16 queue_id;
182 u8 reserved1[46];
183 __le32 vendor_specific;
184 } change_operational_iq_properties;
185
186 } data;
187};
188
/*
 * Response IU for a PQI general admin request.  The data union is
 * selected by function_code; only the create-queue responses carry
 * payload beyond the 4-byte status descriptor.
 */
struct pqi_general_admin_response {
	struct pqi_iu_header header;
	__le16	request_id;	/* echoes the request's request_id */
	u8	function_code;	/* PQI_GENERAL_ADMIN_FUNCTION_* */
	u8	status;		/* PQI_GENERAL_ADMIN_STATUS_* */
	union {
		struct {
			u8	status_descriptor[4];
			/* register offset where the host writes the IQ PI */
			__le64	iq_pi_offset;
			u8	reserved[40];
		} create_operational_iq;

		struct {
			u8	status_descriptor[4];
			/* register offset where the host writes the OQ CI */
			__le64	oq_ci_offset;
			u8	reserved[40];
		} create_operational_oq;
	} data;
};

/*
 * Per-protocol IU layer properties reported by the controller as part
 * of its device capability data (see iu_layer_descriptors[] below).
 */
struct pqi_iu_layer_descriptor {
	u8	inbound_spanning_supported : 1;
	u8	reserved : 7;
	u8	reserved1[5];
	__le16	max_inbound_iu_length;
	u8	outbound_spanning_supported : 1;
	u8	reserved2 : 7;
	u8	reserved3[5];
	__le16	max_outbound_iu_length;
};

/*
 * Buffer layout returned by the REPORT DEVICE CAPABILITY admin
 * function: queue count/size limits and per-protocol IU descriptors.
 */
struct pqi_device_capability {
	__le16	data_length;
	u8	reserved[6];
	u8	iq_arbitration_priority_support_bitmask;
	u8	maximum_aw_a;
	u8	maximum_aw_b;
	u8	maximum_aw_c;
	u8	max_arbitration_burst : 3;
	u8	reserved1 : 4;
	u8	iqa : 1;
	u8	reserved2[2];
	u8	iq_freeze : 1;
	u8	reserved3 : 7;
	__le16	max_inbound_queues;
	__le16	max_elements_per_iq;
	u8	reserved4[4];
	__le16	max_iq_element_length;
	__le16	min_iq_element_length;
	u8	reserved5[2];
	__le16	max_outbound_queues;
	__le16	max_elements_per_oq;
	__le16	intr_coalescing_time_granularity;
	__le16	max_oq_element_length;
	__le16	min_oq_element_length;
	u8	reserved6[24];
	struct pqi_iu_layer_descriptor iu_layer_descriptors[32];
};
247
/* SG descriptors embedded directly in an I/O request IU */
#define PQI_MAX_EMBEDDED_SG_DESCRIPTORS		4

/*
 * I/O request IU submitted down the RAID (non-accelerated) path.
 * Layout is defined by the firmware interface; do not reorder fields.
 */
struct pqi_raid_path_request {
	struct pqi_iu_header header;
	__le16	request_id;
	__le16	nexus_id;
	__le32	buffer_length;
	u8	lun_number[8];
	__le16	protocol_specific;
	u8	data_direction : 2;	/* SOP_*_FLAG data direction codes */
	u8	partial : 1;
	u8	reserved1 : 4;
	u8	fence : 1;
	__le16	error_index;
	u8	reserved2;
	u8	task_attribute : 3;	/* SOP_TASK_ATTRIBUTE_* */
	u8	command_priority : 4;
	u8	reserved3 : 1;
	u8	reserved4 : 2;
	u8	additional_cdb_bytes_usage : 3;	/* SOP_ADDITIONAL_CDB_BYTES_* */
	u8	reserved5 : 3;
	u8	cdb[32];
	struct pqi_sg_descriptor
		sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
};

/*
 * I/O request IU submitted down the AIO (I/O accelerator) path,
 * addressing a physical device directly by nexus_id/aio handle.
 */
struct pqi_aio_path_request {
	struct pqi_iu_header header;
	__le16	request_id;
	u8	reserved1[2];
	__le32	nexus_id;
	__le32	buffer_length;
	u8	data_direction : 2;	/* SOP_*_FLAG data direction codes */
	u8	partial : 1;
	u8	memory_type : 1;
	u8	fence : 1;
	u8	encryption_enable : 1;
	u8	reserved2 : 2;
	u8	task_attribute : 3;	/* SOP_TASK_ATTRIBUTE_* */
	u8	command_priority : 4;
	u8	reserved3 : 1;
	/* encryption fields are valid only when encryption_enable is set */
	__le16	data_encryption_key_index;
	__le32	encrypt_tweak_lower;
	__le32	encrypt_tweak_upper;
	u8	cdb[16];
	__le16	error_index;
	u8	num_sg_descriptors;
	u8	cdb_length;
	u8	lun_number[8];
	u8	reserved4[4];
	struct pqi_sg_descriptor
		sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS];
};
301
/* generic completion IU for RAID-path and AIO-path I/O requests */
struct pqi_io_response {
	struct pqi_iu_header header;
	__le16	request_id;	/* identifies the completed request */
	__le16	error_index;	/* index into the error buffer on error */
	u8	reserved2[4];
};

/*
 * Request IU for event-configuration management; the union arm is
 * selected by the IU type in the header (report vs. set).
 */
struct pqi_general_management_request {
	struct pqi_iu_header header;
	__le16	request_id;
	union {
		struct {
			u8	reserved[2];
			__le32	buffer_length;
			struct pqi_sg_descriptor sg_descriptors[3];
		} report_event_configuration;

		struct {
			__le16	global_event_oq_id;
			__le32	buffer_length;
			struct pqi_sg_descriptor sg_descriptors[3];
		} set_event_configuration;
	} data;
};

/* maps one event type to the outbound queue that receives it */
struct pqi_event_descriptor {
	u8	event_type;	/* PQI_EVENT_TYPE_* */
	u8	reserved;
	__le16	oq_id;
};

/*
 * Event-configuration buffer: a counted, variable-length array of
 * event descriptors (descriptors[1] is a pre-C99 trailing array).
 */
struct pqi_event_config {
	u8	reserved[2];
	u8	num_event_descriptors;
	u8	reserved1;
	struct pqi_event_descriptor descriptors[1];
};
339
#define PQI_MAX_EVENT_DESCRIPTORS	255

/* asynchronous event IU delivered on the event outbound queue */
struct pqi_event_response {
	struct pqi_iu_header header;
	u8	event_type;	/* PQI_EVENT_TYPE_* */
	u8	reserved2 : 7;
	/* (sic) field name typo preserved — part of the ABI-facing layout */
	u8	request_acknowlege : 1;	/* firmware wants an acknowledgement IU */
	__le16	event_id;
	__le32	additional_event_id;
	u8	data[16];
};

/* host -> controller acknowledgement for an event response */
struct pqi_event_acknowledge_request {
	struct pqi_iu_header header;
	u8	event_type;
	u8	reserved2;
	__le16	event_id;	/* copied from the event response */
	__le32	additional_event_id;
};

/* task management (e.g. LUN reset) request IU */
struct pqi_task_management_request {
	struct pqi_iu_header header;
	__le16	request_id;
	__le16	nexus_id;
	u8	reserved[4];
	u8	lun_number[8];
	__le16	protocol_specific;
	__le16	outbound_queue_id_to_manage;
	__le16	request_id_to_manage;	/* request targeted by the TMF */
	u8	task_management_function;	/* e.g. SOP_TASK_MANAGEMENT_LUN_RESET */
	u8	reserved2 : 7;
	u8	fence : 1;
};

#define SOP_TASK_MANAGEMENT_LUN_RESET	0x8

/* completion IU for a task management request */
struct pqi_task_management_response {
	struct pqi_iu_header header;
	__le16	request_id;
	__le16	nexus_id;
	u8	additional_response_info[3];
	u8	response_code;	/* SOP_TMF_* service response */
};
383
/* error buffer entry written by firmware for a failed AIO-path request */
struct pqi_aio_error_info {
	u8	status;			/* PQI_AIO_STATUS_* */
	u8	service_response;	/* PQI_AIO_SERV_RESPONSE_* */
	u8	data_present;
	u8	reserved;
	__le32	residual_count;
	__le16	data_length;
	__le16	reserved1;
	u8	data[256];	/* e.g. sense data when data_present is set */
};

/* error buffer entry written by firmware for a failed RAID-path request */
struct pqi_raid_error_info {
	u8	data_in_result;		/* PQI_DATA_IN_OUT_* */
	u8	data_out_result;	/* PQI_DATA_IN_OUT_* */
	u8	reserved[3];
	u8	status;			/* SCSI status byte */
	__le16	status_qualifier;
	__le16	sense_data_length;
	__le16	response_data_length;
	__le32	data_in_transferred;
	__le32	data_out_transferred;
	u8	data[256];	/* sense and/or response data */
};
407
/* inbound (host -> controller) IU types */
#define PQI_REQUEST_IU_TASK_MANAGEMENT			0x13
#define PQI_REQUEST_IU_RAID_PATH_IO			0x14
#define PQI_REQUEST_IU_AIO_PATH_IO			0x15
#define PQI_REQUEST_IU_GENERAL_ADMIN			0x60
#define PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG	0x72
#define PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG		0x73
#define PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT		0xf6

/* outbound (controller -> host) IU types */
#define PQI_RESPONSE_IU_GENERAL_MANAGEMENT		0x81
#define PQI_RESPONSE_IU_TASK_MANAGEMENT			0x93
#define PQI_RESPONSE_IU_GENERAL_ADMIN			0xe0
#define PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS		0xf0
#define PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS		0xf1
#define PQI_RESPONSE_IU_RAID_PATH_IO_ERROR		0xf2
#define PQI_RESPONSE_IU_AIO_PATH_IO_ERROR		0xf3
#define PQI_RESPONSE_IU_AIO_PATH_DISABLED		0xf4
#define PQI_RESPONSE_IU_VENDOR_EVENT			0xf5

/* function codes for PQI_REQUEST_IU_GENERAL_ADMIN requests */
#define PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY	0x0
#define PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ			0x10
#define PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ			0x11
#define PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ			0x12
#define PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ			0x13
#define PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY		0x14

#define PQI_GENERAL_ADMIN_STATUS_SUCCESS	0x0

#define PQI_IQ_PROPERTY_IS_AIO_QUEUE	0x1

#define PQI_GENERAL_ADMIN_IU_LENGTH		0x3c
#define PQI_PROTOCOL_SOP			0x0

/* data_in_result / data_out_result codes in struct pqi_raid_error_info */
#define PQI_DATA_IN_OUT_GOOD					0x0
#define PQI_DATA_IN_OUT_UNDERFLOW				0x1
#define PQI_DATA_IN_OUT_BUFFER_ERROR				0x40
#define PQI_DATA_IN_OUT_BUFFER_OVERFLOW				0x41
#define PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA		0x42
#define PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE			0x43
#define PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR			0x60
#define PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT			0x61
#define PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED		0x62
#define PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED	0x63
#define PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED			0x64
#define PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST		0x65
#define PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION			0x66
#define PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED			0x67
#define PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ		0x6F
#define PQI_DATA_IN_OUT_ERROR					0xf0
#define PQI_DATA_IN_OUT_PROTOCOL_ERROR				0xf1
#define PQI_DATA_IN_OUT_HARDWARE_ERROR				0xf2
#define PQI_DATA_IN_OUT_UNSOLICITED_ABORT			0xf3
#define PQI_DATA_IN_OUT_ABORTED					0xf4
#define PQI_DATA_IN_OUT_TIMEOUT					0xf5

/* legacy CISS command completion status codes */
#define CISS_CMD_STATUS_SUCCESS			0x0
#define CISS_CMD_STATUS_TARGET_STATUS		0x1
#define CISS_CMD_STATUS_DATA_UNDERRUN		0x2
#define CISS_CMD_STATUS_DATA_OVERRUN		0x3
#define CISS_CMD_STATUS_INVALID			0x4
#define CISS_CMD_STATUS_PROTOCOL_ERROR		0x5
#define CISS_CMD_STATUS_HARDWARE_ERROR		0x6
#define CISS_CMD_STATUS_CONNECTION_LOST		0x7
#define CISS_CMD_STATUS_ABORTED			0x8
#define CISS_CMD_STATUS_ABORT_FAILED		0x9
#define CISS_CMD_STATUS_UNSOLICITED_ABORT	0xa
#define CISS_CMD_STATUS_TIMEOUT			0xb
#define CISS_CMD_STATUS_UNABORTABLE		0xc
#define CISS_CMD_STATUS_TMF			0xd
#define CISS_CMD_STATUS_AIO_DISABLED		0xe

/* event outbound queue sizing */
#define PQI_NUM_EVENT_QUEUE_ELEMENTS	32
#define PQI_EVENT_OQ_ELEMENT_LENGTH	sizeof(struct pqi_event_response)

/* event_type values in struct pqi_event_response */
#define PQI_EVENT_TYPE_HOTPLUG			0x1
#define PQI_EVENT_TYPE_HARDWARE			0x2
#define PQI_EVENT_TYPE_PHYSICAL_DEVICE		0x4
#define PQI_EVENT_TYPE_LOGICAL_DEVICE		0x5
#define PQI_EVENT_TYPE_AIO_STATE_CHANGE		0xfd
#define PQI_EVENT_TYPE_AIO_CONFIG_CHANGE	0xfe
#define PQI_EVENT_TYPE_HEARTBEAT		0xff
488
#pragma pack()

/* each error buffer slot must hold the larger of the two error structs */
#define PQI_ERROR_BUFFER_ELEMENT_LENGTH		\
	sizeof(struct pqi_raid_error_info)

/* these values are based on our implementation */
#define PQI_ADMIN_IQ_NUM_ELEMENTS	8
#define PQI_ADMIN_OQ_NUM_ELEMENTS	20
#define PQI_ADMIN_IQ_ELEMENT_LENGTH	64
#define PQI_ADMIN_OQ_ELEMENT_LENGTH	64

#define PQI_OPERATIONAL_IQ_ELEMENT_LENGTH	128
#define PQI_OPERATIONAL_OQ_ELEMENT_LENGTH	16

#define PQI_MIN_MSIX_VECTORS		1
#define PQI_MAX_MSIX_VECTORS		64

/* these values are defined by the PQI spec */
#define PQI_MAX_NUM_ELEMENTS_ADMIN_QUEUE	255
#define PQI_MAX_NUM_ELEMENTS_OPERATIONAL_QUEUE	65535
#define PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT	64
#define PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT	16
#define PQI_ADMIN_INDEX_ALIGNMENT		64
#define PQI_OPERATIONAL_INDEX_ALIGNMENT		4

/* queue ID 0 is reserved for the admin queue pair */
#define PQI_MIN_OPERATIONAL_QUEUE_ID		1
#define PQI_MAX_OPERATIONAL_QUEUE_ID		65535

/* service_response values in struct pqi_aio_error_info */
#define PQI_AIO_SERV_RESPONSE_COMPLETE			0
#define PQI_AIO_SERV_RESPONSE_FAILURE			1
#define PQI_AIO_SERV_RESPONSE_TMF_COMPLETE		2
#define PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED		3
#define PQI_AIO_SERV_RESPONSE_TMF_REJECTED		4
#define PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN		5

/* status values in struct pqi_aio_error_info */
#define PQI_AIO_STATUS_IO_ERROR			0x1
#define PQI_AIO_STATUS_IO_ABORTED		0x2
#define PQI_AIO_STATUS_NO_PATH_TO_DEVICE	0x3
#define PQI_AIO_STATUS_INVALID_DEVICE		0x4
#define PQI_AIO_STATUS_AIO_PATH_DISABLED	0xe
#define PQI_AIO_STATUS_UNDERRUN			0x51
#define PQI_AIO_STATUS_OVERRUN			0x75

/* host-side type for queue producer/consumer indices */
typedef u32 pqi_index_t;

/* SOP data direction flags */
#define SOP_NO_DIRECTION_FLAG	0
#define SOP_WRITE_FLAG		1	/* host writes data to Data-Out */
					/* buffer */
#define SOP_READ_FLAG		2	/* host receives data from Data-In */
					/* buffer */
#define SOP_BIDIRECTIONAL	3	/* data is transferred from the */
					/* Data-Out buffer and data is */
					/* transferred to the Data-In buffer */

#define SOP_TASK_ATTRIBUTE_SIMPLE		0
#define SOP_TASK_ATTRIBUTE_HEAD_OF_QUEUE	1
#define SOP_TASK_ATTRIBUTE_ORDERED		2
#define SOP_TASK_ATTRIBUTE_ACA			4

/* task management service responses */
#define SOP_TMF_COMPLETE		0x0
#define SOP_TMF_FUNCTION_SUCCEEDED	0x8

/* additional CDB bytes usage field codes */
#define SOP_ADDITIONAL_CDB_BYTES_0	0	/* 16-byte CDB */
#define SOP_ADDITIONAL_CDB_BYTES_4	1	/* 20-byte CDB */
#define SOP_ADDITIONAL_CDB_BYTES_8	2	/* 24-byte CDB */
#define SOP_ADDITIONAL_CDB_BYTES_12	3	/* 28-byte CDB */
#define SOP_ADDITIONAL_CDB_BYTES_16	4	/* 32-byte CDB */
558
/*
 * The purpose of this structure is to obtain proper alignment of objects in
 * an admin queue pair.
 */
struct pqi_admin_queues_aligned {
	__aligned(PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT)
		u8	iq_element_array[PQI_ADMIN_IQ_ELEMENT_LENGTH]
		[PQI_ADMIN_IQ_NUM_ELEMENTS];
	__aligned(PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT)
		u8	oq_element_array[PQI_ADMIN_OQ_ELEMENT_LENGTH]
		[PQI_ADMIN_OQ_NUM_ELEMENTS];
	__aligned(PQI_ADMIN_INDEX_ALIGNMENT) pqi_index_t iq_ci;
	__aligned(PQI_ADMIN_INDEX_ALIGNMENT) pqi_index_t oq_pi;
};

/* host-side bookkeeping for the admin inbound/outbound queue pair */
struct pqi_admin_queues {
	void		*iq_element_array;
	void		*oq_element_array;
	/* indices updated by the controller via DMA — hence volatile */
	volatile pqi_index_t *iq_ci;
	volatile pqi_index_t *oq_pi;
	dma_addr_t	iq_element_array_bus_addr;
	dma_addr_t	oq_element_array_bus_addr;
	dma_addr_t	iq_ci_bus_addr;
	dma_addr_t	oq_pi_bus_addr;
	__le32 __iomem	*iq_pi;		/* doorbell registers in BAR space */
	pqi_index_t	iq_pi_copy;	/* cached copies of the doorbell values */
	__le32 __iomem	*oq_ci;
	pqi_index_t	oq_ci_copy;
	struct task_struct *task;
	u16		int_msg_num;
};

/*
 * One operational queue group: a pair of inbound queues (index 0 =
 * RAID path, index 1 = AIO path — TODO confirm against users) sharing
 * one outbound queue and one MSI-X vector.
 */
struct pqi_queue_group {
	struct pqi_ctrl_info *ctrl_info;	/* backpointer */
	u16		iq_id[2];
	u16		oq_id;
	u16		int_msg_num;
	void		*iq_element_array[2];
	void		*oq_element_array;
	dma_addr_t	iq_element_array_bus_addr[2];
	dma_addr_t	oq_element_array_bus_addr;
	__le32 __iomem	*iq_pi[2];
	pqi_index_t	iq_pi_copy[2];
	/* indices updated by the controller via DMA — hence volatile */
	volatile pqi_index_t *iq_ci[2];
	volatile pqi_index_t *oq_pi;
	dma_addr_t	iq_ci_bus_addr[2];
	dma_addr_t	oq_pi_bus_addr;
	__le32 __iomem	*oq_ci;
	pqi_index_t	oq_ci_copy;
	spinlock_t	submit_lock[2];	/* protect submission queue */
	struct list_head request_list[2];
};

/* dedicated outbound queue that receives asynchronous event IUs */
struct pqi_event_queue {
	u16		oq_id;
	u16		int_msg_num;
	void		*oq_element_array;
	volatile pqi_index_t *oq_pi;
	dma_addr_t	oq_element_array_bus_addr;
	dma_addr_t	oq_pi_bus_addr;
	__le32 __iomem	*oq_ci;
	pqi_index_t	oq_ci_copy;
};

#define PQI_DEFAULT_QUEUE_GROUP		0
#define PQI_MAX_QUEUE_GROUPS		PQI_MAX_MSIX_VECTORS

/* per-request encryption parameters copied into an AIO-path IU */
struct pqi_encryption_info {
	u16	data_encryption_key_index;
	u32	encrypt_tweak_lower;
	u32	encrypt_tweak_upper;
};
631
#define PQI_MAX_OUTSTANDING_REQUESTS	((u32)~0)
#define PQI_MAX_TRANSFER_SIZE		(4 * 1024U * 1024U)

#define RAID_MAP_MAX_ENTRIES		1024

/* synthetic SCSI bus numbers used for host-side device addressing */
#define PQI_PHYSICAL_DEVICE_BUS		0
#define PQI_RAID_VOLUME_BUS		1
#define PQI_HBA_BUS			2
#define PQI_MAX_BUS			PQI_HBA_BUS

#pragma pack(1)

/* common header of CISS_REPORT_LOG/CISS_REPORT_PHYS response buffers */
struct report_lun_header {
	__be32	list_length;	/* total bytes of LUN entries that follow */
	u8	extended_response;
	u8	reserved[3];
};

struct report_log_lun_extended_entry {
	u8	lunid[8];
	u8	volume_id[16];
};

/* trailing [1] array: actual entry count derives from header.list_length */
struct report_log_lun_extended {
	struct report_lun_header header;
	struct report_log_lun_extended_entry lun_entries[1];
};

struct report_phys_lun_extended_entry {
	u8	lunid[8];
	__be64	wwid;
	u8	device_type;
	u8	device_flags;	/* REPORT_PHYS_LUN_DEV_FLAG_* */
	u8	lun_count;	/* number of LUNs in a multi-LUN device */
	u8	redundant_paths;
	u32	aio_handle;	/* handle used to address this device on the AIO path */
};

/* for device_flags field of struct report_phys_lun_extended_entry */
#define REPORT_PHYS_LUN_DEV_FLAG_NON_DISK	0x1
#define REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED	0x8

/* trailing [1] array: actual entry count derives from header.list_length */
struct report_phys_lun_extended {
	struct report_lun_header header;
	struct report_phys_lun_extended_entry lun_entries[1];
};

struct raid_map_disk_data {
	u32	aio_handle;
	u8	xor_mult[2];
	u8	reserved[2];
};

/* constants for flags field of RAID map */
#define RAID_MAP_ENCRYPTION_ENABLED	0x1

/* RAID geometry returned by CISS_GET_RAID_MAP; drives RAID-offload mapping */
struct raid_map {
	__le32	structure_size;		/* size of entire structure in bytes */
	__le32	volume_blk_size;	/* bytes / block in the volume */
	__le64	volume_blk_cnt;		/* logical blocks on the volume */
	u8	phys_blk_shift;		/* shift factor to convert between */
					/* units of logical blocks and */
					/* physical disk blocks */
	u8	parity_rotation_shift;	/* shift factor to convert between */
					/* units of logical stripes and */
					/* physical stripes */
	__le16	strip_size;		/* blocks used on each disk / stripe */
	__le64	disk_starting_blk;	/* first disk block used in volume */
	__le64	disk_blk_cnt;		/* disk blocks used by volume / disk */
	__le16	data_disks_per_row;	/* data disk entries / row in the map */
	__le16	metadata_disks_per_row;	/* mirror/parity disk entries / row */
					/* in the map */
	__le16	row_cnt;		/* rows in each layout map */
	__le16	layout_map_count;	/* layout maps (1 map per */
					/* mirror parity group) */
	__le16	flags;
	__le16	data_encryption_key_index;
	u8	reserved[16];
	struct raid_map_disk_data disk_data[RAID_MAP_MAX_ENTRIES];
};

#pragma pack()
714
#define RAID_CTLR_LUNID		"\0\0\0\0\0\0\0\0"

/*
 * Driver-private representation of one SCSI device (physical drive,
 * logical volume, or the controller itself).  Instances live on
 * ctrl_info->scsi_device_list, protected by scsi_device_list_lock.
 */
struct pqi_scsi_dev {
	int	devtype;		/* as reported by INQUIRY command */
	u8	device_type;		/* as reported by */
					/* BMIC_IDENTIFY_PHYSICAL_DEVICE */
					/* only valid for devtype = TYPE_DISK */
	int	bus;			/* PQI_*_BUS synthetic bus number */
	int	target;
	int	lun;
	u8	scsi3addr[8];
	__be64	wwid;
	u8	volume_id[16];
	u8	is_physical_device : 1;
	u8	target_lun_valid : 1;
	u8	expose_device : 1;
	u8	no_uld_attach : 1;
	u8	aio_enabled : 1;	/* only valid for physical disks */
	/* transient flags used while rescanning the device list */
	u8	device_gone : 1;
	u8	new_device : 1;
	u8	keep_device : 1;
	u8	volume_offline : 1;
	u8	vendor[8];		/* bytes 8-15 of inquiry data */
	u8	model[16];		/* bytes 16-31 of inquiry data */
	u64	sas_address;
	u8	raid_level;
	u16	queue_depth;		/* max. queue_depth for this device */
	u16	advertised_queue_depth;
	u32	aio_handle;
	u8	volume_status;		/* CISS_LV_* */
	u8	active_path_index;
	u8	path_map;
	u8	bay;
	u8	box[8];
	u16	phys_connector[8];
	int	offload_configured;	/* I/O accel RAID offload configured */
	int	offload_enabled;	/* I/O accel RAID offload enabled */
	int	offload_enabled_pending;
	int	offload_to_mirror;	/* Send next I/O accelerator RAID */
					/* offload request to mirror drive. */
	struct raid_map *raid_map;	/* I/O accelerator RAID map */

	struct pqi_sas_port *sas_port;
	struct scsi_device *sdev;

	/* membership in the controller-wide and rescan work lists */
	struct list_head scsi_device_list_entry;
	struct list_head new_device_list_entry;
	struct list_head add_list_entry;
	struct list_head delete_list_entry;
};
765
/* VPD inquiry pages */
#define SCSI_VPD_SUPPORTED_PAGES	0x0	/* standard page */
#define SCSI_VPD_DEVICE_ID		0x83	/* standard page */
#define CISS_VPD_LV_DEVICE_GEOMETRY	0xc1	/* vendor-specific page */
#define CISS_VPD_LV_OFFLOAD_STATUS	0xc2	/* vendor-specific page */
#define CISS_VPD_LV_STATUS		0xc3	/* vendor-specific page */

/* flag OR'ed into a page code to request a VPD (EVPD) inquiry */
#define VPD_PAGE	(1 << 8)

#pragma pack(1)

/* structure for CISS_VPD_LV_STATUS */
struct ciss_vpd_logical_volume_status {
	u8	peripheral_info;
	u8	page_code;
	u8	reserved;
	u8	page_length;
	u8	volume_status;	/* CISS_LV_* */
	u8	reserved2[3];
	__be32	flags;		/* CISS_LV_FLAGS_* */
};

#pragma pack()

/* constants for volume_status field of ciss_vpd_logical_volume_status */
#define CISS_LV_OK					0
#define CISS_LV_FAILED					1
#define CISS_LV_NOT_CONFIGURED				2
#define CISS_LV_DEGRADED				3
#define CISS_LV_READY_FOR_RECOVERY			4
#define CISS_LV_UNDERGOING_RECOVERY			5
#define CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED		6
#define CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM	7
#define CISS_LV_HARDWARE_OVERHEATING			8
#define CISS_LV_HARDWARE_HAS_OVERHEATED			9
#define CISS_LV_UNDERGOING_EXPANSION			10
#define CISS_LV_NOT_AVAILABLE				11
#define CISS_LV_QUEUED_FOR_EXPANSION			12
#define CISS_LV_DISABLED_SCSI_ID_CONFLICT		13
#define CISS_LV_EJECTED					14
#define CISS_LV_UNDERGOING_ERASE			15
/* state 16 not used */
#define CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD	17
#define CISS_LV_UNDERGOING_RPI				18
#define CISS_LV_PENDING_RPI				19
#define CISS_LV_ENCRYPTED_NO_KEY			20
/* state 21 not used */
#define CISS_LV_UNDERGOING_ENCRYPTION			22
#define CISS_LV_UNDERGOING_ENCRYPTION_REKEYING		23
#define CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER	24
#define CISS_LV_PENDING_ENCRYPTION			25
#define CISS_LV_PENDING_ENCRYPTION_REKEYING		26
#define CISS_LV_NOT_SUPPORTED				27
#define CISS_LV_STATUS_UNAVAILABLE			255

/* constants for flags field of ciss_vpd_logical_volume_status */
#define CISS_LV_FLAGS_NO_HOST_IO	0x1	/* volume not available for */
						/* host I/O */

/* for SAS hosts and SAS expanders */
struct pqi_sas_node {
	struct device *parent_dev;
	struct list_head port_list_head;	/* list of pqi_sas_port */
};

/* one SAS port on a node; owns a list of phys */
struct pqi_sas_port {
	struct list_head port_list_entry;	/* on parent_node->port_list_head */
	u64	sas_address;
	struct sas_port *port;
	int	next_phy_index;
	struct list_head phy_list_head;		/* list of pqi_sas_phy */
	struct pqi_sas_node *parent_node;
	struct sas_rphy *rphy;
};

/* one SAS phy belonging to a port */
struct pqi_sas_phy {
	struct list_head phy_list_entry;	/* on parent_port->phy_list_head */
	struct sas_phy *phy;
	struct pqi_sas_port *parent_port;
	bool	added_to_port;
};
847
/*
 * Host-side tracking structure for one in-flight request; slots are
 * pre-allocated in ctrl_info->io_request_pool and located by index.
 */
struct pqi_io_request {
	atomic_t	refcount;	/* slot is free when this is 0 */
	u16		index;		/* position in io_request_pool */
	/* invoked when the matching response IU arrives */
	void (*io_complete_callback)(struct pqi_io_request *io_request,
		void *context);
	void		*context;
	int		status;
	struct scsi_cmnd *scmd;
	void		*error_info;	/* points into the error buffer on error */
	struct pqi_sg_descriptor *sg_chain_buffer;
	dma_addr_t	sg_chain_buffer_dma_handle;
	void		*iu;		/* the request IU being submitted */
	struct list_head request_list_entry;
};

/* for indexing into the pending_events[] field of struct pqi_ctrl_info */
#define PQI_EVENT_HEARTBEAT		0
#define PQI_EVENT_HOTPLUG		1
#define PQI_EVENT_HARDWARE		2
#define PQI_EVENT_PHYSICAL_DEVICE	3
#define PQI_EVENT_LOGICAL_DEVICE	4
#define PQI_EVENT_AIO_STATE_CHANGE	5
#define PQI_EVENT_AIO_CONFIG_CHANGE	6
#define PQI_NUM_SUPPORTED_EVENTS	7

/* one pending asynchronous event awaiting acknowledgement */
struct pqi_event {
	bool	pending;
	u8	event_type;
	__le16	event_id;
	__le32	additional_event_id;
};

/*
 * I/O slots held back from the SCSI midlayer so that resets, event
 * acknowledgements, and synchronous internal requests can always be
 * submitted even when the controller is saturated.
 */
#define PQI_RESERVED_IO_SLOTS_LUN_RESET			1
#define PQI_RESERVED_IO_SLOTS_EVENT_ACK			PQI_NUM_SUPPORTED_EVENTS
#define PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS	3
#define PQI_RESERVED_IO_SLOTS				\
	(PQI_RESERVED_IO_SLOTS_LUN_RESET + PQI_RESERVED_IO_SLOTS_EVENT_ACK + \
	PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS)
886
/* top-level per-controller state; one instance per PCI function */
struct pqi_ctrl_info {
	unsigned int	ctrl_id;
	struct pci_dev	*pci_dev;
	char		firmware_version[11];
	void __iomem	*iomem_base;
	struct pqi_ctrl_registers __iomem *registers;
	struct pqi_device_registers __iomem *pqi_registers;
	u32		max_sg_entries;
	u32		config_table_offset;
	u32		config_table_length;
	/* limits reported by the controller's device capability data */
	u16		max_inbound_queues;
	u16		max_elements_per_iq;
	u16		max_iq_element_length;
	u16		max_outbound_queues;
	u16		max_elements_per_oq;
	u16		max_oq_element_length;
	u32		max_transfer_size;
	u32		max_outstanding_requests;
	u32		max_io_slots;
	unsigned int	scsi_ml_can_queue;
	unsigned short	sg_tablesize;
	unsigned int	max_sectors;
	u32		error_buffer_length;
	void		*error_buffer;
	dma_addr_t	error_buffer_dma_handle;
	size_t		sg_chain_buffer_length;
	unsigned int	num_queue_groups;
	unsigned int	num_active_queue_groups;
	u16		num_elements_per_iq;
	u16		num_elements_per_oq;
	u16		max_inbound_iu_length_per_firmware;
	u16		max_inbound_iu_length;
	unsigned int	max_sg_per_iu;
	/* DMA-coherent backing memory for the admin and operational queues */
	void		*admin_queue_memory_base;
	u32		admin_queue_memory_length;
	dma_addr_t	admin_queue_memory_base_dma_handle;
	void		*queue_memory_base;
	u32		queue_memory_length;
	dma_addr_t	queue_memory_base_dma_handle;
	struct pqi_admin_queues admin_queues;
	struct pqi_queue_group queue_groups[PQI_MAX_QUEUE_GROUPS];
	struct pqi_event_queue event_queue;
	int		max_msix_vectors;
	int		num_msix_vectors_enabled;
	int		num_msix_vectors_initialized;
	u32		msix_vectors[PQI_MAX_MSIX_VECTORS];
	void		*intr_data[PQI_MAX_MSIX_VECTORS];
	int		event_irq;
	struct Scsi_Host *scsi_host;

	struct mutex	scan_mutex;
	u8		inbound_spanning_supported : 1;
	u8		outbound_spanning_supported : 1;
	u8		pqi_mode_enabled : 1;
	u8		controller_online : 1;
	u8		heartbeat_timer_started : 1;

	/* list of pqi_scsi_dev, protected by scsi_device_list_lock */
	struct list_head scsi_device_list;
	spinlock_t	scsi_device_list_lock;

	struct delayed_work rescan_work;
	struct delayed_work update_time_work;

	struct pqi_sas_node *sas_host;
	u64		sas_address;

	/* pre-allocated request slots; see struct pqi_io_request */
	struct pqi_io_request *io_request_pool;
	u16		next_io_request_slot;

	/* indexed by the PQI_EVENT_* constants above */
	struct pqi_event pending_events[PQI_NUM_SUPPORTED_EVENTS];
	struct work_struct event_work;

	/* heartbeat bookkeeping: interrupt count vs. heartbeats requested */
	atomic_t	num_interrupts;
	int		previous_num_interrupts;
	unsigned int	num_heartbeats_requested;
	struct timer_list heartbeat_timer;

	/* throttle synchronous requests and serialize LUN resets */
	struct semaphore sync_request_sem;
	struct semaphore lun_reset_sem;
};

/* whether the controller is known to be operating in PQI mode */
enum pqi_ctrl_mode {
	UNKNOWN,
	PQI_MODE
};
972
/*
 * assume worst case: SATA queue depth of 31 minus 4 internal firmware commands
 */
#define PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH	27

/* 0 = no limit */
#define PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH	0

/* CISS commands */
#define CISS_READ		0xc0
#define CISS_REPORT_LOG		0xc2	/* Report Logical LUNs */
#define CISS_REPORT_PHYS	0xc3	/* Report Physical LUNs */
#define CISS_GET_RAID_MAP	0xc8

/* constants for CISS_REPORT_LOG/CISS_REPORT_PHYS commands */
#define CISS_REPORT_LOG_EXTENDED	0x1
#define CISS_REPORT_PHYS_EXTENDED	0x2

/* BMIC commands */
#define BMIC_IDENTIFY_CONTROLLER		0x11
#define BMIC_IDENTIFY_PHYSICAL_DEVICE		0x15
#define BMIC_READ				0x26
#define BMIC_WRITE				0x27
#define BMIC_SENSE_CONTROLLER_PARAMETERS	0x64
#define BMIC_SENSE_SUBSYSTEM_INFORMATION	0x66
#define BMIC_WRITE_HOST_WELLNESS		0xa5
#define BMIC_CACHE_FLUSH			0xc2

#define SA_CACHE_FLUSH	0x01

/* helpers for decoding an 8-byte CISS LUN address */
#define MASKED_DEVICE(lunid)		((lunid)[3] & 0xc0)
#define CISS_GET_BUS(lunid)		((lunid)[7] & 0x3f)
#define CISS_GET_LEVEL_2_TARGET(lunid)	((lunid)[6])
#define CISS_GET_DRIVE_NUMBER(lunid)	\
	(((CISS_GET_BUS((lunid)) - 1) << 8) + \
	CISS_GET_LEVEL_2_TARGET((lunid)))

/* timeout sentinel meaning "wait forever" */
#define NO_TIMEOUT	((unsigned long) -1)
1011
#pragma pack(1)

/* response buffer for BMIC_IDENTIFY_CONTROLLER */
struct bmic_identify_controller {
	u8	configured_logical_drive_count;
	__le32	configuration_signature;
	u8	firmware_version[4];
	u8	reserved[145];
	__le16	extended_logical_unit_count;
	u8	reserved1[34];
	__le16	firmware_build_number;
	u8	reserved2[100];
	u8	controller_mode;
	u8	reserved3[32];
};

/* response buffer for BMIC_IDENTIFY_PHYSICAL_DEVICE */
struct bmic_identify_physical_device {
	u8	scsi_bus;		/* SCSI Bus number on controller */
	u8	scsi_id;		/* SCSI ID on this bus */
	__le16	block_size;		/* sector size in bytes */
	__le32	total_blocks;		/* number for sectors on drive */
	__le32	reserved_blocks;	/* controller reserved (RIS) */
	u8	model[40];		/* Physical Drive Model */
	u8	serial_number[40];	/* Drive Serial Number */
	u8	firmware_revision[8];	/* drive firmware revision */
	u8	scsi_inquiry_bits;	/* inquiry byte 7 bits */
	u8	compaq_drive_stamp;	/* 0 means drive not stamped */
	u8	last_failure_reason;
	u8	flags;
	u8	more_flags;
	u8	scsi_lun;		/* SCSI LUN for phys drive */
	u8	yet_more_flags;
	u8	even_more_flags;
	__le32	spi_speed_rules;
	u8	phys_connector[2];	/* connector number on controller */
	u8	phys_box_on_bus;	/* phys enclosure this drive resides */
	u8	phys_bay_in_box;	/* phys drv bay this drive resides */
	__le32	rpm;			/* drive rotational speed in RPM */
	u8	device_type;		/* type of drive */
	u8	sata_version;		/* only valid when device_type = */
					/* BMIC_DEVICE_TYPE_SATA */
	__le64	big_total_block_count;
	__le64	ris_starting_lba;
	__le32	ris_size;
	u8	wwid[20];
	u8	controller_phy_map[32];
	__le16	phy_count;
	u8	phy_connected_dev_type[256];
	u8	phy_to_drive_bay_num[256];
	__le16	phy_to_attached_dev_index[256];
	u8	box_index;
	u8	reserved;
	__le16	extra_physical_drive_flags;
	u8	negotiated_link_rate[256];
	u8	phy_to_phy_map[256];
	u8	redundant_path_present_map;
	u8	redundant_path_failure_map;
	u8	active_path_number;
	__le16	alternate_paths_phys_connector[8];
	u8	alternate_paths_phys_box_on_port[8];
	u8	multi_lun_device_lun_count;
	u8	minimum_good_fw_revision[8];
	u8	unique_inquiry_bytes[20];
	u8	current_temperature_degreesC;
	u8	temperature_threshold_degreesC;
	u8	max_temperature_degreesC;
	u8	logical_blocks_per_phys_block_exp;
	__le16	current_queue_depth_limit;
	u8	switch_name[10];
	__le16	switch_port;
	u8	alternate_paths_switch_name[40];
	u8	alternate_paths_switch_port[8];
	__le16	power_on_hours;
	__le16	percent_endurance_used;
	u8	drive_authentication;
	u8	smart_carrier_authentication;
	u8	smart_carrier_app_fw_version;
	u8	smart_carrier_bootloader_fw_version;
	u8	encryption_key_name[64];
	__le32	misc_drive_flags;
	__le16	dek_index;
	u8	padding[112];
};

#pragma pack()
1096
/* SAS transport glue implemented in smartpqi_sas_transport.c */
int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info);
void pqi_delete_sas_host(struct pqi_ctrl_info *ctrl_info);
int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node,
	struct pqi_scsi_dev *device);
void pqi_remove_sas_device(struct pqi_scsi_dev *device);
struct pqi_scsi_dev *pqi_find_device_by_sas_rphy(
	struct pqi_ctrl_info *ctrl_info, struct sas_rphy *rphy);

extern struct sas_function_template pqi_sas_transport_functions;
1106
1107#if !defined(readq)
1108#define readq readq
1109static inline u64 readq(const volatile void __iomem *addr)
1110{
1111 u32 lower32;
1112 u32 upper32;
1113
1114 lower32 = readl(addr);
1115 upper32 = readl(addr + 4);
1116
1117 return ((u64)upper32 << 32) | lower32;
1118}
1119#endif
1120
1121#if !defined(writeq)
1122#define writeq writeq
1123static inline void writeq(u64 value, volatile void __iomem *addr)
1124{
1125 u32 lower32;
1126 u32 upper32;
1127
1128 lower32 = lower_32_bits(value);
1129 upper32 = upper_32_bits(value);
1130
1131 writel(lower32, addr);
1132 writel(upper32, addr + 4);
1133}
1134#endif
1135
1136#endif /* _SMARTPQI_H */
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
new file mode 100644
index 000000000000..a535b2661f38
--- /dev/null
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -0,0 +1,6303 @@
1/*
2 * driver for Microsemi PQI-based storage controllers
3 * Copyright (c) 2016 Microsemi Corporation
4 * Copyright (c) 2016 PMC-Sierra, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
16 *
17 */
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include <linux/interrupt.h>
24#include <linux/sched.h>
25#include <linux/rtc.h>
26#include <linux/bcd.h>
27#include <linux/cciss_ioctl.h>
28#include <scsi/scsi_host.h>
29#include <scsi/scsi_cmnd.h>
30#include <scsi/scsi_device.h>
31#include <scsi/scsi_eh.h>
32#include <scsi/scsi_transport_sas.h>
33#include <asm/unaligned.h>
34#include "smartpqi.h"
35#include "smartpqi_sis.h"
36
37#if !defined(BUILD_TIMESTAMP)
38#define BUILD_TIMESTAMP
39#endif
40
41#define DRIVER_VERSION "0.9.13-370"
42#define DRIVER_MAJOR 0
43#define DRIVER_MINOR 9
44#define DRIVER_RELEASE 13
45#define DRIVER_REVISION 370
46
47#define DRIVER_NAME "Microsemi PQI Driver (v" DRIVER_VERSION ")"
48#define DRIVER_NAME_SHORT "smartpqi"
49
50MODULE_AUTHOR("Microsemi");
51MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
52 DRIVER_VERSION);
53MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
54MODULE_VERSION(DRIVER_VERSION);
55MODULE_LICENSE("GPL");
56
57#define PQI_ENABLE_MULTI_QUEUE_SUPPORT 0
58
59static char *hpe_branded_controller = "HPE Smart Array Controller";
60static char *microsemi_branded_controller = "Microsemi Smart Family Controller";
61
62static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
63static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
64static void pqi_scan_start(struct Scsi_Host *shost);
65static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
66 struct pqi_queue_group *queue_group, enum pqi_io_path path,
67 struct pqi_io_request *io_request);
68static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
69 struct pqi_iu_header *request, unsigned int flags,
70 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
71static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
72 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
73 unsigned int cdb_length, struct pqi_queue_group *queue_group,
74 struct pqi_encryption_info *encryption_info);
75
76/* for flags argument to pqi_submit_raid_request_synchronous() */
77#define PQI_SYNC_FLAGS_INTERRUPTABLE 0x1
78
79static struct scsi_transport_template *pqi_sas_transport_template;
80
81static atomic_t pqi_controller_count = ATOMIC_INIT(0);
82
83static int pqi_disable_device_id_wildcards;
84module_param_named(disable_device_id_wildcards,
85 pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR);
86MODULE_PARM_DESC(disable_device_id_wildcards,
87 "Disable device ID wildcards.");
88
89static char *raid_levels[] = {
90 "RAID-0",
91 "RAID-4",
92 "RAID-1(1+0)",
93 "RAID-5",
94 "RAID-5+1",
95 "RAID-ADG",
96 "RAID-1(ADM)",
97};
98
99static char *pqi_raid_level_to_string(u8 raid_level)
100{
101 if (raid_level < ARRAY_SIZE(raid_levels))
102 return raid_levels[raid_level];
103
104 return "";
105}
106
107#define SA_RAID_0 0
108#define SA_RAID_4 1
109#define SA_RAID_1 2 /* also used for RAID 10 */
110#define SA_RAID_5 3 /* also used for RAID 50 */
111#define SA_RAID_51 4
112#define SA_RAID_6 5 /* also used for RAID 60 */
113#define SA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
114#define SA_RAID_MAX SA_RAID_ADM
115#define SA_RAID_UNKNOWN 0xff
116
/* Complete a SCSI command back to the midlayer. */
static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	scmd->scsi_done(scmd);
}

/* Compare two 8-byte CISS LUN addresses for equality. */
static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

/*
 * The Scsi_Host private area holds a single pointer to our
 * pqi_ctrl_info; dereference it.
 */
static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
{
	void *hostdata = shost_priv(shost);

	return *((struct pqi_ctrl_info **)hostdata);
}

/* Logical (RAID volume) device, as opposed to a physical drive. */
static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

/* True once the controller has been taken offline. */
static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}
143
144static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
145{
146 if (ctrl_info->controller_online)
147 if (!sis_is_firmware_running(ctrl_info))
148 pqi_take_ctrl_offline(ctrl_info);
149}
150
/* True if the address designates the controller itself (LUN ID 0). */
static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

/* The SIS scratch register persists the driver's PQI/SIS mode choice. */
static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
	struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}

#define PQI_RESCAN_WORK_INTERVAL	(10 * HZ)

/* Queue the periodic device-rescan worker to run after the interval. */
static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->rescan_work,
		PQI_RESCAN_WORK_INTERVAL);
}
175
/*
 * DMA-map @buffer and describe it in a single scatter-gather descriptor
 * marked CISS_SG_LAST.  A NULL/empty buffer or PCI_DMA_NONE direction is
 * a no-op returning success.  Returns 0 or -ENOMEM on mapping failure.
 */
static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, int data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
		return 0;

	bus_address = pci_map_single(pci_dev, buffer, buffer_length,
		data_direction);
	if (pci_dma_mapping_error(pci_dev, bus_address))
		return -ENOMEM;

	/* Descriptor fields are little-endian and may be unaligned. */
	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}
196
/*
 * Undo pqi_map_single() for each of @num_descriptors scatter-gather
 * descriptors.  PCI_DMA_NONE means nothing was mapped.
 */
static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	int data_direction)
{
	int i;

	if (data_direction == PCI_DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		pci_unmap_single(pci_dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}
212
213static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
214 struct pqi_raid_path_request *request, u8 cmd,
215 u8 *scsi3addr, void *buffer, size_t buffer_length,
216 u16 vpd_page, int *pci_direction)
217{
218 u8 *cdb;
219 int pci_dir;
220
221 memset(request, 0, sizeof(*request));
222
223 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
224 put_unaligned_le16(offsetof(struct pqi_raid_path_request,
225 sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
226 &request->header.iu_length);
227 put_unaligned_le32(buffer_length, &request->buffer_length);
228 memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
229 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
230 request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
231
232 cdb = request->cdb;
233
234 switch (cmd) {
235 case INQUIRY:
236 request->data_direction = SOP_READ_FLAG;
237 cdb[0] = INQUIRY;
238 if (vpd_page & VPD_PAGE) {
239 cdb[1] = 0x1;
240 cdb[2] = (u8)vpd_page;
241 }
242 cdb[4] = (u8)buffer_length;
243 break;
244 case CISS_REPORT_LOG:
245 case CISS_REPORT_PHYS:
246 request->data_direction = SOP_READ_FLAG;
247 cdb[0] = cmd;
248 if (cmd == CISS_REPORT_PHYS)
249 cdb[1] = CISS_REPORT_PHYS_EXTENDED;
250 else
251 cdb[1] = CISS_REPORT_LOG_EXTENDED;
252 put_unaligned_be32(buffer_length, &cdb[6]);
253 break;
254 case CISS_GET_RAID_MAP:
255 request->data_direction = SOP_READ_FLAG;
256 cdb[0] = CISS_READ;
257 cdb[1] = CISS_GET_RAID_MAP;
258 put_unaligned_be32(buffer_length, &cdb[6]);
259 break;
260 case SA_CACHE_FLUSH:
261 request->data_direction = SOP_WRITE_FLAG;
262 cdb[0] = BMIC_WRITE;
263 cdb[6] = BMIC_CACHE_FLUSH;
264 put_unaligned_be16(buffer_length, &cdb[7]);
265 break;
266 case BMIC_IDENTIFY_CONTROLLER:
267 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
268 request->data_direction = SOP_READ_FLAG;
269 cdb[0] = BMIC_READ;
270 cdb[6] = cmd;
271 put_unaligned_be16(buffer_length, &cdb[7]);
272 break;
273 case BMIC_WRITE_HOST_WELLNESS:
274 request->data_direction = SOP_WRITE_FLAG;
275 cdb[0] = BMIC_WRITE;
276 cdb[6] = cmd;
277 put_unaligned_be16(buffer_length, &cdb[7]);
278 break;
279 default:
280 dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
281 cmd);
282 WARN_ON(cmd);
283 break;
284 }
285
286 switch (request->data_direction) {
287 case SOP_READ_FLAG:
288 pci_dir = PCI_DMA_FROMDEVICE;
289 break;
290 case SOP_WRITE_FLAG:
291 pci_dir = PCI_DMA_TODEVICE;
292 break;
293 case SOP_NO_DIRECTION_FLAG:
294 pci_dir = PCI_DMA_NONE;
295 break;
296 default:
297 pci_dir = PCI_DMA_BIDIRECTIONAL;
298 break;
299 }
300
301 *pci_direction = pci_dir;
302
303 return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
304 buffer, buffer_length, pci_dir);
305}
306
/*
 * Grab a free I/O request slot from the pool without locking: linearly
 * scan from the last-used slot and claim the first one whose refcount
 * transitions 0 -> 1.  The starting-slot hint is read and updated
 * without synchronization; races only cost extra scan iterations.
 */
static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		/* Slot already in use - undo our reference and move on. */
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;

	return io_request;
}

/* Release a slot claimed by pqi_alloc_io_request(). */
static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}
335
/*
 * Issue BMIC IDENTIFY CONTROLLER and fill @buffer with the response.
 * Returns 0 on success or a negative errno.
 */
static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
		sizeof(*buffer), 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

/*
 * Send a SCSI INQUIRY (standard page, or a VPD page when @vpd_page has
 * VPD_PAGE set) to the device at @scsi3addr.
 */
static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
		&pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}
379
/*
 * Issue BMIC IDENTIFY PHYSICAL DEVICE for @device.  The BMIC drive
 * number is patched into CDB bytes 2 (low byte) and 9 (high byte) after
 * the generic request has been built.
 */
static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer,
	size_t buffer_length)
{
	int rc;
	int pci_direction;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}
408
#define SA_CACHE_FLUSH_BUFFER_LENGTH	4

/*
 * Ask the controller to flush its write cache (BMIC CACHE FLUSH).
 * Returns 0 on success, -ENXIO if the controller is offline, or a
 * negative errno on allocation/submission failure.
 */
static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_raid_path_request request;
	int pci_direction;
	u8 *buffer;

	/*
	 * Don't bother trying to flush the cache if the controller is
	 * locked up.
	 */
	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
		SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

out:
	kfree(buffer);

	return rc;
}
446
/*
 * Send a BMIC WRITE HOST WELLNESS command carrying the caller-built
 * wellness payload in @buffer.
 */
static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	int rc;
	struct pqi_raid_path_request request;
	int pci_direction;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

#pragma pack(1)

/* On-the-wire "driver version" host-wellness record (packed, tagged). */
struct bmic_host_wellness_driver_version {
	u8	start_tag[4];		/* "<HW>" */
	u8	driver_version_tag[2];	/* "DV" */
	__le16	driver_version_length;
	char	driver_version[32];
	u8	end_tag[2];		/* "ZZ" */
};

#pragma pack()
480
/*
 * Report the driver version string to the controller via the host
 * wellness mechanism.  The version is truncated to 31 characters and
 * always NUL-terminated before sending.
 */
static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	/*
	 * NOTE(review): the advertised length is the full 32-byte field,
	 * not the actual string length - presumably the firmware expects
	 * the fixed field size; confirm against the BMIC spec.
	 */
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}
514
#pragma pack(1)

/* On-the-wire "time/date" host-wellness record (packed, tagged). */
struct bmic_host_wellness_time {
	u8	start_tag[4];		/* "<HW>" */
	u8	time_tag[2];		/* "TD" */
	__le16	time_length;
	u8	time[8];		/* BCD: hh mm ss 00 MM DD CC YY */
	u8	dont_write_tag[2];	/* "DW" */
	u8	end_tag[2];		/* "ZZ" */
};

#pragma pack()

/*
 * Send the current local wall-clock time to the controller, BCD-encoded
 * in the host-wellness time record.
 */
static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct timeval time;
	struct rtc_time tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	/* Convert UTC to local time using the system timezone offset. */
	do_gettimeofday(&time);
	local_time = time.tv_sec - (sys_tz.tz_minuteswest * 60);
	rtc_time64_to_tm(local_time, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}
579
#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)

/*
 * Delayed-work handler: push the current time to the controller, then
 * re-arm itself to run again in 24 hours.  Failures are logged but do
 * not stop the periodic rescheduling.
 */
static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

/* Kick off the time-update worker immediately. */
static inline void pqi_schedule_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}
604
/*
 * Issue a CISS REPORT LUNS variant (@cmd is CISS_REPORT_PHYS or
 * CISS_REPORT_LOG) addressed to the controller LUN.
 */
static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void *buffer, size_t buffer_length)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}
625
/*
 * Fetch the full REPORT LUNS response for @cmd into a freshly allocated
 * buffer returned through *@buffer (caller frees; NULL on failure).
 *
 * The list length is discovered with a header-only probe, then the full
 * list is fetched.  If the list grew between the two calls, the fetch is
 * retried with the larger size until it fits.
 */
static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	/* Header-only request just to learn the current list length. */
	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
		sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	/* Empty list: reuse the probe header instead of re-querying. */
	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length = get_unaligned_be32(
		&((struct report_lun_header *)lun_data)->list_length);

	/* List grew since the probe - retry with the larger size. */
	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}
688
/* Fetch the extended physical-device LUN list. */
static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
		buffer);
}

/* Fetch the extended logical-device (volume) LUN list. */
static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}
701
/*
 * Fetch both the physical and logical device lists, then rebuild the
 * logical list with one extra zeroed entry appended so the controller
 * itself appears as a logical device.  Both out-lists are caller-freed.
 *
 * NOTE(review): failures of the two report commands are only logged;
 * the function proceeds (and can return 0) with a NULL physdev_list or
 * an empty logical list - confirm this best-effort behavior is intended.
 */
static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_extended **physdev_list,
	struct report_log_lun_extended **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_extended *internal_logdev_list;
	struct report_log_lun_extended *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		/* No logical list - synthesize an empty one on the stack. */
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_extended *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	/* One spare entry's worth of space for the controller pseudo-LUN. */
	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun_extended), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun_extended_entry));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun_extended_entry),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}
762
/* Record the SCSI bus/target/lun triple on the device. */
static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

/*
 * Derive the SCSI bus/target/lun address from the device's CISS LUN ID.
 * Controller and logical devices get their final address here (and
 * target_lun_valid is set); physical devices are addressed later by the
 * SAS transport layer.
 */
static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		pqi_set_bus_target_lun(device, PQI_RAID_VOLUME_BUS, 0,
			lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}
799
800static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
801 struct pqi_scsi_dev *device)
802{
803 int rc;
804 u8 raid_level;
805 u8 *buffer;
806
807 raid_level = SA_RAID_UNKNOWN;
808
809 buffer = kmalloc(64, GFP_KERNEL);
810 if (buffer) {
811 rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
812 VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
813 if (rc == 0) {
814 raid_level = buffer[8];
815 if (raid_level > SA_RAID_MAX)
816 raid_level = SA_RAID_UNKNOWN;
817 }
818 kfree(buffer);
819 }
820
821 device->raid_level = raid_level;
822}
823
/*
 * Sanity-check a RAID map returned by the firmware: overall size,
 * entry count, and per-RAID-level layout constraints.  Returns 0 if the
 * map is usable, -EINVAL (with a warning) otherwise.
 */
static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;
	unsigned int num_phys_disks;
	unsigned int num_raid_map_entries;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (raid_map_size > sizeof(*raid_map)) {
		err_msg = "RAID map too large";
		goto bad_raid_map;
	}

	num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
		(get_unaligned_le16(&raid_map->data_disks_per_row) +
		get_unaligned_le16(&raid_map->metadata_disks_per_row));
	num_raid_map_entries = num_phys_disks *
		get_unaligned_le16(&raid_map->row_cnt);

	if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
		err_msg = "invalid number of map entries in RAID map";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		/* RAID-1 mirrors are exactly two layout maps. */
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_ADM) {
		/* RAID-1(ADM) is a triple mirror. */
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(ADM) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev, "%s\n", err_msg);

	return -EINVAL;
}
886
/*
 * Fetch and validate the RAID map for @device.  On success the map is
 * attached as device->raid_map (owned by the device); on any failure
 * the allocation is freed and an errno is returned.
 */
static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
		sizeof(*raid_map), 0, &pci_direction);
	if (rc)
		goto error;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	if (rc)
		goto error;

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}
927
/*
 * Read the CISS offload-status VPD page to learn whether AIO (I/O
 * accelerator) offload is configured/enabled for the volume.  Offload
 * can only be enabled if a valid RAID map is also obtained.
 * Best-effort: failures leave the device's offload fields untouched.
 */
static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 offload_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
	if (rc)
		goto out;

#define OFFLOAD_STATUS_BYTE	4
#define OFFLOAD_CONFIGURED_BIT	0x1
#define OFFLOAD_ENABLED_BIT	0x2

	offload_status = buffer[OFFLOAD_STATUS_BYTE];
	device->offload_configured =
		!!(offload_status & OFFLOAD_CONFIGURED_BIT);
	if (device->offload_configured) {
		device->offload_enabled_pending =
			!!(offload_status & OFFLOAD_ENABLED_BIT);
		if (pqi_get_raid_map(ctrl_info, device))
			device->offload_enabled_pending = false;
	}

out:
	kfree(buffer);
}
961
962/*
963 * Use vendor-specific VPD to determine online/offline status of a volume.
964 */
965
/*
 * Query the vendor LV-status VPD page for @device and record the raw
 * volume status plus an offline flag.  Any failure defaults to
 * CISS_LV_STATUS_UNAVAILABLE / offline.
 */
static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	/* Reject a short page that does not cover the status fields. */
	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}
1000
/*
 * Populate basic identity for @device from a standard INQUIRY (type,
 * vendor, model).  For logical disks, additionally fetch RAID level,
 * offload status, and volume status.
 */
static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
	if (rc)
		goto out;

	/* Strip non-graphic characters from vendor/model fields. */
	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8],
		sizeof(device->vendor));
	memcpy(device->model, &buffer[16],
		sizeof(device->model));

	if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
		pqi_get_raid_level(ctrl_info, device);
		pqi_get_offload_status(ctrl_info, device);
		pqi_get_volume_status(ctrl_info, device);
	}

out:
	kfree(buffer);

	return rc;
}
1036
/*
 * Fill in physical-drive details (queue depth, device type, path and
 * enclosure info) from BMIC IDENTIFY PHYSICAL DEVICE.  On failure only
 * a default queue depth is set.  @id_phys is caller-provided scratch.
 */
static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return;
	}

	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->device_type = id_phys->device_type;
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;
}
1065
/*
 * Log a human-readable description of the device's CISS_LV_* volume
 * status.  Unrecognized status codes are reported with their raw value.
 */
static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	/* +10 leaves room for the printed u8/u32 status value. */
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status =
			"Encrypted volume inaccessible - disabled on ctrl";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}
1169
1170static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle(
1171 struct pqi_ctrl_info *ctrl_info, u32 aio_handle)
1172{
1173 struct pqi_scsi_dev *device;
1174
1175 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1176 scsi_device_list_entry) {
1177 if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1178 continue;
1179 if (pqi_is_logical_device(device))
1180 continue;
1181 if (device->aio_handle == aio_handle)
1182 return device;
1183 }
1184
1185 return NULL;
1186}
1187
/*
 * Recalculate the queue depth of a logical drive as the sum of the queue
 * depths of its member physical disks, found via the drive's RAID map.
 *
 * Falls back to PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH when no RAID
 * map is present.  If any member disk cannot be found, offload (bypass)
 * is disabled for the drive and its RAID map is discarded.
 *
 * Assumes the SCSI device list lock is held (pqi_find_disk_by_aio_handle
 * walks the device list).
 */
static void pqi_update_logical_drive_queue_depth(
	struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive)
{
	unsigned int i;
	struct raid_map *raid_map;
	struct raid_map_disk_data *disk_data;
	struct pqi_scsi_dev *phys_disk;
	unsigned int num_phys_disks;
	unsigned int num_raid_map_entries;
	unsigned int queue_depth;

	/* Default in case there is no usable RAID map. */
	logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH;

	raid_map = logical_drive->raid_map;
	if (!raid_map)
		return;

	disk_data = raid_map->disk_data;
	/* Total disks = maps * (data disks + metadata disks) per row. */
	num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
		(get_unaligned_le16(&raid_map->data_disks_per_row) +
		get_unaligned_le16(&raid_map->metadata_disks_per_row));
	num_raid_map_entries = num_phys_disks *
		get_unaligned_le16(&raid_map->row_cnt);

	queue_depth = 0;
	for (i = 0; i < num_raid_map_entries; i++) {
		phys_disk = pqi_find_disk_by_aio_handle(ctrl_info,
			disk_data[i].aio_handle);

		if (!phys_disk) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"failed to find physical disk for logical drive %016llx\n",
				get_unaligned_be64(logical_drive->scsi3addr));
			/*
			 * The map references a disk we don't know about:
			 * bypass I/O can't be trusted, so disable it and
			 * drop the stale RAID map.
			 */
			logical_drive->offload_enabled = false;
			logical_drive->offload_enabled_pending = false;
			kfree(raid_map);
			logical_drive->raid_map = NULL;
			return;
		}

		queue_depth += phys_disk->queue_depth;
	}

	logical_drive->queue_depth = queue_depth;
}
1233
1234static void pqi_update_all_logical_drive_queue_depths(
1235 struct pqi_ctrl_info *ctrl_info)
1236{
1237 struct pqi_scsi_dev *device;
1238
1239 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1240 scsi_device_list_entry) {
1241 if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
1242 continue;
1243 if (!pqi_is_logical_device(device))
1244 continue;
1245 pqi_update_logical_drive_queue_depth(ctrl_info, device);
1246 }
1247}
1248
/*
 * Delayed-work callback: performs a full SCSI device discovery pass for
 * the controller that owns the work item.
 */
static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	/* The delayed work item is embedded in the controller structure. */
	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}
1258
1259static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
1260 struct pqi_scsi_dev *device)
1261{
1262 int rc;
1263
1264 if (pqi_is_logical_device(device))
1265 rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
1266 device->target, device->lun);
1267 else
1268 rc = pqi_add_sas_device(ctrl_info->sas_host, device);
1269
1270 return rc;
1271}
1272
1273static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
1274 struct pqi_scsi_dev *device)
1275{
1276 if (pqi_is_logical_device(device))
1277 scsi_remove_device(device->sdev);
1278 else
1279 pqi_remove_sas_device(device);
1280}
1281
1282/* Assumes the SCSI device list lock is held. */
1283
1284static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
1285 int bus, int target, int lun)
1286{
1287 struct pqi_scsi_dev *device;
1288
1289 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1290 scsi_device_list_entry)
1291 if (device->bus == bus && device->target == target &&
1292 device->lun == lun)
1293 return device;
1294
1295 return NULL;
1296}
1297
1298static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
1299 struct pqi_scsi_dev *dev2)
1300{
1301 if (dev1->is_physical_device != dev2->is_physical_device)
1302 return false;
1303
1304 if (dev1->is_physical_device)
1305 return dev1->wwid == dev2->wwid;
1306
1307 return memcmp(dev1->volume_id, dev2->volume_id,
1308 sizeof(dev1->volume_id)) == 0;
1309}
1310
/* Result of matching a newly discovered device against the internal list. */
enum pqi_find_result {
	DEVICE_NOT_FOUND,	/* no entry with this SCSI3 address */
	DEVICE_CHANGED,		/* address matches, but identity or state differs */
	DEVICE_SAME,		/* same device, unchanged */
};
1316
1317static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
1318 struct pqi_scsi_dev *device_to_find,
1319 struct pqi_scsi_dev **matching_device)
1320{
1321 struct pqi_scsi_dev *device;
1322
1323 list_for_each_entry(device, &ctrl_info->scsi_device_list,
1324 scsi_device_list_entry) {
1325 if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
1326 device->scsi3addr)) {
1327 *matching_device = device;
1328 if (pqi_device_equal(device_to_find, device)) {
1329 if (device_to_find->volume_offline)
1330 return DEVICE_CHANGED;
1331 return DEVICE_SAME;
1332 }
1333 return DEVICE_CHANGED;
1334 }
1335 }
1336
1337 return DEVICE_NOT_FOUND;
1338}
1339
/*
 * Log a one-line device summary: the action ("added"/"removed"/...),
 * the HCTL address, device type, vendor/model, RAID level, the
 * SSD Smart Path capability/enable/expose flags ('+'/'-'), and the
 * current queue depth.
 */
static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
	char *action, struct pqi_scsi_dev *device)
{
	dev_info(&ctrl_info->pci_dev->dev,
		"%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
		action,
		ctrl_info->scsi_host->host_no,
		device->bus,
		device->target,
		device->lun,
		scsi_device_type(device->devtype),
		device->vendor,
		device->model,
		pqi_raid_level_to_string(device->raid_level),
		device->offload_configured ? '+' : '-',
		device->offload_enabled_pending ? '+' : '-',
		device->expose_device ? '+' : '-',
		device->queue_depth);
}
1359
/*
 * Assumes the SCSI device list lock is held.
 *
 * Copy the freshly discovered attributes from new_device into the
 * existing internal entry.  Offload (bypass) is forced off here; the
 * pending value is applied later by pqi_update_device_list() after
 * queue depths have been recalculated.  Ownership of new_device's
 * RAID map is transferred to existing_device.
 */
static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
	struct pqi_scsi_dev *new_device)
{
	existing_device->devtype = new_device->devtype;
	existing_device->device_type = new_device->device_type;
	existing_device->bus = new_device->bus;
	/* Only overwrite the target/lun when the new values are valid. */
	if (new_device->target_lun_valid) {
		existing_device->target = new_device->target;
		existing_device->lun = new_device->lun;
		existing_device->target_lun_valid = true;
	}

	/* By definition, the scsi3addr and wwid fields are already the same. */

	existing_device->is_physical_device = new_device->is_physical_device;
	existing_device->expose_device = new_device->expose_device;
	existing_device->no_uld_attach = new_device->no_uld_attach;
	existing_device->aio_enabled = new_device->aio_enabled;
	memcpy(existing_device->vendor, new_device->vendor,
		sizeof(existing_device->vendor));
	memcpy(existing_device->model, new_device->model,
		sizeof(existing_device->model));
	existing_device->sas_address = new_device->sas_address;
	existing_device->raid_level = new_device->raid_level;
	existing_device->queue_depth = new_device->queue_depth;
	existing_device->aio_handle = new_device->aio_handle;
	existing_device->volume_status = new_device->volume_status;
	existing_device->active_path_index = new_device->active_path_index;
	existing_device->path_map = new_device->path_map;
	existing_device->bay = new_device->bay;
	memcpy(existing_device->box, new_device->box,
		sizeof(existing_device->box));
	memcpy(existing_device->phys_connector, new_device->phys_connector,
		sizeof(existing_device->phys_connector));
	existing_device->offload_configured = new_device->offload_configured;
	/* Bypass stays off until the pending value is applied by the caller. */
	existing_device->offload_enabled = false;
	existing_device->offload_enabled_pending =
		new_device->offload_enabled_pending;
	existing_device->offload_to_mirror = 0;
	/* Replace the old RAID map with the newly discovered one. */
	kfree(existing_device->raid_map);
	existing_device->raid_map = new_device->raid_map;

	/* To prevent this from being freed later. */
	new_device->raid_map = NULL;
}
1407
1408static inline void pqi_free_device(struct pqi_scsi_dev *device)
1409{
1410 if (device) {
1411 kfree(device->raid_map);
1412 kfree(device);
1413 }
1414}
1415
1416/*
1417 * Called when exposing a new device to the OS fails in order to re-adjust
1418 * our internal SCSI device list to match the SCSI ML's view.
1419 */
1420
1421static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
1422 struct pqi_scsi_dev *device)
1423{
1424 unsigned long flags;
1425
1426 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
1427 list_del(&device->scsi_device_list_entry);
1428 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
1429
1430 /* Allow the device structure to be freed later. */
1431 device->keep_device = false;
1432}
1433
/*
 * Reconcile the controller-reported device list (new_device_list, with
 * num_new_devices entries) against the driver's internal list: matching
 * devices are updated in place, vanished devices are queued for removal,
 * and newly discovered devices are queued for exposure to the SCSI ML.
 * List surgery happens under the device list spinlock; midlayer add/
 * remove calls happen afterwards, outside the lock.
 */
static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
	int rc;
	unsigned int i;
	unsigned long flags;
	enum pqi_find_result find_result;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	struct pqi_scsi_dev *matching_device;
	struct list_head add_list;
	struct list_head delete_list;

	INIT_LIST_HEAD(&add_list);
	INIT_LIST_HEAD(&delete_list);

	/*
	 * The idea here is to do as little work as possible while holding the
	 * spinlock.  That's why we go to great pains to defer anything other
	 * than updating the internal device list until after we release the
	 * spinlock.
	 */

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	/* Assume that all devices in the existing list have gone away. */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		device->device_gone = true;

	/* Classify each newly reported device against the existing list. */
	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];

		find_result = pqi_scsi_find_entry(ctrl_info, device,
			&matching_device);

		switch (find_result) {
		case DEVICE_SAME:
			/*
			 * The newly found device is already in the existing
			 * device list.
			 */
			device->new_device = false;
			matching_device->device_gone = false;
			pqi_scsi_update_device(matching_device, device);
			break;
		case DEVICE_NOT_FOUND:
			/*
			 * The newly found device is NOT in the existing device
			 * list.
			 */
			device->new_device = true;
			break;
		case DEVICE_CHANGED:
			/*
			 * The original device has gone away and we need to add
			 * the new device.
			 */
			device->new_device = true;
			break;
		default:
			WARN_ON(find_result);
			break;
		}
	}

	/* Process all devices that have gone away. */
	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->device_gone) {
			list_del(&device->scsi_device_list_entry);
			list_add_tail(&device->delete_list_entry, &delete_list);
		}
	}

	/* Process all new devices. */
	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];
		if (!device->new_device)
			continue;
		if (device->volume_offline)
			continue;
		list_add_tail(&device->scsi_device_list_entry,
			&ctrl_info->scsi_device_list);
		list_add_tail(&device->add_list_entry, &add_list);
		/* To prevent this device structure from being freed later. */
		device->keep_device = true;
	}

	/* Queue depths depend on the final membership of the list. */
	pqi_update_all_logical_drive_queue_depths(ctrl_info);

	/* Apply the bypass state computed during discovery. */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		device->offload_enabled =
			device->offload_enabled_pending;

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Remove all devices that have gone away. */
	list_for_each_entry_safe(device, next, &delete_list,
		delete_list_entry) {
		if (device->sdev)
			pqi_remove_device(ctrl_info, device);
		if (device->volume_offline) {
			pqi_dev_info(ctrl_info, "offline", device);
			pqi_show_volume_status(ctrl_info, device);
		} else {
			pqi_dev_info(ctrl_info, "removed", device);
		}
		list_del(&device->delete_list_entry);
		pqi_free_device(device);
	}

	/*
	 * Notify the SCSI ML if the queue depth of any existing device has
	 * changed.
	 */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->sdev && device->queue_depth !=
			device->advertised_queue_depth) {
			device->advertised_queue_depth = device->queue_depth;
			scsi_change_queue_depth(device->sdev,
				device->advertised_queue_depth);
		}
	}

	/* Expose any new devices. */
	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
		if (device->expose_device && !device->sdev) {
			rc = pqi_add_device(ctrl_info, device);
			if (rc) {
				dev_warn(&ctrl_info->pci_dev->dev,
					"scsi %d:%d:%d:%d addition failed, device not added\n",
					ctrl_info->scsi_host->host_no,
					device->bus, device->target,
					device->lun);
				/* Roll back the internal list entry. */
				pqi_fixup_botched_add(ctrl_info, device);
				continue;
			}
		}
		pqi_dev_info(ctrl_info, "added", device);
	}
}
1578
1579static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
1580{
1581 bool is_supported = false;
1582
1583 switch (device->devtype) {
1584 case TYPE_DISK:
1585 case TYPE_ZBC:
1586 case TYPE_TAPE:
1587 case TYPE_MEDIUM_CHANGER:
1588 case TYPE_ENCLOSURE:
1589 is_supported = true;
1590 break;
1591 case TYPE_RAID:
1592 /*
1593 * Only support the HBA controller itself as a RAID
1594 * controller. If it's a RAID controller other than
1595 * the HBA itself (an external RAID controller, MSA500
1596 * or similar), we don't support it.
1597 */
1598 if (pqi_is_hba_lunid(device->scsi3addr))
1599 is_supported = true;
1600 break;
1601 }
1602
1603 return is_supported;
1604}
1605
1606static inline bool pqi_skip_device(u8 *scsi3addr,
1607 struct report_phys_lun_extended_entry *phys_lun_ext_entry)
1608{
1609 u8 device_flags;
1610
1611 if (!MASKED_DEVICE(scsi3addr))
1612 return false;
1613
1614 /* The device is masked. */
1615
1616 device_flags = phys_lun_ext_entry->device_flags;
1617
1618 if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) {
1619 /*
1620 * It's a non-disk device. We ignore all devices of this type
1621 * when they're masked.
1622 */
1623 return true;
1624 }
1625
1626 return false;
1627}
1628
1629static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
1630{
1631 /* Expose all devices except for physical devices that are masked. */
1632 if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr))
1633 return false;
1634
1635 return true;
1636}
1637
/*
 * Perform one device discovery pass: fetch the physical and logical LUN
 * lists from the controller, build a pqi_scsi_dev for each usable entry,
 * and hand the result to pqi_update_device_list() for reconciliation
 * with the internal list.
 *
 * All transient allocations are released on the way out; device
 * structures that made it onto the internal list are marked
 * keep_device and survive, the rest are freed.
 *
 * Returns 0 on success or a negative errno (-ENOMEM aborts discovery).
 */
static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int i;
	int rc;
	struct list_head new_device_list_head;
	struct report_phys_lun_extended *physdev_list = NULL;
	struct report_log_lun_extended *logdev_list = NULL;
	struct report_phys_lun_extended_entry *phys_lun_ext_entry;
	struct report_log_lun_extended_entry *log_lun_ext_entry;
	struct bmic_identify_physical_device *id_phys = NULL;
	u32 num_physicals;
	u32 num_logicals;
	struct pqi_scsi_dev **new_device_list = NULL;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	unsigned int num_new_devices;
	unsigned int num_valid_devices;
	bool is_physical_device;
	u8 *scsi3addr;
	static char *out_of_memory_msg =
		"out of memory, device discovery stopped";

	INIT_LIST_HEAD(&new_device_list_head);

	rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
	if (rc)
		goto out;

	/* The reported list length is a big-endian byte count. */
	if (physdev_list)
		num_physicals =
			get_unaligned_be32(&physdev_list->header.list_length)
			/ sizeof(physdev_list->lun_entries[0]);
	else
		num_physicals = 0;

	if (logdev_list)
		num_logicals =
			get_unaligned_be32(&logdev_list->header.list_length)
			/ sizeof(logdev_list->lun_entries[0]);
	else
		num_logicals = 0;

	if (num_physicals) {
		/*
		 * We need this buffer for calls to pqi_get_physical_disk_info()
		 * below. We allocate it here instead of inside
		 * pqi_get_physical_disk_info() because it's a fairly large
		 * buffer.
		 */
		id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
		if (!id_phys) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}
	}

	num_new_devices = num_physicals + num_logicals;

	new_device_list = kmalloc(sizeof(*new_device_list) *
		num_new_devices, GFP_KERNEL);
	if (!new_device_list) {
		dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
		rc = -ENOMEM;
		goto out;
	}

	/* Pre-allocate one device structure per reported LUN. */
	for (i = 0; i < num_new_devices; i++) {
		device = kzalloc(sizeof(*device), GFP_KERNEL);
		if (!device) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}
		list_add_tail(&device->new_device_list_entry,
			&new_device_list_head);
	}

	device = NULL;
	num_valid_devices = 0;

	/* Physical entries come first, followed by logical entries. */
	for (i = 0; i < num_new_devices; i++) {

		if (i < num_physicals) {
			is_physical_device = true;
			phys_lun_ext_entry = &physdev_list->lun_entries[i];
			log_lun_ext_entry = NULL;
			scsi3addr = phys_lun_ext_entry->lunid;
		} else {
			is_physical_device = false;
			phys_lun_ext_entry = NULL;
			log_lun_ext_entry =
				&logdev_list->lun_entries[i - num_physicals];
			scsi3addr = log_lun_ext_entry->lunid;
		}

		if (is_physical_device &&
			pqi_skip_device(scsi3addr, phys_lun_ext_entry))
			continue;

		/* Consume the next pre-allocated device structure. */
		if (device)
			device = list_next_entry(device, new_device_list_entry);
		else
			device = list_first_entry(&new_device_list_head,
				struct pqi_scsi_dev, new_device_list_entry);

		memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
		device->is_physical_device = is_physical_device;
		device->raid_level = SA_RAID_UNKNOWN;

		/* Gather information about the device. */
		rc = pqi_get_device_info(ctrl_info, device);
		if (rc == -ENOMEM) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			goto out;
		}
		if (rc) {
			/* Non-fatal: skip just this device. */
			dev_warn(&ctrl_info->pci_dev->dev,
				"obtaining device info failed, skipping device %016llx\n",
				get_unaligned_be64(device->scsi3addr));
			rc = 0;
			continue;
		}

		if (!pqi_is_supported_device(device))
			continue;

		pqi_assign_bus_target_lun(device);

		device->expose_device = pqi_expose_device(device);

		if (device->is_physical_device) {
			device->wwid = phys_lun_ext_entry->wwid;
			/* AIO needs both the flag and a non-zero handle. */
			if ((phys_lun_ext_entry->device_flags &
				REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
				phys_lun_ext_entry->aio_handle)
				device->aio_enabled = true;
		} else {
			memcpy(device->volume_id, log_lun_ext_entry->volume_id,
				sizeof(device->volume_id));
		}

		switch (device->devtype) {
		case TYPE_DISK:
		case TYPE_ZBC:
		case TYPE_ENCLOSURE:
			if (device->is_physical_device) {
				device->sas_address =
					get_unaligned_be64(&device->wwid);
				if (device->devtype == TYPE_DISK ||
					device->devtype == TYPE_ZBC) {
					device->aio_handle =
						phys_lun_ext_entry->aio_handle;
					pqi_get_physical_disk_info(ctrl_info,
						device, id_phys);
				}
			}
			break;
		}

		new_device_list[num_valid_devices++] = device;
	}

	pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);

out:
	/* Free every pre-allocated device that didn't make the list. */
	list_for_each_entry_safe(device, next, &new_device_list_head,
		new_device_list_entry) {
		if (device->keep_device)
			continue;
		list_del(&device->new_device_list_entry);
		pqi_free_device(device);
	}

	kfree(new_device_list);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_phys);

	return rc;
}
1822
/*
 * Tear down the entire internal SCSI device list: each device that is
 * known to the midlayer/SAS transport is removed from it, then unlinked
 * and freed.  The whole walk runs under the device list lock.
 */
static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	unsigned long flags;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->sdev)
			pqi_remove_device(ctrl_info, device);
		list_del(&device->scsi_device_list_entry);
		pqi_free_device(device);
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
}
1841
/*
 * Run one full device discovery pass, serialized against concurrent
 * scans by scan_mutex.  On failure, the rescan worker is scheduled so
 * discovery is retried.
 *
 * Returns 0 on success, -ENXIO if the controller is offline, or the
 * error from pqi_update_scsi_devices().
 */
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	mutex_lock(&ctrl_info->scan_mutex);

	rc = pqi_update_scsi_devices(ctrl_info);
	if (rc)
		pqi_schedule_rescan_worker(ctrl_info);

	mutex_unlock(&ctrl_info->scan_mutex);

	return rc;
}
1859
/* Start a device discovery pass for the given SCSI host. */
static void pqi_scan_start(struct Scsi_Host *shost)
{
	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);

	pqi_scan_scsi_devices(ctrl_info);
}
1864
1865/* Returns TRUE if scan is finished. */
1866
1867static int pqi_scan_finished(struct Scsi_Host *shost,
1868 unsigned long elapsed_time)
1869{
1870 struct pqi_ctrl_info *ctrl_info;
1871
1872 ctrl_info = shost_priv(shost);
1873
1874 return !mutex_is_locked(&ctrl_info->scan_mutex);
1875}
1876
1877static inline void pqi_set_encryption_info(
1878 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
1879 u64 first_block)
1880{
1881 u32 volume_blk_size;
1882
1883 /*
1884 * Set the encryption tweak values based on logical block address.
1885 * If the block size is 512, the tweak value is equal to the LBA.
1886 * For other block sizes, tweak value is (LBA * block size) / 512.
1887 */
1888 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
1889 if (volume_blk_size != 512)
1890 first_block = (first_block * volume_blk_size) / 512;
1891
1892 encryption_info->data_encryption_key_index =
1893 get_unaligned_le16(&raid_map->data_encryption_key_index);
1894 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
1895 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
1896}
1897
1898/*
1899 * Attempt to perform offload RAID mapping for a logical volume I/O.
1900 */
1901
1902#define PQI_RAID_BYPASS_INELIGIBLE 1
1903
1904static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
1905 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
1906 struct pqi_queue_group *queue_group)
1907{
1908 struct raid_map *raid_map;
1909 bool is_write = false;
1910 u32 map_index;
1911 u64 first_block;
1912 u64 last_block;
1913 u32 block_cnt;
1914 u32 blocks_per_row;
1915 u64 first_row;
1916 u64 last_row;
1917 u32 first_row_offset;
1918 u32 last_row_offset;
1919 u32 first_column;
1920 u32 last_column;
1921 u64 r0_first_row;
1922 u64 r0_last_row;
1923 u32 r5or6_blocks_per_row;
1924 u64 r5or6_first_row;
1925 u64 r5or6_last_row;
1926 u32 r5or6_first_row_offset;
1927 u32 r5or6_last_row_offset;
1928 u32 r5or6_first_column;
1929 u32 r5or6_last_column;
1930 u16 data_disks_per_row;
1931 u32 total_disks_per_row;
1932 u16 layout_map_count;
1933 u32 stripesize;
1934 u16 strip_size;
1935 u32 first_group;
1936 u32 last_group;
1937 u32 current_group;
1938 u32 map_row;
1939 u32 aio_handle;
1940 u64 disk_block;
1941 u32 disk_block_cnt;
1942 u8 cdb[16];
1943 u8 cdb_length;
1944 int offload_to_mirror;
1945 struct pqi_encryption_info *encryption_info_ptr;
1946 struct pqi_encryption_info encryption_info;
1947#if BITS_PER_LONG == 32
1948 u64 tmpdiv;
1949#endif
1950
1951 /* Check for valid opcode, get LBA and block count. */
1952 switch (scmd->cmnd[0]) {
1953 case WRITE_6:
1954 is_write = true;
1955 /* fall through */
1956 case READ_6:
1957 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
1958 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
1959 block_cnt = (u32)scmd->cmnd[4];
1960 if (block_cnt == 0)
1961 block_cnt = 256;
1962 break;
1963 case WRITE_10:
1964 is_write = true;
1965 /* fall through */
1966 case READ_10:
1967 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
1968 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
1969 break;
1970 case WRITE_12:
1971 is_write = true;
1972 /* fall through */
1973 case READ_12:
1974 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
1975 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
1976 break;
1977 case WRITE_16:
1978 is_write = true;
1979 /* fall through */
1980 case READ_16:
1981 first_block = get_unaligned_be64(&scmd->cmnd[2]);
1982 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
1983 break;
1984 default:
1985 /* Process via normal I/O path. */
1986 return PQI_RAID_BYPASS_INELIGIBLE;
1987 }
1988
1989 /* Check for write to non-RAID-0. */
1990 if (is_write && device->raid_level != SA_RAID_0)
1991 return PQI_RAID_BYPASS_INELIGIBLE;
1992
1993 if (unlikely(block_cnt == 0))
1994 return PQI_RAID_BYPASS_INELIGIBLE;
1995
1996 last_block = first_block + block_cnt - 1;
1997 raid_map = device->raid_map;
1998
1999 /* Check for invalid block or wraparound. */
2000 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2001 last_block < first_block)
2002 return PQI_RAID_BYPASS_INELIGIBLE;
2003
2004 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2005 strip_size = get_unaligned_le16(&raid_map->strip_size);
2006 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2007
2008 /* Calculate stripe information for the request. */
2009 blocks_per_row = data_disks_per_row * strip_size;
2010#if BITS_PER_LONG == 32
2011 tmpdiv = first_block;
2012 do_div(tmpdiv, blocks_per_row);
2013 first_row = tmpdiv;
2014 tmpdiv = last_block;
2015 do_div(tmpdiv, blocks_per_row);
2016 last_row = tmpdiv;
2017 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2018 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2019 tmpdiv = first_row_offset;
2020 do_div(tmpdiv, strip_size);
2021 first_column = tmpdiv;
2022 tmpdiv = last_row_offset;
2023 do_div(tmpdiv, strip_size);
2024 last_column = tmpdiv;
2025#else
2026 first_row = first_block / blocks_per_row;
2027 last_row = last_block / blocks_per_row;
2028 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2029 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2030 first_column = first_row_offset / strip_size;
2031 last_column = last_row_offset / strip_size;
2032#endif
2033
2034 /* If this isn't a single row/column then give to the controller. */
2035 if (first_row != last_row || first_column != last_column)
2036 return PQI_RAID_BYPASS_INELIGIBLE;
2037
2038 /* Proceeding with driver mapping. */
2039 total_disks_per_row = data_disks_per_row +
2040 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2041 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2042 get_unaligned_le16(&raid_map->row_cnt);
2043 map_index = (map_row * total_disks_per_row) + first_column;
2044
2045 /* RAID 1 */
2046 if (device->raid_level == SA_RAID_1) {
2047 if (device->offload_to_mirror)
2048 map_index += data_disks_per_row;
2049 device->offload_to_mirror = !device->offload_to_mirror;
2050 } else if (device->raid_level == SA_RAID_ADM) {
2051 /* RAID ADM */
2052 /*
2053 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
2054 * divisible by 3.
2055 */
2056 offload_to_mirror = device->offload_to_mirror;
2057 if (offload_to_mirror == 0) {
2058 /* use physical disk in the first mirrored group. */
2059 map_index %= data_disks_per_row;
2060 } else {
2061 do {
2062 /*
2063 * Determine mirror group that map_index
2064 * indicates.
2065 */
2066 current_group = map_index / data_disks_per_row;
2067
2068 if (offload_to_mirror != current_group) {
2069 if (current_group <
2070 layout_map_count - 1) {
2071 /*
2072 * Select raid index from
2073 * next group.
2074 */
2075 map_index += data_disks_per_row;
2076 current_group++;
2077 } else {
2078 /*
2079 * Select raid index from first
2080 * group.
2081 */
2082 map_index %= data_disks_per_row;
2083 current_group = 0;
2084 }
2085 }
2086 } while (offload_to_mirror != current_group);
2087 }
2088
2089 /* Set mirror group to use next time. */
2090 offload_to_mirror =
2091 (offload_to_mirror >= layout_map_count - 1) ?
2092 0 : offload_to_mirror + 1;
2093 WARN_ON(offload_to_mirror >= layout_map_count);
2094 device->offload_to_mirror = offload_to_mirror;
2095 /*
2096 * Avoid direct use of device->offload_to_mirror within this
2097 * function since multiple threads might simultaneously
2098 * increment it beyond the range of device->layout_map_count -1.
2099 */
2100 } else if ((device->raid_level == SA_RAID_5 ||
2101 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2102 /* RAID 50/60 */
2103 /* Verify first and last block are in same RAID group */
2104 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2105 stripesize = r5or6_blocks_per_row * layout_map_count;
2106#if BITS_PER_LONG == 32
2107 tmpdiv = first_block;
2108 first_group = do_div(tmpdiv, stripesize);
2109 tmpdiv = first_group;
2110 do_div(tmpdiv, r5or6_blocks_per_row);
2111 first_group = tmpdiv;
2112 tmpdiv = last_block;
2113 last_group = do_div(tmpdiv, stripesize);
2114 tmpdiv = last_group;
2115 do_div(tmpdiv, r5or6_blocks_per_row);
2116 last_group = tmpdiv;
2117#else
2118 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2119 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2120#endif
2121 if (first_group != last_group)
2122 return PQI_RAID_BYPASS_INELIGIBLE;
2123
2124 /* Verify request is in a single row of RAID 5/6 */
2125#if BITS_PER_LONG == 32
2126 tmpdiv = first_block;
2127 do_div(tmpdiv, stripesize);
2128 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2129 tmpdiv = last_block;
2130 do_div(tmpdiv, stripesize);
2131 r5or6_last_row = r0_last_row = tmpdiv;
2132#else
2133 first_row = r5or6_first_row = r0_first_row =
2134 first_block / stripesize;
2135 r5or6_last_row = r0_last_row = last_block / stripesize;
2136#endif
2137 if (r5or6_first_row != r5or6_last_row)
2138 return PQI_RAID_BYPASS_INELIGIBLE;
2139
2140 /* Verify request is in a single column */
2141#if BITS_PER_LONG == 32
2142 tmpdiv = first_block;
2143 first_row_offset = do_div(tmpdiv, stripesize);
2144 tmpdiv = first_row_offset;
2145 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2146 r5or6_first_row_offset = first_row_offset;
2147 tmpdiv = last_block;
2148 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2149 tmpdiv = r5or6_last_row_offset;
2150 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2151 tmpdiv = r5or6_first_row_offset;
2152 do_div(tmpdiv, strip_size);
2153 first_column = r5or6_first_column = tmpdiv;
2154 tmpdiv = r5or6_last_row_offset;
2155 do_div(tmpdiv, strip_size);
2156 r5or6_last_column = tmpdiv;
2157#else
2158 first_row_offset = r5or6_first_row_offset =
2159 (u32)((first_block % stripesize) %
2160 r5or6_blocks_per_row);
2161
2162 r5or6_last_row_offset =
2163 (u32)((last_block % stripesize) %
2164 r5or6_blocks_per_row);
2165
2166 first_column = r5or6_first_row_offset / strip_size;
2167 r5or6_first_column = first_column;
2168 r5or6_last_column = r5or6_last_row_offset / strip_size;
2169#endif
2170 if (r5or6_first_column != r5or6_last_column)
2171 return PQI_RAID_BYPASS_INELIGIBLE;
2172
2173 /* Request is eligible */
2174 map_row =
2175 ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2176 get_unaligned_le16(&raid_map->row_cnt);
2177
2178 map_index = (first_group *
2179 (get_unaligned_le16(&raid_map->row_cnt) *
2180 total_disks_per_row)) +
2181 (map_row * total_disks_per_row) + first_column;
2182 }
2183
2184 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
2185 return PQI_RAID_BYPASS_INELIGIBLE;
2186
2187 aio_handle = raid_map->disk_data[map_index].aio_handle;
2188 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2189 first_row * strip_size +
2190 (first_row_offset - first_column * strip_size);
2191 disk_block_cnt = block_cnt;
2192
2193 /* Handle differing logical/physical block sizes. */
2194 if (raid_map->phys_blk_shift) {
2195 disk_block <<= raid_map->phys_blk_shift;
2196 disk_block_cnt <<= raid_map->phys_blk_shift;
2197 }
2198
2199 if (unlikely(disk_block_cnt > 0xffff))
2200 return PQI_RAID_BYPASS_INELIGIBLE;
2201
2202 /* Build the new CDB for the physical disk I/O. */
2203 if (disk_block > 0xffffffff) {
2204 cdb[0] = is_write ? WRITE_16 : READ_16;
2205 cdb[1] = 0;
2206 put_unaligned_be64(disk_block, &cdb[2]);
2207 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2208 cdb[14] = 0;
2209 cdb[15] = 0;
2210 cdb_length = 16;
2211 } else {
2212 cdb[0] = is_write ? WRITE_10 : READ_10;
2213 cdb[1] = 0;
2214 put_unaligned_be32((u32)disk_block, &cdb[2]);
2215 cdb[6] = 0;
2216 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2217 cdb[9] = 0;
2218 cdb_length = 10;
2219 }
2220
2221 if (get_unaligned_le16(&raid_map->flags) &
2222 RAID_MAP_ENCRYPTION_ENABLED) {
2223 pqi_set_encryption_info(&encryption_info, raid_map,
2224 first_block);
2225 encryption_info_ptr = &encryption_info;
2226 } else {
2227 encryption_info_ptr = NULL;
2228 }
2229
2230 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
2231 cdb, cdb_length, queue_group, encryption_info_ptr);
2232}
2233
2234#define PQI_STATUS_IDLE 0x0
2235
2236#define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2237#define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2238
2239#define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2240#define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2241#define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2242#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2243#define PQI_DEVICE_STATE_ERROR 0x4
2244
2245#define PQI_MODE_READY_TIMEOUT_SECS 30
2246#define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2247
/*
 * Wait (up to PQI_MODE_READY_TIMEOUT_SECS) for the controller to come up in
 * PQI mode.  Three conditions are polled in sequence against one shared
 * deadline: the PQI signature must appear in the register block, the
 * function-and-status code must report IDLE, and the device status must
 * reach "all registers ready".  Returns 0 on success, -ETIMEDOUT otherwise.
 */
static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_device_registers __iomem *pqi_registers;
	unsigned long timeout;
	u64 signature;
	u8 status;

	pqi_registers = ctrl_info->pqi_registers;
	timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;

	/* Step 1: the controller publishes its signature in PQI mode. */
	while (1) {
		signature = readq(&pqi_registers->signature);
		if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
			sizeof(signature)) == 0)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for PQI signature\n");
			return -ETIMEDOUT;
		}
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	/* Step 2: the function/status code must report IDLE. */
	while (1) {
		status = readb(&pqi_registers->function_and_status_code);
		if (status == PQI_STATUS_IDLE)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for PQI IDLE\n");
			return -ETIMEDOUT;
		}
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	/* Step 3: all PQI registers must be ready for use. */
	while (1) {
		if (readl(&pqi_registers->device_status) ==
			PQI_DEVICE_STATE_ALL_REGISTERS_READY)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for PQI all registers ready\n");
			return -ETIMEDOUT;
		}
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	return 0;
}
2297
2298static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2299{
2300 struct pqi_scsi_dev *device;
2301
2302 device = io_request->scmd->device->hostdata;
2303 device->offload_enabled = false;
2304}
2305
/*
 * Mark a SCSI device offline, schedule an immediate rescan so the topology
 * change is re-evaluated, and log which device was offlined.  A device that
 * is already offline is left untouched.
 */
static inline void pqi_take_device_offline(struct scsi_device *sdev)
{
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_scsi_dev *device;

	if (scsi_device_online(sdev)) {
		scsi_device_set_state(sdev, SDEV_OFFLINE);
		ctrl_info = shost_to_hba(sdev->host);
		/* delay of 0: rescan as soon as the workqueue runs */
		schedule_delayed_work(&ctrl_info->rescan_work, 0);
		device = sdev->hostdata;
		dev_err(&ctrl_info->pci_dev->dev, "offlined scsi %d:%d:%d:%d\n",
			ctrl_info->scsi_host->host_no, device->bus,
			device->target, device->lun);
	}
}
2321
/*
 * Translate a RAID-path error response into SCSI midlayer status on the
 * originating scsi_cmnd: record the residual on underflow, copy back any
 * sense data, and take the device offline when the sense indicates an
 * internal target failure (HARDWARE ERROR / ASC 0x3e / ASCQ 0x1).
 */
static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
{
	u8 scsi_status;
	u8 host_byte;
	struct scsi_cmnd *scmd;
	struct pqi_raid_error_info *error_info;
	size_t sense_data_length;
	int residual_count;
	int xfer_count;
	struct scsi_sense_hdr sshdr;

	scmd = io_request->scmd;
	if (!scmd)
		return;

	error_info = io_request->error_info;
	scsi_status = error_info->status;
	host_byte = DID_OK;

	if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
		xfer_count =
			get_unaligned_le32(&error_info->data_out_transferred);
		residual_count = scsi_bufflen(scmd) - xfer_count;
		scsi_set_resid(scmd, residual_count);
		/* transferring less than the command's minimum is an error */
		if (xfer_count < scmd->underflow)
			host_byte = DID_SOFT_ERROR;
	}

	/* Fall back to response data if no sense data was returned. */
	sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
	if (sense_data_length == 0)
		sense_data_length =
			get_unaligned_le16(&error_info->response_data_length);
	if (sense_data_length) {
		/* clamp to what the error buffer actually holds */
		if (sense_data_length > sizeof(error_info->data))
			sense_data_length = sizeof(error_info->data);

		/*
		 * HARDWARE ERROR / 0x3e / 0x1 means the device itself has
		 * failed: offline it and fail the command as unconnected.
		 */
		if (scsi_status == SAM_STAT_CHECK_CONDITION &&
			scsi_normalize_sense(error_info->data,
				sense_data_length, &sshdr) &&
				sshdr.sense_key == HARDWARE_ERROR &&
				sshdr.asc == 0x3e &&
				sshdr.ascq == 0x1) {
			pqi_take_device_offline(scmd->device);
			host_byte = DID_NO_CONNECT;
		}

		/* clamp again to the midlayer's sense buffer size */
		if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
			sense_data_length = SCSI_SENSE_BUFFERSIZE;
		memcpy(scmd->sense_buffer, error_info->data,
			sense_data_length);
	}

	scmd->result = scsi_status;
	set_host_byte(scmd, host_byte);
}
2377
/*
 * Translate an AIO-path error response into SCSI midlayer status: decode
 * the service-response/status pair, handle underrun/overrun, flag requests
 * for retry down the RAID path when the AIO path gets disabled, offline
 * unreachable devices, and copy back (or synthesize) sense data.
 */
static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
{
	u8 scsi_status;
	u8 host_byte;
	struct scsi_cmnd *scmd;
	struct pqi_aio_error_info *error_info;
	size_t sense_data_length;
	int residual_count;
	int xfer_count;
	bool device_offline;

	scmd = io_request->scmd;
	error_info = io_request->error_info;
	host_byte = DID_OK;
	sense_data_length = 0;
	device_offline = false;

	switch (error_info->service_response) {
	case PQI_AIO_SERV_RESPONSE_COMPLETE:
		/* command reached the device; pass its status through */
		scsi_status = error_info->status;
		break;
	case PQI_AIO_SERV_RESPONSE_FAILURE:
		switch (error_info->status) {
		case PQI_AIO_STATUS_IO_ABORTED:
			scsi_status = SAM_STAT_TASK_ABORTED;
			break;
		case PQI_AIO_STATUS_UNDERRUN:
			/* short transfer: record residual; error only below underflow */
			scsi_status = SAM_STAT_GOOD;
			residual_count = get_unaligned_le32(
						&error_info->residual_count);
			scsi_set_resid(scmd, residual_count);
			xfer_count = scsi_bufflen(scmd) - residual_count;
			if (xfer_count < scmd->underflow)
				host_byte = DID_SOFT_ERROR;
			break;
		case PQI_AIO_STATUS_OVERRUN:
			scsi_status = SAM_STAT_GOOD;
			break;
		case PQI_AIO_STATUS_AIO_PATH_DISABLED:
			/* disable AIO for this device; -EAGAIN requeues via RAID path */
			pqi_aio_path_disabled(io_request);
			scsi_status = SAM_STAT_GOOD;
			io_request->status = -EAGAIN;
			break;
		case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
		case PQI_AIO_STATUS_INVALID_DEVICE:
			device_offline = true;
			pqi_take_device_offline(scmd->device);
			host_byte = DID_NO_CONNECT;
			scsi_status = SAM_STAT_CHECK_CONDITION;
			break;
		case PQI_AIO_STATUS_IO_ERROR:
		default:
			scsi_status = SAM_STAT_CHECK_CONDITION;
			break;
		}
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
	case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
		scsi_status = SAM_STAT_GOOD;
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
	case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
	default:
		scsi_status = SAM_STAT_CHECK_CONDITION;
		break;
	}

	if (error_info->data_present) {
		sense_data_length =
			get_unaligned_le16(&error_info->data_length);
		if (sense_data_length) {
			/* clamp to both the source buffer and the sense buffer */
			if (sense_data_length > sizeof(error_info->data))
				sense_data_length = sizeof(error_info->data);
			if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
				sense_data_length = SCSI_SENSE_BUFFERSIZE;
			memcpy(scmd->sense_buffer, error_info->data,
				sense_data_length);
		}
	}

	/* Device vanished without sense data: synthesize a fixed-format
	 * HARDWARE ERROR / 0x3e / 0x1 so the midlayer sees a reason. */
	if (device_offline && sense_data_length == 0)
		scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
			0x3e, 0x1);

	scmd->result = scsi_status;
	set_host_byte(scmd, host_byte);
}
2465
2466static void pqi_process_io_error(unsigned int iu_type,
2467 struct pqi_io_request *io_request)
2468{
2469 switch (iu_type) {
2470 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2471 pqi_process_raid_io_error(io_request);
2472 break;
2473 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2474 pqi_process_aio_io_error(io_request);
2475 break;
2476 }
2477}
2478
2479static int pqi_interpret_task_management_response(
2480 struct pqi_task_management_response *response)
2481{
2482 int rc;
2483
2484 switch (response->response_code) {
2485 case SOP_TMF_COMPLETE:
2486 case SOP_TMF_FUNCTION_SUCCEEDED:
2487 rc = 0;
2488 break;
2489 default:
2490 rc = -EIO;
2491 break;
2492 }
2493
2494 return rc;
2495}
2496
/*
 * Drain completed I/O responses from a queue group's outbound queue.
 *
 * Walks the OQ from the driver's cached consumer index (oq_ci_copy) up to
 * the controller-written producer index, completes each matching I/O
 * request, then publishes the new consumer index back to the controller.
 * Returns the number of responses processed.
 */
static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group)
{
	unsigned int num_responses;
	pqi_index_t oq_pi;
	pqi_index_t oq_ci;
	struct pqi_io_request *io_request;
	struct pqi_io_response *response;
	u16 request_id;

	num_responses = 0;
	oq_ci = queue_group->oq_ci_copy;

	while (1) {
		/* producer index lives in host memory, written by the controller */
		oq_pi = *queue_group->oq_pi;
		if (oq_pi == oq_ci)
			break;	/* queue drained */

		num_responses++;
		response = queue_group->oq_element_array +
			(oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);

		/* the request ID indexes directly into the I/O request pool */
		request_id = get_unaligned_le16(&response->request_id);
		WARN_ON(request_id >= ctrl_info->max_io_slots);

		io_request = &ctrl_info->io_request_pool[request_id];
		WARN_ON(atomic_read(&io_request->refcount) == 0);

		switch (response->header.iu_type) {
		case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
		case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
		case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
			/* success: nothing extra to record */
			break;
		case PQI_RESPONSE_IU_TASK_MANAGEMENT:
			io_request->status =
				pqi_interpret_task_management_response(
					(void *)response);
			break;
		case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
			/* -EAGAIN: resubmit this request via the RAID path */
			pqi_aio_path_disabled(io_request);
			io_request->status = -EAGAIN;
			break;
		case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
		case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
			/* error details live in the shared error buffer */
			io_request->error_info = ctrl_info->error_buffer +
				(get_unaligned_le16(&response->error_index) *
				PQI_ERROR_BUFFER_ELEMENT_LENGTH);
			pqi_process_io_error(response->header.iu_type,
				io_request);
			break;
		default:
			dev_err(&ctrl_info->pci_dev->dev,
				"unexpected IU type: 0x%x\n",
				response->header.iu_type);
			WARN_ON(response->header.iu_type);
			break;
		}

		io_request->io_complete_callback(io_request,
			io_request->context);

		/*
		 * Note that the I/O request structure CANNOT BE TOUCHED after
		 * returning from the I/O completion callback!
		 */

		oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
	}

	if (num_responses) {
		queue_group->oq_ci_copy = oq_ci;
		/* tell the controller how far we have consumed */
		writel(oq_ci, queue_group->oq_ci);
	}

	return num_responses;
}
2573
/*
 * Number of free elements in a circular queue of elements_in_queue slots
 * given producer index pi and consumer index ci.  One slot is always kept
 * unused so that a full queue can be distinguished from an empty one.
 */
static inline unsigned int pqi_num_elements_free(unsigned int pi,
	unsigned int ci, unsigned int elements_in_queue)
{
	unsigned int used = (pi >= ci) ?
		pi - ci : elements_in_queue - ci + pi;

	return elements_in_queue - used - 1;
}
2586
2587#define PQI_EVENT_ACK_TIMEOUT 30
2588
/*
 * Post an event-acknowledge IU on the RAID path of the default queue group,
 * spinning (bounded by PQI_EVENT_ACK_TIMEOUT seconds) until a free inbound
 * queue element is available.  On timeout the acknowledge is silently
 * dropped after logging.
 */
static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
	struct pqi_event_acknowledge_request *iu, size_t iu_length)
{
	pqi_index_t iq_pi;
	pqi_index_t iq_ci;
	unsigned long flags;
	void *next_element;
	unsigned long timeout;
	struct pqi_queue_group *queue_group;

	queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
	put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);

	timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;

	while (1) {
		spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);

		iq_pi = queue_group->iq_pi_copy[RAID_PATH];
		iq_ci = *queue_group->iq_ci[RAID_PATH];

		/*
		 * NOTE: this break deliberately leaves submit_lock held; it
		 * is released at the end of the function after the IU has
		 * been queued and the producer index published.
		 */
		if (pqi_num_elements_free(iq_pi, iq_ci,
			ctrl_info->num_elements_per_iq))
			break;

		spin_unlock_irqrestore(
			&queue_group->submit_lock[RAID_PATH], flags);

		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"sending event acknowledge timed out\n");
			return;
		}
	}

	/* Copy the IU into the next free inbound queue element. */
	next_element = queue_group->iq_element_array[RAID_PATH] +
		(iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	memcpy(next_element, iu, iu_length);

	iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;

	queue_group->iq_pi_copy[RAID_PATH] = iq_pi;

	/*
	 * This write notifies the controller that an IU is available to be
	 * processed.
	 */
	writel(iq_pi, queue_group->iq_pi[RAID_PATH]);

	spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
}
2641
2642static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
2643 struct pqi_event *event)
2644{
2645 struct pqi_event_acknowledge_request request;
2646
2647 memset(&request, 0, sizeof(request));
2648
2649 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
2650 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
2651 &request.header.iu_length);
2652 request.event_type = event->event_type;
2653 request.event_id = event->event_id;
2654 request.additional_event_id = event->additional_event_id;
2655
2656 pqi_start_event_ack(ctrl_info, &request, sizeof(request));
2657}
2658
2659static void pqi_event_worker(struct work_struct *work)
2660{
2661 unsigned int i;
2662 struct pqi_ctrl_info *ctrl_info;
2663 struct pqi_event *pending_event;
2664 bool got_non_heartbeat_event = false;
2665
2666 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
2667
2668 pending_event = ctrl_info->pending_events;
2669 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
2670 if (pending_event->pending) {
2671 pending_event->pending = false;
2672 pqi_acknowledge_event(ctrl_info, pending_event);
2673 if (i != PQI_EVENT_HEARTBEAT)
2674 got_non_heartbeat_event = true;
2675 }
2676 pending_event++;
2677 }
2678
2679 if (got_non_heartbeat_event)
2680 pqi_schedule_rescan_worker(ctrl_info);
2681}
2682
/*
 * Declare the controller dead: mark it offline and fail every I/O request
 * still waiting on any queue group's submission lists with DID_NO_CONNECT.
 * Requests already handed to the hardware are not touched here.
 */
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	unsigned int path;
	struct pqi_queue_group *queue_group;
	unsigned long flags;
	struct pqi_io_request *io_request;
	struct pqi_io_request *next;
	struct scsi_cmnd *scmd;

	ctrl_info->controller_online = false;
	dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];

		/* both submission paths: RAID and AIO */
		for (path = 0; path < 2; path++) {
			spin_lock_irqsave(
				&queue_group->submit_lock[path], flags);

			/* _safe variant: entries are deleted while walking */
			list_for_each_entry_safe(io_request, next,
				&queue_group->request_list[path],
				request_list_entry) {

				scmd = io_request->scmd;
				if (scmd) {
					set_host_byte(scmd, DID_NO_CONNECT);
					pqi_scsi_done(scmd);
				}

				list_del(&io_request->request_list_entry);
			}

			spin_unlock_irqrestore(
				&queue_group->submit_lock[path], flags);
		}
	}
}
2721
2722#define PQI_HEARTBEAT_TIMER_INTERVAL (5 * HZ)
2723#define PQI_MAX_HEARTBEAT_REQUESTS 5
2724
/*
 * Periodic liveness check (fires every PQI_HEARTBEAT_TIMER_INTERVAL).
 * If no interrupts arrived since the previous tick, request a heartbeat
 * event from the controller; after more than PQI_MAX_HEARTBEAT_REQUESTS
 * consecutive silent intervals the controller is taken offline and the
 * timer is not re-armed.
 */
static void pqi_heartbeat_timer_handler(unsigned long data)
{
	int num_interrupts;
	struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;

	num_interrupts = atomic_read(&ctrl_info->num_interrupts);

	if (num_interrupts == ctrl_info->previous_num_interrupts) {
		/* no interrupt activity since the last tick */
		ctrl_info->num_heartbeats_requested++;
		if (ctrl_info->num_heartbeats_requested >
			PQI_MAX_HEARTBEAT_REQUESTS) {
			pqi_take_ctrl_offline(ctrl_info);
			/* controller is gone: do not re-arm the timer */
			return;
		}
		/* have the event worker send a heartbeat acknowledge */
		ctrl_info->pending_events[PQI_EVENT_HEARTBEAT].pending = true;
		schedule_work(&ctrl_info->event_work);
	} else {
		ctrl_info->num_heartbeats_requested = 0;
	}

	ctrl_info->previous_num_interrupts = num_interrupts;
	mod_timer(&ctrl_info->heartbeat_timer,
		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
}
2749
/*
 * Arm the heartbeat watchdog timer.  The current interrupt count is
 * snapshotted first so the first timer tick has a valid baseline to
 * compare against.
 */
static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->previous_num_interrupts =
		atomic_read(&ctrl_info->num_interrupts);

	/* legacy timer API: expires/data/function set before add_timer() */
	init_timer(&ctrl_info->heartbeat_timer);
	ctrl_info->heartbeat_timer.expires =
		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
	ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
	ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
	add_timer(&ctrl_info->heartbeat_timer);
	/* lets pqi_stop_heartbeat_timer() know there is a timer to delete */
	ctrl_info->heartbeat_timer_started = true;
}
2763
2764static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2765{
2766 if (ctrl_info->heartbeat_timer_started)
2767 del_timer_sync(&ctrl_info->heartbeat_timer);
2768}
2769
2770static int pqi_event_type_to_event_index(unsigned int event_type)
2771{
2772 int index;
2773
2774 switch (event_type) {
2775 case PQI_EVENT_TYPE_HEARTBEAT:
2776 index = PQI_EVENT_HEARTBEAT;
2777 break;
2778 case PQI_EVENT_TYPE_HOTPLUG:
2779 index = PQI_EVENT_HOTPLUG;
2780 break;
2781 case PQI_EVENT_TYPE_HARDWARE:
2782 index = PQI_EVENT_HARDWARE;
2783 break;
2784 case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
2785 index = PQI_EVENT_PHYSICAL_DEVICE;
2786 break;
2787 case PQI_EVENT_TYPE_LOGICAL_DEVICE:
2788 index = PQI_EVENT_LOGICAL_DEVICE;
2789 break;
2790 case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
2791 index = PQI_EVENT_AIO_STATE_CHANGE;
2792 break;
2793 case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
2794 index = PQI_EVENT_AIO_CONFIG_CHANGE;
2795 break;
2796 default:
2797 index = -1;
2798 break;
2799 }
2800
2801 return index;
2802}
2803
/*
 * Drain the event outbound queue.  Each supported event that requests an
 * acknowledge is recorded in pending_events[]; non-heartbeat events also
 * schedule the event worker to send the acknowledge (heartbeat events are
 * acknowledged from the heartbeat timer path instead).  Returns the number
 * of event responses consumed.
 */
static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int num_events;
	pqi_index_t oq_pi;
	pqi_index_t oq_ci;
	struct pqi_event_queue *event_queue;
	struct pqi_event_response *response;
	struct pqi_event *pending_event;
	bool need_delayed_work;
	int event_index;

	event_queue = &ctrl_info->event_queue;
	num_events = 0;
	need_delayed_work = false;
	oq_ci = event_queue->oq_ci_copy;

	while (1) {
		/* producer index is written by the controller */
		oq_pi = *event_queue->oq_pi;
		if (oq_pi == oq_ci)
			break;	/* queue drained */

		num_events++;
		response = event_queue->oq_element_array +
			(oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);

		/* -1 means an event type we do not handle; drop it */
		event_index =
			pqi_event_type_to_event_index(response->event_type);

		if (event_index >= 0) {
			if (response->request_acknowlege) {
				pending_event =
					&ctrl_info->pending_events[event_index];
				pending_event->event_type =
					response->event_type;
				pending_event->event_id = response->event_id;
				pending_event->additional_event_id =
					response->additional_event_id;
				/* heartbeat acks are driven by the timer, not here */
				if (event_index != PQI_EVENT_HEARTBEAT) {
					pending_event->pending = true;
					need_delayed_work = true;
				}
			}
		}

		oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
	}

	if (num_events) {
		event_queue->oq_ci_copy = oq_ci;
		/* publish the new consumer index to the controller */
		writel(oq_ci, event_queue->oq_ci);

		if (need_delayed_work)
			schedule_work(&ctrl_info->event_work);
	}

	return num_events;
}
2861
2862static irqreturn_t pqi_irq_handler(int irq, void *data)
2863{
2864 struct pqi_ctrl_info *ctrl_info;
2865 struct pqi_queue_group *queue_group;
2866 unsigned int num_responses_handled;
2867
2868 queue_group = data;
2869 ctrl_info = queue_group->ctrl_info;
2870
2871 if (!ctrl_info || !queue_group->oq_ci)
2872 return IRQ_NONE;
2873
2874 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
2875
2876 if (irq == ctrl_info->event_irq)
2877 num_responses_handled += pqi_process_event_intr(ctrl_info);
2878
2879 if (num_responses_handled)
2880 atomic_inc(&ctrl_info->num_interrupts);
2881
2882 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
2883 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
2884
2885 return IRQ_HANDLED;
2886}
2887
2888static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
2889{
2890 int i;
2891 int rc;
2892
2893 ctrl_info->event_irq = ctrl_info->msix_vectors[0];
2894
2895 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
2896 rc = request_irq(ctrl_info->msix_vectors[i],
2897 pqi_irq_handler, 0,
2898 DRIVER_NAME_SHORT, ctrl_info->intr_data[i]);
2899 if (rc) {
2900 dev_err(&ctrl_info->pci_dev->dev,
2901 "irq %u init failed with error %d\n",
2902 ctrl_info->msix_vectors[i], rc);
2903 return rc;
2904 }
2905 ctrl_info->num_msix_vectors_initialized++;
2906 }
2907
2908 return 0;
2909}
2910
2911static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
2912{
2913 int i;
2914
2915 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
2916 free_irq(ctrl_info->msix_vectors[i],
2917 ctrl_info->intr_data[i]);
2918}
2919
2920static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
2921{
2922 unsigned int i;
2923 int max_vectors;
2924 int num_vectors_enabled;
2925 struct msix_entry msix_entries[PQI_MAX_MSIX_VECTORS];
2926
2927 max_vectors = ctrl_info->num_queue_groups;
2928
2929 for (i = 0; i < max_vectors; i++)
2930 msix_entries[i].entry = i;
2931
2932 num_vectors_enabled = pci_enable_msix_range(ctrl_info->pci_dev,
2933 msix_entries, PQI_MIN_MSIX_VECTORS, max_vectors);
2934
2935 if (num_vectors_enabled < 0) {
2936 dev_err(&ctrl_info->pci_dev->dev,
2937 "MSI-X init failed with error %d\n",
2938 num_vectors_enabled);
2939 return num_vectors_enabled;
2940 }
2941
2942 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
2943 for (i = 0; i < num_vectors_enabled; i++) {
2944 ctrl_info->msix_vectors[i] = msix_entries[i].vector;
2945 ctrl_info->intr_data[i] = &ctrl_info->queue_groups[i];
2946 }
2947
2948 return 0;
2949}
2950
2951static void pqi_irq_set_affinity_hint(struct pqi_ctrl_info *ctrl_info)
2952{
2953 int i;
2954 int rc;
2955 int cpu;
2956
2957 cpu = cpumask_first(cpu_online_mask);
2958 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
2959 rc = irq_set_affinity_hint(ctrl_info->msix_vectors[i],
2960 get_cpu_mask(cpu));
2961 if (rc)
2962 dev_err(&ctrl_info->pci_dev->dev,
2963 "error %d setting affinity hint for irq vector %u\n",
2964 rc, ctrl_info->msix_vectors[i]);
2965 cpu = cpumask_next(cpu, cpu_online_mask);
2966 }
2967}
2968
2969static void pqi_irq_unset_affinity_hint(struct pqi_ctrl_info *ctrl_info)
2970{
2971 int i;
2972
2973 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
2974 irq_set_affinity_hint(ctrl_info->msix_vectors[i], NULL);
2975}
2976
/*
 * Allocate a single DMA-coherent region holding every operational queue:
 * each queue group's two inbound (RAID + AIO) element arrays, each group's
 * outbound element array, the event queue's element array, and all of the
 * CI/PI index words.  The layout is computed in two passes with identical
 * alignment steps: a dry run starting from a NULL pointer measures the
 * required size, then the same walk over the real allocation hands out the
 * CPU virtual addresses and matching bus addresses.
 */
static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	size_t alloc_length;
	size_t element_array_length_per_iq;
	size_t element_array_length_per_oq;
	void *element_array;
	void *next_queue_index;
	void *aligned_pointer;
	unsigned int num_inbound_queues;
	unsigned int num_outbound_queues;
	unsigned int num_queue_indexes;
	struct pqi_queue_group *queue_group;

	element_array_length_per_iq =
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
		ctrl_info->num_elements_per_iq;
	element_array_length_per_oq =
		PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
		ctrl_info->num_elements_per_oq;
	/* two IQs (RAID + AIO) per group, one OQ per group */
	num_inbound_queues = ctrl_info->num_queue_groups * 2;
	num_outbound_queues = ctrl_info->num_queue_groups;
	/* 3 indexes per group (2 IQ CI + 1 OQ PI) + 1 for the event queue */
	num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;

	/* --- pass 1: dry run from NULL to compute the total size --- */
	aligned_pointer = NULL;

	for (i = 0; i < num_inbound_queues; i++) {
		aligned_pointer = PTR_ALIGN(aligned_pointer,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
		aligned_pointer += element_array_length_per_iq;
	}

	for (i = 0; i < num_outbound_queues; i++) {
		aligned_pointer = PTR_ALIGN(aligned_pointer,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
		aligned_pointer += element_array_length_per_oq;
	}

	aligned_pointer = PTR_ALIGN(aligned_pointer,
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
	aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
		PQI_EVENT_OQ_ELEMENT_LENGTH;

	for (i = 0; i < num_queue_indexes; i++) {
		aligned_pointer = PTR_ALIGN(aligned_pointer,
			PQI_OPERATIONAL_INDEX_ALIGNMENT);
		aligned_pointer += sizeof(pqi_index_t);
	}

	/* extra slack so the base can be aligned up after allocation */
	alloc_length = (size_t)aligned_pointer +
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;

	ctrl_info->queue_memory_base =
		dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
			alloc_length,
			&ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);

	if (!ctrl_info->queue_memory_base) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate memory for PQI admin queues\n");
		return -ENOMEM;
	}

	ctrl_info->queue_memory_length = alloc_length;

	/* --- pass 2: the same walk over the real allocation --- */
	element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);

	/* inbound element arrays: RAID then AIO for each group */
	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];
		queue_group->iq_element_array[RAID_PATH] = element_array;
		queue_group->iq_element_array_bus_addr[RAID_PATH] =
			ctrl_info->queue_memory_base_dma_handle +
			(element_array - ctrl_info->queue_memory_base);
		element_array += element_array_length_per_iq;
		element_array = PTR_ALIGN(element_array,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
		queue_group->iq_element_array[AIO_PATH] = element_array;
		queue_group->iq_element_array_bus_addr[AIO_PATH] =
			ctrl_info->queue_memory_base_dma_handle +
			(element_array - ctrl_info->queue_memory_base);
		element_array += element_array_length_per_iq;
		element_array = PTR_ALIGN(element_array,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
	}

	/* outbound element arrays, one per group */
	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];
		queue_group->oq_element_array = element_array;
		queue_group->oq_element_array_bus_addr =
			ctrl_info->queue_memory_base_dma_handle +
			(element_array - ctrl_info->queue_memory_base);
		element_array += element_array_length_per_oq;
		element_array = PTR_ALIGN(element_array,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
	}

	/* event queue element array */
	ctrl_info->event_queue.oq_element_array = element_array;
	ctrl_info->event_queue.oq_element_array_bus_addr =
		ctrl_info->queue_memory_base_dma_handle +
		(element_array - ctrl_info->queue_memory_base);
	element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
		PQI_EVENT_OQ_ELEMENT_LENGTH;

	next_queue_index = PTR_ALIGN(element_array,
		PQI_OPERATIONAL_INDEX_ALIGNMENT);

	/* per-group index words: IQ CI (RAID), IQ CI (AIO), OQ PI */
	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];
		queue_group->iq_ci[RAID_PATH] = next_queue_index;
		queue_group->iq_ci_bus_addr[RAID_PATH] =
			ctrl_info->queue_memory_base_dma_handle +
			(next_queue_index - ctrl_info->queue_memory_base);
		next_queue_index += sizeof(pqi_index_t);
		next_queue_index = PTR_ALIGN(next_queue_index,
			PQI_OPERATIONAL_INDEX_ALIGNMENT);
		queue_group->iq_ci[AIO_PATH] = next_queue_index;
		queue_group->iq_ci_bus_addr[AIO_PATH] =
			ctrl_info->queue_memory_base_dma_handle +
			(next_queue_index - ctrl_info->queue_memory_base);
		next_queue_index += sizeof(pqi_index_t);
		next_queue_index = PTR_ALIGN(next_queue_index,
			PQI_OPERATIONAL_INDEX_ALIGNMENT);
		queue_group->oq_pi = next_queue_index;
		queue_group->oq_pi_bus_addr =
			ctrl_info->queue_memory_base_dma_handle +
			(next_queue_index - ctrl_info->queue_memory_base);
		next_queue_index += sizeof(pqi_index_t);
		next_queue_index = PTR_ALIGN(next_queue_index,
			PQI_OPERATIONAL_INDEX_ALIGNMENT);
	}

	/* final index word: the event queue's producer index */
	ctrl_info->event_queue.oq_pi = next_queue_index;
	ctrl_info->event_queue.oq_pi_bus_addr =
		ctrl_info->queue_memory_base_dma_handle +
		(next_queue_index - ctrl_info->queue_memory_base);

	return 0;
}
3116
3117static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3118{
3119 unsigned int i;
3120 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3121 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3122
3123 /*
3124 * Initialize the backpointers to the controller structure in
3125 * each operational queue group structure.
3126 */
3127 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3128 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3129
3130 /*
3131 * Assign IDs to all operational queues. Note that the IDs
3132 * assigned to operational IQs are independent of the IDs
3133 * assigned to operational OQs.
3134 */
3135 ctrl_info->event_queue.oq_id = next_oq_id++;
3136 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3137 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3138 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3139 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3140 }
3141
3142 /*
3143 * Assign MSI-X table entry indexes to all queues. Note that the
3144 * interrupt for the event queue is shared with the first queue group.
3145 */
3146 ctrl_info->event_queue.int_msg_num = 0;
3147 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3148 ctrl_info->queue_groups[i].int_msg_num = i;
3149
3150 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3151 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3152 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3153 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3154 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3155 }
3156}
3157
/*
 * Allocate the DMA-coherent block holding the admin IQ/OQ element arrays
 * and their CI/PI index words, then record both the CPU virtual addresses
 * and the bus addresses that will later be handed to the controller.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
{
	size_t alloc_length;
	struct pqi_admin_queues_aligned *admin_queues_aligned;
	struct pqi_admin_queues *admin_queues;

	/* extra slack so the base can be aligned up after allocation */
	alloc_length = sizeof(struct pqi_admin_queues_aligned) +
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;

	ctrl_info->admin_queue_memory_base =
		dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
			alloc_length,
			&ctrl_info->admin_queue_memory_base_dma_handle,
			GFP_KERNEL);

	if (!ctrl_info->admin_queue_memory_base)
		return -ENOMEM;

	ctrl_info->admin_queue_memory_length = alloc_length;

	admin_queues = &ctrl_info->admin_queues;
	admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
	admin_queues->iq_element_array =
		&admin_queues_aligned->iq_element_array;
	admin_queues->oq_element_array =
		&admin_queues_aligned->oq_element_array;
	admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
	admin_queues->oq_pi = &admin_queues_aligned->oq_pi;

	/* bus address = DMA handle + offset of each piece within the block */
	admin_queues->iq_element_array_bus_addr =
		ctrl_info->admin_queue_memory_base_dma_handle +
		(admin_queues->iq_element_array -
		ctrl_info->admin_queue_memory_base);
	admin_queues->oq_element_array_bus_addr =
		ctrl_info->admin_queue_memory_base_dma_handle +
		(admin_queues->oq_element_array -
		ctrl_info->admin_queue_memory_base);
	admin_queues->iq_ci_bus_addr =
		ctrl_info->admin_queue_memory_base_dma_handle +
		((void *)admin_queues->iq_ci -
		ctrl_info->admin_queue_memory_base);
	admin_queues->oq_pi_bus_addr =
		ctrl_info->admin_queue_memory_base_dma_handle +
		((void *)admin_queues->oq_pi -
		ctrl_info->admin_queue_memory_base);

	return 0;
}
3207
3208#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
3209#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3210
/*
 * Program the admin queue-pair registers and issue the PQI "create admin
 * queue pair" function, then poll until the controller reports idle.
 *
 * Returns 0 on success or -ETIMEDOUT if the controller does not go idle
 * within PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES.
 */
static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_device_registers __iomem *pqi_registers;
	struct pqi_admin_queues *admin_queues;
	unsigned long timeout;
	u8 status;
	u32 reg;

	pqi_registers = ctrl_info->pqi_registers;
	admin_queues = &ctrl_info->admin_queues;

	/* Hand the DMA addresses of the queue structures to the device. */
	writeq((u64)admin_queues->iq_element_array_bus_addr,
		&pqi_registers->admin_iq_element_array_addr);
	writeq((u64)admin_queues->oq_element_array_bus_addr,
		&pqi_registers->admin_oq_element_array_addr);
	writeq((u64)admin_queues->iq_ci_bus_addr,
		&pqi_registers->admin_iq_ci_addr);
	writeq((u64)admin_queues->oq_pi_bus_addr,
		&pqi_registers->admin_oq_pi_addr);

	/*
	 * Packed register layout: bits 0-7 IQ element count, bits 8-15 OQ
	 * element count, bits 16+ MSI-X message number for the admin OQ.
	 */
	reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
		(PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
		(admin_queues->int_msg_num << 16);
	writel(reg, &pqi_registers->admin_iq_num_elements);
	/* This write kicks off the create-admin-queue-pair operation. */
	writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
		&pqi_registers->function_and_status_code);

	timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
	while (1) {
		status = readb(&pqi_registers->function_and_status_code);
		if (status == PQI_STATUS_IDLE)
			break;
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
	}

	/*
	 * The offset registers are not initialized to the correct
	 * offsets until *after* the create admin queue pair command
	 * completes successfully.
	 */
	admin_queues->iq_pi = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		readq(&pqi_registers->admin_iq_pi_offset);
	admin_queues->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		readq(&pqi_registers->admin_oq_ci_offset);

	return 0;
}
3262
3263static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3264 struct pqi_general_admin_request *request)
3265{
3266 struct pqi_admin_queues *admin_queues;
3267 void *next_element;
3268 pqi_index_t iq_pi;
3269
3270 admin_queues = &ctrl_info->admin_queues;
3271 iq_pi = admin_queues->iq_pi_copy;
3272
3273 next_element = admin_queues->iq_element_array +
3274 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3275
3276 memcpy(next_element, request, sizeof(*request));
3277
3278 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3279 admin_queues->iq_pi_copy = iq_pi;
3280
3281 /*
3282 * This write notifies the controller that an IU is available to be
3283 * processed.
3284 */
3285 writel(iq_pi, admin_queues->iq_pi);
3286}
3287
3288static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3289 struct pqi_general_admin_response *response)
3290{
3291 struct pqi_admin_queues *admin_queues;
3292 pqi_index_t oq_pi;
3293 pqi_index_t oq_ci;
3294 unsigned long timeout;
3295
3296 admin_queues = &ctrl_info->admin_queues;
3297 oq_ci = admin_queues->oq_ci_copy;
3298
3299 timeout = (3 * HZ) + jiffies;
3300
3301 while (1) {
3302 oq_pi = *admin_queues->oq_pi;
3303 if (oq_pi != oq_ci)
3304 break;
3305 if (time_after(jiffies, timeout)) {
3306 dev_err(&ctrl_info->pci_dev->dev,
3307 "timed out waiting for admin response\n");
3308 return -ETIMEDOUT;
3309 }
3310 usleep_range(1000, 2000);
3311 }
3312
3313 memcpy(response, admin_queues->oq_element_array +
3314 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3315
3316 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3317 admin_queues->oq_ci_copy = oq_ci;
3318 writel(oq_ci, admin_queues->oq_ci);
3319
3320 return 0;
3321}
3322
/*
 * Queue @io_request (if non-NULL) onto the per-path submit list, then
 * drain as many pending requests as will fit into the operational IQ for
 * @path, handling IUs that span multiple queue elements and wrap around
 * the end of the element array. Called with or without new work to flush
 * the backlog. Requests that do not fit stay on the list for a later call.
 */
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request)
{
	struct pqi_io_request *next;
	void *next_element;
	pqi_index_t iq_pi;
	pqi_index_t iq_ci;
	size_t iu_length;
	unsigned long flags;
	unsigned int num_elements_needed;
	unsigned int num_elements_to_end_of_queue;
	size_t copy_count;
	struct pqi_iu_header *request;

	spin_lock_irqsave(&queue_group->submit_lock[path], flags);

	if (io_request)
		list_add_tail(&io_request->request_list_entry,
			&queue_group->request_list[path]);

	iq_pi = queue_group->iq_pi_copy[path];

	list_for_each_entry_safe(io_request, next,
		&queue_group->request_list[path], request_list_entry) {

		request = io_request->iu;

		/* Total IU size: header length field excludes the header. */
		iu_length = get_unaligned_le16(&request->iu_length) +
			PQI_REQUEST_HEADER_LENGTH;
		num_elements_needed =
			DIV_ROUND_UP(iu_length,
				PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

		/* Re-read the consumer index the device keeps up to date. */
		iq_ci = *queue_group->iq_ci[path];

		/* Stop draining once the queue lacks room for this IU. */
		if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
			ctrl_info->num_elements_per_iq))
			break;

		/* Route this request's response to our group's OQ. */
		put_unaligned_le16(queue_group->oq_id,
			&request->response_queue_id);

		next_element = queue_group->iq_element_array[path] +
			(iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

		num_elements_to_end_of_queue =
			ctrl_info->num_elements_per_iq - iq_pi;

		if (num_elements_needed <= num_elements_to_end_of_queue) {
			memcpy(next_element, request, iu_length);
		} else {
			/*
			 * The IU wraps: copy the tail of the element array
			 * first, then the remainder at the array start.
			 */
			copy_count = num_elements_to_end_of_queue *
				PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
			memcpy(next_element, request, copy_count);
			memcpy(queue_group->iq_element_array[path],
				(u8 *)request + copy_count,
				iu_length - copy_count);
		}

		iq_pi = (iq_pi + num_elements_needed) %
			ctrl_info->num_elements_per_iq;

		list_del(&io_request->request_list_entry);
	}

	if (iq_pi != queue_group->iq_pi_copy[path]) {
		queue_group->iq_pi_copy[path] = iq_pi;
		/*
		 * This write notifies the controller that one or more IUs are
		 * available to be processed.
		 */
		writel(iq_pi, queue_group->iq_pi[path]);
	}

	spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
}
3400
/* Completion callback for synchronous RAID requests: wake the waiter. */
static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
	void *context)
{
	complete((struct completion *)context);
}
3408
3409static int pqi_submit_raid_request_synchronous_with_io_request(
3410 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
3411 unsigned long timeout_msecs)
3412{
3413 int rc = 0;
3414 DECLARE_COMPLETION_ONSTACK(wait);
3415
3416 io_request->io_complete_callback = pqi_raid_synchronous_complete;
3417 io_request->context = &wait;
3418
3419 pqi_start_io(ctrl_info,
3420 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
3421 io_request);
3422
3423 if (timeout_msecs == NO_TIMEOUT) {
3424 wait_for_completion_io(&wait);
3425 } else {
3426 if (!wait_for_completion_io_timeout(&wait,
3427 msecs_to_jiffies(timeout_msecs))) {
3428 dev_warn(&ctrl_info->pci_dev->dev,
3429 "command timed out\n");
3430 rc = -ETIMEDOUT;
3431 }
3432 }
3433
3434 return rc;
3435}
3436
/*
 * Issue a RAID-path request and wait for its completion, serialized by
 * the controller's sync_request_sem. @flags may include
 * PQI_SYNC_FLAGS_INTERRUPTABLE to allow signals to abort the wait for
 * the semaphore. On return, @error_info (if supplied) is filled with the
 * firmware error info, or zeroed when there was none.
 *
 * Returns 0, -ERESTARTSYS, -ETIMEDOUT, or -EIO (bad SCSI status).
 */
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
{
	int rc;
	struct pqi_io_request *io_request;
	unsigned long start_jiffies;
	unsigned long msecs_blocked;
	size_t iu_length;

	/*
	 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
	 * are mutually exclusive.
	 */

	if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
		if (down_interruptible(&ctrl_info->sync_request_sem))
			return -ERESTARTSYS;
	} else {
		if (timeout_msecs == NO_TIMEOUT) {
			down(&ctrl_info->sync_request_sem);
		} else {
			start_jiffies = jiffies;
			if (down_timeout(&ctrl_info->sync_request_sem,
				msecs_to_jiffies(timeout_msecs)))
				return -ETIMEDOUT;
			/*
			 * Charge the time spent blocked on the semaphore
			 * against the caller's overall timeout budget.
			 */
			msecs_blocked =
				jiffies_to_msecs(jiffies - start_jiffies);
			if (msecs_blocked >= timeout_msecs)
				return -ETIMEDOUT;
			timeout_msecs -= msecs_blocked;
		}
	}

	io_request = pqi_alloc_io_request(ctrl_info);

	/*
	 * Stamp the slot index into the request so the completion path can
	 * match the response back to this io_request.
	 */
	put_unaligned_le16(io_request->index,
		&(((struct pqi_raid_path_request *)request)->request_id));

	if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
		((struct pqi_raid_path_request *)request)->error_index =
			((struct pqi_raid_path_request *)request)->request_id;

	iu_length = get_unaligned_le16(&request->iu_length) +
		PQI_REQUEST_HEADER_LENGTH;
	memcpy(io_request->iu, request, iu_length);

	rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
		io_request, timeout_msecs);

	if (error_info) {
		if (io_request->error_info)
			memcpy(error_info, io_request->error_info,
				sizeof(*error_info));
		else
			memset(error_info, 0, sizeof(*error_info));
	} else if (rc == 0 && io_request->error_info) {
		u8 scsi_status;
		struct pqi_raid_error_info *raid_error_info;

		raid_error_info = io_request->error_info;
		scsi_status = raid_error_info->status;

		/*
		 * An underflow with check condition is treated as success
		 * (a short transfer, not an error).
		 */
		if (scsi_status == SAM_STAT_CHECK_CONDITION &&
			raid_error_info->data_out_result ==
			PQI_DATA_IN_OUT_UNDERFLOW)
			scsi_status = SAM_STAT_GOOD;

		if (scsi_status != SAM_STAT_GOOD)
			rc = -EIO;
	}

	pqi_free_io_request(io_request);

	up(&ctrl_info->sync_request_sem);

	return rc;
}
3515
3516static int pqi_validate_admin_response(
3517 struct pqi_general_admin_response *response, u8 expected_function_code)
3518{
3519 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
3520 return -EINVAL;
3521
3522 if (get_unaligned_le16(&response->header.iu_length) !=
3523 PQI_GENERAL_ADMIN_IU_LENGTH)
3524 return -EINVAL;
3525
3526 if (response->function_code != expected_function_code)
3527 return -EINVAL;
3528
3529 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
3530 return -EINVAL;
3531
3532 return 0;
3533}
3534
3535static int pqi_submit_admin_request_synchronous(
3536 struct pqi_ctrl_info *ctrl_info,
3537 struct pqi_general_admin_request *request,
3538 struct pqi_general_admin_response *response)
3539{
3540 int rc;
3541
3542 pqi_submit_admin_request(ctrl_info, request);
3543
3544 rc = pqi_poll_for_admin_response(ctrl_info, response);
3545
3546 if (rc == 0)
3547 rc = pqi_validate_admin_response(response,
3548 request->function_code);
3549
3550 return rc;
3551}
3552
/*
 * Issue the REPORT DEVICE CAPABILITY admin function and cache the
 * controller's queue limits (counts, element lengths, max inbound IU
 * length, spanning support) in @ctrl_info for later queue sizing.
 *
 * Returns 0 on success or a negative errno.
 */
static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;
	struct pqi_device_capability *capability;
	struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;

	/* DMA target buffer; fully written by the controller on success. */
	capability = kmalloc(sizeof(*capability), GFP_KERNEL);
	if (!capability)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code =
		PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
	put_unaligned_le32(sizeof(*capability),
		&request.data.report_device_capability.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		&request.data.report_device_capability.sg_descriptor,
		capability, sizeof(*capability),
		PCI_DMA_FROMDEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);

	/* Unmap before inspecting rc so the mapping is always released. */
	pqi_pci_unmap(ctrl_info->pci_dev,
		&request.data.report_device_capability.sg_descriptor, 1,
		PCI_DMA_FROMDEVICE);

	if (rc)
		goto out;

	if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
		rc = -EIO;
		goto out;
	}

	ctrl_info->max_inbound_queues =
		get_unaligned_le16(&capability->max_inbound_queues);
	ctrl_info->max_elements_per_iq =
		get_unaligned_le16(&capability->max_elements_per_iq);
	/* Element lengths are reported in 16-byte units. */
	ctrl_info->max_iq_element_length =
		get_unaligned_le16(&capability->max_iq_element_length)
		* 16;
	ctrl_info->max_outbound_queues =
		get_unaligned_le16(&capability->max_outbound_queues);
	ctrl_info->max_elements_per_oq =
		get_unaligned_le16(&capability->max_elements_per_oq);
	ctrl_info->max_oq_element_length =
		get_unaligned_le16(&capability->max_oq_element_length)
		* 16;

	sop_iu_layer_descriptor =
		&capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];

	ctrl_info->max_inbound_iu_length_per_firmware =
		get_unaligned_le16(
			&sop_iu_layer_descriptor->max_inbound_iu_length);
	ctrl_info->inbound_spanning_supported =
		sop_iu_layer_descriptor->inbound_spanning_supported;
	ctrl_info->outbound_spanning_supported =
		sop_iu_layer_descriptor->outbound_spanning_supported;

out:
	kfree(capability);

	return rc;
}
3628
3629static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
3630{
3631 if (ctrl_info->max_iq_element_length <
3632 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3633 dev_err(&ctrl_info->pci_dev->dev,
3634 "max. inbound queue element length of %d is less than the required length of %d\n",
3635 ctrl_info->max_iq_element_length,
3636 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3637 return -EINVAL;
3638 }
3639
3640 if (ctrl_info->max_oq_element_length <
3641 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
3642 dev_err(&ctrl_info->pci_dev->dev,
3643 "max. outbound queue element length of %d is less than the required length of %d\n",
3644 ctrl_info->max_oq_element_length,
3645 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3646 return -EINVAL;
3647 }
3648
3649 if (ctrl_info->max_inbound_iu_length_per_firmware <
3650 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3651 dev_err(&ctrl_info->pci_dev->dev,
3652 "max. inbound IU length of %u is less than the min. required length of %d\n",
3653 ctrl_info->max_inbound_iu_length_per_firmware,
3654 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3655 return -EINVAL;
3656 }
3657
3658 if (!ctrl_info->inbound_spanning_supported) {
3659 dev_err(&ctrl_info->pci_dev->dev,
3660 "the controller does not support inbound spanning\n");
3661 return -EINVAL;
3662 }
3663
3664 if (ctrl_info->outbound_spanning_supported) {
3665 dev_err(&ctrl_info->pci_dev->dev,
3666 "the controller supports outbound spanning but this driver does not\n");
3667 return -EINVAL;
3668 }
3669
3670 return 0;
3671}
3672
3673static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
3674 bool inbound_queue, u16 queue_id)
3675{
3676 struct pqi_general_admin_request request;
3677 struct pqi_general_admin_response response;
3678
3679 memset(&request, 0, sizeof(request));
3680 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3681 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3682 &request.header.iu_length);
3683 if (inbound_queue)
3684 request.function_code =
3685 PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
3686 else
3687 request.function_code =
3688 PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
3689 put_unaligned_le16(queue_id,
3690 &request.data.delete_operational_queue.queue_id);
3691
3692 return pqi_submit_admin_request_synchronous(ctrl_info, &request,
3693 &response);
3694}
3695
/*
 * Create the dedicated event OQ via the CREATE OQ admin function and
 * record the consumer-index doorbell address the controller returns.
 *
 * Returns 0 on success or a negative errno from the admin exchange.
 */
static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_event_queue *event_queue;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;

	event_queue = &ctrl_info->event_queue;

	/*
	 * Create OQ (Outbound Queue - device to host queue) to dedicate
	 * to events.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
	put_unaligned_le16(event_queue->oq_id,
		&request.data.create_operational_oq.queue_id);
	put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
		&request.data.create_operational_oq.element_array_addr);
	put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
		&request.data.create_operational_oq.pi_addr);
	put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
		&request.data.create_operational_oq.num_elements);
	/* Element length is expressed in 16-byte units on the wire. */
	put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_oq.element_length);
	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
	put_unaligned_le16(event_queue->int_msg_num,
		&request.data.create_operational_oq.int_msg_num);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc)
		return rc;

	/* The CI doorbell offset is only valid in a successful response. */
	event_queue->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_oq.oq_ci_offset);

	return 0;
}
3740
/*
 * Create the next queue group: a RAID-path IQ, an AIO-path IQ (flagged
 * as AIO via a change-property call), and the group's shared OQ. On any
 * failure, already-created IQs for this group are deleted before
 * returning. Increments num_active_queue_groups on full success.
 */
static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	int rc;
	struct pqi_queue_group *queue_group;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;

	/* Each call operates on the next not-yet-created group. */
	i = ctrl_info->num_active_queue_groups;
	queue_group = &ctrl_info->queue_groups[i];

	/*
	 * Create IQ (Inbound Queue - host to device queue) for
	 * RAID path.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
	put_unaligned_le16(queue_group->iq_id[RAID_PATH],
		&request.data.create_operational_iq.queue_id);
	put_unaligned_le64(
		(u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
		&request.data.create_operational_iq.element_array_addr);
	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
		&request.data.create_operational_iq.ci_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_iq,
		&request.data.create_operational_iq.num_elements);
	/* Element length is expressed in 16-byte units on the wire. */
	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_iq.element_length);
	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating inbound RAID queue\n");
		return rc;
	}

	/* PI doorbell offset returned by the controller on success. */
	queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_iq.iq_pi_offset);

	/*
	 * Create IQ (Inbound Queue - host to device queue) for
	 * Advanced I/O (AIO) path.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
		&request.data.create_operational_iq.queue_id);
	put_unaligned_le64((u64)queue_group->
		iq_element_array_bus_addr[AIO_PATH],
		&request.data.create_operational_iq.element_array_addr);
	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
		&request.data.create_operational_iq.ci_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_iq,
		&request.data.create_operational_iq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_iq.element_length);
	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating inbound AIO queue\n");
		goto delete_inbound_queue_raid;
	}

	queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_iq.iq_pi_offset);

	/*
	 * Designate the 2nd IQ as the AIO path. By default, all IQs are
	 * assumed to be for RAID path I/O unless we change the queue's
	 * property.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
		&request.data.change_operational_iq_properties.queue_id);
	put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
		&request.data.change_operational_iq_properties.vendor_specific);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error changing queue property\n");
		goto delete_inbound_queue_aio;
	}

	/*
	 * Create OQ (Outbound Queue - device to host queue).
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
	put_unaligned_le16(queue_group->oq_id,
		&request.data.create_operational_oq.queue_id);
	put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
		&request.data.create_operational_oq.element_array_addr);
	put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
		&request.data.create_operational_oq.pi_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_oq,
		&request.data.create_operational_oq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_oq.element_length);
	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
	put_unaligned_le16(queue_group->int_msg_num,
		&request.data.create_operational_oq.int_msg_num);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating outbound queue\n");
		goto delete_inbound_queue_aio;
	}

	queue_group->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_oq.oq_ci_offset);

	ctrl_info->num_active_queue_groups++;

	return 0;

	/* Unwind labels fall through: AIO first, then RAID. */
delete_inbound_queue_aio:
	pqi_delete_operational_queue(ctrl_info, true,
		queue_group->iq_id[AIO_PATH]);

delete_inbound_queue_raid:
	pqi_delete_operational_queue(ctrl_info, true,
		queue_group->iq_id[RAID_PATH]);

	return rc;
}
3894
3895static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
3896{
3897 int rc;
3898 unsigned int i;
3899
3900 rc = pqi_create_event_queue(ctrl_info);
3901 if (rc) {
3902 dev_err(&ctrl_info->pci_dev->dev,
3903 "error creating event queue\n");
3904 return rc;
3905 }
3906
3907 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3908 rc = pqi_create_queue_group(ctrl_info);
3909 if (rc) {
3910 dev_err(&ctrl_info->pci_dev->dev,
3911 "error creating queue group number %u/%u\n",
3912 i, ctrl_info->num_queue_groups);
3913 return rc;
3914 }
3915 }
3916
3917 return 0;
3918}
3919
3920#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
3921 (offsetof(struct pqi_event_config, descriptors) + \
3922 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
3923
/*
 * Read the controller's event configuration, point every event
 * descriptor at our dedicated event OQ, and write the configuration
 * back. Both transfers go through a single bounce buffer that is DMA
 * mapped FROMDEVICE for the read and TODEVICE for the write.
 *
 * NOTE(review): the SET request reuses the report_event_configuration
 * member for its length/SG fields — presumably the report/set layouts
 * are identical; confirm against the IU definitions.
 */
static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	unsigned int i;
	struct pqi_event_config *event_config;
	struct pqi_general_management_request request;

	event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		GFP_KERNEL);
	if (!event_config)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
	/* IU length: everything after the header, incl. one SG descriptor. */
	put_unaligned_le16(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors[1]) -
		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		&request.data.report_event_configuration.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors,
		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		PCI_DMA_FROMDEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors, 1,
		PCI_DMA_FROMDEVICE);

	if (rc)
		goto out;

	/* Route every reported event type to our event queue. */
	for (i = 0; i < event_config->num_event_descriptors; i++)
		put_unaligned_le16(ctrl_info->event_queue.oq_id,
			&event_config->descriptors[i].oq_id);

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
	put_unaligned_le16(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors[1]) -
		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		&request.data.report_event_configuration.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors,
		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		PCI_DMA_TODEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors, 1,
		PCI_DMA_TODEVICE);

out:
	kfree(event_config);

	return rc;
}
3994
3995static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
3996{
3997 unsigned int i;
3998 struct device *dev;
3999 size_t sg_chain_buffer_length;
4000 struct pqi_io_request *io_request;
4001
4002 if (!ctrl_info->io_request_pool)
4003 return;
4004
4005 dev = &ctrl_info->pci_dev->dev;
4006 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4007 io_request = ctrl_info->io_request_pool;
4008
4009 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4010 kfree(io_request->iu);
4011 if (!io_request->sg_chain_buffer)
4012 break;
4013 dma_free_coherent(dev, sg_chain_buffer_length,
4014 io_request->sg_chain_buffer,
4015 io_request->sg_chain_buffer_dma_handle);
4016 io_request++;
4017 }
4018
4019 kfree(ctrl_info->io_request_pool);
4020 ctrl_info->io_request_pool = NULL;
4021}
4022
4023static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4024{
4025 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
4026 ctrl_info->error_buffer_length,
4027 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
4028
4029 if (!ctrl_info->error_buffer)
4030 return -ENOMEM;
4031
4032 return 0;
4033}
4034
4035static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4036{
4037 unsigned int i;
4038 void *sg_chain_buffer;
4039 size_t sg_chain_buffer_length;
4040 dma_addr_t sg_chain_buffer_dma_handle;
4041 struct device *dev;
4042 struct pqi_io_request *io_request;
4043
4044 ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
4045 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4046
4047 if (!ctrl_info->io_request_pool) {
4048 dev_err(&ctrl_info->pci_dev->dev,
4049 "failed to allocate I/O request pool\n");
4050 goto error;
4051 }
4052
4053 dev = &ctrl_info->pci_dev->dev;
4054 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4055 io_request = ctrl_info->io_request_pool;
4056
4057 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4058 io_request->iu =
4059 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4060
4061 if (!io_request->iu) {
4062 dev_err(&ctrl_info->pci_dev->dev,
4063 "failed to allocate IU buffers\n");
4064 goto error;
4065 }
4066
4067 sg_chain_buffer = dma_alloc_coherent(dev,
4068 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4069 GFP_KERNEL);
4070
4071 if (!sg_chain_buffer) {
4072 dev_err(&ctrl_info->pci_dev->dev,
4073 "failed to allocate PQI scatter-gather chain buffers\n");
4074 goto error;
4075 }
4076
4077 io_request->index = i;
4078 io_request->sg_chain_buffer = sg_chain_buffer;
4079 io_request->sg_chain_buffer_dma_handle =
4080 sg_chain_buffer_dma_handle;
4081 io_request++;
4082 }
4083
4084 return 0;
4085
4086error:
4087 pqi_free_all_io_requests(ctrl_info);
4088
4089 return -ENOMEM;
4090}
4091
4092/*
4093 * Calculate required resources that are sized based on max. outstanding
4094 * requests and max. transfer size.
4095 */
4096
4097static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4098{
4099 u32 max_transfer_size;
4100 u32 max_sg_entries;
4101
4102 ctrl_info->scsi_ml_can_queue =
4103 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4104 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4105
4106 ctrl_info->error_buffer_length =
4107 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4108
4109 max_transfer_size =
4110 min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE);
4111
4112 max_sg_entries = max_transfer_size / PAGE_SIZE;
4113
4114 /* +1 to cover when the buffer is not page-aligned. */
4115 max_sg_entries++;
4116
4117 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4118
4119 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4120
4121 ctrl_info->sg_chain_buffer_length =
4122 max_sg_entries * sizeof(struct pqi_sg_descriptor);
4123 ctrl_info->sg_tablesize = max_sg_entries;
4124 ctrl_info->max_sectors = max_transfer_size / 512;
4125}
4126
/*
 * Size the operational queues: pick the number of queue groups (bounded
 * by CPUs, MSI-X vectors, and controller limits) and the per-queue
 * element counts, and derive the max SG descriptors per IU.
 */
static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
{
	int num_cpus;
	int max_queue_groups;
	int num_queue_groups;
	u16 num_elements_per_iq;
	u16 num_elements_per_oq;

	/*
	 * Each group needs 2 IQs (RAID + AIO) and 1 OQ; one OQ is already
	 * dedicated to events, hence the "- 1".
	 */
	max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
		ctrl_info->max_outbound_queues - 1);
	max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);

	num_cpus = num_online_cpus();
	num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
	num_queue_groups = min(num_queue_groups, max_queue_groups);

	ctrl_info->num_queue_groups = num_queue_groups;

	/*
	 * Make sure that the max. inbound IU length is an even multiple
	 * of our inbound element length.
	 */
	ctrl_info->max_inbound_iu_length =
		(ctrl_info->max_inbound_iu_length_per_firmware /
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;

	num_elements_per_iq =
		(ctrl_info->max_inbound_iu_length /
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	/* Add one because one element in each queue is unusable. */
	num_elements_per_iq++;

	num_elements_per_iq = min(num_elements_per_iq,
		ctrl_info->max_elements_per_iq);

	/*
	 * Two IQs feed each OQ, so size the OQ for both (usable elements
	 * doubled, plus the one unusable element).
	 */
	num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
	num_elements_per_oq = min(num_elements_per_oq,
		ctrl_info->max_elements_per_oq);

	ctrl_info->num_elements_per_iq = num_elements_per_iq;
	ctrl_info->num_elements_per_oq = num_elements_per_oq;

	/* SG descriptors that fit in the IU after the first element. */
	ctrl_info->max_sg_per_iu =
		((ctrl_info->max_inbound_iu_length -
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
		sizeof(struct pqi_sg_descriptor)) +
		PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
}
4177
4178static inline void pqi_set_sg_descriptor(
4179 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4180{
4181 u64 address = (u64)sg_dma_address(sg);
4182 unsigned int length = sg_dma_len(sg);
4183
4184 put_unaligned_le64(address, &sg_descriptor->address);
4185 put_unaligned_le32(length, &sg_descriptor->length);
4186 put_unaligned_le32(0, &sg_descriptor->flags);
4187}
4188
/*
 * DMA-map the command and build the SG list for a RAID path request.
 * When the scatterlist does not fit in the IU's embedded descriptors,
 * the last embedded slot becomes a chain descriptor pointing at the
 * io_request's preallocated chain buffer.  Returns 0 on success or the
 * negative value returned by scsi_dma_map().
 */
static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{
	int i;
	u16 iu_length;
	int sg_count;
	bool chained;
	unsigned int num_sg_in_iu;
	unsigned int max_sg_per_iu;
	struct scatterlist *sg;
	struct pqi_sg_descriptor *sg_descriptor;

	sg_count = scsi_dma_map(scmd);
	if (sg_count < 0)
		return sg_count;

	/* IU length so far: everything after the header up to the SG area. */
	iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;

	if (sg_count == 0)
		goto out;

	sg = scsi_sglist(scmd);
	sg_descriptor = request->sg_descriptors;
	/* Reserve the last embedded slot for a possible chain descriptor. */
	max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
	chained = false;
	num_sg_in_iu = 0;
	i = 0;

	while (1) {
		pqi_set_sg_descriptor(sg_descriptor, sg);
		/* Only descriptors embedded in the IU count toward its length. */
		if (!chained)
			num_sg_in_iu++;
		i++;
		if (i == sg_count)
			break;
		sg_descriptor++;
		if (i == max_sg_per_iu) {
			/*
			 * Out of embedded slots: turn this slot into a chain
			 * descriptor referencing the chain buffer, then keep
			 * filling descriptors there.
			 */
			put_unaligned_le64(
				(u64)io_request->sg_chain_buffer_dma_handle,
				&sg_descriptor->address);
			put_unaligned_le32((sg_count - num_sg_in_iu)
				* sizeof(*sg_descriptor),
				&sg_descriptor->length);
			put_unaligned_le32(CISS_SG_CHAIN,
				&sg_descriptor->flags);
			chained = true;
			num_sg_in_iu++;
			sg_descriptor = io_request->sg_chain_buffer;
		}
		sg = sg_next(sg);
	}

	/* Mark the final descriptor (embedded or chained). */
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
	request->partial = chained;
	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);

out:
	put_unaligned_le16(iu_length, &request->header.iu_length);

	return 0;
}
4252
/*
 * DMA-map the command and build the SG list for an AIO path request.
 * Same chaining scheme as pqi_build_raid_sg_list(); additionally
 * records the embedded descriptor count in the request.  Returns 0 on
 * success or the negative value returned by scsi_dma_map().
 */
static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{
	int i;
	u16 iu_length;
	int sg_count;
	bool chained;
	unsigned int num_sg_in_iu;
	unsigned int max_sg_per_iu;
	struct scatterlist *sg;
	struct pqi_sg_descriptor *sg_descriptor;

	sg_count = scsi_dma_map(scmd);
	if (sg_count < 0)
		return sg_count;

	/* IU length so far: everything after the header up to the SG area. */
	iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;
	num_sg_in_iu = 0;

	if (sg_count == 0)
		goto out;

	sg = scsi_sglist(scmd);
	sg_descriptor = request->sg_descriptors;
	/* Reserve the last embedded slot for a possible chain descriptor. */
	max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
	chained = false;
	i = 0;

	while (1) {
		pqi_set_sg_descriptor(sg_descriptor, sg);
		/* Only descriptors embedded in the IU count toward its length. */
		if (!chained)
			num_sg_in_iu++;
		i++;
		if (i == sg_count)
			break;
		sg_descriptor++;
		if (i == max_sg_per_iu) {
			/* Switch to the chain buffer; see pqi_build_raid_sg_list(). */
			put_unaligned_le64(
				(u64)io_request->sg_chain_buffer_dma_handle,
				&sg_descriptor->address);
			put_unaligned_le32((sg_count - num_sg_in_iu)
				* sizeof(*sg_descriptor),
				&sg_descriptor->length);
			put_unaligned_le32(CISS_SG_CHAIN,
				&sg_descriptor->flags);
			chained = true;
			num_sg_in_iu++;
			sg_descriptor = io_request->sg_chain_buffer;
		}
		sg = sg_next(sg);
	}

	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
	request->partial = chained;
	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);

out:
	put_unaligned_le16(iu_length, &request->header.iu_length);
	request->num_sg_descriptors = num_sg_in_iu;

	return 0;
}
4317
4318static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4319 void *context)
4320{
4321 struct scsi_cmnd *scmd;
4322
4323 scmd = io_request->scmd;
4324 pqi_free_io_request(io_request);
4325 scsi_dma_unmap(scmd);
4326 pqi_scsi_done(scmd);
4327}
4328
/*
 * Build and submit a SCSI command down the RAID path.  Returns 0 on
 * successful submission or SCSI_MLQUEUE_HOST_BUSY if the SG list could
 * not be built (e.g. DMA mapping failure).
 */
static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	int rc;
	size_t cdb_length;
	struct pqi_io_request *io_request;
	struct pqi_raid_path_request *request;

	io_request = pqi_alloc_io_request(ctrl_info);
	io_request->io_complete_callback = pqi_raid_io_complete;
	io_request->scmd = scmd;

	/* Stash the io_request so error handling can find it later. */
	scmd->host_scribble = (unsigned char *)io_request;

	request = io_request->iu;
	/* Zero everything up to (but not including) the SG descriptors. */
	memset(request, 0,
		offsetof(struct pqi_raid_path_request, sg_descriptors));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	put_unaligned_le16(io_request->index, &request->request_id);
	request->error_index = request->request_id;
	memcpy(request->lun_number, device->scsi3addr,
		sizeof(request->lun_number));

	cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
	memcpy(request->cdb, scmd->cmnd, cdb_length);

	/* Tell the firmware how many "additional CDB" bytes are in use. */
	switch (cdb_length) {
	case 6:
	case 10:
	case 12:
	case 16:
		/* No bytes in the Additional CDB bytes field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_0;
		break;
	case 20:
		/* 4 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_4;
		break;
	case 24:
		/* 8 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_8;
		break;
	case 28:
		/* 12 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_12;
		break;
	case 32:
	default:
		/* 16 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_16;
		break;
	}

	/*
	 * NOTE(review): DMA_TO_DEVICE maps to SOP_READ_FLAG and vice
	 * versa — presumably the SOP flags are from the controller's
	 * point of view (host write == controller read); confirm against
	 * the SOP spec.
	 */
	switch (scmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		break;
	case DMA_FROM_DEVICE:
		request->data_direction = SOP_WRITE_FLAG;
		break;
	case DMA_NONE:
		request->data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case DMA_BIDIRECTIONAL:
		request->data_direction = SOP_BIDIRECTIONAL;
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev,
			"unknown data direction: %d\n",
			scmd->sc_data_direction);
		WARN_ON(scmd->sc_data_direction);
		break;
	}

	rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
	if (rc) {
		pqi_free_io_request(io_request);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);

	return 0;
}
4422
/*
 * Completion callback for AIO path I/O.  An -EAGAIN status means the
 * firmware wants the command retried (it is re-issued down the RAID
 * path after an immediate retry), signalled via DID_IMM_RETRY.
 */
static void pqi_aio_io_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct scsi_cmnd *scmd;

	scmd = io_request->scmd;
	scsi_dma_unmap(scmd);
	if (io_request->status == -EAGAIN)
		set_host_byte(scmd, DID_IMM_RETRY);
	pqi_free_io_request(io_request);
	pqi_scsi_done(scmd);
}
4435
/*
 * Submit a midlayer SCSI command down the AIO path using the device's
 * cached AIO handle; no RAID-bypass encryption info is supplied.
 */
static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
		scmd->cmnd, scmd->cmd_len, queue_group, NULL);
}
4443
4444static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
4445 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
4446 unsigned int cdb_length, struct pqi_queue_group *queue_group,
4447 struct pqi_encryption_info *encryption_info)
4448{
4449 int rc;
4450 struct pqi_io_request *io_request;
4451 struct pqi_aio_path_request *request;
4452
4453 io_request = pqi_alloc_io_request(ctrl_info);
4454 io_request->io_complete_callback = pqi_aio_io_complete;
4455 io_request->scmd = scmd;
4456
4457 scmd->host_scribble = (unsigned char *)io_request;
4458
4459 request = io_request->iu;
4460 memset(request, 0,
4461 offsetof(struct pqi_raid_path_request, sg_descriptors));
4462
4463 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
4464 put_unaligned_le32(aio_handle, &request->nexus_id);
4465 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4466 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4467 put_unaligned_le16(io_request->index, &request->request_id);
4468 request->error_index = request->request_id;
4469 if (cdb_length > sizeof(request->cdb))
4470 cdb_length = sizeof(request->cdb);
4471 request->cdb_length = cdb_length;
4472 memcpy(request->cdb, cdb, cdb_length);
4473
4474 switch (scmd->sc_data_direction) {
4475 case DMA_TO_DEVICE:
4476 request->data_direction = SOP_READ_FLAG;
4477 break;
4478 case DMA_FROM_DEVICE:
4479 request->data_direction = SOP_WRITE_FLAG;
4480 break;
4481 case DMA_NONE:
4482 request->data_direction = SOP_NO_DIRECTION_FLAG;
4483 break;
4484 case DMA_BIDIRECTIONAL:
4485 request->data_direction = SOP_BIDIRECTIONAL;
4486 break;
4487 default:
4488 dev_err(&ctrl_info->pci_dev->dev,
4489 "unknown data direction: %d\n",
4490 scmd->sc_data_direction);
4491 WARN_ON(scmd->sc_data_direction);
4492 break;
4493 }
4494
4495 if (encryption_info) {
4496 request->encryption_enable = true;
4497 put_unaligned_le16(encryption_info->data_encryption_key_index,
4498 &request->data_encryption_key_index);
4499 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
4500 &request->encrypt_tweak_lower);
4501 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
4502 &request->encrypt_tweak_upper);
4503 }
4504
4505 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
4506 if (rc) {
4507 pqi_free_io_request(io_request);
4508 return SCSI_MLQUEUE_HOST_BUSY;
4509 }
4510
4511 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
4512
4513 return 0;
4514}
4515
/*
 * SCSI midlayer queuecommand entry point.  Routes each command to a
 * queue group (one per hardware queue) and chooses RAID path, AIO
 * path, or RAID bypass as appropriate for the target device.
 */
static int pqi_scsi_queue_command(struct Scsi_Host *shost,
	struct scsi_cmnd *scmd)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_scsi_dev *device;
	u16 hwq;
	struct pqi_queue_group *queue_group;
	bool raid_bypassed;

	device = scmd->device->hostdata;
	ctrl_info = shost_to_hba(shost);

	/* Fail fast if the controller has been taken offline. */
	if (pqi_ctrl_offline(ctrl_info)) {
		set_host_byte(scmd, DID_NO_CONNECT);
		pqi_scsi_done(scmd);
		return 0;
	}

	/*
	 * This is necessary because the SML doesn't zero out this field during
	 * error recovery.
	 */
	scmd->result = 0;

	/* Map the blk-mq hardware queue to one of our queue groups. */
	hwq = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
	if (hwq >= ctrl_info->num_queue_groups)
		hwq = 0;

	queue_group = &ctrl_info->queue_groups[hwq];

	if (pqi_is_logical_device(device)) {
		raid_bypassed = false;
		/* Try RAID bypass only for filesystem I/O on offload-capable LUNs. */
		if (device->offload_enabled &&
			scmd->request->cmd_type == REQ_TYPE_FS) {
			rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
				scmd, queue_group);
			/*
			 * These return codes mean the bypass attempt is
			 * final — don't re-submit down the RAID path.
			 */
			if (rc == 0 ||
				rc == SCSI_MLQUEUE_HOST_BUSY ||
				rc == SAM_STAT_CHECK_CONDITION ||
				rc == SAM_STAT_RESERVATION_CONFLICT)
				raid_bypassed = true;
		}
		if (!raid_bypassed)
			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
				queue_group);
	} else {
		/* Physical devices use AIO when enabled, RAID path otherwise. */
		if (device->aio_enabled)
			rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
				queue_group);
		else
			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
				queue_group);
	}

	return rc;
}
4573
/*
 * Completion callback for a LUN reset TMF: wake up the waiter parked
 * in pqi_wait_for_lun_reset_completion().  The context pointer is the
 * caller's on-stack completion.
 */
static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
	void *context)
{
	complete(context);
}
4581
/* How long to wait between progress messages while a LUN reset runs. */
#define PQI_LUN_RESET_TIMEOUT_SECS	10

/*
 * Wait for a LUN reset TMF to complete.  Re-arms the wait every
 * PQI_LUN_RESET_TIMEOUT_SECS, logging progress each time, and gives up
 * with -ETIMEDOUT only if the controller is found to be offline.
 */
static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct completion *wait)
{
	int rc;
	unsigned int wait_secs = 0;

	while (1) {
		if (wait_for_completion_io_timeout(wait,
			PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
			rc = 0;
			break;
		}

		/* Timed out — check whether the controller died underneath us. */
		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info)) {
			rc = -ETIMEDOUT;
			break;
		}

		wait_secs += PQI_LUN_RESET_TIMEOUT_SECS;

		dev_err(&ctrl_info->pci_dev->dev,
			"resetting scsi %d:%d:%d:%d - waiting %u seconds\n",
			ctrl_info->scsi_host->host_no, device->bus,
			device->target, device->lun, wait_secs);
	}

	return rc;
}
4613
/*
 * Issue a SOP LUN reset task management request to @device and wait
 * for it to complete.  A semaphore serializes LUN resets on the
 * controller.  Returns 0 on success or a negative error/status code.
 */
static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	struct pqi_io_request *io_request;
	DECLARE_COMPLETION_ONSTACK(wait);
	struct pqi_task_management_request *request;

	/* Only one LUN reset in flight per controller. */
	down(&ctrl_info->lun_reset_sem);

	io_request = pqi_alloc_io_request(ctrl_info);
	io_request->io_complete_callback = pqi_lun_reset_complete;
	io_request->context = &wait;

	request = io_request->iu;
	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
	put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le16(io_request->index, &request->request_id);
	memcpy(request->lun_number, device->scsi3addr,
		sizeof(request->lun_number));
	request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;

	/* TMFs always go down the RAID path of the default queue group. */
	pqi_start_io(ctrl_info,
		&ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
		io_request);

	rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
	if (rc == 0)
		rc = io_request->status;

	pqi_free_io_request(io_request);
	up(&ctrl_info->lun_reset_sem);

	return rc;
}
4652
4653/* Performs a reset at the LUN level. */
4654
4655static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
4656 struct pqi_scsi_dev *device)
4657{
4658 int rc;
4659
4660 pqi_check_ctrl_health(ctrl_info);
4661 if (pqi_ctrl_offline(ctrl_info))
4662 return FAILED;
4663
4664 rc = pqi_lun_reset(ctrl_info, device);
4665
4666 return rc == 0 ? SUCCESS : FAILED;
4667}
4668
/*
 * SCSI error-handler device reset entry point.  Logs the attempt and
 * its outcome, and returns SUCCESS or FAILED to the midlayer.
 */
static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_scsi_dev *device;

	ctrl_info = shost_to_hba(scmd->device->host);
	device = scmd->device->hostdata;

	dev_err(&ctrl_info->pci_dev->dev,
		"resetting scsi %d:%d:%d:%d\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun);

	rc = pqi_device_reset(ctrl_info, device);

	dev_err(&ctrl_info->pci_dev->dev,
		"reset of scsi %d:%d:%d:%d: %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun,
		rc == SUCCESS ? "SUCCESS" : "FAILED");

	return rc;
}
4693
/*
 * Midlayer slave_alloc hook: bind the new scsi_device to our internal
 * pqi_scsi_dev (looked up via SAS rphy for physical devices, or by
 * bus/target/lun for logical ones) and apply its queue depth.  Always
 * returns 0; unmatched or hidden devices simply get no hostdata.
 */
static int pqi_slave_alloc(struct scsi_device *sdev)
{
	struct pqi_scsi_dev *device;
	unsigned long flags;
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_target *starget;
	struct sas_rphy *rphy;

	ctrl_info = shost_to_hba(sdev->host);

	/* The device list may be rescanned concurrently — hold the lock. */
	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
		/* Physical devices are addressed through the SAS transport. */
		starget = scsi_target(sdev);
		rphy = target_to_rphy(starget);
		device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
		if (device) {
			/* Record the midlayer's addressing for later lookups. */
			device->target = sdev_id(sdev);
			device->lun = sdev->lun;
			device->target_lun_valid = true;
		}
	} else {
		device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
			sdev_id(sdev), sdev->lun);
	}

	if (device && device->expose_device) {
		sdev->hostdata = device;
		device->sdev = sdev;
		if (device->queue_depth) {
			device->advertised_queue_depth = device->queue_depth;
			scsi_change_queue_depth(sdev,
				device->advertised_queue_depth);
		}
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return 0;
}
4734
4735static int pqi_slave_configure(struct scsi_device *sdev)
4736{
4737 struct pqi_scsi_dev *device;
4738
4739 device = sdev->hostdata;
4740 if (!device->expose_device)
4741 sdev->no_uld_attach = true;
4742
4743 return 0;
4744}
4745
4746static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
4747 void __user *arg)
4748{
4749 struct pci_dev *pci_dev;
4750 u32 subsystem_vendor;
4751 u32 subsystem_device;
4752 cciss_pci_info_struct pciinfo;
4753
4754 if (!arg)
4755 return -EINVAL;
4756
4757 pci_dev = ctrl_info->pci_dev;
4758
4759 pciinfo.domain = pci_domain_nr(pci_dev->bus);
4760 pciinfo.bus = pci_dev->bus->number;
4761 pciinfo.dev_fn = pci_dev->devfn;
4762 subsystem_vendor = pci_dev->subsystem_vendor;
4763 subsystem_device = pci_dev->subsystem_device;
4764 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
4765 subsystem_vendor;
4766
4767 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
4768 return -EFAULT;
4769
4770 return 0;
4771}
4772
4773static int pqi_getdrivver_ioctl(void __user *arg)
4774{
4775 u32 version;
4776
4777 if (!arg)
4778 return -EINVAL;
4779
4780 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
4781 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
4782
4783 if (copy_to_user(arg, &version, sizeof(version)))
4784 return -EFAULT;
4785
4786 return 0;
4787}
4788
/* PQI error information translated into legacy CCISS terms. */
struct ciss_error_info {
	u8 scsi_status;			/* SCSI status byte from the target */
	int command_status;		/* CISS_CMD_STATUS_* code */
	size_t sense_data_length;	/* valid bytes of sense/response data */
};
4794
4795static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
4796 struct ciss_error_info *ciss_error_info)
4797{
4798 int ciss_cmd_status;
4799 size_t sense_data_length;
4800
4801 switch (pqi_error_info->data_out_result) {
4802 case PQI_DATA_IN_OUT_GOOD:
4803 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
4804 break;
4805 case PQI_DATA_IN_OUT_UNDERFLOW:
4806 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
4807 break;
4808 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
4809 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
4810 break;
4811 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
4812 case PQI_DATA_IN_OUT_BUFFER_ERROR:
4813 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
4814 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
4815 case PQI_DATA_IN_OUT_ERROR:
4816 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
4817 break;
4818 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
4819 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
4820 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
4821 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
4822 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
4823 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
4824 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
4825 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
4826 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
4827 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
4828 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
4829 break;
4830 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
4831 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
4832 break;
4833 case PQI_DATA_IN_OUT_ABORTED:
4834 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
4835 break;
4836 case PQI_DATA_IN_OUT_TIMEOUT:
4837 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
4838 break;
4839 default:
4840 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
4841 break;
4842 }
4843
4844 sense_data_length =
4845 get_unaligned_le16(&pqi_error_info->sense_data_length);
4846 if (sense_data_length == 0)
4847 sense_data_length =
4848 get_unaligned_le16(&pqi_error_info->response_data_length);
4849 if (sense_data_length)
4850 if (sense_data_length > sizeof(pqi_error_info->data))
4851 sense_data_length = sizeof(pqi_error_info->data);
4852
4853 ciss_error_info->scsi_status = pqi_error_info->status;
4854 ciss_error_info->command_status = ciss_cmd_status;
4855 ciss_error_info->sense_data_length = sense_data_length;
4856}
4857
/*
 * CCISS_PASSTHRU ioctl: run a user-supplied CDB through the RAID path
 * synchronously, bouncing the data through a kernel buffer.  Requires
 * CAP_SYS_RAWIO.  On success the CCISS-style error info (and, for
 * reads, the data buffer) is copied back to user space.
 */
static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
{
	int rc;
	char *kernel_buffer = NULL;
	u16 iu_length;
	size_t sense_data_length;
	IOCTL_Command_struct iocommand;
	struct pqi_raid_path_request request;
	struct pqi_raid_error_info pqi_error_info;
	struct ciss_error_info ciss_error_info;

	/* Validate the caller and the request before touching hardware. */
	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;
	if (!arg)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
		return -EFAULT;
	/* A data transfer with no buffer makes no sense. */
	if (iocommand.buf_size < 1 &&
		iocommand.Request.Type.Direction != XFER_NONE)
		return -EINVAL;
	if (iocommand.Request.CDBLen > sizeof(request.cdb))
		return -EINVAL;
	if (iocommand.Request.Type.Type != TYPE_CMD)
		return -EINVAL;

	switch (iocommand.Request.Type.Direction) {
	case XFER_NONE:
	case XFER_WRITE:
	case XFER_READ:
		break;
	default:
		return -EINVAL;
	}

	if (iocommand.buf_size > 0) {
		kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (!kernel_buffer)
			return -ENOMEM;
		if (iocommand.Request.Type.Direction & XFER_WRITE) {
			if (copy_from_user(kernel_buffer, iocommand.buf,
				iocommand.buf_size)) {
				rc = -EFAULT;
				goto out;
			}
		} else {
			/* Read-only transfer: start from a zeroed buffer. */
			memset(kernel_buffer, 0, iocommand.buf_size);
		}
	}

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	/* IU length excludes the header and, for now, any SG descriptor. */
	iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;
	memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
		sizeof(request.lun_number));
	memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
	request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	switch (iocommand.Request.Type.Direction) {
	case XFER_NONE:
		request.data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case XFER_WRITE:
		request.data_direction = SOP_WRITE_FLAG;
		break;
	case XFER_READ:
		request.data_direction = SOP_READ_FLAG;
		break;
	}

	request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;

	if (iocommand.buf_size > 0) {
		put_unaligned_le32(iocommand.buf_size, &request.buffer_length);

		/* Single bounce buffer => a single SG descriptor suffices. */
		rc = pqi_map_single(ctrl_info->pci_dev,
			&request.sg_descriptors[0], kernel_buffer,
			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
		if (rc)
			goto out;

		iu_length += sizeof(request.sg_descriptors[0]);
	}

	put_unaligned_le16(iu_length, &request.header.iu_length);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);

	if (iocommand.buf_size > 0)
		pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
			PCI_DMA_BIDIRECTIONAL);

	memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));

	if (rc == 0) {
		/* Translate PQI error info into the CCISS form users expect. */
		pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
		iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
		iocommand.error_info.CommandStatus =
			ciss_error_info.command_status;
		sense_data_length = ciss_error_info.sense_data_length;
		if (sense_data_length) {
			if (sense_data_length >
				sizeof(iocommand.error_info.SenseInfo))
				sense_data_length =
					sizeof(iocommand.error_info.SenseInfo);
			memcpy(iocommand.error_info.SenseInfo,
				pqi_error_info.data, sense_data_length);
			iocommand.error_info.SenseLen = sense_data_length;
		}
	}

	if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
		rc = -EFAULT;
		goto out;
	}

	/* For reads, hand the data itself back as well. */
	if (rc == 0 && iocommand.buf_size > 0 &&
		(iocommand.Request.Type.Direction & XFER_READ)) {
		if (copy_to_user(iocommand.buf, kernel_buffer,
			iocommand.buf_size)) {
			rc = -EFAULT;
		}
	}

out:
	kfree(kernel_buffer);

	return rc;
}
4991
4992static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
4993{
4994 int rc;
4995 struct pqi_ctrl_info *ctrl_info;
4996
4997 ctrl_info = shost_to_hba(sdev->host);
4998
4999 switch (cmd) {
5000 case CCISS_DEREGDISK:
5001 case CCISS_REGNEWDISK:
5002 case CCISS_REGNEWD:
5003 rc = pqi_scan_scsi_devices(ctrl_info);
5004 break;
5005 case CCISS_GETPCIINFO:
5006 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
5007 break;
5008 case CCISS_GETDRIVVER:
5009 rc = pqi_getdrivver_ioctl(arg);
5010 break;
5011 case CCISS_PASSTHRU:
5012 rc = pqi_passthru_ioctl(ctrl_info, arg);
5013 break;
5014 default:
5015 rc = -EINVAL;
5016 break;
5017 }
5018
5019 return rc;
5020}
5021
5022static ssize_t pqi_version_show(struct device *dev,
5023 struct device_attribute *attr, char *buffer)
5024{
5025 ssize_t count = 0;
5026 struct Scsi_Host *shost;
5027 struct pqi_ctrl_info *ctrl_info;
5028
5029 shost = class_to_shost(dev);
5030 ctrl_info = shost_to_hba(shost);
5031
5032 count += snprintf(buffer + count, PAGE_SIZE - count,
5033 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
5034
5035 count += snprintf(buffer + count, PAGE_SIZE - count,
5036 "firmware: %s\n", ctrl_info->firmware_version);
5037
5038 return count;
5039}
5040
5041static ssize_t pqi_host_rescan_store(struct device *dev,
5042 struct device_attribute *attr, const char *buffer, size_t count)
5043{
5044 struct Scsi_Host *shost = class_to_shost(dev);
5045
5046 pqi_scan_start(shost);
5047
5048 return count;
5049}
5050
/* Host-level sysfs attributes. */
static DEVICE_ATTR(version, S_IRUGO, pqi_version_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, pqi_host_rescan_store);

/* NULL-terminated list installed via scsi_host_template.shost_attrs. */
static struct device_attribute *pqi_shost_attrs[] = {
	&dev_attr_version,
	&dev_attr_rescan,
	NULL
};
5059
/*
 * sysfs "sas_address" attribute: report the device's SAS address.
 * Logical devices have no SAS address and return -ENODEV.
 */
static ssize_t pqi_sas_address_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	u64 sas_address;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	/* Snapshot the address under the device-list lock. */
	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (pqi_is_logical_device(device)) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
			flags);
		return -ENODEV;
	}
	sas_address = device->sas_address;

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
}
5086
5087static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
5088 struct device_attribute *attr, char *buffer)
5089{
5090 struct pqi_ctrl_info *ctrl_info;
5091 struct scsi_device *sdev;
5092 struct pqi_scsi_dev *device;
5093 unsigned long flags;
5094
5095 sdev = to_scsi_device(dev);
5096 ctrl_info = shost_to_hba(sdev->host);
5097
5098 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5099
5100 device = sdev->hostdata;
5101 buffer[0] = device->offload_enabled ? '1' : '0';
5102 buffer[1] = '\n';
5103 buffer[2] = '\0';
5104
5105 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5106
5107 return 2;
5108}
5109
/* Per-device sysfs attributes. */
static DEVICE_ATTR(sas_address, S_IRUGO, pqi_sas_address_show, NULL);
static DEVICE_ATTR(ssd_smart_path_enabled, S_IRUGO,
	pqi_ssd_smart_path_enabled_show, NULL);

/* NULL-terminated list installed via scsi_host_template.sdev_attrs. */
static struct device_attribute *pqi_sdev_attrs[] = {
	&dev_attr_sas_address,
	&dev_attr_ssd_smart_path_enabled,
	NULL
};
5119
/* SCSI midlayer host template for the smartpqi driver. */
static struct scsi_host_template pqi_driver_template = {
	.module = THIS_MODULE,
	.name = DRIVER_NAME_SHORT,
	.proc_name = DRIVER_NAME_SHORT,
	.queuecommand = pqi_scsi_queue_command,
	.scan_start = pqi_scan_start,
	.scan_finished = pqi_scan_finished,
	.this_id = -1,	/* the host itself has no SCSI ID */
	.use_clustering = ENABLE_CLUSTERING,
	.eh_device_reset_handler = pqi_eh_device_reset_handler,
	.ioctl = pqi_ioctl,
	.slave_alloc = pqi_slave_alloc,
	.slave_configure = pqi_slave_configure,
	.sdev_attrs = pqi_sdev_attrs,
	.shost_attrs = pqi_shost_attrs,
};
5136
/*
 * Allocate and register the Scsi_Host for this controller and attach
 * it to the SAS transport.  Returns 0 on success or a negative errno;
 * on failure all partially-registered state is torn down.
 */
static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct Scsi_Host *shost;

	/*
	 * Note: sizeof(ctrl_info) is the size of the POINTER — hostdata
	 * only needs to hold the ctrl_info pointer (see hostdata[0]
	 * assignment below).
	 */
	shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
	if (!shost) {
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi_host_alloc failed for controller %u\n",
			ctrl_info->ctrl_id);
		return -ENOMEM;
	}

	shost->io_port = 0;
	shost->n_io_port = 0;
	shost->this_id = -1;
	shost->max_channel = PQI_MAX_BUS;
	shost->max_cmd_len = MAX_COMMAND_SIZE;
	shost->max_lun = ~0;
	shost->max_id = ~0;
	shost->max_sectors = ctrl_info->max_sectors;
	shost->can_queue = ctrl_info->scsi_ml_can_queue;
	shost->cmd_per_lun = shost->can_queue;
	shost->sg_tablesize = ctrl_info->sg_tablesize;
	shost->transportt = pqi_sas_transport_template;
	shost->irq = ctrl_info->msix_vectors[0];
	shost->unique_id = shost->irq;
	shost->nr_hw_queues = ctrl_info->num_queue_groups;
	/* Stash the ctrl_info pointer for shost_to_hba(). */
	shost->hostdata[0] = (unsigned long)ctrl_info;

	rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi_add_host failed for controller %u\n",
			ctrl_info->ctrl_id);
		goto free_host;
	}

	rc = pqi_add_sas_host(shost, ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"add SAS host failed for controller %u\n",
			ctrl_info->ctrl_id);
		goto remove_host;
	}

	ctrl_info->scsi_host = shost;

	return 0;

remove_host:
	scsi_remove_host(shost);
free_host:
	scsi_host_put(shost);

	return rc;
}
5194
5195static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
5196{
5197 struct Scsi_Host *shost;
5198
5199 pqi_delete_sas_host(ctrl_info);
5200
5201 shost = ctrl_info->scsi_host;
5202 if (!shost)
5203 return;
5204
5205 scsi_remove_host(shost);
5206 scsi_host_put(shost);
5207}
5208
/* Reset register encoding: action in bits 7:5, reset type in bits 2:0. */
#define PQI_RESET_ACTION_RESET 0x1

#define PQI_RESET_TYPE_NO_RESET 0x0
#define PQI_RESET_TYPE_SOFT_RESET 0x1
#define PQI_RESET_TYPE_FIRM_RESET 0x2
#define PQI_RESET_TYPE_HARD_RESET 0x3

/*
 * Issue a hard PQI reset through the device-reset register and wait
 * for the controller to come back to the PQI-ready state.
 */
static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	u32 reset_params;

	/* Action field lives at bit 5 of the reset register. */
	reset_params = (PQI_RESET_ACTION_RESET << 5) |
		PQI_RESET_TYPE_HARD_RESET;

	writel(reset_params,
		&ctrl_info->pqi_registers->device_reset);

	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"PQI reset failed\n");

	return rc;
}
5234
5235static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
5236{
5237 int rc;
5238 struct bmic_identify_controller *identify;
5239
5240 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
5241 if (!identify)
5242 return -ENOMEM;
5243
5244 rc = pqi_identify_controller(ctrl_info, identify);
5245 if (rc)
5246 goto out;
5247
5248 memcpy(ctrl_info->firmware_version, identify->firmware_version,
5249 sizeof(identify->firmware_version));
5250 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
5251 snprintf(ctrl_info->firmware_version +
5252 strlen(ctrl_info->firmware_version),
5253 sizeof(ctrl_info->firmware_version),
5254 "-%u", get_unaligned_le16(&identify->firmware_build_number));
5255
5256out:
5257 kfree(identify);
5258
5259 return rc;
5260}
5261
5262static int pqi_kdump_init(struct pqi_ctrl_info *ctrl_info)
5263{
5264 if (!sis_is_firmware_running(ctrl_info))
5265 return -ENXIO;
5266
5267 if (pqi_get_ctrl_mode(ctrl_info) == PQI_MODE) {
5268 sis_disable_msix(ctrl_info);
5269 if (pqi_reset(ctrl_info) == 0)
5270 sis_reenable_sis_mode(ctrl_info);
5271 }
5272
5273 return 0;
5274}
5275
/*
 * Bring the controller from its power-on (legacy SIS) state to fully
 * operational PQI mode: negotiate capabilities over SIS, transition to
 * PQI mode, create the admin and operational queues, wire up MSI-X
 * interrupts, and register with the SCSI midlayer.  The steps are
 * strictly ordered.  Returns 0 on success or a negative errno; on
 * failure the caller (pqi_pci_probe()) runs pqi_remove_ctrl() to
 * release whatever was set up before the error.
 */
static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	/* In a kdump kernel, force the controller back into SIS mode first. */
	if (reset_devices) {
		rc = pqi_kdump_init(ctrl_info);
		if (rc)
			return rc;
	}

	/*
	 * When the controller comes out of reset, it is always running
	 * in legacy SIS mode. This is so that it can be compatible
	 * with legacy drivers shipped with OSes. So we have to talk
	 * to it using SIS commands at first. Once we are satisfied
	 * that the controller supports PQI, we transition it into PQI
	 * mode.
	 */

	/*
	 * Wait until the controller is ready to start accepting SIS
	 * commands.
	 */
	rc = sis_wait_for_ctrl_ready(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error initializing SIS interface\n");
		return rc;
	}

	/*
	 * Get the controller properties. This allows us to determine
	 * whether or not it supports PQI mode.
	 */
	rc = sis_get_ctrl_properties(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller properties\n");
		return rc;
	}

	rc = sis_get_pqi_capabilities(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller capabilities\n");
		return rc;
	}

	/* Clamp the firmware-advertised queue depth to the driver's limit. */
	if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS)
		ctrl_info->max_outstanding_requests =
			PQI_MAX_OUTSTANDING_REQUESTS;

	pqi_calculate_io_resources(ctrl_info);

	rc = pqi_alloc_error_buffer(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate PQI error buffer\n");
		return rc;
	}

	/*
	 * If the function we are about to call succeeds, the
	 * controller will transition from legacy SIS mode
	 * into PQI mode.
	 */
	rc = sis_init_base_struct_addr(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error initializing PQI mode\n");
		return rc;
	}

	/* Wait for the controller to complete the SIS -> PQI transition. */
	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"transition to PQI mode failed\n");
		return rc;
	}

	/* From here on, we are running in PQI mode. */
	ctrl_info->pqi_mode_enabled = true;
	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);

	rc = pqi_alloc_admin_queues(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error allocating admin queues\n");
		return rc;
	}

	rc = pqi_create_admin_queues(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating admin queues\n");
		return rc;
	}

	rc = pqi_report_device_capability(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"obtaining device capability failed\n");
		return rc;
	}

	rc = pqi_validate_device_capability(ctrl_info);
	if (rc)
		return rc;

	pqi_calculate_queue_resources(ctrl_info);

	rc = pqi_enable_msix_interrupts(ctrl_info);
	if (rc)
		return rc;

	/*
	 * If fewer MSI-X vectors were granted than queue groups were
	 * planned, shrink to the granted vector count and redo the queue
	 * sizing.
	 */
	if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
		ctrl_info->max_msix_vectors =
			ctrl_info->num_msix_vectors_enabled;
		pqi_calculate_queue_resources(ctrl_info);
	}

	rc = pqi_alloc_io_resources(ctrl_info);
	if (rc)
		return rc;

	rc = pqi_alloc_operational_queues(ctrl_info);
	if (rc)
		return rc;

	pqi_init_operational_queues(ctrl_info);

	rc = pqi_request_irqs(ctrl_info);
	if (rc)
		return rc;

	pqi_irq_set_affinity_hint(ctrl_info);

	rc = pqi_create_queues(ctrl_info);
	if (rc)
		return rc;

	sis_enable_msix(ctrl_info);

	rc = pqi_configure_events(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error configuring events\n");
		return rc;
	}

	pqi_start_heartbeat_timer(ctrl_info);

	ctrl_info->controller_online = true;

	/* Register with the SCSI subsystem. */
	rc = pqi_register_scsi(ctrl_info);
	if (rc)
		return rc;

	rc = pqi_get_ctrl_firmware_version(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining firmware version\n");
		return rc;
	}

	rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error updating host wellness\n");
		return rc;
	}

	pqi_schedule_update_time_worker(ctrl_info);

	pqi_scan_scsi_devices(ctrl_info);

	return 0;
}
5456
5457static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
5458{
5459 int rc;
5460 u64 mask;
5461
5462 rc = pci_enable_device(ctrl_info->pci_dev);
5463 if (rc) {
5464 dev_err(&ctrl_info->pci_dev->dev,
5465 "failed to enable PCI device\n");
5466 return rc;
5467 }
5468
5469 if (sizeof(dma_addr_t) > 4)
5470 mask = DMA_BIT_MASK(64);
5471 else
5472 mask = DMA_BIT_MASK(32);
5473
5474 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
5475 if (rc) {
5476 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
5477 goto disable_device;
5478 }
5479
5480 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
5481 if (rc) {
5482 dev_err(&ctrl_info->pci_dev->dev,
5483 "failed to obtain PCI resources\n");
5484 goto disable_device;
5485 }
5486
5487 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
5488 ctrl_info->pci_dev, 0),
5489 sizeof(struct pqi_ctrl_registers));
5490 if (!ctrl_info->iomem_base) {
5491 dev_err(&ctrl_info->pci_dev->dev,
5492 "failed to map memory for controller registers\n");
5493 rc = -ENOMEM;
5494 goto release_regions;
5495 }
5496
5497 ctrl_info->registers = ctrl_info->iomem_base;
5498 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
5499
5500 /* Enable bus mastering. */
5501 pci_set_master(ctrl_info->pci_dev);
5502
5503 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
5504
5505 return 0;
5506
5507release_regions:
5508 pci_release_regions(ctrl_info->pci_dev);
5509disable_device:
5510 pci_disable_device(ctrl_info->pci_dev);
5511
5512 return rc;
5513}
5514
5515static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
5516{
5517 iounmap(ctrl_info->iomem_base);
5518 pci_release_regions(ctrl_info->pci_dev);
5519 pci_disable_device(ctrl_info->pci_dev);
5520 pci_set_drvdata(ctrl_info->pci_dev, NULL);
5521}
5522
5523static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
5524{
5525 struct pqi_ctrl_info *ctrl_info;
5526
5527 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
5528 GFP_KERNEL, numa_node);
5529 if (!ctrl_info)
5530 return NULL;
5531
5532 mutex_init(&ctrl_info->scan_mutex);
5533
5534 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
5535 spin_lock_init(&ctrl_info->scsi_device_list_lock);
5536
5537 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
5538 atomic_set(&ctrl_info->num_interrupts, 0);
5539
5540 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
5541 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
5542
5543 sema_init(&ctrl_info->sync_request_sem,
5544 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
5545 sema_init(&ctrl_info->lun_reset_sem, PQI_RESERVED_IO_SLOTS_LUN_RESET);
5546
5547 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
5548 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
5549
5550 return ctrl_info;
5551}
5552
/* Release the per-controller state block allocated by pqi_alloc_ctrl_info(). */
static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
{
	kfree(ctrl_info);
}
5557
/*
 * Tear down interrupt delivery: drop affinity hints, free the IRQ
 * handlers, and disable MSI-X if any vectors were enabled.
 */
static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
{
	pqi_irq_unset_affinity_hint(ctrl_info);
	pqi_free_irqs(ctrl_info);
	if (ctrl_info->num_msix_vectors_enabled)
		pci_disable_msix(ctrl_info->pci_dev);
}
5565
/*
 * Free everything pqi_ctrl_init()/pqi_pci_init() may have allocated,
 * in safe teardown order.  Each DMA buffer is guarded by a NULL check
 * because this is also the error-unwind path and any step of init may
 * have failed.  Frees ctrl_info itself last.
 */
static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
{
	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_free_interrupts(ctrl_info);
	if (ctrl_info->queue_memory_base)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->queue_memory_length,
			ctrl_info->queue_memory_base,
			ctrl_info->queue_memory_base_dma_handle);
	if (ctrl_info->admin_queue_memory_base)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->admin_queue_memory_length,
			ctrl_info->admin_queue_memory_base,
			ctrl_info->admin_queue_memory_base_dma_handle);
	pqi_free_all_io_requests(ctrl_info);
	if (ctrl_info->error_buffer)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->error_buffer_length,
			ctrl_info->error_buffer,
			ctrl_info->error_buffer_dma_handle);
	/* iomem_base non-NULL means pqi_pci_init() completed; undo it. */
	if (ctrl_info->iomem_base)
		pqi_cleanup_pci_init(ctrl_info);
	pqi_free_ctrl_info(ctrl_info);
}
5590
/*
 * Full controller teardown, used by both the remove path and the probe
 * error-unwind path: stop deferred work, detach from the SCSI layer,
 * return the controller to legacy SIS mode if it is in PQI mode, and
 * free all resources (including ctrl_info itself).
 */
static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
	pqi_remove_all_scsi_devices(ctrl_info);
	pqi_unregister_scsi(ctrl_info);

	if (ctrl_info->pqi_mode_enabled) {
		sis_disable_msix(ctrl_info);
		/* Only re-enable SIS mode if the reset actually succeeded. */
		if (pqi_reset(ctrl_info) == 0)
			sis_reenable_sis_mode(ctrl_info);
	}
	pqi_free_ctrl_resources(ctrl_info);
}
5605
5606static void pqi_print_ctrl_info(struct pci_dev *pdev,
5607 const struct pci_device_id *id)
5608{
5609 char *ctrl_description;
5610
5611 if (id->driver_data) {
5612 ctrl_description = (char *)id->driver_data;
5613 } else {
5614 switch (id->subvendor) {
5615 case PCI_VENDOR_ID_HP:
5616 ctrl_description = hpe_branded_controller;
5617 break;
5618 case PCI_VENDOR_ID_ADAPTEC2:
5619 default:
5620 ctrl_description = microsemi_branded_controller;
5621 break;
5622 }
5623 }
5624
5625 dev_info(&pdev->dev, "%s found\n", ctrl_description);
5626}
5627
/*
 * PCI probe entry point: validate the matched ID, allocate the
 * controller state on the device's NUMA node, then run PCI-level and
 * controller-level initialization.  On any failure after allocation,
 * pqi_remove_ctrl() unwinds everything (it tolerates partial init).
 */
static int pqi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;
	int node;
	struct pqi_ctrl_info *ctrl_info;

	pqi_print_ctrl_info(pdev, id);

	/* Optionally refuse devices matched only by the wildcard table entry. */
	if (pqi_disable_device_id_wildcards &&
		id->subvendor == PCI_ANY_ID &&
		id->subdevice == PCI_ANY_ID) {
		dev_warn(&pdev->dev,
			"controller not probed because device ID wildcards are disabled\n");
		return -ENODEV;
	}

	if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
		dev_warn(&pdev->dev,
			"controller device ID matched using wildcards\n");

	/* Pin devices with no NUMA affinity to node 0 for the allocation. */
	node = dev_to_node(&pdev->dev);
	if (node == NUMA_NO_NODE)
		set_dev_node(&pdev->dev, 0);

	ctrl_info = pqi_alloc_ctrl_info(node);
	if (!ctrl_info) {
		dev_err(&pdev->dev,
			"failed to allocate controller info block\n");
		return -ENOMEM;
	}

	ctrl_info->pci_dev = pdev;

	rc = pqi_pci_init(ctrl_info);
	if (rc)
		goto error;

	rc = pqi_ctrl_init(ctrl_info);
	if (rc)
		goto error;

	return 0;

error:
	pqi_remove_ctrl(ctrl_info);

	return rc;
}
5676
/* PCI remove entry point: tear down the controller if probe succeeded. */
static void pqi_pci_remove(struct pci_dev *pdev)
{
	struct pqi_ctrl_info *ctrl_info = pci_get_drvdata(pdev);

	if (ctrl_info)
		pqi_remove_ctrl(ctrl_info);
}
5687
5688static void pqi_shutdown(struct pci_dev *pdev)
5689{
5690 int rc;
5691 struct pqi_ctrl_info *ctrl_info;
5692
5693 ctrl_info = pci_get_drvdata(pdev);
5694 if (!ctrl_info)
5695 goto error;
5696
5697 /*
5698 * Write all data in the controller's battery-backed cache to
5699 * storage.
5700 */
5701 rc = pqi_flush_cache(ctrl_info);
5702 if (rc == 0)
5703 return;
5704
5705error:
5706 dev_warn(&pdev->dev,
5707 "unable to flush controller cache\n");
5708}
5709
/* Define the PCI IDs for the controllers that we support. */
static const struct pci_device_id pqi_pci_id_table[] = {
	/* Microsemi/Adaptec-branded controllers. */
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0110)
	},
	/* HPE-branded controllers. */
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0600)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0601)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0602)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0603)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0650)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0651)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0652)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0653)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0654)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0655)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0700)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0701)
	},
	/* More Microsemi/Adaptec-branded controllers. */
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0800)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0801)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0802)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0803)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0804)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0805)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0900)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0901)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0902)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0903)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0904)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0905)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0906)
	},
	/* More HPE-branded controllers. */
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x1001)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x1100)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x1101)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x1102)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x1150)
	},
	/*
	 * Wildcard: match any subsystem ID on the base device.  Can be
	 * rejected at probe time via pqi_disable_device_id_wildcards.
	 */
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_ANY_ID, PCI_ANY_ID)
	},
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);

static struct pci_driver pqi_pci_driver = {
	.name = DRIVER_NAME_SHORT,
	.id_table = pqi_pci_id_table,
	.probe = pqi_pci_probe,
	.remove = pqi_pci_remove,
	.shutdown = pqi_shutdown,
};
5852
5853static int __init pqi_init(void)
5854{
5855 int rc;
5856
5857 pr_info(DRIVER_NAME "\n");
5858
5859 pqi_sas_transport_template =
5860 sas_attach_transport(&pqi_sas_transport_functions);
5861 if (!pqi_sas_transport_template)
5862 return -ENODEV;
5863
5864 rc = pci_register_driver(&pqi_pci_driver);
5865 if (rc)
5866 sas_release_transport(pqi_sas_transport_template);
5867
5868 return rc;
5869}
5870
/* Module exit: unregister the PCI driver, then release the SAS transport. */
static void __exit pqi_cleanup(void)
{
	pci_unregister_driver(&pqi_pci_driver);
	sas_release_transport(pqi_sas_transport_template);
}

module_init(pqi_init);
module_exit(pqi_cleanup);
5879
/*
 * Compile-time layout checks.  This function is never called (hence the
 * "unused" attribute); it exists solely so BUILD_BUG_ON() can verify
 * that the driver's structure offsets and sizes match the on-the-wire
 * PQI/SIS/BMIC layouts, and that related tunables are self-consistent.
 * A mismatch fails the build instead of corrupting controller traffic.
 */
static void __attribute__((unused)) verify_structures(void)
{
	/* SIS register window layout. */
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_host_to_ctrl_doorbell) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_interrupt_mask) != 0x34);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell) != 0x9c);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell_clear) != 0xa0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_driver_scratch) != 0xb0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_firmware_status) != 0xbc);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_mailbox) != 0x1000);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		pqi_registers) != 0x4000);

	/* Common IU header. */
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_type) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_length) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		response_queue_id) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		work_area) != 0x6);
	BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);

	/* AIO-path error information. */
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		status) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		service_response) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_present) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		reserved) != 0x3);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		residual_count) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		reserved1) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data) != 0xc);
	BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);

	/* RAID-path error information. */
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_result) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_result) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		reserved) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status) != 0x5);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status_qualifier) != 0x6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		sense_data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		response_data_length) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_transferred) != 0xc);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_transferred) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data) != 0x14);
	BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);

	/* PQI device register window. */
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		signature) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		function_and_status_code) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_iq_elements) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_oq_elements) != 0x11);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_length) != 0x12);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_length) != 0x13);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_reset_timeout) != 0x14);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_status) != 0x18);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_set) != 0x1c);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_clear) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_status) != 0x40);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_pi_offset) != 0x48);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_ci_offset) != 0x50);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_array_addr) != 0x58);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_array_addr) != 0x60);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_ci_addr) != 0x68);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_pi_addr) != 0x70);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_num_elements) != 0x78);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_num_elements) != 0x79);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_queue_int_msg_num) != 0x7a);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_error) != 0x80);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		error_details) != 0x88);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_reset) != 0x90);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		power_action) != 0x94);
	BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);

	/* General admin request IU. */
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.buffer_length) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.sg_descriptor) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.ci_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.pi_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.int_msg_num) != 40);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.coalescing_count) != 42);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.min_coalescing_time) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.max_coalescing_time) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.delete_operational_queue.queue_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
		data.create_operational_iq) != 64 - 11);
	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
		data.create_operational_oq) != 64 - 11);
	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
		data.delete_operational_queue) != 64 - 11);

	/* General admin response IU. */
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		status) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.iq_pi_offset) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.oq_ci_offset) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);

	/* RAID-path request IU. */
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		error_index) != 27);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	/* AIO-path request IU. */
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		nexus_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		buffer_length) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		data_encryption_key_index) != 22);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_lower) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_upper) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		error_index) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		num_sg_descriptors) != 50);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb_length) != 51);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		lun_number) != 52);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	/* I/O response IU. */
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		error_index) != 10);

	/* General management (event configuration) request IU. */
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.global_event_oq_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.sg_descriptors) != 16);

	/* IU layer descriptor inside the device-capability buffer. */
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_inbound_iu_length) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_outbound_iu_length) != 14);
	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);

	/* Device-capability buffer. */
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		data_length) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iq_arbitration_priority_support_bitmask) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_a) != 9);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_b) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_c) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_inbound_queues) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_iq) != 18);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_iq_element_length) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_iq_element_length) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_outbound_queues) != 30);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_oq) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		intr_coalescing_time_granularity) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_oq_element_length) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_oq_element_length) != 38);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iu_layer_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);

	/* Event descriptor and configuration buffers. */
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		event_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		oq_id) != 2);
	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		num_event_descriptors) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		descriptors) != 4);

	/* Event response and acknowledge IUs. */
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		data) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);

	/* Task management request/response IUs. */
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);

	/* BMIC IDENTIFY CONTROLLER buffer. */
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);

	/* Sanity checks on driver tunables. */
	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
}
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
new file mode 100644
index 000000000000..52ca4f93f1b2
--- /dev/null
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -0,0 +1,350 @@
1/*
2 * driver for Microsemi PQI-based storage controllers
3 * Copyright (c) 2016 Microsemi Corporation
4 * Copyright (c) 2016 PMC-Sierra, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
16 *
17 */
18
19#include <linux/kernel.h>
20#include <scsi/scsi_host.h>
21#include <scsi/scsi_cmnd.h>
22#include <scsi/scsi_transport_sas.h>
23#include "smartpqi.h"
24
25static struct pqi_sas_phy *pqi_alloc_sas_phy(struct pqi_sas_port *pqi_sas_port)
26{
27 struct pqi_sas_phy *pqi_sas_phy;
28 struct sas_phy *phy;
29
30 pqi_sas_phy = kzalloc(sizeof(*pqi_sas_phy), GFP_KERNEL);
31 if (!pqi_sas_phy)
32 return NULL;
33
34 phy = sas_phy_alloc(pqi_sas_port->parent_node->parent_dev,
35 pqi_sas_port->next_phy_index);
36 if (!phy) {
37 kfree(pqi_sas_phy);
38 return NULL;
39 }
40
41 pqi_sas_port->next_phy_index++;
42 pqi_sas_phy->phy = phy;
43 pqi_sas_phy->parent_port = pqi_sas_port;
44
45 return pqi_sas_phy;
46}
47
48static void pqi_free_sas_phy(struct pqi_sas_phy *pqi_sas_phy)
49{
50 struct sas_phy *phy = pqi_sas_phy->phy;
51
52 sas_port_delete_phy(pqi_sas_phy->parent_port->port, phy);
53 sas_phy_free(phy);
54 if (pqi_sas_phy->added_to_port)
55 list_del(&pqi_sas_phy->phy_list_entry);
56 kfree(pqi_sas_phy);
57}
58
59static int pqi_sas_port_add_phy(struct pqi_sas_phy *pqi_sas_phy)
60{
61 int rc;
62 struct pqi_sas_port *pqi_sas_port;
63 struct sas_phy *phy;
64 struct sas_identify *identify;
65
66 pqi_sas_port = pqi_sas_phy->parent_port;
67 phy = pqi_sas_phy->phy;
68
69 identify = &phy->identify;
70 memset(identify, 0, sizeof(*identify));
71 identify->sas_address = pqi_sas_port->sas_address;
72 identify->device_type = SAS_END_DEVICE;
73 identify->initiator_port_protocols = SAS_PROTOCOL_STP;
74 identify->target_port_protocols = SAS_PROTOCOL_STP;
75 phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
76 phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
77 phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
78 phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
79 phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
80
81 rc = sas_phy_add(pqi_sas_phy->phy);
82 if (rc)
83 return rc;
84
85 sas_port_add_phy(pqi_sas_port->port, pqi_sas_phy->phy);
86 list_add_tail(&pqi_sas_phy->phy_list_entry,
87 &pqi_sas_port->phy_list_head);
88 pqi_sas_phy->added_to_port = true;
89
90 return 0;
91}
92
93static int pqi_sas_port_add_rphy(struct pqi_sas_port *pqi_sas_port,
94 struct sas_rphy *rphy)
95{
96 struct sas_identify *identify;
97
98 identify = &rphy->identify;
99 identify->sas_address = pqi_sas_port->sas_address;
100 identify->initiator_port_protocols = SAS_PROTOCOL_STP;
101 identify->target_port_protocols = SAS_PROTOCOL_STP;
102
103 return sas_rphy_add(rphy);
104}
105
106static struct pqi_sas_port *pqi_alloc_sas_port(
107 struct pqi_sas_node *pqi_sas_node, u64 sas_address)
108{
109 int rc;
110 struct pqi_sas_port *pqi_sas_port;
111 struct sas_port *port;
112
113 pqi_sas_port = kzalloc(sizeof(*pqi_sas_port), GFP_KERNEL);
114 if (!pqi_sas_port)
115 return NULL;
116
117 INIT_LIST_HEAD(&pqi_sas_port->phy_list_head);
118 pqi_sas_port->parent_node = pqi_sas_node;
119
120 port = sas_port_alloc_num(pqi_sas_node->parent_dev);
121 if (!port)
122 goto free_pqi_port;
123
124 rc = sas_port_add(port);
125 if (rc)
126 goto free_sas_port;
127
128 pqi_sas_port->port = port;
129 pqi_sas_port->sas_address = sas_address;
130 list_add_tail(&pqi_sas_port->port_list_entry,
131 &pqi_sas_node->port_list_head);
132
133 return pqi_sas_port;
134
135free_sas_port:
136 sas_port_free(port);
137free_pqi_port:
138 kfree(pqi_sas_port);
139
140 return NULL;
141}
142
143static void pqi_free_sas_port(struct pqi_sas_port *pqi_sas_port)
144{
145 struct pqi_sas_phy *pqi_sas_phy;
146 struct pqi_sas_phy *next;
147
148 list_for_each_entry_safe(pqi_sas_phy, next,
149 &pqi_sas_port->phy_list_head, phy_list_entry)
150 pqi_free_sas_phy(pqi_sas_phy);
151
152 sas_port_delete(pqi_sas_port->port);
153 list_del(&pqi_sas_port->port_list_entry);
154 kfree(pqi_sas_port);
155}
156
157static struct pqi_sas_node *pqi_alloc_sas_node(struct device *parent_dev)
158{
159 struct pqi_sas_node *pqi_sas_node;
160
161 pqi_sas_node = kzalloc(sizeof(*pqi_sas_node), GFP_KERNEL);
162 if (pqi_sas_node) {
163 pqi_sas_node->parent_dev = parent_dev;
164 INIT_LIST_HEAD(&pqi_sas_node->port_list_head);
165 }
166
167 return pqi_sas_node;
168}
169
170static void pqi_free_sas_node(struct pqi_sas_node *pqi_sas_node)
171{
172 struct pqi_sas_port *pqi_sas_port;
173 struct pqi_sas_port *next;
174
175 if (!pqi_sas_node)
176 return;
177
178 list_for_each_entry_safe(pqi_sas_port, next,
179 &pqi_sas_node->port_list_head, port_list_entry)
180 pqi_free_sas_port(pqi_sas_port);
181
182 kfree(pqi_sas_node);
183}
184
185struct pqi_scsi_dev *pqi_find_device_by_sas_rphy(
186 struct pqi_ctrl_info *ctrl_info, struct sas_rphy *rphy)
187{
188 struct pqi_scsi_dev *device;
189
190 list_for_each_entry(device, &ctrl_info->scsi_device_list,
191 scsi_device_list_entry) {
192 if (!device->sas_port)
193 continue;
194 if (device->sas_port->rphy == rphy)
195 return device;
196 }
197
198 return NULL;
199}
200
201int pqi_add_sas_host(struct Scsi_Host *shost, struct pqi_ctrl_info *ctrl_info)
202{
203 int rc;
204 struct device *parent_dev;
205 struct pqi_sas_node *pqi_sas_node;
206 struct pqi_sas_port *pqi_sas_port;
207 struct pqi_sas_phy *pqi_sas_phy;
208
209 parent_dev = &shost->shost_gendev;
210
211 pqi_sas_node = pqi_alloc_sas_node(parent_dev);
212 if (!pqi_sas_node)
213 return -ENOMEM;
214
215 pqi_sas_port = pqi_alloc_sas_port(pqi_sas_node, ctrl_info->sas_address);
216 if (!pqi_sas_port) {
217 rc = -ENODEV;
218 goto free_sas_node;
219 }
220
221 pqi_sas_phy = pqi_alloc_sas_phy(pqi_sas_port);
222 if (!pqi_sas_phy) {
223 rc = -ENODEV;
224 goto free_sas_port;
225 }
226
227 rc = pqi_sas_port_add_phy(pqi_sas_phy);
228 if (rc)
229 goto free_sas_phy;
230
231 ctrl_info->sas_host = pqi_sas_node;
232
233 return 0;
234
235free_sas_phy:
236 pqi_free_sas_phy(pqi_sas_phy);
237free_sas_port:
238 pqi_free_sas_port(pqi_sas_port);
239free_sas_node:
240 pqi_free_sas_node(pqi_sas_node);
241
242 return rc;
243}
244
245void pqi_delete_sas_host(struct pqi_ctrl_info *ctrl_info)
246{
247 pqi_free_sas_node(ctrl_info->sas_host);
248}
249
250int pqi_add_sas_device(struct pqi_sas_node *pqi_sas_node,
251 struct pqi_scsi_dev *device)
252{
253 int rc;
254 struct pqi_sas_port *pqi_sas_port;
255 struct sas_rphy *rphy;
256
257 pqi_sas_port = pqi_alloc_sas_port(pqi_sas_node, device->sas_address);
258 if (!pqi_sas_port)
259 return -ENOMEM;
260
261 rphy = sas_end_device_alloc(pqi_sas_port->port);
262 if (!rphy) {
263 rc = -ENODEV;
264 goto free_sas_port;
265 }
266
267 pqi_sas_port->rphy = rphy;
268 device->sas_port = pqi_sas_port;
269
270 rc = pqi_sas_port_add_rphy(pqi_sas_port, rphy);
271 if (rc)
272 goto free_sas_port;
273
274 return 0;
275
276free_sas_port:
277 pqi_free_sas_port(pqi_sas_port);
278 device->sas_port = NULL;
279
280 return rc;
281}
282
283void pqi_remove_sas_device(struct pqi_scsi_dev *device)
284{
285 if (device->sas_port) {
286 pqi_free_sas_port(device->sas_port);
287 device->sas_port = NULL;
288 }
289}
290
291static int pqi_sas_get_linkerrors(struct sas_phy *phy)
292{
293 return 0;
294}
295
296static int pqi_sas_get_enclosure_identifier(struct sas_rphy *rphy,
297 u64 *identifier)
298{
299 return 0;
300}
301
302static int pqi_sas_get_bay_identifier(struct sas_rphy *rphy)
303{
304 return -ENXIO;
305}
306
307static int pqi_sas_phy_reset(struct sas_phy *phy, int hard_reset)
308{
309 return 0;
310}
311
312static int pqi_sas_phy_enable(struct sas_phy *phy, int enable)
313{
314 return 0;
315}
316
317static int pqi_sas_phy_setup(struct sas_phy *phy)
318{
319 return 0;
320}
321
322static void pqi_sas_phy_release(struct sas_phy *phy)
323{
324}
325
326static int pqi_sas_phy_speed(struct sas_phy *phy,
327 struct sas_phy_linkrates *rates)
328{
329 return -EINVAL;
330}
331
332/* SMP = Serial Management Protocol */
333
334static int pqi_sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
335 struct request *req)
336{
337 return -EINVAL;
338}
339
340struct sas_function_template pqi_sas_transport_functions = {
341 .get_linkerrors = pqi_sas_get_linkerrors,
342 .get_enclosure_identifier = pqi_sas_get_enclosure_identifier,
343 .get_bay_identifier = pqi_sas_get_bay_identifier,
344 .phy_reset = pqi_sas_phy_reset,
345 .phy_enable = pqi_sas_phy_enable,
346 .phy_setup = pqi_sas_phy_setup,
347 .phy_release = pqi_sas_phy_release,
348 .set_phy_speed = pqi_sas_phy_speed,
349 .smp_handler = pqi_sas_smp_handler,
350};
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
new file mode 100644
index 000000000000..71408f9e8f75
--- /dev/null
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -0,0 +1,404 @@
1/*
2 * driver for Microsemi PQI-based storage controllers
3 * Copyright (c) 2016 Microsemi Corporation
4 * Copyright (c) 2016 PMC-Sierra, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
16 *
17 */
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/delay.h>
22#include <linux/pci.h>
23#include <scsi/scsi_device.h>
24#include <asm/unaligned.h>
25#include "smartpqi.h"
26#include "smartpqi_sis.h"
27
28/* legacy SIS interface commands */
29#define SIS_CMD_GET_ADAPTER_PROPERTIES 0x19
30#define SIS_CMD_INIT_BASE_STRUCT_ADDRESS 0x1b
31#define SIS_CMD_GET_PQI_CAPABILITIES 0x3000
32
33/* for submission of legacy SIS commands */
34#define SIS_REENABLE_SIS_MODE 0x1
35#define SIS_ENABLE_MSIX 0x40
36#define SIS_SOFT_RESET 0x100
37#define SIS_CMD_READY 0x200
38#define SIS_CMD_COMPLETE 0x1000
39#define SIS_CLEAR_CTRL_TO_HOST_DOORBELL 0x1000
40#define SIS_CMD_STATUS_SUCCESS 0x1
41#define SIS_CMD_COMPLETE_TIMEOUT_SECS 30
42#define SIS_CMD_COMPLETE_POLL_INTERVAL_MSECS 10
43
44/* used with SIS_CMD_GET_ADAPTER_PROPERTIES command */
45#define SIS_EXTENDED_PROPERTIES_SUPPORTED 0x800000
46#define SIS_SMARTARRAY_FEATURES_SUPPORTED 0x2
47#define SIS_PQI_MODE_SUPPORTED 0x4
48#define SIS_REQUIRED_EXTENDED_PROPERTIES \
49 (SIS_SMARTARRAY_FEATURES_SUPPORTED | SIS_PQI_MODE_SUPPORTED)
50
51/* used with SIS_CMD_INIT_BASE_STRUCT_ADDRESS command */
52#define SIS_BASE_STRUCT_REVISION 9
53#define SIS_BASE_STRUCT_ALIGNMENT 16
54
55#define SIS_CTRL_KERNEL_UP 0x80
56#define SIS_CTRL_KERNEL_PANIC 0x100
57#define SIS_CTRL_READY_TIMEOUT_SECS 30
58#define SIS_CTRL_READY_POLL_INTERVAL_MSECS 10
59
60#pragma pack(1)
61
62/* for use with SIS_CMD_INIT_BASE_STRUCT_ADDRESS command */
63struct sis_base_struct {
64 __le32 revision; /* revision of this structure */
65 __le32 flags; /* reserved */
66 __le32 error_buffer_paddr_low; /* lower 32 bits of physical memory */
67 /* buffer for PQI error response */
68 /* data */
69 __le32 error_buffer_paddr_high; /* upper 32 bits of physical */
70 /* memory buffer for PQI */
71 /* error response data */
72 __le32 error_buffer_element_length; /* length of each PQI error */
73 /* response buffer element */
74 /* in bytes */
75 __le32 error_buffer_num_elements; /* total number of PQI error */
76 /* response buffers available */
77};
78
79#pragma pack()
80
81int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info)
82{
83 unsigned long timeout;
84 u32 status;
85
86 timeout = (SIS_CTRL_READY_TIMEOUT_SECS * HZ) + jiffies;
87
88 while (1) {
89 status = readl(&ctrl_info->registers->sis_firmware_status);
90 if (status != ~0) {
91 if (status & SIS_CTRL_KERNEL_PANIC) {
92 dev_err(&ctrl_info->pci_dev->dev,
93 "controller is offline: status code 0x%x\n",
94 readl(
95 &ctrl_info->registers->sis_mailbox[7]));
96 return -ENODEV;
97 }
98 if (status & SIS_CTRL_KERNEL_UP)
99 break;
100 }
101 if (time_after(jiffies, timeout))
102 return -ETIMEDOUT;
103 msleep(SIS_CTRL_READY_POLL_INTERVAL_MSECS);
104 }
105
106 return 0;
107}
108
109bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info)
110{
111 bool running;
112 u32 status;
113
114 status = readl(&ctrl_info->registers->sis_firmware_status);
115
116 if (status & SIS_CTRL_KERNEL_PANIC)
117 running = false;
118 else
119 running = true;
120
121 if (!running)
122 dev_err(&ctrl_info->pci_dev->dev,
123 "controller is offline: status code 0x%x\n",
124 readl(&ctrl_info->registers->sis_mailbox[7]));
125
126 return running;
127}
128
129/* used for passing command parameters/results when issuing SIS commands */
130struct sis_sync_cmd_params {
131 u32 mailbox[6]; /* mailboxes 0-5 */
132};
133
134static int sis_send_sync_cmd(struct pqi_ctrl_info *ctrl_info,
135 u32 cmd, struct sis_sync_cmd_params *params)
136{
137 struct pqi_ctrl_registers __iomem *registers;
138 unsigned int i;
139 unsigned long timeout;
140 u32 doorbell;
141 u32 cmd_status;
142
143 registers = ctrl_info->registers;
144
145 /* Write the command to mailbox 0. */
146 writel(cmd, &registers->sis_mailbox[0]);
147
148 /*
149 * Write the command parameters to mailboxes 1-4 (mailbox 5 is not used
150 * when sending a command to the controller).
151 */
152 for (i = 1; i <= 4; i++)
153 writel(params->mailbox[i], &registers->sis_mailbox[i]);
154
155 /* Clear the command doorbell. */
156 writel(SIS_CLEAR_CTRL_TO_HOST_DOORBELL,
157 &registers->sis_ctrl_to_host_doorbell_clear);
158
159 /* Disable doorbell interrupts by masking all interrupts. */
160 writel(~0, &registers->sis_interrupt_mask);
161
162 /*
163 * Force the completion of the interrupt mask register write before
164 * submitting the command.
165 */
166 readl(&registers->sis_interrupt_mask);
167
168 /* Submit the command to the controller. */
169 writel(SIS_CMD_READY, &registers->sis_host_to_ctrl_doorbell);
170
171 /*
172 * Poll for command completion. Note that the call to msleep() is at
173 * the top of the loop in order to give the controller time to start
174 * processing the command before we start polling.
175 */
176 timeout = (SIS_CMD_COMPLETE_TIMEOUT_SECS * HZ) + jiffies;
177 while (1) {
178 msleep(SIS_CMD_COMPLETE_POLL_INTERVAL_MSECS);
179 doorbell = readl(&registers->sis_ctrl_to_host_doorbell);
180 if (doorbell & SIS_CMD_COMPLETE)
181 break;
182 if (time_after(jiffies, timeout))
183 return -ETIMEDOUT;
184 }
185
186 /* Read the command status from mailbox 0. */
187 cmd_status = readl(&registers->sis_mailbox[0]);
188 if (cmd_status != SIS_CMD_STATUS_SUCCESS) {
189 dev_err(&ctrl_info->pci_dev->dev,
190 "SIS command failed for command 0x%x: status = 0x%x\n",
191 cmd, cmd_status);
192 return -EINVAL;
193 }
194
195 /*
196 * The command completed successfully, so save the command status and
197 * read the values returned in mailboxes 1-5.
198 */
199 params->mailbox[0] = cmd_status;
200 for (i = 1; i < ARRAY_SIZE(params->mailbox); i++)
201 params->mailbox[i] = readl(&registers->sis_mailbox[i]);
202
203 return 0;
204}
205
206/*
207 * This function verifies that we are talking to a controller that speaks PQI.
208 */
209
210int sis_get_ctrl_properties(struct pqi_ctrl_info *ctrl_info)
211{
212 int rc;
213 u32 properties;
214 u32 extended_properties;
215 struct sis_sync_cmd_params params;
216
217 memset(&params, 0, sizeof(params));
218
219 rc = sis_send_sync_cmd(ctrl_info, SIS_CMD_GET_ADAPTER_PROPERTIES,
220 &params);
221 if (rc)
222 return rc;
223
224 properties = params.mailbox[1];
225
226 if (!(properties & SIS_EXTENDED_PROPERTIES_SUPPORTED))
227 return -ENODEV;
228
229 extended_properties = params.mailbox[4];
230
231 if ((extended_properties & SIS_REQUIRED_EXTENDED_PROPERTIES) !=
232 SIS_REQUIRED_EXTENDED_PROPERTIES)
233 return -ENODEV;
234
235 return 0;
236}
237
238int sis_get_pqi_capabilities(struct pqi_ctrl_info *ctrl_info)
239{
240 int rc;
241 struct sis_sync_cmd_params params;
242
243 memset(&params, 0, sizeof(params));
244
245 rc = sis_send_sync_cmd(ctrl_info, SIS_CMD_GET_PQI_CAPABILITIES,
246 &params);
247 if (rc)
248 return rc;
249
250 ctrl_info->max_sg_entries = params.mailbox[1];
251 ctrl_info->max_transfer_size = params.mailbox[2];
252 ctrl_info->max_outstanding_requests = params.mailbox[3];
253 ctrl_info->config_table_offset = params.mailbox[4];
254 ctrl_info->config_table_length = params.mailbox[5];
255
256 return 0;
257}
258
259int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info)
260{
261 int rc;
262 void *base_struct_unaligned;
263 struct sis_base_struct *base_struct;
264 struct sis_sync_cmd_params params;
265 unsigned long error_buffer_paddr;
266 dma_addr_t bus_address;
267
268 base_struct_unaligned = kzalloc(sizeof(*base_struct)
269 + SIS_BASE_STRUCT_ALIGNMENT - 1, GFP_KERNEL);
270 if (!base_struct_unaligned)
271 return -ENOMEM;
272
273 base_struct = PTR_ALIGN(base_struct_unaligned,
274 SIS_BASE_STRUCT_ALIGNMENT);
275 error_buffer_paddr = (unsigned long)ctrl_info->error_buffer_dma_handle;
276
277 put_unaligned_le32(SIS_BASE_STRUCT_REVISION, &base_struct->revision);
278 put_unaligned_le32(lower_32_bits(error_buffer_paddr),
279 &base_struct->error_buffer_paddr_low);
280 put_unaligned_le32(upper_32_bits(error_buffer_paddr),
281 &base_struct->error_buffer_paddr_high);
282 put_unaligned_le32(PQI_ERROR_BUFFER_ELEMENT_LENGTH,
283 &base_struct->error_buffer_element_length);
284 put_unaligned_le32(ctrl_info->max_io_slots,
285 &base_struct->error_buffer_num_elements);
286
287 bus_address = pci_map_single(ctrl_info->pci_dev, base_struct,
288 sizeof(*base_struct), PCI_DMA_TODEVICE);
289 if (pci_dma_mapping_error(ctrl_info->pci_dev, bus_address)) {
290 rc = -ENOMEM;
291 goto out;
292 }
293
294 memset(&params, 0, sizeof(params));
295 params.mailbox[1] = lower_32_bits((u64)bus_address);
296 params.mailbox[2] = upper_32_bits((u64)bus_address);
297 params.mailbox[3] = sizeof(*base_struct);
298
299 rc = sis_send_sync_cmd(ctrl_info, SIS_CMD_INIT_BASE_STRUCT_ADDRESS,
300 &params);
301
302 pci_unmap_single(ctrl_info->pci_dev, bus_address, sizeof(*base_struct),
303 PCI_DMA_TODEVICE);
304
305out:
306 kfree(base_struct_unaligned);
307
308 return rc;
309}
310
311/* Enable MSI-X interrupts on the controller. */
312
313void sis_enable_msix(struct pqi_ctrl_info *ctrl_info)
314{
315 u32 doorbell_register;
316
317 doorbell_register =
318 readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
319 doorbell_register |= SIS_ENABLE_MSIX;
320
321 writel(doorbell_register,
322 &ctrl_info->registers->sis_host_to_ctrl_doorbell);
323}
324
325/* Disable MSI-X interrupts on the controller. */
326
327void sis_disable_msix(struct pqi_ctrl_info *ctrl_info)
328{
329 u32 doorbell_register;
330
331 doorbell_register =
332 readl(&ctrl_info->registers->sis_host_to_ctrl_doorbell);
333 doorbell_register &= ~SIS_ENABLE_MSIX;
334
335 writel(doorbell_register,
336 &ctrl_info->registers->sis_host_to_ctrl_doorbell);
337}
338
339void sis_soft_reset(struct pqi_ctrl_info *ctrl_info)
340{
341 writel(SIS_SOFT_RESET,
342 &ctrl_info->registers->sis_host_to_ctrl_doorbell);
343}
344
345#define SIS_MODE_READY_TIMEOUT_SECS 30
346
347int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info)
348{
349 int rc;
350 unsigned long timeout;
351 struct pqi_ctrl_registers __iomem *registers;
352 u32 doorbell;
353
354 registers = ctrl_info->registers;
355
356 writel(SIS_REENABLE_SIS_MODE,
357 &registers->sis_host_to_ctrl_doorbell);
358
359 rc = 0;
360 timeout = (SIS_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
361
362 while (1) {
363 doorbell = readl(&registers->sis_ctrl_to_host_doorbell);
364 if ((doorbell & SIS_REENABLE_SIS_MODE) == 0)
365 break;
366 if (time_after(jiffies, timeout)) {
367 rc = -ETIMEDOUT;
368 break;
369 }
370 }
371
372 if (rc)
373 dev_err(&ctrl_info->pci_dev->dev,
374 "re-enabling SIS mode failed\n");
375
376 return rc;
377}
378
379void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value)
380{
381 writel(value, &ctrl_info->registers->sis_driver_scratch);
382}
383
384u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info)
385{
386 return readl(&ctrl_info->registers->sis_driver_scratch);
387}
388
389static void __attribute__((unused)) verify_structures(void)
390{
391 BUILD_BUG_ON(offsetof(struct sis_base_struct,
392 revision) != 0x0);
393 BUILD_BUG_ON(offsetof(struct sis_base_struct,
394 flags) != 0x4);
395 BUILD_BUG_ON(offsetof(struct sis_base_struct,
396 error_buffer_paddr_low) != 0x8);
397 BUILD_BUG_ON(offsetof(struct sis_base_struct,
398 error_buffer_paddr_high) != 0xc);
399 BUILD_BUG_ON(offsetof(struct sis_base_struct,
400 error_buffer_element_length) != 0x10);
401 BUILD_BUG_ON(offsetof(struct sis_base_struct,
402 error_buffer_num_elements) != 0x14);
403 BUILD_BUG_ON(sizeof(struct sis_base_struct) != 0x18);
404}
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
new file mode 100644
index 000000000000..bd6e7b08338e
--- /dev/null
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -0,0 +1,34 @@
1/*
2 * driver for Microsemi PQI-based storage controllers
3 * Copyright (c) 2016 Microsemi Corporation
4 * Copyright (c) 2016 PMC-Sierra, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
14 *
15 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
16 *
17 */
18
19#if !defined(_SMARTPQI_SIS_H)
20#define _SMARTPQI_SIS_H
21
22int sis_wait_for_ctrl_ready(struct pqi_ctrl_info *ctrl_info);
23bool sis_is_firmware_running(struct pqi_ctrl_info *ctrl_info);
24int sis_get_ctrl_properties(struct pqi_ctrl_info *ctrl_info);
25int sis_get_pqi_capabilities(struct pqi_ctrl_info *ctrl_info);
26int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info);
27void sis_enable_msix(struct pqi_ctrl_info *ctrl_info);
28void sis_disable_msix(struct pqi_ctrl_info *ctrl_info);
29void sis_soft_reset(struct pqi_ctrl_info *ctrl_info);
30int sis_reenable_sis_mode(struct pqi_ctrl_info *ctrl_info);
31void sis_write_driver_scratch(struct pqi_ctrl_info *ctrl_info, u32 value);
32u32 sis_read_driver_scratch(struct pqi_ctrl_info *ctrl_info);
33
34#endif /* _SMARTPQI_SIS_H */
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index ed179348de80..bed2bbd6b923 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -83,7 +83,7 @@ static int sr_init_command(struct scsi_cmnd *SCpnt);
83static int sr_done(struct scsi_cmnd *); 83static int sr_done(struct scsi_cmnd *);
84static int sr_runtime_suspend(struct device *dev); 84static int sr_runtime_suspend(struct device *dev);
85 85
86static struct dev_pm_ops sr_pm_ops = { 86static const struct dev_pm_ops sr_pm_ops = {
87 .runtime_suspend = sr_runtime_suspend, 87 .runtime_suspend = sr_runtime_suspend,
88}; 88};
89 89
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 5d00e514ff28..d32e3ba8863e 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -1874,7 +1874,7 @@ static void sym2_io_resume(struct pci_dev *pdev)
1874 1874
1875 spin_lock_irq(shost->host_lock); 1875 spin_lock_irq(shost->host_lock);
1876 if (sym_data->io_reset) 1876 if (sym_data->io_reset)
1877 complete_all(sym_data->io_reset); 1877 complete(sym_data->io_reset);
1878 spin_unlock_irq(shost->host_lock); 1878 spin_unlock_irq(shost->host_lock);
1879} 1879}
1880 1880
diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c
deleted file mode 100644
index 8a8608ac62e6..000000000000
--- a/drivers/scsi/t128.c
+++ /dev/null
@@ -1,407 +0,0 @@
1/*
2 * Trantor T128/T128F/T228 driver
3 * Note : architecturally, the T100 and T130 are different and won't
4 * work
5 *
6 * Copyright 1993, Drew Eckhardt
7 * Visionary Computing
8 * (Unix and Linux consulting and custom programming)
9 * drew@colorado.edu
10 * +1 (303) 440-4894
11 *
12 * For more information, please consult
13 *
14 * Trantor Systems, Ltd.
15 * T128/T128F/T228 SCSI Host Adapter
16 * Hardware Specifications
17 *
18 * Trantor Systems, Ltd.
19 * 5415 Randall Place
20 * Fremont, CA 94538
21 * 1+ (415) 770-1400, FAX 1+ (415) 770-9910
22 */
23
24/*
25 * The card is detected and initialized in one of several ways :
26 * 1. Autoprobe (default) - since the board is memory mapped,
27 * a BIOS signature is scanned for to locate the registers.
28 * An interrupt is triggered to autoprobe for the interrupt
29 * line.
30 *
31 * 2. With command line overrides - t128=address,irq may be
32 * used on the LILO command line to override the defaults.
33 *
34 * 3. With the T128_OVERRIDE compile time define. This is
35 * specified as an array of address, irq tuples. Ie, for
36 * one board at the default 0xcc000 address, IRQ5, I could say
37 * -DT128_OVERRIDE={{0xcc000, 5}}
38 *
39 * Note that if the override methods are used, place holders must
40 * be specified for other boards in the system.
41 *
42 * T128/T128F jumper/dipswitch settings (note : on my sample, the switches
43 * were epoxy'd shut, meaning I couldn't change the 0xcc000 base address) :
44 *
45 * T128 Sw7 Sw8 Sw6 = 0ws Sw5 = boot
46 * T128F Sw6 Sw7 Sw5 = 0ws Sw4 = boot Sw8 = floppy disable
47 * cc000 off off
48 * c8000 off on
49 * dc000 on off
50 * d8000 on on
51 *
52 *
53 * Interrupts
54 * There is a 12 pin jumper block, jp1, numbered as follows :
55 * T128 (JP1) T128F (J5)
56 * 2 4 6 8 10 12 11 9 7 5 3 1
57 * 1 3 5 7 9 11 12 10 8 6 4 2
58 *
59 * 3 2-4
60 * 5 1-3
61 * 7 3-5
62 * T128F only
63 * 10 8-10
64 * 12 7-9
65 * 14 10-12
66 * 15 9-11
67 */
68
69#include <linux/io.h>
70#include <linux/blkdev.h>
71#include <linux/interrupt.h>
72#include <linux/init.h>
73#include <linux/module.h>
74
75#include <scsi/scsi_host.h>
76#include "t128.h"
77#include "NCR5380.h"
78
79static struct override {
80 unsigned long address;
81 int irq;
82} overrides
83#ifdef T128_OVERRIDE
84 [] __initdata = T128_OVERRIDE;
85#else
86 [4] __initdata = {{0, IRQ_AUTO}, {0, IRQ_AUTO},
87 {0 ,IRQ_AUTO}, {0, IRQ_AUTO}};
88#endif
89
90#define NO_OVERRIDES ARRAY_SIZE(overrides)
91
92static struct base {
93 unsigned int address;
94 int noauto;
95} bases[] __initdata = {
96 { 0xcc000, 0}, { 0xc8000, 0}, { 0xdc000, 0}, { 0xd8000, 0}
97};
98
99#define NO_BASES ARRAY_SIZE(bases)
100
101static struct signature {
102 const char *string;
103 int offset;
104} signatures[] __initdata = {
105{"TSROM: SCSI BIOS, Version 1.12", 0x36},
106};
107
108#define NO_SIGNATURES ARRAY_SIZE(signatures)
109
110#ifndef MODULE
111/*
112 * Function : t128_setup(char *str, int *ints)
113 *
114 * Purpose : LILO command line initialization of the overrides array,
115 *
116 * Inputs : str - unused, ints - array of integer parameters with ints[0]
117 * equal to the number of ints.
118 *
119 */
120
121static int __init t128_setup(char *str)
122{
123 static int commandline_current;
124 int i;
125 int ints[10];
126
127 get_options(str, ARRAY_SIZE(ints), ints);
128 if (ints[0] != 2)
129 printk("t128_setup : usage t128=address,irq\n");
130 else
131 if (commandline_current < NO_OVERRIDES) {
132 overrides[commandline_current].address = ints[1];
133 overrides[commandline_current].irq = ints[2];
134 for (i = 0; i < NO_BASES; ++i)
135 if (bases[i].address == ints[1]) {
136 bases[i].noauto = 1;
137 break;
138 }
139 ++commandline_current;
140 }
141 return 1;
142}
143
144__setup("t128=", t128_setup);
145#endif
146
147/*
148 * Function : int t128_detect(struct scsi_host_template * tpnt)
149 *
150 * Purpose : detects and initializes T128,T128F, or T228 controllers
151 * that were autoprobed, overridden on the LILO command line,
152 * or specified at compile time.
153 *
154 * Inputs : tpnt - template for this SCSI adapter.
155 *
156 * Returns : 1 if a host adapter was found, 0 if not.
157 *
158 */
159
160static int __init t128_detect(struct scsi_host_template *tpnt)
161{
162 static int current_override, current_base;
163 struct Scsi_Host *instance;
164 unsigned long base;
165 void __iomem *p;
166 int sig, count;
167
168 for (count = 0; current_override < NO_OVERRIDES; ++current_override) {
169 base = 0;
170 p = NULL;
171
172 if (overrides[current_override].address) {
173 base = overrides[current_override].address;
174 p = ioremap(bases[current_base].address, 0x2000);
175 if (!p)
176 base = 0;
177 } else
178 for (; !base && (current_base < NO_BASES); ++current_base) {
179 dprintk(NDEBUG_INIT, "t128: probing address 0x%08x\n",
180 bases[current_base].address);
181 if (bases[current_base].noauto)
182 continue;
183 p = ioremap(bases[current_base].address, 0x2000);
184 if (!p)
185 continue;
186 for (sig = 0; sig < NO_SIGNATURES; ++sig)
187 if (check_signature(p + signatures[sig].offset,
188 signatures[sig].string,
189 strlen(signatures[sig].string))) {
190 base = bases[current_base].address;
191 dprintk(NDEBUG_INIT, "t128: detected board\n");
192 goto found;
193 }
194 iounmap(p);
195 }
196
197 dprintk(NDEBUG_INIT, "t128: base = 0x%08x\n", (unsigned int)base);
198
199 if (!base)
200 break;
201
202found:
203 instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
204 if(instance == NULL)
205 goto out_unmap;
206
207 instance->base = base;
208 ((struct NCR5380_hostdata *)instance->hostdata)->base = p;
209
210 if (NCR5380_init(instance, FLAG_DMA_FIXUP | FLAG_LATE_DMA_SETUP))
211 goto out_unregister;
212
213 NCR5380_maybe_reset_bus(instance);
214
215 if (overrides[current_override].irq != IRQ_AUTO)
216 instance->irq = overrides[current_override].irq;
217 else
218 instance->irq = NCR5380_probe_irq(instance, T128_IRQS);
219
220 /* Compatibility with documented NCR5380 kernel parameters */
221 if (instance->irq == 255)
222 instance->irq = NO_IRQ;
223
224 if (instance->irq != NO_IRQ)
225 if (request_irq(instance->irq, t128_intr, 0, "t128",
226 instance)) {
227 printk("scsi%d : IRQ%d not free, interrupts disabled\n",
228 instance->host_no, instance->irq);
229 instance->irq = NO_IRQ;
230 }
231
232 if (instance->irq == NO_IRQ) {
233 printk("scsi%d : interrupts not enabled. for better interactive performance,\n", instance->host_no);
234 printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no);
235 }
236
237 dprintk(NDEBUG_INIT, "scsi%d: irq = %d\n",
238 instance->host_no, instance->irq);
239
240 ++current_override;
241 ++count;
242 }
243 return count;
244
245out_unregister:
246 scsi_unregister(instance);
247out_unmap:
248 iounmap(p);
249 return count;
250}
251
252static int t128_release(struct Scsi_Host *shost)
253{
254 struct NCR5380_hostdata *hostdata = shost_priv(shost);
255
256 if (shost->irq != NO_IRQ)
257 free_irq(shost->irq, shost);
258 NCR5380_exit(shost);
259 scsi_unregister(shost);
260 iounmap(hostdata->base);
261 return 0;
262}
263
264/*
265 * Function : int t128_biosparam(Disk * disk, struct block_device *dev, int *ip)
266 *
267 * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for
268 * the specified device / size.
269 *
270 * Inputs : size = size of device in sectors (512 bytes), dev = block device
271 * major / minor, ip[] = {heads, sectors, cylinders}
272 *
273 * Returns : always 0 (success), initializes ip
274 *
275 */
276
277/*
278 * XXX Most SCSI boards use this mapping, I could be incorrect. Some one
279 * using hard disks on a trantor should verify that this mapping corresponds
280 * to that used by the BIOS / ASPI driver by running the linux fdisk program
281 * and matching the H_C_S coordinates to what DOS uses.
282 */
283
284static int t128_biosparam(struct scsi_device *sdev, struct block_device *bdev,
285 sector_t capacity, int *ip)
286{
287 ip[0] = 64;
288 ip[1] = 32;
289 ip[2] = capacity >> 11;
290 return 0;
291}
292
293/*
294 * Function : int t128_pread (struct Scsi_Host *instance,
295 * unsigned char *dst, int len)
296 *
297 * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to
298 * dst
299 *
300 * Inputs : dst = destination, len = length in bytes
301 *
302 * Returns : 0 on success, non zero on a failure such as a watchdog
303 * timeout.
304 */
305
306static inline int t128_pread(struct Scsi_Host *instance,
307 unsigned char *dst, int len)
308{
309 struct NCR5380_hostdata *hostdata = shost_priv(instance);
310 void __iomem *reg, *base = hostdata->base;
311 unsigned char *d = dst;
312 register int i = len;
313
314 reg = base + T_DATA_REG_OFFSET;
315
316#if 0
317 for (; i; --i) {
318 while (!(readb(base+T_STATUS_REG_OFFSET) & T_ST_RDY)) barrier();
319#else
320 while (!(readb(base+T_STATUS_REG_OFFSET) & T_ST_RDY)) barrier();
321 for (; i; --i) {
322#endif
323 *d++ = readb(reg);
324 }
325
326 if (readb(base + T_STATUS_REG_OFFSET) & T_ST_TIM) {
327 unsigned char tmp;
328 void __iomem *foo = base + T_CONTROL_REG_OFFSET;
329 tmp = readb(foo);
330 writeb(tmp | T_CR_CT, foo);
331 writeb(tmp, foo);
332 printk("scsi%d : watchdog timer fired in NCR5380_pread()\n",
333 instance->host_no);
334 return -1;
335 } else
336 return 0;
337}
338
339/*
340 * Function : int t128_pwrite (struct Scsi_Host *instance,
341 * unsigned char *src, int len)
342 *
343 * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from
344 * src
345 *
346 * Inputs : src = source, len = length in bytes
347 *
348 * Returns : 0 on success, non zero on a failure such as a watchdog
349 * timeout.
350 */
351
352static inline int t128_pwrite(struct Scsi_Host *instance,
353 unsigned char *src, int len)
354{
355 struct NCR5380_hostdata *hostdata = shost_priv(instance);
356 void __iomem *reg, *base = hostdata->base;
357 unsigned char *s = src;
358 register int i = len;
359
360 reg = base + T_DATA_REG_OFFSET;
361
362#if 0
363 for (; i; --i) {
364 while (!(readb(base+T_STATUS_REG_OFFSET) & T_ST_RDY)) barrier();
365#else
366 while (!(readb(base+T_STATUS_REG_OFFSET) & T_ST_RDY)) barrier();
367 for (; i; --i) {
368#endif
369 writeb(*s++, reg);
370 }
371
372 if (readb(base + T_STATUS_REG_OFFSET) & T_ST_TIM) {
373 unsigned char tmp;
374 void __iomem *foo = base + T_CONTROL_REG_OFFSET;
375 tmp = readb(foo);
376 writeb(tmp | T_CR_CT, foo);
377 writeb(tmp, foo);
378 printk("scsi%d : watchdog timer fired in NCR5380_pwrite()\n",
379 instance->host_no);
380 return -1;
381 } else
382 return 0;
383}
384
385MODULE_LICENSE("GPL");
386
387#include "NCR5380.c"
388
389static struct scsi_host_template driver_template = {
390 .name = "Trantor T128/T128F/T228",
391 .detect = t128_detect,
392 .release = t128_release,
393 .proc_name = "t128",
394 .info = t128_info,
395 .queuecommand = t128_queue_command,
396 .eh_abort_handler = t128_abort,
397 .eh_bus_reset_handler = t128_bus_reset,
398 .bios_param = t128_biosparam,
399 .can_queue = 32,
400 .this_id = 7,
401 .sg_tablesize = SG_ALL,
402 .cmd_per_lun = 2,
403 .use_clustering = DISABLE_CLUSTERING,
404 .cmd_size = NCR5380_CMD_SIZE,
405 .max_sectors = 128,
406};
407#include "scsi_module.c"
diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h
deleted file mode 100644
index c95bcd839109..000000000000
--- a/drivers/scsi/t128.h
+++ /dev/null
@@ -1,97 +0,0 @@
/*
 * Trantor T128/T128F/T228 defines
 * Note : architecturally, the T100 and T128 are different and won't work
 *
 * Copyright 1993, Drew Eckhardt
 *	Visionary Computing
 *	(Unix and Linux consulting and custom programming)
 *	drew@colorado.edu
 *      +1 (303) 440-4894
 *
 * For more information, please consult
 *
 * Trantor Systems, Ltd.
 * T128/T128F/T228 SCSI Host Adapter
 * Hardware Specifications
 *
 * Trantor Systems, Ltd.
 * 5415 Randall Place
 * Fremont, CA 94538
 * 1+ (415) 770-1400, FAX 1+ (415) 770-9910
 */

#ifndef T128_H
#define T128_H

/*
 * The trantor boards are memory mapped. They use an NCR5380 or
 * equivalent (my sample board had part second sourced from ZILOG).
 * NCR's recommended "Pseudo-DMA" architecture is used, where
 * a PAL drives the DMA signals on the 5380 allowing fast, blind
 * transfers with proper handshaking.
 */

/*
 * Note : a boot switch is provided for the purpose of informing the
 * firmware to boot or not boot from attached SCSI devices.  So, I imagine
 * there are fewer people who've yanked the ROM like they do on the Seagate
 * to make bootup faster, and I'll probably use this for autodetection.
 */
#define T_ROM_OFFSET		0

/*
 * Note : my sample board *WAS NOT* populated with the SRAM, so this
 * can't be used for autodetection without a ROM present.
 */
#define T_RAM_OFFSET		0x1800

/*
 * All of the registers are allocated 32 bytes of address space, except
 * for the data register (read/write to/from the 5380 in pseudo-DMA mode)
 */
#define T_CONTROL_REG_OFFSET	0x1c00	/* rw */
#define T_CR_INT		0x10	/* Enable interrupts */
#define T_CR_CT			0x02	/* Reset watchdog timer */

#define T_STATUS_REG_OFFSET	0x1c20	/* ro */
#define T_ST_BOOT		0x80	/* Boot switch */
#define T_ST_S3			0x40	/* User settable switches, */
#define T_ST_S2			0x20	/* read 0 when switch is on, 1 off */
#define T_ST_S1			0x10
#define T_ST_PS2		0x08	/* Set for Microchannel 228 */
#define T_ST_RDY		0x04	/* 5380 DRQ */
#define T_ST_TIM		0x02	/* indicates 40us watchdog timer fired */
#define T_ST_ZERO		0x01	/* Always zero */

#define T_5380_OFFSET		0x1d00	/* 8 registers here, see NCR5380.h */

#define T_DATA_REG_OFFSET	0x1e00	/* rw 512 bytes long */

/*
 * Glue for the generic NCR5380 core (see NCR5380.h/NCR5380.c): extra
 * per-host state is just the ioremap'ed base of the board's memory window.
 */
#define NCR5380_implementation_fields \
    void __iomem *base

/* Map a 5380 register index onto the board's 32-byte-spaced window. */
#define T128_address(reg) \
    (((struct NCR5380_hostdata *)shost_priv(instance))->base + T_5380_OFFSET + ((reg) * 0x20))

#define NCR5380_read(reg) readb(T128_address(reg))
#define NCR5380_write(reg, value) writeb((value),(T128_address(reg)))

/* Pseudo-DMA hooks: transfer whole cmd->transfersize, no residual. */
#define NCR5380_dma_xfer_len(instance, cmd, phase)	(cmd->transfersize)
#define NCR5380_dma_recv_setup		t128_pread
#define NCR5380_dma_send_setup		t128_pwrite
#define NCR5380_dma_residual(instance)	(0)

/* Rename the core entry points so several 5380 drivers can coexist. */
#define NCR5380_intr t128_intr
#define NCR5380_queue_command t128_queue_command
#define NCR5380_abort t128_abort
#define NCR5380_bus_reset t128_bus_reset
#define NCR5380_info t128_info

#define NCR5380_io_delay(x)		udelay(x)

/* Bitmask of IRQ lines the board can be jumpered to:
   15 14 12 10 7 5 3
   1101 0100 1010 1000 */

#define T128_IRQS 0xc4a8

#endif /* T128_H */
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
deleted file mode 100644
index 14eb50b95a1e..000000000000
--- a/drivers/scsi/u14-34f.c
+++ /dev/null
@@ -1,1971 +0,0 @@
1/*
2 * u14-34f.c - Low-level driver for UltraStor 14F/34F SCSI host adapters.
3 *
4 * 03 Jun 2003 Rev. 8.10 for linux-2.5.70
5 * + Update for new IRQ API.
6 * + Use "goto" when appropriate.
7 * + Drop u14-34f.h.
8 * + Update for new module_param API.
9 * + Module parameters can now be specified only in the
10 * same format as the kernel boot options.
11 *
12 * boot option old module param
13 * ----------- ------------------
14 * addr,... io_port=addr,...
15 * lc:[y|n] linked_comm=[1|0]
16 * mq:xx max_queue_depth=xx
17 * tm:[0|1|2] tag_mode=[0|1|2]
18 * et:[y|n] ext_tran=[1|0]
19 * of:[y|n] have_old_firmware=[1|0]
20 *
21 * A valid example using the new parameter format is:
22 * modprobe u14-34f "u14-34f=0x340,0x330,lc:y,tm:0,mq:4"
23 *
24 * which is equivalent to the old format:
25 * modprobe u14-34f io_port=0x340,0x330 linked_comm=1 tag_mode=0 \
26 * max_queue_depth=4
27 *
28 * With actual module code, u14-34f and u14_34f are equivalent
29 * as module parameter names.
30 *
31 * 12 Feb 2003 Rev. 8.04 for linux 2.5.60
32 * + Release irq before calling scsi_register.
33 *
34 * 12 Nov 2002 Rev. 8.02 for linux 2.5.47
35 * + Release driver_lock before calling scsi_register.
36 *
37 * 11 Nov 2002 Rev. 8.01 for linux 2.5.47
38 * + Fixed bios_param and scsicam_bios_param calling parameters.
39 *
40 * 28 Oct 2002 Rev. 8.00 for linux 2.5.44-ac4
41 * + Use new tcq and adjust_queue_depth api.
42 * + New command line option (tm:[0-2]) to choose the type of tags:
43 * 0 -> disable tagging ; 1 -> simple tags ; 2 -> ordered tags.
44 * Default is tm:0 (tagged commands disabled).
45 * For compatibility the "tc:" option is an alias of the "tm:"
46 * option; tc:n is equivalent to tm:0 and tc:y is equivalent to
47 * tm:1.
48 *
49 * 10 Oct 2002 Rev. 7.70 for linux 2.5.42
50 * + Foreport from revision 6.70.
51 *
52 * 25 Jun 2002 Rev. 6.70 for linux 2.4.19
53 * + Fixed endian-ness problem due to bitfields.
54 *
55 * 21 Feb 2002 Rev. 6.52 for linux 2.4.18
56 * + Backport from rev. 7.22 (use io_request_lock).
57 *
58 * 20 Feb 2002 Rev. 7.22 for linux 2.5.5
59 * + Remove any reference to virt_to_bus().
60 * + Fix pio hang while detecting multiple HBAs.
61 *
62 * 01 Jan 2002 Rev. 7.20 for linux 2.5.1
63 * + Use the dynamic DMA mapping API.
64 *
65 * 19 Dec 2001 Rev. 7.02 for linux 2.5.1
66 * + Use SCpnt->sc_data_direction if set.
67 * + Use sglist.page instead of sglist.address.
68 *
69 * 11 Dec 2001 Rev. 7.00 for linux 2.5.1
70 * + Use host->host_lock instead of io_request_lock.
71 *
72 * 1 May 2001 Rev. 6.05 for linux 2.4.4
73 * + Fix data transfer direction for opcode SEND_CUE_SHEET (0x5d)
74 *
75 * 25 Jan 2001 Rev. 6.03 for linux 2.4.0
76 * + "check_region" call replaced by "request_region".
77 *
78 * 22 Nov 2000 Rev. 6.02 for linux 2.4.0-test11
79 * + Removed old scsi error handling support.
80 * + The obsolete boot option flag eh:n is silently ignored.
81 * + Removed error messages while a disk drive is powered up at
82 * boot time.
83 * + Improved boot messages: all tagged capable device are
84 * indicated as "tagged".
85 *
86 * 16 Sep 1999 Rev. 5.11 for linux 2.2.12 and 2.3.18
87 * + Updated to the new __setup interface for boot command line options.
88 * + When loaded as a module, accepts the new parameter boot_options
89 * which value is a string with the same format of the kernel boot
90 * command line options. A valid example is:
91 * modprobe u14-34f 'boot_options="0x230,0x340,lc:y,mq:4"'
92 *
93 * 22 Jul 1999 Rev. 5.00 for linux 2.2.10 and 2.3.11
94 * + Removed pre-2.2 source code compatibility.
95 *
96 * 26 Jul 1998 Rev. 4.33 for linux 2.0.35 and 2.1.111
97 * Added command line option (et:[y|n]) to use the existing
98 * translation (returned by scsicam_bios_param) as disk geometry.
99 * The default is et:n, which uses the disk geometry jumpered
100 * on the board.
101 * The default value et:n is compatible with all previous revisions
102 * of this driver.
103 *
104 * 28 May 1998 Rev. 4.32 for linux 2.0.33 and 2.1.104
105 * Increased busy timeout from 10 msec. to 200 msec. while
106 * processing interrupts.
107 *
108 * 18 May 1998 Rev. 4.31 for linux 2.0.33 and 2.1.102
109 * Improved abort handling during the eh recovery process.
110 *
111 * 13 May 1998 Rev. 4.30 for linux 2.0.33 and 2.1.101
112 * The driver is now fully SMP safe, including the
113 * abort and reset routines.
114 * Added command line options (eh:[y|n]) to choose between
115 * new_eh_code and the old scsi code.
116 * If linux version >= 2.1.101 the default is eh:y, while the eh
117 * option is ignored for previous releases and the old scsi code
118 * is used.
119 *
120 * 18 Apr 1998 Rev. 4.20 for linux 2.0.33 and 2.1.97
121 * Reworked interrupt handler.
122 *
123 * 11 Apr 1998 rev. 4.05 for linux 2.0.33 and 2.1.95
124 * Major reliability improvement: when a batch with overlapping
125 * requests is detected, requests are queued one at a time
126 * eliminating any possible board or drive reordering.
127 *
128 * 10 Apr 1998 rev. 4.04 for linux 2.0.33 and 2.1.95
129 * Improved SMP support (if linux version >= 2.1.95).
130 *
131 * 9 Apr 1998 rev. 4.03 for linux 2.0.33 and 2.1.94
132 * Performance improvement: when sequential i/o is detected,
133 * always use direct sort instead of reverse sort.
134 *
135 * 4 Apr 1998 rev. 4.02 for linux 2.0.33 and 2.1.92
136 * io_port is now unsigned long.
137 *
138 * 17 Mar 1998 rev. 4.01 for linux 2.0.33 and 2.1.88
139 * Use new scsi error handling code (if linux version >= 2.1.88).
140 * Use new interrupt code.
141 *
142 * 12 Sep 1997 rev. 3.11 for linux 2.0.30 and 2.1.55
143 * Use of udelay inside the wait loops to avoid timeout
144 * problems with fast cpus.
145 * Removed check about useless calls to the interrupt service
146 * routine (reported on SMP systems only).
147 * At initialization time "sorted/unsorted" is displayed instead
148 * of "linked/unlinked" to reinforce the fact that "linking" is
149 * nothing but "elevator sorting" in the actual implementation.
150 *
151 * 17 May 1997 rev. 3.10 for linux 2.0.30 and 2.1.38
152 * Use of serial_number_at_timeout in abort and reset processing.
153 * Use of the __initfunc and __initdata macro in setup code.
154 * Minor cleanups in the list_statistics code.
155 *
156 * 24 Feb 1997 rev. 3.00 for linux 2.0.29 and 2.1.26
157 * When loading as a module, parameter passing is now supported
158 * both in 2.0 and in 2.1 style.
159 * Fixed data transfer direction for some SCSI opcodes.
160 * Immediate acknowledge to request sense commands.
161 * Linked commands to each disk device are now reordered by elevator
162 * sorting. Rare cases in which reordering of write requests could
163 * cause wrong results are managed.
164 *
165 * 18 Jan 1997 rev. 2.60 for linux 2.1.21 and 2.0.28
166 * Added command line options to enable/disable linked commands
167 * (lc:[y|n]), old firmware support (of:[y|n]) and to set the max
168 * queue depth (mq:xx). Default is "u14-34f=lc:n,of:n,mq:8".
169 * Improved command linking.
170 *
171 * 8 Jan 1997 rev. 2.50 for linux 2.1.20 and 2.0.27
172 * Added linked command support.
173 *
174 * 3 Dec 1996 rev. 2.40 for linux 2.1.14 and 2.0.27
175 * Added queue depth adjustment.
176 *
177 * 22 Nov 1996 rev. 2.30 for linux 2.1.12 and 2.0.26
178 * The list of i/o ports to be probed can be overwritten by the
179 * "u14-34f=port0,port1,...." boot command line option.
180 * Scatter/gather lists are now allocated by a number of kmalloc
181 * calls, in order to avoid the previous size limit of 64Kb.
182 *
183 * 16 Nov 1996 rev. 2.20 for linux 2.1.10 and 2.0.25
184 * Added multichannel support.
185 *
186 * 27 Sep 1996 rev. 2.12 for linux 2.1.0
187 * Portability cleanups (virtual/bus addressing, little/big endian
188 * support).
189 *
190 * 09 Jul 1996 rev. 2.11 for linux 2.0.4
191 * "Data over/under-run" no longer implies a redo on all targets.
192 * Number of internal retries is now limited.
193 *
194 * 16 Apr 1996 rev. 2.10 for linux 1.3.90
195 * New argument "reset_flags" to the reset routine.
196 *
197 * 21 Jul 1995 rev. 2.02 for linux 1.3.11
198 * Fixed Data Transfer Direction for some SCSI commands.
199 *
200 * 13 Jun 1995 rev. 2.01 for linux 1.2.10
201 * HAVE_OLD_UX4F_FIRMWARE should be defined for U34F boards when
202 * the firmware prom is not the latest one (28008-006).
203 *
204 * 11 Mar 1995 rev. 2.00 for linux 1.2.0
205 * Fixed a bug which prevented media change detection for removable
206 * disk drives.
207 *
208 * 23 Feb 1995 rev. 1.18 for linux 1.1.94
209 * Added a check for scsi_register returning NULL.
210 *
211 * 11 Feb 1995 rev. 1.17 for linux 1.1.91
212 * U14F qualified to run with 32 sglists.
213 * Now DEBUG_RESET is disabled by default.
214 *
215 * 9 Feb 1995 rev. 1.16 for linux 1.1.90
216 * Use host->wish_block instead of host->block.
217 *
218 * 8 Feb 1995 rev. 1.15 for linux 1.1.89
219 * Cleared target_time_out counter while performing a reset.
220 *
221 * 28 Jan 1995 rev. 1.14 for linux 1.1.86
222 * Added module support.
223 * Log and do a retry when a disk drive returns a target status
224 * different from zero on a recovered error.
225 * Auto detects if U14F boards have an old firmware revision.
226 * Max number of scatter/gather lists set to 16 for all boards
227 * (most installation run fine using 33 sglists, while other
228 * has problems when using more than 16).
229 *
230 * 16 Jan 1995 rev. 1.13 for linux 1.1.81
231 * Display a message if check_region detects a port address
232 * already in use.
233 *
234 * 15 Dec 1994 rev. 1.12 for linux 1.1.74
235 * The host->block flag is set for all the detected ISA boards.
236 *
237 * 30 Nov 1994 rev. 1.11 for linux 1.1.68
238 * Redo i/o on target status CHECK_CONDITION for TYPE_DISK only.
239 * Added optional support for using a single board at a time.
240 *
241 * 14 Nov 1994 rev. 1.10 for linux 1.1.63
242 *
243 * 28 Oct 1994 rev. 1.09 for linux 1.1.58 Final BETA release.
244 * 16 Jul 1994 rev. 1.00 for linux 1.1.29 Initial ALPHA release.
245 *
246 * This driver is a total replacement of the original UltraStor
247 * scsi driver, but it supports ONLY the 14F and 34F boards.
248 * It can be configured in the same kernel in which the original
249 * ultrastor driver is configured to allow the original U24F
250 * support.
251 *
252 * Multiple U14F and/or U34F host adapters are supported.
253 *
254 * Copyright (C) 1994-2003 Dario Ballabio (ballabio_dario@emc.com)
255 *
256 * Alternate email: dario.ballabio@inwind.it, dario.ballabio@tiscalinet.it
257 *
258 * Redistribution and use in source and binary forms, with or without
259 * modification, are permitted provided that redistributions of source
260 * code retain the above copyright notice and this comment without
261 * modification.
262 *
263 * WARNING: if your 14/34F board has an old firmware revision (see below)
264 * you must change "#undef" into "#define" in the following
265 * statement.
266 */
267#undef HAVE_OLD_UX4F_FIRMWARE
268/*
269 * The UltraStor 14F, 24F, and 34F are a family of intelligent, high
270 * performance SCSI-2 host adapters.
271 * Here is the scoop on the various models:
272 *
273 * 14F - ISA first-party DMA HA with floppy support and WD1003 emulation.
274 * 24F - EISA Bus Master HA with floppy support and WD1003 emulation.
275 * 34F - VESA Local-Bus Bus Master HA (no WD1003 emulation).
276 *
277 * This code has been tested with up to two U14F boards, using both
278 * firmware 28004-005/38004-004 (BIOS rev. 2.00) and the latest firmware
279 * 28004-006/38004-005 (BIOS rev. 2.01).
280 *
281 * The latest firmware is required in order to get reliable operations when
282 * clustering is enabled. ENABLE_CLUSTERING provides a performance increase
283 * up to 50% on sequential access.
284 *
285 * Since the struct scsi_host_template structure is shared among all 14F and 34F,
286 * the last setting of use_clustering is in effect for all of these boards.
287 *
288 * Here a sample configuration using two U14F boards:
289 *
290 U14F0: ISA 0x330, BIOS 0xc8000, IRQ 11, DMA 5, SG 32, MB 16, of:n, lc:y, mq:8.
291 U14F1: ISA 0x340, BIOS 0x00000, IRQ 10, DMA 6, SG 32, MB 16, of:n, lc:y, mq:8.
292 *
293 * The boot controller must have its BIOS enabled, while other boards can
 * have their BIOS disabled, or enabled to a higher address.
295 * Boards are named Ux4F0, Ux4F1..., according to the port address order in
296 * the io_port[] array.
297 *
298 * The following facts are based on real testing results (not on
299 * documentation) on the above U14F board.
300 *
301 * - The U14F board should be jumpered for bus on time less or equal to 7
 * microseconds, while the default is 11 microseconds. This is in order to
303 * get acceptable performance while using floppy drive and hard disk
304 * together. The jumpering for 7 microseconds is: JP13 pin 15-16,
305 * JP14 pin 7-8 and pin 9-10.
306 * The reduction has a little impact on scsi performance.
307 *
308 * - If scsi bus length exceeds 3m., the scsi bus speed needs to be reduced
309 * from 10Mhz to 5Mhz (do this by inserting a jumper on JP13 pin 7-8).
310 *
311 * - If U14F on board firmware is older than 28004-006/38004-005,
312 * the U14F board is unable to provide reliable operations if the scsi
313 * request length exceeds 16Kbyte. When this length is exceeded the
314 * behavior is:
315 * - adapter_status equal 0x96 or 0xa3 or 0x93 or 0x94;
316 * - adapter_status equal 0 and target_status equal 2 on for all targets
317 * in the next operation following the reset.
318 * This sequence takes a long time (>3 seconds), so in the meantime
319 * the SD_TIMEOUT in sd.c could expire giving rise to scsi aborts
320 * (SD_TIMEOUT has been increased from 3 to 6 seconds in 1.1.31).
321 * Because of this I had to DISABLE_CLUSTERING and to work around the
322 * bus reset in the interrupt service routine, returning DID_BUS_BUSY
 * so that the operations are retried without complaints from the scsi.c
324 * code.
325 * Any reset of the scsi bus is going to kill tape operations, since
326 * no retry is allowed for tapes. Bus resets are more likely when the
327 * scsi bus is under heavy load.
328 * Requests using scatter/gather have a maximum length of 16 x 1024 bytes
329 * when DISABLE_CLUSTERING is in effect, but unscattered requests could be
330 * larger than 16Kbyte.
331 *
332 * The new firmware has fixed all the above problems.
333 *
334 * For U34F boards the latest bios prom is 38008-002 (BIOS rev. 2.01),
335 * the latest firmware prom is 28008-006. Older firmware 28008-005 has
336 * problems when using more than 16 scatter/gather lists.
337 *
338 * The list of i/o ports to be probed can be totally replaced by the
339 * boot command line option: "u14-34f=port0,port1,port2,...", where the
340 * port0, port1... arguments are ISA/VESA addresses to be probed.
341 * For example using "u14-34f=0x230,0x340", the driver probes only the two
342 * addresses 0x230 and 0x340 in this order; "u14-34f=0" totally disables
343 * this driver.
344 *
345 * After the optional list of detection probes, other possible command line
346 * options are:
347 *
348 * et:y use disk geometry returned by scsicam_bios_param;
349 * et:n use disk geometry jumpered on the board;
350 * lc:y enables linked commands;
351 * lc:n disables linked commands;
352 * tm:0 disables tagged commands (same as tc:n);
353 * tm:1 use simple queue tags (same as tc:y);
354 * tm:2 use ordered queue tags (same as tc:2);
355 * of:y enables old firmware support;
356 * of:n disables old firmware support;
357 * mq:xx set the max queue depth to the value xx (2 <= xx <= 8).
358 *
359 * The default value is: "u14-34f=lc:n,of:n,mq:8,tm:0,et:n".
360 * An example using the list of detection probes could be:
361 * "u14-34f=0x230,0x340,lc:y,tm:2,of:n,mq:4,et:n".
362 *
363 * When loading as a module, parameters can be specified as well.
364 * The above example would be (use 1 in place of y and 0 in place of n):
365 *
366 * modprobe u14-34f io_port=0x230,0x340 linked_comm=1 have_old_firmware=0 \
367 * max_queue_depth=4 ext_tran=0 tag_mode=2
368 *
369 * ----------------------------------------------------------------------------
370 * In this implementation, linked commands are designed to work with any DISK
371 * or CD-ROM, since this linking has only the intent of clustering (time-wise)
372 * and reordering by elevator sorting commands directed to each device,
373 * without any relation with the actual SCSI protocol between the controller
374 * and the device.
375 * If Q is the queue depth reported at boot time for each device (also named
376 * cmds/lun) and Q > 2, whenever there is already an active command to the
377 * device all other commands to the same device (up to Q-1) are kept waiting
378 * in the elevator sorting queue. When the active command completes, the
379 * commands in this queue are sorted by sector address. The sort is chosen
380 * between increasing or decreasing by minimizing the seek distance between
381 * the sector of the commands just completed and the sector of the first
382 * command in the list to be sorted.
383 * Trivial math assures that the unsorted average seek distance when doing
384 * random seeks over S sectors is S/3.
385 * When (Q-1) requests are uniformly distributed over S sectors, the average
386 * distance between two adjacent requests is S/((Q-1) + 1), so the sorted
387 * average seek distance for (Q-1) random requests over S sectors is S/Q.
388 * The elevator sorting hence divides the seek distance by a factor Q/3.
389 * The above pure geometric remarks are valid in all cases and the
390 * driver effectively reduces the seek distance by the predicted factor
391 * when there are Q concurrent read i/o operations on the device, but this
392 * does not necessarily results in a noticeable performance improvement:
393 * your mileage may vary....
394 *
395 * Note: command reordering inside a batch of queued commands could cause
396 * wrong results only if there is at least one write request and the
397 * intersection (sector-wise) of all requests is not empty.
398 * When the driver detects a batch including overlapping requests
399 * (a really rare event) strict serial (pid) order is enforced.
400 * ----------------------------------------------------------------------------
401 *
402 * The boards are named Ux4F0, Ux4F1,... according to the detection order.
403 *
404 * In order to support multiple ISA boards in a reliable way,
405 * the driver sets host->wish_block = TRUE for all ISA boards.
406 */
407
408#include <linux/string.h>
409#include <linux/kernel.h>
410#include <linux/ioport.h>
411#include <linux/delay.h>
412#include <asm/io.h>
413#include <asm/byteorder.h>
414#include <linux/proc_fs.h>
415#include <linux/blkdev.h>
416#include <linux/interrupt.h>
417#include <linux/stat.h>
418#include <linux/pci.h>
419#include <linux/init.h>
420#include <linux/ctype.h>
421#include <linux/spinlock.h>
422#include <linux/slab.h>
423#include <asm/dma.h>
424#include <asm/irq.h>
425
426#include <scsi/scsi.h>
427#include <scsi/scsi_cmnd.h>
428#include <scsi/scsi_device.h>
429#include <scsi/scsi_host.h>
430#include <scsi/scsi_tcq.h>
431#include <scsi/scsicam.h>
432
433static int u14_34f_detect(struct scsi_host_template *);
434static int u14_34f_release(struct Scsi_Host *);
435static int u14_34f_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
436static int u14_34f_eh_abort(struct scsi_cmnd *);
437static int u14_34f_eh_host_reset(struct scsi_cmnd *);
438static int u14_34f_bios_param(struct scsi_device *, struct block_device *,
439 sector_t, int *);
440static int u14_34f_slave_configure(struct scsi_device *);
441
/*
 * Old-style (detect/release) SCSI host template for the UltraStor
 * 14F (ISA) and 34F (VESA) boards.  unchecked_isa_dma = 1 keeps buffers
 * inside the ISA 16MB DMA window; clustering is safe only with recent
 * firmware (see HAVE_OLD_UX4F_FIRMWARE above).
 */
static struct scsi_host_template driver_template = {
                .name                    = "UltraStor 14F/34F rev. 8.10.00 ",
                .detect                  = u14_34f_detect,
                .release                 = u14_34f_release,
                .queuecommand            = u14_34f_queuecommand,
                .eh_abort_handler        = u14_34f_eh_abort,
                .eh_host_reset_handler   = u14_34f_eh_host_reset,
                .bios_param              = u14_34f_bios_param,
                .slave_configure         = u14_34f_slave_configure,
                .this_id                 = 7,	/* host adapter's own SCSI id */
                .unchecked_isa_dma       = 1,
                .use_clustering          = ENABLE_CLUSTERING,
                };
455
456#if !defined(__BIG_ENDIAN_BITFIELD) && !defined(__LITTLE_ENDIAN_BITFIELD)
457#error "Adjust your <asm/byteorder.h> defines"
458#endif
459
460/* Values for the PRODUCT_ID ports for the 14/34F */
461#define PRODUCT_ID1 0x56
462#define PRODUCT_ID2 0x40 /* NOTE: Only upper nibble is used */
463
464/* Subversion values */
465#define ISA 0
466#define ESA 1
467
468#define OP_HOST_ADAPTER 0x1
469#define OP_SCSI 0x2
470#define OP_RESET 0x4
471#define DTD_SCSI 0x0
472#define DTD_IN 0x1
473#define DTD_OUT 0x2
474#define DTD_NONE 0x3
475#define HA_CMD_INQUIRY 0x1
476#define HA_CMD_SELF_DIAG 0x2
477#define HA_CMD_READ_BUFF 0x3
478#define HA_CMD_WRITE_BUFF 0x4
479
480#undef DEBUG_LINKED_COMMANDS
481#undef DEBUG_DETECT
482#undef DEBUG_INTERRUPT
483#undef DEBUG_RESET
484#undef DEBUG_GENERATE_ERRORS
485#undef DEBUG_GENERATE_ABORTS
486#undef DEBUG_GEOMETRY
487
488#define MAX_ISA 3
489#define MAX_VESA 1
490#define MAX_EISA 0
491#define MAX_PCI 0
492#define MAX_BOARDS (MAX_ISA + MAX_VESA + MAX_EISA + MAX_PCI)
493#define MAX_CHANNEL 1
494#define MAX_LUN 8
495#define MAX_TARGET 8
496#define MAX_MAILBOXES 16
497#define MAX_SGLIST 32
498#define MAX_SAFE_SGLIST 16
499#define MAX_INTERNAL_RETRIES 64
500#define MAX_CMD_PER_LUN 2
501#define MAX_TAGGED_CMD_PER_LUN (MAX_MAILBOXES - MAX_CMD_PER_LUN)
502
503#define SKIP ULONG_MAX
504#define FALSE 0
505#define TRUE 1
506#define FREE 0
507#define IN_USE 1
508#define LOCKED 2
509#define IN_RESET 3
510#define IGNORE 4
511#define READY 5
512#define ABORTING 6
513#define NO_DMA 0xff
514#define MAXLOOP 10000
515#define TAG_DISABLED 0
516#define TAG_SIMPLE 1
517#define TAG_ORDERED 2
518
519#define REG_LCL_MASK 0
520#define REG_LCL_INTR 1
521#define REG_SYS_MASK 2
522#define REG_SYS_INTR 3
523#define REG_PRODUCT_ID1 4
524#define REG_PRODUCT_ID2 5
525#define REG_CONFIG1 6
526#define REG_CONFIG2 7
527#define REG_OGM 8
528#define REG_ICM 12
529#define REGION_SIZE 13UL
530#define BSY_ASSERTED 0x01
531#define IRQ_ASSERTED 0x01
532#define CMD_RESET 0xc0
533#define CMD_OGM_INTR 0x01
534#define CMD_CLR_INTR 0x01
535#define CMD_ENA_INTR 0x81
536#define ASOK 0x00
537#define ASST 0x91
538
539#define YESNO(a) ((a) ? 'y' : 'n')
540#define TLDEV(type) ((type) == TYPE_DISK || (type) == TYPE_ROM)
541
542#define PACKED __attribute__((packed))
543
/* One scatter/gather segment as consumed by the board: 8 bytes,
 * bus address + length, stored little-endian via H2DEV(). */
struct sg_list {
   unsigned int address;                /* Segment Address */
   unsigned int num_bytes;              /* Segment Length */
   };
548
/* MailBox SCSI Command Packet */
/*
 * Hardware-defined layout handed to the board via the OGM register; the
 * device is little-endian (multi-byte fields go through H2DEV/DEV2H) and
 * the leading bitfield bytes are mirrored for big-endian hosts.  Only the
 * fields up to sense_addr are seen by hardware; everything after the
 * "Additional fields" marker is driver-private, and the last CP_TAIL_SIZE
 * bytes survive the per-command memset in queuecommand.
 */
struct mscp {

#if defined(__BIG_ENDIAN_BITFIELD)
   unsigned char sg:1, ca:1, dcn:1, xdir:2, opcode:3;
   unsigned char lun: 3, channel:2, target:3;
#else
   unsigned char opcode: 3,             /* type of command */
                 xdir: 2,               /* data transfer direction */
                 dcn: 1,                /* disable disconnect */
                 ca: 1,                 /* use cache (if available) */
                 sg: 1;                 /* scatter/gather operation */
   unsigned char target: 3,             /* SCSI target id */
                 channel: 2,            /* SCSI channel number */
                 lun: 3;                /* SCSI logical unit number */
#endif

   unsigned int data_address PACKED;    /* transfer data pointer */
   unsigned int data_len PACKED;        /* length in bytes */
   unsigned int link_address PACKED;    /* for linking command chains */
   unsigned char clink_id;              /* identifies command in chain */
   unsigned char use_sg;                /* (if sg is set) 8 bytes per list */
   unsigned char sense_len;
   unsigned char cdb_len;               /* 6, 10, or 12 */
   unsigned char cdb[12];               /* SCSI Command Descriptor Block */
   unsigned char adapter_status;        /* non-zero indicates HA error */
   unsigned char target_status;         /* non-zero indicates target error */
   unsigned int sense_addr PACKED;

   /* Additional fields begin here. */
   struct scsi_cmnd *SCpnt;
   unsigned int cpp_index;              /* cp index */

   /* All the cp structure is zero filled by queuecommand except the
      following CP_TAIL_SIZE bytes, initialized by detect */
   dma_addr_t cp_dma_addr;              /* dma handle for this cp structure */
   struct sg_list *sglist;              /* pointer to the allocated SG list */
   };
587
/*
 * Size of the struct mscp tail that queuecommand's memset must preserve:
 * the sglist pointer plus the cp_dma_addr handle.
 * Fix: the original read sizeof(struct sglist *) -- "sglist" is a typo for
 * the real tag "sg_list".  It compiled (a pointer to an undeclared struct
 * is legal) and gave the right size only because all struct pointers share
 * one size; name the actual type so the expression stays honest.
 */
#define CP_TAIL_SIZE (sizeof(struct sg_list *) + sizeof(dma_addr_t))
589
/*
 * Per-board driver state, living in the Scsi_Host hostdata area (see the
 * HD() accessor below).  Holds the fixed pool of MAX_MAILBOXES command
 * packets plus bookkeeping for retries, resets and per-target timeouts.
 */
struct hostdata {
   struct mscp cp[MAX_MAILBOXES];       /* Mailboxes for this board */
   unsigned int cp_stat[MAX_MAILBOXES]; /* FREE, IN_USE, LOCKED, IN_RESET */
   unsigned int last_cp_used;           /* Index of last mailbox used */
   unsigned int iocount;                /* Total i/o done for this board */
   int board_number;                    /* Number of this board */
   char board_name[16];                 /* Name of this board */
   int in_reset;                        /* True if board is doing a reset */
   int target_to[MAX_TARGET][MAX_CHANNEL]; /* N. of timeout errors on target */
   int target_redo[MAX_TARGET][MAX_CHANNEL]; /* If TRUE redo i/o on target */
   unsigned int retries;                /* Number of internal retries */
   unsigned long last_retried_pid;      /* Pid of last retried command */
   unsigned char subversion;            /* Bus type, either ISA or ESA */
   struct pci_dev *pdev;                /* Always NULL */
   unsigned char heads;
   unsigned char sectors;
   char board_id[256];                  /* data from INQUIRY on this board */
   };
608
609static struct Scsi_Host *sh[MAX_BOARDS + 1];
610static const char *driver_name = "Ux4F";
611static char sha[MAX_BOARDS];
612static DEFINE_SPINLOCK(driver_lock);
613
614/* Initialize num_boards so that ihdlr can work while detect is in progress */
615static unsigned int num_boards = MAX_BOARDS;
616
617static unsigned long io_port[] = {
618
619 /* Space for MAX_INT_PARAM ports usable while loading as a module */
620 SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP, SKIP,
621 SKIP, SKIP,
622
623 /* Possible ISA/VESA ports */
624 0x330, 0x340, 0x230, 0x240, 0x210, 0x130, 0x140,
625
626 /* End of list */
627 0x0
628 };
629
630#define HD(board) ((struct hostdata *) &sh[board]->hostdata)
631#define BN(board) (HD(board)->board_name)
632
633/* Device is Little Endian */
634#define H2DEV(x) cpu_to_le32(x)
635#define DEV2H(x) le32_to_cpu(x)
636
637static irqreturn_t do_interrupt_handler(int, void *);
638static void flush_dev(struct scsi_device *, unsigned long, unsigned int, unsigned int);
639static int do_trace = FALSE;
640static int setup_done = FALSE;
641static int link_statistics;
642static int ext_tran = FALSE;
643
644#if defined(HAVE_OLD_UX4F_FIRMWARE)
645static int have_old_firmware = TRUE;
646#else
647static int have_old_firmware = FALSE;
648#endif
649
650#if defined(CONFIG_SCSI_U14_34F_TAGGED_QUEUE)
651static int tag_mode = TAG_SIMPLE;
652#else
653static int tag_mode = TAG_DISABLED;
654#endif
655
656#if defined(CONFIG_SCSI_U14_34F_LINKED_COMMANDS)
657static int linked_comm = TRUE;
658#else
659static int linked_comm = FALSE;
660#endif
661
662#if defined(CONFIG_SCSI_U14_34F_MAX_TAGS)
663static int max_queue_depth = CONFIG_SCSI_U14_34F_MAX_TAGS;
664#else
665static int max_queue_depth = MAX_CMD_PER_LUN;
666#endif
667
668#define MAX_INT_PARAM 10
669#define MAX_BOOT_OPTIONS_SIZE 256
670static char boot_options[MAX_BOOT_OPTIONS_SIZE];
671
672#if defined(MODULE)
673#include <linux/module.h>
674#include <linux/moduleparam.h>
675
676module_param_string(u14_34f, boot_options, MAX_BOOT_OPTIONS_SIZE, 0);
677MODULE_PARM_DESC(u14_34f, " equivalent to the \"u14-34f=...\" kernel boot " \
678"option." \
679" Example: modprobe u14-34f \"u14_34f=0x340,0x330,lc:y,tm:0,mq:4\"");
680MODULE_AUTHOR("Dario Ballabio");
681MODULE_LICENSE("GPL");
682MODULE_DESCRIPTION("UltraStor 14F/34F SCSI Driver");
683
684#endif
685
686static int u14_34f_slave_configure(struct scsi_device *dev) {
687 int j, tqd, utqd;
688 char *tag_suffix, *link_suffix;
689 struct Scsi_Host *host = dev->host;
690
691 j = ((struct hostdata *) host->hostdata)->board_number;
692
693 utqd = MAX_CMD_PER_LUN;
694 tqd = max_queue_depth;
695
696 if (TLDEV(dev->type) && dev->tagged_supported)
697
698 if (tag_mode == TAG_SIMPLE) {
699 scsi_change_queue_depth(dev, tqd);
700 tag_suffix = ", simple tags";
701 }
702 else if (tag_mode == TAG_ORDERED) {
703 scsi_change_queue_depth(dev, tqd);
704 tag_suffix = ", ordered tags";
705 }
706 else {
707 scsi_change_queue_depth(dev, tqd);
708 tag_suffix = ", no tags";
709 }
710
711 else if (TLDEV(dev->type) && linked_comm) {
712 scsi_change_queue_depth(dev, tqd);
713 tag_suffix = ", untagged";
714 }
715
716 else {
717 scsi_change_queue_depth(dev, utqd);
718 tag_suffix = "";
719 }
720
721 if (TLDEV(dev->type) && linked_comm && dev->queue_depth > 2)
722 link_suffix = ", sorted";
723 else if (TLDEV(dev->type))
724 link_suffix = ", unsorted";
725 else
726 link_suffix = "";
727
728 sdev_printk(KERN_INFO, dev, "cmds/lun %d%s%s.\n",
729 dev->queue_depth, link_suffix, tag_suffix);
730
731 return FALSE;
732}
733
734static int wait_on_busy(unsigned long iobase, unsigned int loop) {
735
736 while (inb(iobase + REG_LCL_INTR) & BSY_ASSERTED) {
737 udelay(1L);
738 if (--loop == 0) return TRUE;
739 }
740
741 return FALSE;
742}
743
744static int board_inquiry(unsigned int j) {
745 struct mscp *cpp;
746 dma_addr_t id_dma_addr;
747 unsigned int limit = 0;
748 unsigned long time;
749
750 id_dma_addr = pci_map_single(HD(j)->pdev, HD(j)->board_id,
751 sizeof(HD(j)->board_id), PCI_DMA_BIDIRECTIONAL);
752 cpp = &HD(j)->cp[0];
753 cpp->cp_dma_addr = pci_map_single(HD(j)->pdev, cpp, sizeof(struct mscp),
754 PCI_DMA_BIDIRECTIONAL);
755 memset(cpp, 0, sizeof(struct mscp) - CP_TAIL_SIZE);
756 cpp->opcode = OP_HOST_ADAPTER;
757 cpp->xdir = DTD_IN;
758 cpp->data_address = H2DEV(id_dma_addr);
759 cpp->data_len = H2DEV(sizeof(HD(j)->board_id));
760 cpp->cdb_len = 6;
761 cpp->cdb[0] = HA_CMD_INQUIRY;
762
763 if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
764 printk("%s: board_inquiry, adapter busy.\n", BN(j));
765 return TRUE;
766 }
767
768 HD(j)->cp_stat[0] = IGNORE;
769
770 /* Clear the interrupt indication */
771 outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR);
772
773 /* Store pointer in OGM address bytes */
774 outl(H2DEV(cpp->cp_dma_addr), sh[j]->io_port + REG_OGM);
775
776 /* Issue OGM interrupt */
777 outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR);
778
779 spin_unlock_irq(&driver_lock);
780 time = jiffies;
781 while ((jiffies - time) < HZ && limit++ < 20000) udelay(100L);
782 spin_lock_irq(&driver_lock);
783
784 if (cpp->adapter_status || HD(j)->cp_stat[0] != FREE) {
785 HD(j)->cp_stat[0] = FREE;
786 printk("%s: board_inquiry, err 0x%x.\n", BN(j), cpp->adapter_status);
787 return TRUE;
788 }
789
790 pci_unmap_single(HD(j)->pdev, cpp->cp_dma_addr, sizeof(struct mscp),
791 PCI_DMA_BIDIRECTIONAL);
792 pci_unmap_single(HD(j)->pdev, id_dma_addr, sizeof(HD(j)->board_id),
793 PCI_DMA_BIDIRECTIONAL);
794 return FALSE;
795}
796
/*
 * Probe port_base for an UltraStor 14F/34F adapter and, if one responds,
 * register it as board number j with the SCSI mid-layer.
 *
 * Returns TRUE when a board was detected and registered, FALSE otherwise.
 * On any failure the goto chain releases exactly what was acquired so far
 * (DMA channel -> IRQ -> lock/region).  driver_lock is held around the
 * hardware probe and dropped only across scsi_register() and the final
 * per-mailbox mapping/allocation.
 */
static int port_detect \
    (unsigned long port_base, unsigned int j, struct scsi_host_template *tpnt) {
   unsigned char irq, dma_channel, subversion, i;
   unsigned char in_byte;
   char *bus_type, dma_name[16];

   /* Allowed BIOS base addresses (NULL indicates reserved) */
   unsigned long bios_segment_table[8] = {
      0,
      0xc4000, 0xc8000, 0xcc000, 0xd0000,
      0xd4000, 0xd8000, 0xdc000
      };

   /* Allowed IRQs */
   unsigned char interrupt_table[4] = { 15, 14, 11, 10 };

   /* Allowed DMA channels for ISA (0 indicates reserved) */
   unsigned char dma_channel_table[4] = { 5, 6, 7, 0 };

   /* Head/sector mappings */
   struct {
      unsigned char heads;
      unsigned char sectors;
   } mapping_table[4] = {
       { 16, 63 }, { 64, 32 }, { 64, 63 }, { 64, 32 }
       };

   /* Bit layout of the board's first configuration register; the
      endian-specific orderings describe the same byte. */
   struct config_1 {

#if defined(__BIG_ENDIAN_BITFIELD)
      unsigned char dma_channel: 2, interrupt:2,
                    removable_disks_as_fixed:1, bios_segment: 3;
#else
      unsigned char bios_segment: 3, removable_disks_as_fixed: 1,
                    interrupt: 2, dma_channel: 2;
#endif

      } config_1;

   /* Bit layout of the board's second configuration register */
   struct config_2 {

#if defined(__BIG_ENDIAN_BITFIELD)
      unsigned char tfr_port: 2, bios_drive_number: 1,
                    mapping_mode: 2, ha_scsi_id: 3;
#else
      unsigned char ha_scsi_id: 3, mapping_mode: 2,
                    bios_drive_number: 1, tfr_port: 2;
#endif

      } config_2;

   char name[16];

   sprintf(name, "%s%d", driver_name, j);

   if (!request_region(port_base, REGION_SIZE, driver_name)) {
#if defined(DEBUG_DETECT)
      printk("%s: address 0x%03lx in use, skipping probe.\n", name, port_base);
#endif
      goto fail;
      }

   spin_lock_irq(&driver_lock);

   /* Both product-id registers must match before we touch anything else */
   if (inb(port_base + REG_PRODUCT_ID1) != PRODUCT_ID1) goto freelock;

   in_byte = inb(port_base + REG_PRODUCT_ID2);

   if ((in_byte & 0xf0) != PRODUCT_ID2) goto freelock;

   /* Read the raw config bytes into the bitfield overlays above */
   *(char *)&config_1 = inb(port_base + REG_CONFIG1);
   *(char *)&config_2 = inb(port_base + REG_CONFIG2);

   irq = interrupt_table[config_1.interrupt];
   dma_channel = dma_channel_table[config_1.dma_channel];
   subversion = (in_byte & 0x0f);      /* distinguishes ISA (14F) from ESA (34F) */

   /* Board detected, allocate its IRQ */
   if (request_irq(irq, do_interrupt_handler,
             (subversion == ESA) ? IRQF_SHARED : 0,
             driver_name, (void *) &sha[j])) {
      printk("%s: unable to allocate IRQ %u, detaching.\n", name, irq);
      goto freelock;
      }

   if (subversion == ISA && request_dma(dma_channel, driver_name)) {
      printk("%s: unable to allocate DMA channel %u, detaching.\n",
             name, dma_channel);
      goto freeirq;
      }

   if (have_old_firmware) tpnt->use_clustering = DISABLE_CLUSTERING;

   /* scsi_register() may sleep, so drop the spinlock around it */
   spin_unlock_irq(&driver_lock);
   sh[j] = scsi_register(tpnt, sizeof(struct hostdata));
   spin_lock_irq(&driver_lock);

   if (sh[j] == NULL) {
      printk("%s: unable to register host, detaching.\n", name);
      goto freedma;
      }

   sh[j]->io_port = port_base;
   sh[j]->unique_id = port_base;
   sh[j]->n_io_port = REGION_SIZE;
   sh[j]->base = bios_segment_table[config_1.bios_segment];
   sh[j]->irq = irq;
   sh[j]->sg_tablesize = MAX_SGLIST;
   sh[j]->this_id = config_2.ha_scsi_id;
   sh[j]->can_queue = MAX_MAILBOXES;
   sh[j]->cmd_per_lun = MAX_CMD_PER_LUN;

#if defined(DEBUG_DETECT)
   {
   unsigned char sys_mask, lcl_mask;

   sys_mask = inb(sh[j]->io_port + REG_SYS_MASK);
   lcl_mask = inb(sh[j]->io_port + REG_LCL_MASK);
   printk("SYS_MASK 0x%x, LCL_MASK 0x%x.\n", sys_mask, lcl_mask);
   }
#endif

   /* Probably a bogus host scsi id, set it to the dummy value */
   if (sh[j]->this_id == 0) sh[j]->this_id = -1;

   /* If BIOS is disabled, force enable interrupts */
   if (sh[j]->base == 0) outb(CMD_ENA_INTR, sh[j]->io_port + REG_SYS_MASK);

   memset(HD(j), 0, sizeof(struct hostdata));
   HD(j)->heads = mapping_table[config_2.mapping_mode].heads;
   HD(j)->sectors = mapping_table[config_2.mapping_mode].sectors;
   HD(j)->subversion = subversion;
   HD(j)->pdev = NULL;        /* ISA/VESA board: no PCI device */
   HD(j)->board_number = j;

   if (have_old_firmware) sh[j]->sg_tablesize = MAX_SAFE_SGLIST;

   if (HD(j)->subversion == ESA) {
      sh[j]->unchecked_isa_dma = FALSE;
      sh[j]->dma_channel = NO_DMA;
      sprintf(BN(j), "U34F%d", j);
      bus_type = "VESA";
      }
   else {
      unsigned long flags;
      sh[j]->unchecked_isa_dma = TRUE;

      /* Put the ISA DMA controller into cascade mode for bus mastering */
      flags=claim_dma_lock();
      disable_dma(dma_channel);
      clear_dma_ff(dma_channel);
      set_dma_mode(dma_channel, DMA_MODE_CASCADE);
      enable_dma(dma_channel);
      release_dma_lock(flags);

      sh[j]->dma_channel = dma_channel;
      sprintf(BN(j), "U14F%d", j);
      bus_type = "ISA";
      }

   sh[j]->max_channel = MAX_CHANNEL - 1;
   sh[j]->max_id = MAX_TARGET;
   sh[j]->max_lun = MAX_LUN;

   /* On 14F boards, read the firmware revision and fall back to the
      safe settings unless the PROM reports the fixed 28004-006 release */
   if (HD(j)->subversion == ISA && !board_inquiry(j)) {
      HD(j)->board_id[40] = 0;

      if (strcmp(&HD(j)->board_id[32], "06000600")) {
         printk("%s: %s.\n", BN(j), &HD(j)->board_id[8]);
         printk("%s: firmware %s is outdated, FW PROM should be 28004-006.\n",
                BN(j), &HD(j)->board_id[32]);
         sh[j]->hostt->use_clustering = DISABLE_CLUSTERING;
         sh[j]->sg_tablesize = MAX_SAFE_SGLIST;
         }
      }

   if (dma_channel == NO_DMA) sprintf(dma_name, "%s", "BMST");
   else                       sprintf(dma_name, "DMA %u", dma_channel);

   spin_unlock_irq(&driver_lock);

   /* Map every mailbox once; the addresses are reused for the board's
      whole lifetime (pdev is NULL here, i.e. a direct mapping) */
   for (i = 0; i < sh[j]->can_queue; i++)
      HD(j)->cp[i].cp_dma_addr = pci_map_single(HD(j)->pdev,
            &HD(j)->cp[i], sizeof(struct mscp), PCI_DMA_BIDIRECTIONAL);

   for (i = 0; i < sh[j]->can_queue; i++)
      if (! ((&HD(j)->cp[i])->sglist = kmalloc(
                     sh[j]->sg_tablesize * sizeof(struct sg_list),
                     (sh[j]->unchecked_isa_dma ? GFP_DMA : 0) | GFP_ATOMIC))) {
         printk("%s: kmalloc SGlist failed, mbox %d, detaching.\n", BN(j), i);
         goto release;
         }

   /* Clamp the user-supplied queue depth and tag mode to sane values */
   if (max_queue_depth > MAX_TAGGED_CMD_PER_LUN)
       max_queue_depth = MAX_TAGGED_CMD_PER_LUN;

   if (max_queue_depth < MAX_CMD_PER_LUN) max_queue_depth = MAX_CMD_PER_LUN;

   if (tag_mode != TAG_DISABLED && tag_mode != TAG_SIMPLE)
                   tag_mode = TAG_ORDERED;

   if (j == 0) {
      printk("UltraStor 14F/34F: Copyright (C) 1994-2003 Dario Ballabio.\n");
      printk("%s config options -> of:%c, tm:%d, lc:%c, mq:%d, et:%c.\n",
             driver_name, YESNO(have_old_firmware), tag_mode,
             YESNO(linked_comm), max_queue_depth, YESNO(ext_tran));
      }

   printk("%s: %s 0x%03lx, BIOS 0x%05x, IRQ %u, %s, SG %d, MB %d.\n",
          BN(j), bus_type, (unsigned long)sh[j]->io_port, (int)sh[j]->base,
          sh[j]->irq, dma_name, sh[j]->sg_tablesize, sh[j]->can_queue);

   if (sh[j]->max_id > 8 || sh[j]->max_lun > 8)
      printk("%s: wide SCSI support enabled, max_id %u, max_lun %llu.\n",
             BN(j), sh[j]->max_id, sh[j]->max_lun);

   for (i = 0; i <= sh[j]->max_channel; i++)
      printk("%s: SCSI channel %u enabled, host target ID %d.\n",
             BN(j), i, sh[j]->this_id);

   return TRUE;

freedma:
   if (subversion == ISA) free_dma(dma_channel);
freeirq:
   free_irq(irq, &sha[j]);
freelock:
   spin_unlock_irq(&driver_lock);
   release_region(port_base, REGION_SIZE);
fail:
   return FALSE;

release:
   /* Host already registered: let the release path undo everything */
   u14_34f_release(sh[j]);
   return FALSE;
}
1032
1033static void internal_setup(char *str, int *ints) {
1034 int i, argc = ints[0];
1035 char *cur = str, *pc;
1036
1037 if (argc > 0) {
1038
1039 if (argc > MAX_INT_PARAM) argc = MAX_INT_PARAM;
1040
1041 for (i = 0; i < argc; i++) io_port[i] = ints[i + 1];
1042
1043 io_port[i] = 0;
1044 setup_done = TRUE;
1045 }
1046
1047 while (cur && (pc = strchr(cur, ':'))) {
1048 int val = 0, c = *++pc;
1049
1050 if (c == 'n' || c == 'N') val = FALSE;
1051 else if (c == 'y' || c == 'Y') val = TRUE;
1052 else val = (int) simple_strtoul(pc, NULL, 0);
1053
1054 if (!strncmp(cur, "lc:", 3)) linked_comm = val;
1055 else if (!strncmp(cur, "of:", 3)) have_old_firmware = val;
1056 else if (!strncmp(cur, "tm:", 3)) tag_mode = val;
1057 else if (!strncmp(cur, "tc:", 3)) tag_mode = val;
1058 else if (!strncmp(cur, "mq:", 3)) max_queue_depth = val;
1059 else if (!strncmp(cur, "ls:", 3)) link_statistics = val;
1060 else if (!strncmp(cur, "et:", 3)) ext_tran = val;
1061
1062 if ((cur = strchr(cur, ','))) ++cur;
1063 }
1064
1065 return;
1066}
1067
1068static int option_setup(char *str) {
1069 int ints[MAX_INT_PARAM];
1070 char *cur = str;
1071 int i = 1;
1072
1073 while (cur && isdigit(*cur) && i < MAX_INT_PARAM) {
1074 ints[i++] = simple_strtoul(cur, NULL, 0);
1075
1076 if ((cur = strchr(cur, ',')) != NULL) cur++;
1077 }
1078
1079 ints[0] = i - 1;
1080 internal_setup(cur, ints);
1081 return 1;
1082}
1083
1084static int u14_34f_detect(struct scsi_host_template *tpnt) {
1085 unsigned int j = 0, k;
1086
1087 tpnt->proc_name = "u14-34f";
1088
1089 if(strlen(boot_options)) option_setup(boot_options);
1090
1091#if defined(MODULE)
1092 /* io_port could have been modified when loading as a module */
1093 if(io_port[0] != SKIP) {
1094 setup_done = TRUE;
1095 io_port[MAX_INT_PARAM] = 0;
1096 }
1097#endif
1098
1099 for (k = 0; k < MAX_BOARDS + 1; k++) sh[k] = NULL;
1100
1101 for (k = 0; io_port[k]; k++) {
1102
1103 if (io_port[k] == SKIP) continue;
1104
1105 if (j < MAX_BOARDS && port_detect(io_port[k], j, tpnt)) j++;
1106 }
1107
1108 num_boards = j;
1109 return j;
1110}
1111
/*
 * Set up the DMA mappings for the command in mailbox i of board j:
 * sense buffer, data scatter/gather list and the sglist descriptor
 * array itself, recording the bus addresses in the mailbox's mscp.
 * Must be undone by unmap_dma() once the command completes.
 */
static void map_dma(unsigned int i, unsigned int j) {
   unsigned int data_len = 0;
   unsigned int k, pci_dir;
   int count;
   struct scatterlist *sg;
   struct mscp *cpp;
   struct scsi_cmnd *SCpnt;

   cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt;
   pci_dir = SCpnt->sc_data_direction;

   /* Sense data flows from the adapter to the host */
   if (SCpnt->sense_buffer)
      cpp->sense_addr = H2DEV(pci_map_single(HD(j)->pdev, SCpnt->sense_buffer,
                           SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE));

   cpp->sense_len = SCSI_SENSE_BUFFERSIZE;

   if (scsi_bufflen(SCpnt)) {
      count = scsi_dma_map(SCpnt);
      BUG_ON(count < 0);

      /* Translate the mid-layer SG entries into the adapter's format */
      scsi_for_each_sg(SCpnt, sg, count, k) {
         cpp->sglist[k].address = H2DEV(sg_dma_address(sg));
         cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(sg));
         data_len += sg->length;
         }

      cpp->sg = TRUE;
      cpp->use_sg = scsi_sg_count(SCpnt);
      /* The descriptor array itself must also be visible to the board */
      cpp->data_address =
          H2DEV(pci_map_single(HD(j)->pdev, cpp->sglist,
                               cpp->use_sg * sizeof(struct sg_list),
                               pci_dir));
      cpp->data_len = H2DEV(data_len);

      } else {
      /* No data phase: record a zero length (data_address stays 0 from
         the mailbox memset in the caller) */
      pci_dir = PCI_DMA_BIDIRECTIONAL;
      cpp->data_len = H2DEV(scsi_bufflen(SCpnt));
      }
}
1152
1153static void unmap_dma(unsigned int i, unsigned int j) {
1154 unsigned int pci_dir;
1155 struct mscp *cpp;
1156 struct scsi_cmnd *SCpnt;
1157
1158 cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt;
1159 pci_dir = SCpnt->sc_data_direction;
1160
1161 if (DEV2H(cpp->sense_addr))
1162 pci_unmap_single(HD(j)->pdev, DEV2H(cpp->sense_addr),
1163 DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);
1164
1165 scsi_dma_unmap(SCpnt);
1166
1167 if (!DEV2H(cpp->data_len)) pci_dir = PCI_DMA_BIDIRECTIONAL;
1168
1169 if (DEV2H(cpp->data_address))
1170 pci_unmap_single(HD(j)->pdev, DEV2H(cpp->data_address),
1171 DEV2H(cpp->data_len), pci_dir);
1172}
1173
1174static void sync_dma(unsigned int i, unsigned int j) {
1175 unsigned int pci_dir;
1176 struct mscp *cpp;
1177 struct scsi_cmnd *SCpnt;
1178
1179 cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt;
1180 pci_dir = SCpnt->sc_data_direction;
1181
1182 if (DEV2H(cpp->sense_addr))
1183 pci_dma_sync_single_for_cpu(HD(j)->pdev, DEV2H(cpp->sense_addr),
1184 DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);
1185
1186 if (scsi_sg_count(SCpnt))
1187 pci_dma_sync_sg_for_cpu(HD(j)->pdev, scsi_sglist(SCpnt),
1188 scsi_sg_count(SCpnt), pci_dir);
1189
1190 if (!DEV2H(cpp->data_len)) pci_dir = PCI_DMA_BIDIRECTIONAL;
1191
1192 if (DEV2H(cpp->data_address))
1193 pci_dma_sync_single_for_cpu(HD(j)->pdev, DEV2H(cpp->data_address),
1194 DEV2H(cpp->data_len), pci_dir);
1195}
1196
1197static void scsi_to_dev_dir(unsigned int i, unsigned int j) {
1198 unsigned int k;
1199
1200 static const unsigned char data_out_cmds[] = {
1201 0x0a, 0x2a, 0x15, 0x55, 0x04, 0x07, 0x18, 0x1d, 0x24, 0x2e,
1202 0x30, 0x31, 0x32, 0x38, 0x39, 0x3a, 0x3b, 0x3d, 0x3f, 0x40,
1203 0x41, 0x4c, 0xaa, 0xae, 0xb0, 0xb1, 0xb2, 0xb6, 0xea, 0x1b, 0x5d
1204 };
1205
1206 static const unsigned char data_none_cmds[] = {
1207 0x01, 0x0b, 0x10, 0x11, 0x13, 0x16, 0x17, 0x19, 0x2b, 0x1e,
1208 0x2c, 0xac, 0x2f, 0xaf, 0x33, 0xb3, 0x35, 0x36, 0x45, 0x47,
1209 0x48, 0x49, 0xa9, 0x4b, 0xa5, 0xa6, 0xb5, 0x00
1210 };
1211
1212 struct mscp *cpp;
1213 struct scsi_cmnd *SCpnt;
1214
1215 cpp = &HD(j)->cp[i]; SCpnt = cpp->SCpnt;
1216
1217 if (SCpnt->sc_data_direction == DMA_FROM_DEVICE) {
1218 cpp->xdir = DTD_IN;
1219 return;
1220 }
1221 else if (SCpnt->sc_data_direction == DMA_TO_DEVICE) {
1222 cpp->xdir = DTD_OUT;
1223 return;
1224 }
1225 else if (SCpnt->sc_data_direction == DMA_NONE) {
1226 cpp->xdir = DTD_NONE;
1227 return;
1228 }
1229
1230 if (SCpnt->sc_data_direction != DMA_BIDIRECTIONAL)
1231 panic("%s: qcomm, invalid SCpnt->sc_data_direction.\n", BN(j));
1232
1233 cpp->xdir = DTD_IN;
1234
1235 for (k = 0; k < ARRAY_SIZE(data_out_cmds); k++)
1236 if (SCpnt->cmnd[0] == data_out_cmds[k]) {
1237 cpp->xdir = DTD_OUT;
1238 break;
1239 }
1240
1241 if (cpp->xdir == DTD_IN)
1242 for (k = 0; k < ARRAY_SIZE(data_none_cmds); k++)
1243 if (SCpnt->cmnd[0] == data_none_cmds[k]) {
1244 cpp->xdir = DTD_NONE;
1245 break;
1246 }
1247
1248}
1249
/*
 * Queue a SCSI command to the board: find a free mailbox, fill in its
 * mscp, map the DMA buffers, and either post the mailbox to the adapter
 * immediately or (when linked commands are active) mark it READY and let
 * flush_dev() batch-sort and post it.  Returns 0 on success, 1 to make
 * the mid-layer retry (no free mailbox / adapter busy).
 * Called with the host lock held (see DEF_SCSI_QCMD below).
 */
static int u14_34f_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) {
   unsigned int i, j, k;
   struct mscp *cpp;

   /* j is the board number */
   j = ((struct hostdata *) SCpnt->device->host->hostdata)->board_number;

   if (SCpnt->host_scribble)
      panic("%s: qcomm, SCpnt %p already active.\n",
            BN(j), SCpnt);

   /* i is the mailbox number, look for the first free mailbox
      starting from last_cp_used */
   i = HD(j)->last_cp_used + 1;

   for (k = 0; k < sh[j]->can_queue; k++, i++) {

      if (i >= sh[j]->can_queue) i = 0;

      if (HD(j)->cp_stat[i] == FREE) {
         HD(j)->last_cp_used = i;
         break;
         }
      }

   if (k == sh[j]->can_queue) {
      printk("%s: qcomm, no free mailbox.\n", BN(j));
      return 1;
      }

   /* Set pointer to control packet structure */
   cpp = &HD(j)->cp[i];

   /* Clear only the adapter-visible part; the mailbox tail (dma addr,
      sglist pointer) must survive for the board's lifetime */
   memset(cpp, 0, sizeof(struct mscp) - CP_TAIL_SIZE);
   SCpnt->scsi_done = done;
   cpp->cpp_index = i;
   /* host_scribble holds a pointer to the mailbox index so the command
      can be matched back in ihdlr()/eh handlers */
   SCpnt->host_scribble = (unsigned char *) &cpp->cpp_index;

   if (do_trace) printk("%s: qcomm, mbox %d, target %d.%d:%u.\n",
                        BN(j), i, SCpnt->device->channel, SCpnt->device->id,
                        (u8)SCpnt->device->lun);

   cpp->opcode = OP_SCSI;
   cpp->channel = SCpnt->device->channel;
   cpp->target = SCpnt->device->id;
   cpp->lun = (u8)SCpnt->device->lun;
   cpp->SCpnt = SCpnt;
   cpp->cdb_len = SCpnt->cmd_len;
   memcpy(cpp->cdb, SCpnt->cmnd, SCpnt->cmd_len);

   /* Use data transfer direction SCpnt->sc_data_direction */
   scsi_to_dev_dir(i, j);

   /* Map DMA buffers and SG list */
   map_dma(i, j);

   /* Linked-commands path: defer posting, let flush_dev() sort & post */
   if (linked_comm && SCpnt->device->queue_depth > 2
                                     && TLDEV(SCpnt->device->type)) {
      HD(j)->cp_stat[i] = READY;
      flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, FALSE);
      return 0;
      }

   if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
      unmap_dma(i, j);
      SCpnt->host_scribble = NULL;
      scmd_printk(KERN_INFO, SCpnt,
                "qcomm, adapter busy.\n");
      return 1;
      }

   /* Store pointer in OGM address bytes */
   outl(H2DEV(cpp->cp_dma_addr), sh[j]->io_port + REG_OGM);

   /* Issue OGM interrupt */
   outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR);

   HD(j)->cp_stat[i] = IN_USE;
   return 0;
}
1330
/* SCSI mid-layer macro: generates u14_34f_queuecommand(), the locking
   wrapper around u14_34f_queuecommand_lck() above. */
static DEF_SCSI_QCMD(u14_34f_queuecommand)
1332
/*
 * Error-handler abort callback.  Locates the command's mailbox through
 * host_scribble and decides by cp_stat:
 *   - FREE/LOCKED/inactive: nothing to do, report SUCCESS;
 *   - IN_USE: already posted to the adapter, cannot be recalled -> FAILED;
 *   - IN_RESET: a reset owns it -> FAILED;
 *   - READY/ABORTING: never reached the adapter, complete it here with
 *     DID_ABORT and report SUCCESS.
 * Any other state indicates corrupted driver state and panics.
 */
static int u14_34f_eh_abort(struct scsi_cmnd *SCarg) {
   unsigned int i, j;

   j = ((struct hostdata *) SCarg->device->host->hostdata)->board_number;

   if (SCarg->host_scribble == NULL) {
      scmd_printk(KERN_INFO, SCarg, "abort, command inactive.\n");
      return SUCCESS;
      }

   /* host_scribble points at the mailbox index stored in the mscp */
   i = *(unsigned int *)SCarg->host_scribble;
   scmd_printk(KERN_INFO, SCarg, "abort, mbox %d.\n", i);

   if (i >= sh[j]->can_queue)
      panic("%s: abort, invalid SCarg->host_scribble.\n", BN(j));

   if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
      printk("%s: abort, timeout error.\n", BN(j));
      return FAILED;
      }

   if (HD(j)->cp_stat[i] == FREE) {
      printk("%s: abort, mbox %d is free.\n", BN(j), i);
      return SUCCESS;
      }

   if (HD(j)->cp_stat[i] == IN_USE) {
      printk("%s: abort, mbox %d is in use.\n", BN(j), i);

      if (SCarg != HD(j)->cp[i].SCpnt)
         panic("%s: abort, mbox %d, SCarg %p, cp SCpnt %p.\n",
               BN(j), i, SCarg, HD(j)->cp[i].SCpnt);

      if (inb(sh[j]->io_port + REG_SYS_INTR) & IRQ_ASSERTED)
         printk("%s: abort, mbox %d, interrupt pending.\n", BN(j), i);

      return FAILED;
      }

   if (HD(j)->cp_stat[i] == IN_RESET) {
      printk("%s: abort, mbox %d is in reset.\n", BN(j), i);
      return FAILED;
      }

   if (HD(j)->cp_stat[i] == LOCKED) {
      printk("%s: abort, mbox %d is locked.\n", BN(j), i);
      return SUCCESS;
      }

   if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
      unmap_dma(i, j);
      SCarg->result = DID_ABORT << 16;
      SCarg->host_scribble = NULL;
      HD(j)->cp_stat[i] = FREE;
      printk("%s, abort, mbox %d ready, DID_ABORT, done.\n", BN(j), i);
      SCarg->scsi_done(SCarg);
      return SUCCESS;
      }

   panic("%s: abort, mbox %d, invalid cp_stat.\n", BN(j), i);
}
1394
/*
 * Error-handler host reset callback.  Marks every outstanding mailbox
 * either ABORTING (never posted to the adapter) or IN_RESET (posted),
 * issues CMD_RESET to the board, busy-waits up to ~10s with the host
 * lock dropped, then completes the affected commands with DID_RESET.
 * Mailboxes still IN_RESET afterwards become LOCKED: their interrupt
 * may yet arrive and ihdlr() will free them.  Returns SUCCESS, or
 * FAILED if a reset is already in progress or the adapter stays busy.
 */
static int u14_34f_eh_host_reset(struct scsi_cmnd *SCarg) {
   unsigned int i, j, k, c, limit = 0;
   unsigned long time;
   int arg_done = FALSE;
   struct scsi_cmnd *SCpnt;

   j = ((struct hostdata *) SCarg->device->host->hostdata)->board_number;
   scmd_printk(KERN_INFO, SCarg, "reset, enter.\n");

   spin_lock_irq(sh[j]->host_lock);

   if (SCarg->host_scribble == NULL)
      printk("%s: reset, inactive.\n", BN(j));

   if (HD(j)->in_reset) {
      printk("%s: reset, exit, already in reset.\n", BN(j));
      spin_unlock_irq(sh[j]->host_lock);
      return FAILED;
      }

   if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
      printk("%s: reset, exit, timeout error.\n", BN(j));
      spin_unlock_irq(sh[j]->host_lock);
      return FAILED;
      }

   HD(j)->retries = 0;

   /* Force every target to be retried after the bus reset */
   for (c = 0; c <= sh[j]->max_channel; c++)
      for (k = 0; k < sh[j]->max_id; k++) {
         HD(j)->target_redo[k][c] = TRUE;
         HD(j)->target_to[k][c] = 0;
         }

   for (i = 0; i < sh[j]->can_queue; i++) {

      if (HD(j)->cp_stat[i] == FREE) continue;

      if (HD(j)->cp_stat[i] == LOCKED) {
         HD(j)->cp_stat[i] = FREE;
         printk("%s: reset, locked mbox %d forced free.\n", BN(j), i);
         continue;
         }

      if (!(SCpnt = HD(j)->cp[i].SCpnt))
         panic("%s: reset, mbox %d, SCpnt == NULL.\n", BN(j), i);

      if (HD(j)->cp_stat[i] == READY || HD(j)->cp_stat[i] == ABORTING) {
         HD(j)->cp_stat[i] = ABORTING;
         printk("%s: reset, mbox %d aborting.\n", BN(j), i);
         }

      else {
         HD(j)->cp_stat[i] = IN_RESET;
         printk("%s: reset, mbox %d in reset.\n", BN(j), i);
         }

      /* Sanity-check the command/mailbox cross references */
      if (SCpnt->host_scribble == NULL)
         panic("%s: reset, mbox %d, garbled SCpnt.\n", BN(j), i);

      if (*(unsigned int *)SCpnt->host_scribble != i)
         panic("%s: reset, mbox %d, index mismatch.\n", BN(j), i);

      if (SCpnt->scsi_done == NULL)
         panic("%s: reset, mbox %d, SCpnt->scsi_done == NULL.\n", BN(j), i);

      if (SCpnt == SCarg) arg_done = TRUE;
      }

   if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
      printk("%s: reset, cannot reset, timeout error.\n", BN(j));
      spin_unlock_irq(sh[j]->host_lock);
      return FAILED;
      }

   outb(CMD_RESET, sh[j]->io_port + REG_LCL_INTR);
   printk("%s: reset, board reset done, enabling interrupts.\n", BN(j));

#if defined(DEBUG_RESET)
   do_trace = TRUE;
#endif

   HD(j)->in_reset = TRUE;

   /* Give the board time to settle; interrupts may fire meanwhile */
   spin_unlock_irq(sh[j]->host_lock);
   time = jiffies;
   while ((jiffies - time) < (10 * HZ) && limit++ < 200000) udelay(100L);
   spin_lock_irq(sh[j]->host_lock);

   printk("%s: reset, interrupts disabled, loops %d.\n", BN(j), limit);

   for (i = 0; i < sh[j]->can_queue; i++) {

      if (HD(j)->cp_stat[i] == IN_RESET) {
         SCpnt = HD(j)->cp[i].SCpnt;
         unmap_dma(i, j);
         SCpnt->result = DID_RESET << 16;
         SCpnt->host_scribble = NULL;

         /* This mailbox is still waiting for its interrupt */
         HD(j)->cp_stat[i] = LOCKED;

         printk("%s, reset, mbox %d locked, DID_RESET, done.\n", BN(j), i);
         }

      else if (HD(j)->cp_stat[i] == ABORTING) {
         SCpnt = HD(j)->cp[i].SCpnt;
         unmap_dma(i, j);
         SCpnt->result = DID_RESET << 16;
         SCpnt->host_scribble = NULL;

         /* This mailbox was never queued to the adapter */
         HD(j)->cp_stat[i] = FREE;

         printk("%s, reset, mbox %d aborting, DID_RESET, done.\n", BN(j), i);
         }

      else

         /* Any other mailbox has already been set free by interrupt */
         continue;

      SCpnt->scsi_done(SCpnt);
      }

   HD(j)->in_reset = FALSE;
   do_trace = FALSE;

   if (arg_done) printk("%s: reset, exit, done.\n", BN(j));
   else          printk("%s: reset, exit.\n", BN(j));

   spin_unlock_irq(sh[j]->host_lock);
   return SUCCESS;
}
1529
1530static int u14_34f_bios_param(struct scsi_device *disk,
1531 struct block_device *bdev, sector_t capacity, int *dkinfo) {
1532 unsigned int j = 0;
1533 unsigned int size = capacity;
1534
1535 dkinfo[0] = HD(j)->heads;
1536 dkinfo[1] = HD(j)->sectors;
1537 dkinfo[2] = size / (HD(j)->heads * HD(j)->sectors);
1538
1539 if (ext_tran && (scsicam_bios_param(bdev, capacity, dkinfo) < 0)) {
1540 dkinfo[0] = 255;
1541 dkinfo[1] = 63;
1542 dkinfo[2] = size / (dkinfo[0] * dkinfo[1]);
1543 }
1544
1545#if defined (DEBUG_GEOMETRY)
1546 printk ("%s: bios_param, head=%d, sec=%d, cyl=%d.\n", driver_name,
1547 dkinfo[0], dkinfo[1], dkinfo[2]);
1548#endif
1549
1550 return FALSE;
1551}
1552
/*
 * Selection-sort the n sort keys in sk[] (ascending, or descending when
 * rev is nonzero), permuting the companion array da[] identically so
 * each key keeps its associated datum (mailbox index or tag).
 *
 * Fix: with n == 0 the unsigned bound (n - 1) wrapped to UINT_MAX and
 * the loop ran far past both arrays; n < 2 is now an explicit no-op.
 */
static void sort(unsigned long sk[], unsigned int da[], unsigned int n,
                 unsigned int rev) {
   unsigned int i, j, k, y;
   unsigned long x;

   if (n < 2) return;

   for (i = 0; i < n - 1; i++) {
      k = i;

      /* Find the extremum of the unsorted suffix */
      for (j = k + 1; j < n; j++)
         if (rev) {
            if (sk[j] > sk[k]) k = j;
            }
         else {
            if (sk[j] < sk[k]) k = j;
            }

      if (k != i) {
         x = sk[k]; sk[k] = sk[i]; sk[i] = x;
         y = da[k]; da[k] = da[i]; da[i] = y;
         }
      }
}
1577
/*
 * Elevator for the linked-commands path: given the n_ready READY
 * mailboxes listed in il[], decide a seek-minimizing issue order
 * (ascending or descending by start sector relative to cursec) and sort
 * il[] in place accordingly.  If the resulting requests overlap, fall
 * back to issue order by command serial number and return TRUE, telling
 * the caller (flush_dev) to post only one command; otherwise FALSE.
 * Also maintains optional link statistics printed every
 * link_statistics-th flush.
 */
static int reorder(unsigned int j, unsigned long cursec,
                 unsigned int ihdlr, unsigned int il[], unsigned int n_ready) {
   struct scsi_cmnd *SCpnt;
   struct mscp *cpp;
   unsigned int k, n;
   unsigned int rev = FALSE, s = TRUE, r = TRUE;
   unsigned int input_only = TRUE, overlap = FALSE;
   /* VLAs: start sector, serial number, sector count per ready command */
   unsigned long sl[n_ready], pl[n_ready], ll[n_ready];
   unsigned long maxsec = 0, minsec = ULONG_MAX, seek = 0, iseek = 0;
   unsigned long ioseek = 0;

   static unsigned int flushcount = 0, batchcount = 0, sortcount = 0;
   static unsigned int readycount = 0, ovlcount = 0, inputcount = 0;
   static unsigned int readysorted = 0, revcount = 0;
   static unsigned long seeksorted = 0, seeknosort = 0;

   if (link_statistics && !(++flushcount % link_statistics))
      printk("fc %d bc %d ic %d oc %d rc %d rs %d sc %d re %d"\
             " av %ldK as %ldK.\n", flushcount, batchcount, inputcount,
             ovlcount, readycount, readysorted, sortcount, revcount,
             seeknosort / (readycount + 1),
             seeksorted / (readycount + 1));

   if (n_ready <= 1) return FALSE;

   /* First pass: collect start sectors, detect whether the list is
      already sorted (s: ascending, r: descending) and track extremes */
   for (n = 0; n < n_ready; n++) {
      k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;

      if (!(cpp->xdir == DTD_IN)) input_only = FALSE;

      if (blk_rq_pos(SCpnt->request) < minsec)
         minsec = blk_rq_pos(SCpnt->request);
      if (blk_rq_pos(SCpnt->request) > maxsec)
         maxsec = blk_rq_pos(SCpnt->request);

      sl[n] = blk_rq_pos(SCpnt->request);
      ioseek += blk_rq_sectors(SCpnt->request);

      if (!n) continue;

      if (sl[n] < sl[n - 1]) s = FALSE;
      if (sl[n] > sl[n - 1]) r = FALSE;

      if (link_statistics) {
         if (sl[n] > sl[n - 1])
            seek += sl[n] - sl[n - 1];
         else
            seek += sl[n - 1] - sl[n];
         }

      }

   if (link_statistics) {
      if (cursec > sl[0]) seek += cursec - sl[0]; else seek += sl[0] - cursec;
      }

   /* Sweep downward when the head sits past the midpoint, unless the
      batch is large enough that direction hardly matters */
   if (cursec > ((maxsec + minsec) / 2)) rev = TRUE;

   if (ioseek > ((maxsec - minsec) / 2)) rev = FALSE;

   if (!((rev && r) || (!rev && s))) sort(sl, il, n_ready, rev);

   /* Second pass (writes present): check for overlapping extents */
   if (!input_only) for (n = 0; n < n_ready; n++) {
      k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
      ll[n] = blk_rq_sectors(SCpnt->request); pl[n] = SCpnt->serial_number;

      if (!n) continue;

      if ((sl[n] == sl[n - 1]) || (!rev && ((sl[n - 1] + ll[n - 1]) > sl[n]))
          || (rev && ((sl[n] + ll[n]) > sl[n - 1]))) overlap = TRUE;
      }

   /* Overlaps: restore submission order (by serial number) for safety */
   if (overlap) sort(pl, il, n_ready, FALSE);

   if (link_statistics) {
      if (cursec > sl[0]) iseek = cursec - sl[0]; else iseek = sl[0] - cursec;
      batchcount++; readycount += n_ready; seeknosort += seek / 1024;
      if (input_only) inputcount++;
      if (overlap) { ovlcount++; seeksorted += iseek / 1024; }
      else seeksorted += (iseek + maxsec - minsec) / 1024;
      if (rev && !r)  { revcount++;  readysorted += n_ready; }
      if (!rev && !s) { sortcount++; readysorted += n_ready; }
      }

#if defined(DEBUG_LINKED_COMMANDS)
   if (link_statistics && (overlap || !(flushcount % link_statistics)))
      for (n = 0; n < n_ready; n++) {
         k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
         printk("%s %d.%d:%llu mb %d fc %d nr %d sec %ld ns %u"\
                " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
                (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target,
                (u8)SCpnt->lun, k, flushcount, n_ready,
                blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request),
                cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
                YESNO(overlap), cpp->xdir);
         }
#endif
   return overlap;
}
1677
/*
 * Post the READY mailboxes of device dev to the adapter, in the order
 * chosen by reorder().  If any mailbox for this device is still IN_USE
 * the adapter is busy with it, so posting is deferred until its
 * completion re-enters here from ihdlr().  When reorder() reports
 * overlapping requests, only the first command is issued.  A mailbox
 * whose OGM post times out is downgraded to ABORTING for the error
 * handler to clean up.
 */
static void flush_dev(struct scsi_device *dev, unsigned long cursec, unsigned int j,
                      unsigned int ihdlr) {
   struct scsi_cmnd *SCpnt;
   struct mscp *cpp;
   unsigned int k, n, n_ready = 0, il[MAX_MAILBOXES];

   /* Collect this device's READY mailboxes; bail out if any is IN_USE */
   for (k = 0; k < sh[j]->can_queue; k++) {

      if (HD(j)->cp_stat[k] != READY && HD(j)->cp_stat[k] != IN_USE) continue;

      cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;

      if (SCpnt->device != dev) continue;

      if (HD(j)->cp_stat[k] == IN_USE) return;

      il[n_ready++] = k;
      }

   if (reorder(j, cursec, ihdlr, il, n_ready)) n_ready = 1;

   for (n = 0; n < n_ready; n++) {
      k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;

      if (wait_on_busy(sh[j]->io_port, MAXLOOP)) {
         scmd_printk(KERN_INFO, SCpnt,
	 	"%s, mbox %d, adapter"
                " busy, will abort.\n", (ihdlr ? "ihdlr" : "qcomm"),
                k);
         HD(j)->cp_stat[k] = ABORTING;
         continue;
         }

      /* Post the mailbox address to the OGM and ring the doorbell */
      outl(H2DEV(cpp->cp_dma_addr), sh[j]->io_port + REG_OGM);
      outb(CMD_OGM_INTR, sh[j]->io_port + REG_LCL_INTR);
      HD(j)->cp_stat[k] = IN_USE;
      }

}
1717
1718static irqreturn_t ihdlr(unsigned int j)
1719{
1720 struct scsi_cmnd *SCpnt;
1721 unsigned int i, k, c, status, tstatus, reg, ret;
1722 struct mscp *spp, *cpp;
1723 int irq = sh[j]->irq;
1724
1725 /* Check if this board need to be serviced */
1726 if (!((reg = inb(sh[j]->io_port + REG_SYS_INTR)) & IRQ_ASSERTED)) goto none;
1727
1728 HD(j)->iocount++;
1729
1730 if (do_trace) printk("%s: ihdlr, enter, irq %d, count %d.\n", BN(j), irq,
1731 HD(j)->iocount);
1732
1733 /* Check if this board is still busy */
1734 if (wait_on_busy(sh[j]->io_port, 20 * MAXLOOP)) {
1735 outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR);
1736 printk("%s: ihdlr, busy timeout error, irq %d, reg 0x%x, count %d.\n",
1737 BN(j), irq, reg, HD(j)->iocount);
1738 goto none;
1739 }
1740
1741 ret = inl(sh[j]->io_port + REG_ICM);
1742
1743 /* Clear interrupt pending flag */
1744 outb(CMD_CLR_INTR, sh[j]->io_port + REG_SYS_INTR);
1745
1746 /* Find the mailbox to be serviced on this board */
1747 for (i = 0; i < sh[j]->can_queue; i++)
1748 if (H2DEV(HD(j)->cp[i].cp_dma_addr) == ret) break;
1749
1750 if (i >= sh[j]->can_queue)
1751 panic("%s: ihdlr, invalid mscp bus address %p, cp0 %p.\n", BN(j),
1752 (void *)ret, (void *)H2DEV(HD(j)->cp[0].cp_dma_addr));
1753
1754 cpp = &(HD(j)->cp[i]);
1755 spp = cpp;
1756
1757#if defined(DEBUG_GENERATE_ABORTS)
1758 if ((HD(j)->iocount > 500) && ((HD(j)->iocount % 500) < 3)) goto handled;
1759#endif
1760
1761 if (HD(j)->cp_stat[i] == IGNORE) {
1762 HD(j)->cp_stat[i] = FREE;
1763 goto handled;
1764 }
1765 else if (HD(j)->cp_stat[i] == LOCKED) {
1766 HD(j)->cp_stat[i] = FREE;
1767 printk("%s: ihdlr, mbox %d unlocked, count %d.\n", BN(j), i,
1768 HD(j)->iocount);
1769 goto handled;
1770 }
1771 else if (HD(j)->cp_stat[i] == FREE) {
1772 printk("%s: ihdlr, mbox %d is free, count %d.\n", BN(j), i,
1773 HD(j)->iocount);
1774 goto handled;
1775 }
1776 else if (HD(j)->cp_stat[i] == IN_RESET)
1777 printk("%s: ihdlr, mbox %d is in reset.\n", BN(j), i);
1778 else if (HD(j)->cp_stat[i] != IN_USE)
1779 panic("%s: ihdlr, mbox %d, invalid cp_stat: %d.\n",
1780 BN(j), i, HD(j)->cp_stat[i]);
1781
1782 HD(j)->cp_stat[i] = FREE;
1783 SCpnt = cpp->SCpnt;
1784
1785 if (SCpnt == NULL) panic("%s: ihdlr, mbox %d, SCpnt == NULL.\n", BN(j), i);
1786
1787 if (SCpnt->host_scribble == NULL)
1788 panic("%s: ihdlr, mbox %d, SCpnt %p garbled.\n", BN(j), i,
1789 SCpnt);
1790
1791 if (*(unsigned int *)SCpnt->host_scribble != i)
1792 panic("%s: ihdlr, mbox %d, index mismatch %d.\n",
1793 BN(j), i, *(unsigned int *)SCpnt->host_scribble);
1794
1795 sync_dma(i, j);
1796
1797 if (linked_comm && SCpnt->device->queue_depth > 2
1798 && TLDEV(SCpnt->device->type))
1799 flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, TRUE);
1800
1801 tstatus = status_byte(spp->target_status);
1802
1803#if defined(DEBUG_GENERATE_ERRORS)
1804 if ((HD(j)->iocount > 500) && ((HD(j)->iocount % 200) < 2))
1805 spp->adapter_status = 0x01;
1806#endif
1807
1808 switch (spp->adapter_status) {
1809 case ASOK: /* status OK */
1810
1811 /* Forces a reset if a disk drive keeps returning BUSY */
1812 if (tstatus == BUSY && SCpnt->device->type != TYPE_TAPE)
1813 status = DID_ERROR << 16;
1814
1815 /* If there was a bus reset, redo operation on each target */
1816 else if (tstatus != GOOD && SCpnt->device->type == TYPE_DISK
1817 && HD(j)->target_redo[scmd_id(SCpnt)][scmd_channel(SCpnt)])
1818 status = DID_BUS_BUSY << 16;
1819
1820 /* Works around a flaw in scsi.c */
1821 else if (tstatus == CHECK_CONDITION
1822 && SCpnt->device->type == TYPE_DISK
1823 && (SCpnt->sense_buffer[2] & 0xf) == RECOVERED_ERROR)
1824 status = DID_BUS_BUSY << 16;
1825
1826 else
1827 status = DID_OK << 16;
1828
1829 if (tstatus == GOOD)
1830 HD(j)->target_redo[scmd_id(SCpnt)][scmd_channel(SCpnt)] = FALSE;
1831
1832 if (spp->target_status && SCpnt->device->type == TYPE_DISK &&
1833 (!(tstatus == CHECK_CONDITION && HD(j)->iocount <= 1000 &&
1834 (SCpnt->sense_buffer[2] & 0xf) == NOT_READY)))
1835 scmd_printk(KERN_INFO, SCpnt,
1836 "ihdlr, target_status 0x%x, sense key 0x%x.\n",
1837 spp->target_status,
1838 SCpnt->sense_buffer[2]);
1839
1840 HD(j)->target_to[scmd_id(SCpnt)][scmd_channel(SCpnt)] = 0;
1841
1842 if (HD(j)->last_retried_pid == SCpnt->serial_number) HD(j)->retries = 0;
1843
1844 break;
1845 case ASST: /* Selection Time Out */
1846
1847 if (HD(j)->target_to[scmd_id(SCpnt)][scmd_channel(SCpnt)] > 1)
1848 status = DID_ERROR << 16;
1849 else {
1850 status = DID_TIME_OUT << 16;
1851 HD(j)->target_to[scmd_id(SCpnt)][scmd_channel(SCpnt)]++;
1852 }
1853
1854 break;
1855
1856 /* Perform a limited number of internal retries */
1857 case 0x93: /* Unexpected bus free */
1858 case 0x94: /* Target bus phase sequence failure */
1859 case 0x96: /* Illegal SCSI command */
1860 case 0xa3: /* SCSI bus reset error */
1861
1862 for (c = 0; c <= sh[j]->max_channel; c++)
1863 for (k = 0; k < sh[j]->max_id; k++)
1864 HD(j)->target_redo[k][c] = TRUE;
1865
1866
1867 case 0x92: /* Data over/under-run */
1868
1869 if (SCpnt->device->type != TYPE_TAPE
1870 && HD(j)->retries < MAX_INTERNAL_RETRIES) {
1871
1872#if defined(DID_SOFT_ERROR)
1873 status = DID_SOFT_ERROR << 16;
1874#else
1875 status = DID_BUS_BUSY << 16;
1876#endif
1877
1878 HD(j)->retries++;
1879 HD(j)->last_retried_pid = SCpnt->serial_number;
1880 }
1881 else
1882 status = DID_ERROR << 16;
1883
1884 break;
1885 case 0x01: /* Invalid command */
1886 case 0x02: /* Invalid parameters */
1887 case 0x03: /* Invalid data list */
1888 case 0x84: /* SCSI bus abort error */
1889 case 0x9b: /* Auto request sense error */
1890 case 0x9f: /* Unexpected command complete message error */
1891 case 0xff: /* Invalid parameter in the S/G list */
1892 default:
1893 status = DID_ERROR << 16;
1894 break;
1895 }
1896
1897 SCpnt->result = status | spp->target_status;
1898
1899#if defined(DEBUG_INTERRUPT)
1900 if (SCpnt->result || do_trace)
1901#else
1902 if ((spp->adapter_status != ASOK && HD(j)->iocount > 1000) ||
1903 (spp->adapter_status != ASOK &&
1904 spp->adapter_status != ASST && HD(j)->iocount <= 1000) ||
1905 do_trace || msg_byte(spp->target_status))
1906#endif
1907 scmd_printk(KERN_INFO, SCpnt, "ihdlr, mbox %2d, err 0x%x:%x,"\
1908 " reg 0x%x, count %d.\n",
1909 i, spp->adapter_status, spp->target_status,
1910 reg, HD(j)->iocount);
1911
1912 unmap_dma(i, j);
1913
1914 /* Set the command state to inactive */
1915 SCpnt->host_scribble = NULL;
1916
1917 SCpnt->scsi_done(SCpnt);
1918
1919 if (do_trace) printk("%s: ihdlr, exit, irq %d, count %d.\n", BN(j), irq,
1920 HD(j)->iocount);
1921
1922handled:
1923 return IRQ_HANDLED;
1924none:
1925 return IRQ_NONE;
1926}
1927
1928static irqreturn_t do_interrupt_handler(int irq, void *shap) {
1929 unsigned int j;
1930 unsigned long spin_flags;
1931 irqreturn_t ret;
1932
1933 /* Check if the interrupt must be processed by this handler */
1934 if ((j = (unsigned int)((char *)shap - sha)) >= num_boards) return IRQ_NONE;
1935
1936 spin_lock_irqsave(sh[j]->host_lock, spin_flags);
1937 ret = ihdlr(j);
1938 spin_unlock_irqrestore(sh[j]->host_lock, spin_flags);
1939 return ret;
1940}
1941
/* Tear down one adapter instance: free each mailbox's scatter/gather
   list, undo the per-mscp DMA mappings, then release IRQ, DMA channel
   and I/O region and unregister the host.  Always returns FALSE
   (this driver's convention for release).
   NOTE(review): the lookup loop relies on sh[] being NULL-terminated;
   if shpnt were absent AND no NULL sentinel existed the scan would run
   off the array -- confirm sh[] always ends in NULL. */
static int u14_34f_release(struct Scsi_Host *shpnt) {
   unsigned int i, j;

   /* Find the board index belonging to this Scsi_Host. */
   for (j = 0; sh[j] != NULL && sh[j] != shpnt; j++);

   if (sh[j] == NULL)
      panic("%s: release, invalid Scsi_Host pointer.\n", driver_name);

   /* Free the software scatter/gather lists first... */
   for (i = 0; i < sh[j]->can_queue; i++)
      kfree((&HD(j)->cp[i])->sglist);

   /* ...then unmap the DMA-mapped mscp structures. */
   for (i = 0; i < sh[j]->can_queue; i++)
      pci_unmap_single(HD(j)->pdev, HD(j)->cp[i].cp_dma_addr,
                       sizeof(struct mscp), PCI_DMA_BIDIRECTIONAL);

   free_irq(sh[j]->irq, &sha[j]);

   if (sh[j]->dma_channel != NO_DMA)
      free_dma(sh[j]->dma_channel);

   release_region(sh[j]->io_port, sh[j]->n_io_port);
   scsi_unregister(sh[j]);
   return FALSE;
}
1966
1967#include "scsi_module.c"
1968
1969#ifndef MODULE
1970__setup("u14-34f=", option_setup);
1971#endif /* end MODULE */
diff --git a/drivers/scsi/ufs/tc-dwc-g210.c b/drivers/scsi/ufs/tc-dwc-g210.c
index 70db6d999ca3..dc03e47f7c58 100644
--- a/drivers/scsi/ufs/tc-dwc-g210.c
+++ b/drivers/scsi/ufs/tc-dwc-g210.c
@@ -15,6 +15,7 @@
15 15
16#include "ufshcd-dwc.h" 16#include "ufshcd-dwc.h"
17#include "ufshci-dwc.h" 17#include "ufshci-dwc.h"
18#include "tc-dwc-g210.h"
18 19
19/** 20/**
20 * tc_dwc_g210_setup_40bit_rmmi() 21 * tc_dwc_g210_setup_40bit_rmmi()
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index b291fa6ed2ad..845b874e2977 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -327,6 +327,7 @@ enum {
327 MASK_QUERY_DATA_SEG_LEN = 0xFFFF, 327 MASK_QUERY_DATA_SEG_LEN = 0xFFFF,
328 MASK_RSP_UPIU_DATA_SEG_LEN = 0xFFFF, 328 MASK_RSP_UPIU_DATA_SEG_LEN = 0xFFFF,
329 MASK_RSP_EXCEPTION_EVENT = 0x10000, 329 MASK_RSP_EXCEPTION_EVENT = 0x10000,
330 MASK_TM_SERVICE_RESP = 0xFF,
330}; 331};
331 332
332/* Task management service response */ 333/* Task management service response */
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index f08d41a2d70b..37f3c51e9d92 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -2568,7 +2568,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
2568 status = ufshcd_get_upmcrs(hba); 2568 status = ufshcd_get_upmcrs(hba);
2569 if (status != PWR_LOCAL) { 2569 if (status != PWR_LOCAL) {
2570 dev_err(hba->dev, 2570 dev_err(hba->dev,
2571 "pwr ctrl cmd 0x%0x failed, host umpcrs:0x%x\n", 2571 "pwr ctrl cmd 0x%0x failed, host upmcrs:0x%x\n",
2572 cmd->command, status); 2572 cmd->command, status);
2573 ret = (status != PWR_OK) ? status : -1; 2573 ret = (status != PWR_OK) ? status : -1;
2574 } 2574 }
@@ -3364,8 +3364,8 @@ static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
3364 if (ocs_value == OCS_SUCCESS) { 3364 if (ocs_value == OCS_SUCCESS) {
3365 task_rsp_upiup = (struct utp_upiu_task_rsp *) 3365 task_rsp_upiup = (struct utp_upiu_task_rsp *)
3366 task_req_descp[index].task_rsp_upiu; 3366 task_req_descp[index].task_rsp_upiu;
3367 task_result = be32_to_cpu(task_rsp_upiup->header.dword_1); 3367 task_result = be32_to_cpu(task_rsp_upiup->output_param1);
3368 task_result = ((task_result & MASK_TASK_RESPONSE) >> 8); 3368 task_result = task_result & MASK_TM_SERVICE_RESP;
3369 if (resp) 3369 if (resp)
3370 *resp = (u8)task_result; 3370 *resp = (u8)task_result;
3371 } else { 3371 } else {
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
deleted file mode 100644
index 14e0c40a68c9..000000000000
--- a/drivers/scsi/ultrastor.c
+++ /dev/null
@@ -1,1210 +0,0 @@
1/*
2 * ultrastor.c Copyright (C) 1992 David B. Gentzel
3 * Low-level SCSI driver for UltraStor 14F, 24F, and 34F
4 * by David B. Gentzel, Whitfield Software Services, Carnegie, PA
5 * (gentzel@nova.enet.dec.com)
6 * scatter/gather added by Scott Taylor (n217cg@tamuts.tamu.edu)
7 * 24F and multiple command support by John F. Carr (jfc@athena.mit.edu)
8 * John's work modified by Caleb Epstein (cae@jpmorgan.com) and
9 * Eric Youngdale (ericy@cais.com).
10 * Thanks to UltraStor for providing the necessary documentation
11 *
12 * This is an old driver, for the 14F and 34F you should be using the
13 * u14-34f driver instead.
14 */
15
16/*
17 * TODO:
18 * 1. Find out why scatter/gather is limited to 16 requests per command.
19 * This is fixed, at least on the 24F, as of version 1.12 - CAE.
20 * 2. Look at command linking (mscp.command_link and
21 * mscp.command_link_id). (Does not work with many disks,
22 * and no performance increase. ERY).
23 * 3. Allow multiple adapters.
24 */
25
26/*
27 * NOTES:
28 * The UltraStor 14F, 24F, and 34F are a family of intelligent, high
29 * performance SCSI-2 host adapters. They all support command queueing
30 * and scatter/gather I/O. Some of them can also emulate the standard
31 * WD1003 interface for use with OS's which don't support SCSI. Here
32 * is the scoop on the various models:
33 * 14F - ISA first-party DMA HA with floppy support and WD1003 emulation.
34 * 14N - ISA HA with floppy support. I think that this is a non-DMA
35 * HA. Nothing further known.
36 * 24F - EISA Bus Master HA with floppy support and WD1003 emulation.
37 * 34F - VL-Bus Bus Master HA with floppy support (no WD1003 emulation).
38 *
39 * The 14F, 24F, and 34F are supported by this driver.
40 *
41 * Places flagged with a triple question-mark are things which are either
42 * unfinished, questionable, or wrong.
43 */
44
45/* Changes from version 1.11 alpha to 1.12
46 *
47 * Increased the size of the scatter-gather list to 33 entries for
48 * the 24F adapter (it was 16). I don't have the specs for the 14F
49 * or the 34F, so they may support larger s-g lists as well.
50 *
51 * Caleb Epstein <cae@jpmorgan.com>
52 */
53
54/* Changes from version 1.9 to 1.11
55 *
56 * Patches to bring this driver up to speed with the default kernel
57 * driver which supports only the 14F and 34F adapters. This version
58 * should compile cleanly into 0.99.13, 0.99.12 and probably 0.99.11.
59 *
60 * Fixes from Eric Youngdale to fix a few possible race conditions and
61 * several problems with bit testing operations (insufficient
62 * parentheses).
63 *
64 * Removed the ultrastor_abort() and ultrastor_reset() functions
65 * (enclosed them in #if 0 / #endif). These functions, at least on
66 * the 24F, cause the SCSI bus to do odd things and generally lead to
67 * kernel panics and machine hangs. This is like the Adaptec code.
68 *
69 * Use check/snarf_region for 14f, 34f to avoid I/O space address conflicts.
70 */
71
72/* Changes from version 1.8 to version 1.9
73 *
74 * 0.99.11 patches (cae@jpmorgan.com) */
75
76/* Changes from version 1.7 to version 1.8
77 *
78 * Better error reporting.
79 */
80
81/* Changes from version 1.6 to version 1.7
82 *
83 * Removed CSIR command code.
84 *
85 * Better race condition avoidance (xchgb function added).
86 *
87 * Set ICM and OGM status to zero at probe (24F)
88 *
89 * reset sends soft reset to UltraStor adapter
90 *
91 * reset adapter if adapter interrupts with an invalid MSCP address
92 *
93 * handle aborted command interrupt (24F)
94 *
95 */
96
97/* Changes from version 1.5 to version 1.6:
98 *
99 * Read MSCP address from ICM _before_ clearing the interrupt flag.
100 * This fixes a race condition.
101 */
102
103/* Changes from version 1.4 to version 1.5:
104 *
105 * Abort now calls done when multiple commands are enabled.
106 *
107 * Clear busy when aborted command finishes, not when abort is called.
108 *
109 * More debugging messages for aborts.
110 */
111
112/* Changes from version 1.3 to version 1.4:
113 *
114 * Enable automatic request of sense data on error (requires newer version
115 * of scsi.c to be useful).
116 *
117 * Fix PORT_OVERRIDE for 14F.
118 *
119 * Fix abort and reset to work properly (config.aborted wasn't cleared
120 * after it was tested, so after a command abort no further commands would
121 * work).
122 *
123 * Boot time test to enable SCSI bus reset (defaults to not allowing reset).
124 *
125 * Fix test for OGM busy -- the busy bit is in different places on the 24F.
126 *
127 * Release ICM slot by clearing first byte on 24F.
128 */
129
130#include <linux/module.h>
131#include <linux/blkdev.h>
132#include <linux/interrupt.h>
133#include <linux/stddef.h>
134#include <linux/string.h>
135#include <linux/kernel.h>
136#include <linux/ioport.h>
137#include <linux/proc_fs.h>
138#include <linux/spinlock.h>
139#include <linux/stat.h>
140#include <linux/bitops.h>
141#include <linux/delay.h>
142
143#include <asm/io.h>
144#include <asm/dma.h>
145
146#define ULTRASTOR_PRIVATE /* Get the private stuff from ultrastor.h */
147#include "scsi.h"
148#include <scsi/scsi_host.h>
149#include "ultrastor.h"
150
151#define FALSE 0
152#define TRUE 1
153
154#ifndef ULTRASTOR_DEBUG
155#define ULTRASTOR_DEBUG (UD_ABORT|UD_CSIR|UD_RESET)
156#endif
157
158#define VERSION "1.12"
159
160#define PACKED __attribute__((packed))
161#define ALIGNED(x) __attribute__((aligned(x)))
162
163
164/* The 14F uses an array of 4-byte ints for its scatter/gather list.
165 The data can be unaligned, but need not be. It's easier to give
166 the list normal alignment since it doesn't need to fit into a
167 packed structure. */
168
typedef struct {
    u32 address;       /* 32-bit bus address of the segment */
    u32 num_bytes;     /* segment length in bytes */
} ultrastor_sg_list;
173
174
175/* MailBox SCSI Command Packet. Basic command structure for communicating
176 with controller. */
struct mscp {
    unsigned char opcode: 3;		/* type of command */
    unsigned char xdir: 2;		/* data transfer direction */
    unsigned char dcn: 1;		/* disable disconnect */
    unsigned char ca: 1;		/* use cache (if available) */
    unsigned char sg: 1;		/* scatter/gather operation */
    unsigned char target_id: 3;		/* target SCSI id */
    unsigned char ch_no: 2;		/* SCSI channel (always 0 for 14f) */
    unsigned char lun: 3;		/* logical unit number */
    unsigned int transfer_data PACKED;		/* transfer data pointer */
    unsigned int transfer_data_length PACKED;	/* length in bytes */
    unsigned int command_link PACKED;		/* for linking command chains */
    unsigned char scsi_command_link_id;	/* identifies command in chain */
    unsigned char number_of_sg_list;	/* (if sg is set) 8 bytes per list */
    unsigned char length_of_sense_byte;	/* size of the sense buffer */
    unsigned char length_of_scsi_cdbs;	/* 6, 10, or 12 */
    unsigned char scsi_cdbs[12];	/* SCSI commands */
    unsigned char adapter_status;	/* non-zero indicates HA error */
    unsigned char target_status;	/* non-zero indicates target error */
    u32 sense_data PACKED;		/* bus address of the sense buffer */
    /* The following fields are for software only.  They are included in
       the MSCP structure because they are associated with SCSI requests.
       The adapter never reads past sense_data. */
    void (*done) (struct scsi_cmnd *);	/* mid-layer completion callback */
    struct scsi_cmnd *SCint;		/* owning SCSI command */
    ultrastor_sg_list sglist[ULTRASTOR_24F_MAX_SG]; /* use larger size for 24F */
};
203
204
205/* Port addresses (relative to the base address) */
206#define U14F_PRODUCT_ID(port) ((port) + 0x4)
207#define CONFIG(port) ((port) + 0x6)
208
209/* Port addresses relative to the doorbell base address. */
210#define LCL_DOORBELL_MASK(port) ((port) + 0x0)
211#define LCL_DOORBELL_INTR(port) ((port) + 0x1)
212#define SYS_DOORBELL_MASK(port) ((port) + 0x2)
213#define SYS_DOORBELL_INTR(port) ((port) + 0x3)
214
215
216/* Used to store configuration info read from config i/o registers. Most of
217 this is not used yet, but might as well save it.
218
219 This structure also holds port addresses that are not at the same offset
220 on the 14F and 24F.
221
222 This structure holds all data that must be duplicated to support multiple
223 adapters. */
224
/* Single-adapter configuration state (this driver supports one adapter). */
static struct ultrastor_config
{
  unsigned short port_address;		/* base address of card */
  unsigned short doorbell_address;	/* base address of doorbell CSRs */
  unsigned short ogm_address;		/* base address of OGM (outgoing mail) */
  unsigned short icm_address;		/* base address of ICM (incoming mail) */
  const void *bios_segment;		/* BIOS base; also "adapter present" flag */
  unsigned char interrupt: 4;
  unsigned char dma_channel: 3;		/* 0 on 34F/24F (no ISA DMA) */
  unsigned char bios_drive_number: 1;
  unsigned char heads;			/* BIOS geometry mapping */
  unsigned char sectors;
  unsigned char ha_scsi_id: 3;
  unsigned char subversion: 4;		/* distinguishes 14F from 34F */
  unsigned char revision;
  /* The slot number is used to distinguish the 24F (slot != 0) from
     the 14F and 34F (slot == 0). */
  unsigned char slot;

#ifdef PRINT_U24F_VERSION
  volatile int csir_done;
#endif

  /* A pool of MSCP structures for this adapter, and a bitmask of
     busy structures.  (If ULTRASTOR_14F_MAX_CMDS == 1, a 1 byte
     busy flag is used instead.) */

#if ULTRASTOR_MAX_CMDS == 1
  unsigned char mscp_busy;
#else
  unsigned long mscp_free;		/* bit i set => mscp[i] is free */
#endif
  volatile unsigned char aborted[ULTRASTOR_MAX_CMDS];
  struct mscp mscp[ULTRASTOR_MAX_CMDS];
} config = {0};
260
/* Set this to 1 to reset the SCSI bus on error. */
static int ultrastor_bus_reset;


/* Allowed BIOS base addresses (NULL indicates reserved) */
static const void *const bios_segment_table[8] = {
  NULL, (void *)0xC4000, (void *)0xC8000, (void *)0xCC000,
  (void *)0xD0000, (void *)0xD4000, (void *)0xD8000, (void *)0xDC000,
};

/* Allowed IRQs for 14f, indexed by the 2-bit config register field */
static const unsigned char interrupt_table_14f[4] = { 15, 14, 11, 10 };

/* Allowed DMA channels for 14f (0 indicates reserved) */
static const unsigned char dma_channel_table_14f[4] = { 5, 6, 7, 0 };

/* Head/sector mappings allowed by 14f, indexed by mapping_mode */
static const struct {
  unsigned char heads;
  unsigned char sectors;
} mapping_table[4] = { { 16, 63 }, { 64, 32 }, { 64, 63 }, { 64, 32 } };

#ifndef PORT_OVERRIDE
/* ??? A probe of address 0x310 screws up NE2000 cards */
static const unsigned short ultrastor_ports_14f[] = {
  0x330, 0x340, /*0x310,*/ 0x230, 0x240, 0x210, 0x130, 0x140,
};
#endif
289
290static void ultrastor_interrupt(void *);
291static irqreturn_t do_ultrastor_interrupt(int, void *);
292static inline void build_sg_list(struct mscp *, struct scsi_cmnd *SCpnt);
293
294
295/* Always called with host lock held */
296
/* Find the lowest set bit in the 16-bit free-mscp mask, clear it, and
   return its index; panics when no bit is set (no free MSCP).
   Atomic only w.r.t. the host lock, which callers must hold.
   NOTE(review): i386-specific inline asm (bsfw/btr); the btr/jnc retry
   loop re-scans if the bit was cleared between bsfw and btr. */
static inline int find_and_clear_bit_16(unsigned long *field)
{
  int rv;

  if (*field == 0)
    panic("No free mscp");

  asm volatile (
	"xorl %0,%0\n\t"
	"0: bsfw %1,%w0\n\t"
	"btr %0,%1\n\t"
	"jnc 0b"
	: "=&r" (rv), "+m" (*field) :);

  return rv;
}
313
314/* This has been re-implemented with the help of Richard Earnshaw,
315 <rwe@pegasus.esprit.ec.org> and works with gcc-2.5.8 and gcc-2.6.0.
316 The instability noted by jfc below appears to be a bug in
317 gcc-2.5.x when compiling w/o optimization. --Caleb
318
319 This asm is fragile: it doesn't work without the casts and it may
320 not work without optimization. Maybe I should add a swap builtin
321 to gcc. --jfc */
/* Atomically exchange a byte with *mem and return the old value
   (x86 xchgb carries an implicit lock on memory operands). */
static inline unsigned char xchgb(unsigned char reg,
				  volatile unsigned char *mem)
{
    __asm__ ("xchgb %0,%1" : "=q" (reg), "=m" (*mem) : "0" (reg));
    return reg;
}
328
329#if ULTRASTOR_DEBUG & (UD_COMMAND | UD_ABORT)
330
331/* Always called with the host lock held */
332static void log_ultrastor_abort(struct ultrastor_config *config,
333 int command)
334{
335 static char fmt[80] = "abort %d (%x); MSCP free pool: %x;";
336 int i;
337
338 for (i = 0; i < ULTRASTOR_MAX_CMDS; i++)
339 {
340 fmt[20 + i*2] = ' ';
341 if (! (config->mscp_free & (1 << i)))
342 fmt[21 + i*2] = '0' + config->mscp[i].target_id;
343 else
344 fmt[21 + i*2] = '-';
345 }
346 fmt[20 + ULTRASTOR_MAX_CMDS * 2] = '\n';
347 fmt[21 + ULTRASTOR_MAX_CMDS * 2] = 0;
348 printk(fmt, command, &config->mscp[command], config->mscp_free);
349
350}
351#endif
352
/* Probe for a 14F/34F adapter.  Either probes the single PORT_OVERRIDE
   address or walks ultrastor_ports_14f[]; verifies the two product-ID
   bytes, then reads the two config registers to fill in the global
   `config`.  Returns TRUE on success, FALSE otherwise.
   NOTE(review): the PORT_OVERRIDE #ifdefs interleave with the probe
   loop's braces -- the loop body below is only a loop when PORT_OVERRIDE
   is NOT defined. */
static int ultrastor_14f_detect(struct scsi_host_template * tpnt)
{
    size_t i;
    unsigned char in_byte, version_byte = 0;
    /* Layout of config register 0 (bitfields, LSB first). */
    struct config_1 {
      unsigned char bios_segment: 3;
      unsigned char removable_disks_as_fixed: 1;
      unsigned char interrupt: 2;
    unsigned char dma_channel: 2;
    } config_1;
    /* Layout of config register 1. */
    struct config_2 {
      unsigned char ha_scsi_id: 3;
      unsigned char mapping_mode: 2;
      unsigned char bios_drive_number: 1;
      unsigned char tfr_port: 2;
    } config_2;

#if (ULTRASTOR_DEBUG & UD_DETECT)
    printk("US14F: detect: called\n");
#endif

    /* If a 24F has already been configured, don't look for a 14F. */
    if (config.bios_segment)
	return FALSE;

#ifdef PORT_OVERRIDE
    if(!request_region(PORT_OVERRIDE, 0xc, "ultrastor")) {
      printk("Ultrastor I/O space already in use\n");
      return FALSE;
    };
    config.port_address = PORT_OVERRIDE;
#else
    /* Probe each candidate port; opening brace of this loop is closed
       inside the #ifndef PORT_OVERRIDE block further down. */
    for (i = 0; i < ARRAY_SIZE(ultrastor_ports_14f); i++) {
      if(!request_region(ultrastor_ports_14f[i], 0x0c, "ultrastor")) continue;
      config.port_address = ultrastor_ports_14f[i];
#endif

#if (ULTRASTOR_DEBUG & UD_DETECT)
	printk("US14F: detect: testing port address %03X\n", config.port_address);
#endif

	/* First product-ID byte must match exactly. */
	in_byte = inb(U14F_PRODUCT_ID(config.port_address));
	if (in_byte != US14F_PRODUCT_ID_0) {
#if (ULTRASTOR_DEBUG & UD_DETECT)
# ifdef PORT_OVERRIDE
	    printk("US14F: detect: wrong product ID 0 - %02X\n", in_byte);
# else
	    printk("US14F: detect: no adapter at port %03X\n", config.port_address);
# endif
#endif
#ifdef PORT_OVERRIDE
	    goto out_release_port;
#else
	    release_region(config.port_address, 0x0c);
	    continue;
#endif
	}
	in_byte = inb(U14F_PRODUCT_ID(config.port_address) + 1);
	/* Only upper nibble is significant for Product ID 1 */
	if ((in_byte & 0xF0) != US14F_PRODUCT_ID_1) {
#if (ULTRASTOR_DEBUG & UD_DETECT)
# ifdef PORT_OVERRIDE
	    printk("US14F: detect: wrong product ID 1 - %02X\n", in_byte);
# else
	    printk("US14F: detect: no adapter at port %03X\n", config.port_address);
# endif
#endif
#ifdef PORT_OVERRIDE
	    goto out_release_port;
#else
	    release_region(config.port_address, 0x0c);
	    continue;
#endif
	}
	/* Lower nibble of product ID 1 encodes the subversion (14F/34F). */
	version_byte = in_byte;
#ifndef PORT_OVERRIDE
	break;
    }
    if (i == ARRAY_SIZE(ultrastor_ports_14f)) {
# if (ULTRASTOR_DEBUG & UD_DETECT)
	printk("US14F: detect: no port address found!\n");
# endif
	/* all ports probed already released - we can just go straight out */
	return FALSE;
    }
#endif

#if (ULTRASTOR_DEBUG & UD_DETECT)
    printk("US14F: detect: adapter found at port address %03X\n",
	   config.port_address);
#endif

    /* Set local doorbell mask to disallow bus reset unless
       ultrastor_bus_reset is true. */
    outb(ultrastor_bus_reset ? 0xc2 : 0x82, LCL_DOORBELL_MASK(config.port_address));

    /* All above tests passed, must be the right thing.  Get some useful
       info. */

    /* Register the I/O space that we use */

    /* Read the two config registers into the bitfield structs. */
    *(char *)&config_1 = inb(CONFIG(config.port_address + 0));
    *(char *)&config_2 = inb(CONFIG(config.port_address + 1));
    config.bios_segment = bios_segment_table[config_1.bios_segment];
    config.doorbell_address = config.port_address;
    config.ogm_address = config.port_address + 0x8;
    config.icm_address = config.port_address + 0xC;
    config.interrupt = interrupt_table_14f[config_1.interrupt];
    config.ha_scsi_id = config_2.ha_scsi_id;
    config.heads = mapping_table[config_2.mapping_mode].heads;
    config.sectors = mapping_table[config_2.mapping_mode].sectors;
    config.bios_drive_number = config_2.bios_drive_number;
    config.subversion = (version_byte & 0x0F);
    if (config.subversion == U34F)
	config.dma_channel = 0;
    else
	config.dma_channel = dma_channel_table_14f[config_1.dma_channel];

    if (!config.bios_segment) {
#if (ULTRASTOR_DEBUG & UD_DETECT)
	printk("US14F: detect: not detected.\n");
#endif
	goto out_release_port;
    }

    /* Final consistency check, verify previous info. */
    if (config.subversion != U34F)
	if (!config.dma_channel || !(config_2.tfr_port & 0x2)) {
#if (ULTRASTOR_DEBUG & UD_DETECT)
	    printk("US14F: detect: consistency check failed\n");
#endif
	    goto out_release_port;
	}

    /* If we were TRULY paranoid, we could issue a host adapter inquiry
       command here and verify the data returned.  But frankly, I'm
       exhausted! */

    /* Finally!  Now I'm satisfied... */
#if (ULTRASTOR_DEBUG & UD_DETECT)
    printk("US14F: detect: detect succeeded\n"
	   "  Port address: %03X\n"
	   "  BIOS segment: %05X\n"
	   "  Interrupt: %u\n"
	   "  DMA channel: %u\n"
	   "  H/A SCSI ID: %u\n"
	   "  Subversion: %u\n",
	   config.port_address, config.bios_segment, config.interrupt,
	   config.dma_channel, config.ha_scsi_id, config.subversion);
#endif
    tpnt->this_id = config.ha_scsi_id;
    tpnt->unchecked_isa_dma = (config.subversion != U34F);

#if ULTRASTOR_MAX_CMDS > 1
    config.mscp_free = ~0;
#endif

    /*
     * Brrr, &config.mscp[0].SCint->host) it is something magical....
     * XXX and FIXME
     * NOTE(review): SCint has not been assigned yet, so this cookie
     * expression walks an indeterminate pointer -- confirm it is only
     * used as an opaque dev_id and never dereferenced.
     */
    if (request_irq(config.interrupt, do_ultrastor_interrupt, 0, "Ultrastor", &config.mscp[0].SCint->device->host)) {
	printk("Unable to allocate IRQ%u for UltraStor controller.\n",
	       config.interrupt);
	goto out_release_port;
    }
    if (config.dma_channel && request_dma(config.dma_channel,"Ultrastor")) {
	printk("Unable to allocate DMA channel %u for UltraStor controller.\n",
	       config.dma_channel);
	free_irq(config.interrupt, NULL);
	goto out_release_port;
    }
    tpnt->sg_tablesize = ULTRASTOR_14F_MAX_SG;
    printk("UltraStor driver version" VERSION ".  Using %d SG lists.\n",
	   ULTRASTOR_14F_MAX_SG);

    return TRUE;
out_release_port:
    release_region(config.port_address, 0x0c);
    return FALSE;
}
534
535static int ultrastor_24f_detect(struct scsi_host_template * tpnt)
536{
537 int i;
538 struct Scsi_Host * shpnt = NULL;
539
540#if (ULTRASTOR_DEBUG & UD_DETECT)
541 printk("US24F: detect");
542#endif
543
544 /* probe each EISA slot at slot address C80 */
545 for (i = 1; i < 15; i++)
546 {
547 unsigned char config_1, config_2;
548 unsigned short addr = (i << 12) | ULTRASTOR_24F_PORT;
549
550 if (inb(addr) != US24F_PRODUCT_ID_0 &&
551 inb(addr+1) != US24F_PRODUCT_ID_1 &&
552 inb(addr+2) != US24F_PRODUCT_ID_2)
553 continue;
554
555 config.revision = inb(addr+3);
556 config.slot = i;
557 if (! (inb(addr+4) & 1))
558 {
559#if (ULTRASTOR_DEBUG & UD_DETECT)
560 printk("U24F: found disabled card in slot %u\n", i);
561#endif
562 continue;
563 }
564#if (ULTRASTOR_DEBUG & UD_DETECT)
565 printk("U24F: found card in slot %u\n", i);
566#endif
567 config_1 = inb(addr + 5);
568 config.bios_segment = bios_segment_table[config_1 & 7];
569 switch(config_1 >> 4)
570 {
571 case 1:
572 config.interrupt = 15;
573 break;
574 case 2:
575 config.interrupt = 14;
576 break;
577 case 4:
578 config.interrupt = 11;
579 break;
580 case 8:
581 config.interrupt = 10;
582 break;
583 default:
584 printk("U24F: invalid IRQ\n");
585 return FALSE;
586 }
587
588 /* BIOS addr set */
589 /* base port set */
590 config.port_address = addr;
591 config.doorbell_address = addr + 12;
592 config.ogm_address = addr + 0x17;
593 config.icm_address = addr + 0x1C;
594 config_2 = inb(addr + 7);
595 config.ha_scsi_id = config_2 & 7;
596 config.heads = mapping_table[(config_2 >> 3) & 3].heads;
597 config.sectors = mapping_table[(config_2 >> 3) & 3].sectors;
598#if (ULTRASTOR_DEBUG & UD_DETECT)
599 printk("US24F: detect: detect succeeded\n"
600 " Port address: %03X\n"
601 " BIOS segment: %05X\n"
602 " Interrupt: %u\n"
603 " H/A SCSI ID: %u\n",
604 config.port_address, config.bios_segment,
605 config.interrupt, config.ha_scsi_id);
606#endif
607 tpnt->this_id = config.ha_scsi_id;
608 tpnt->unchecked_isa_dma = 0;
609 tpnt->sg_tablesize = ULTRASTOR_24F_MAX_SG;
610
611 shpnt = scsi_register(tpnt, 0);
612 if (!shpnt) {
613 printk(KERN_WARNING "(ultrastor:) Could not register scsi device. Aborting registration.\n");
614 free_irq(config.interrupt, do_ultrastor_interrupt);
615 return FALSE;
616 }
617
618 if (request_irq(config.interrupt, do_ultrastor_interrupt, 0, "Ultrastor", shpnt))
619 {
620 printk("Unable to allocate IRQ%u for UltraStor controller.\n",
621 config.interrupt);
622 return FALSE;
623 }
624
625 shpnt->irq = config.interrupt;
626 shpnt->dma_channel = config.dma_channel;
627 shpnt->io_port = config.port_address;
628
629#if ULTRASTOR_MAX_CMDS > 1
630 config.mscp_free = ~0;
631#endif
632 /* Mark ICM and OGM free */
633 outb(0, addr + 0x16);
634 outb(0, addr + 0x1B);
635
636 /* Set local doorbell mask to disallow bus reset unless
637 ultrastor_bus_reset is true. */
638 outb(ultrastor_bus_reset ? 0xc2 : 0x82, LCL_DOORBELL_MASK(addr+12));
639 outb(0x02, SYS_DOORBELL_MASK(addr+12));
640 printk("UltraStor driver version " VERSION ". Using %d SG lists.\n",
641 tpnt->sg_tablesize);
642 return TRUE;
643 }
644 return FALSE;
645}
646
647static int ultrastor_detect(struct scsi_host_template * tpnt)
648{
649 tpnt->proc_name = "ultrastor";
650 return ultrastor_14f_detect(tpnt) || ultrastor_24f_detect(tpnt);
651}
652
/* Release IRQ, DMA channel and I/O region of a registered host, then
   unregister it.  Returns 0.
   NOTE(review): free_irq() is passed a NULL dev_id while the IRQ was
   requested with a non-NULL cookie in both detect paths -- presumably
   relies on pre-shared-IRQ free_irq semantics; verify against the
   kernel version this driver targets. */
static int ultrastor_release(struct Scsi_Host *shost)
{
	if (shost->irq)
		free_irq(shost->irq, NULL);
	if (shost->dma_channel != 0xff)
		free_dma(shost->dma_channel);
	if (shost->io_port && shost->n_io_port)
		release_region(shost->io_port, shost->n_io_port);
	scsi_unregister(shost);
	return 0;
}
664
665static const char *ultrastor_info(struct Scsi_Host * shpnt)
666{
667 static char buf[64];
668
669 if (config.slot)
670 sprintf(buf, "UltraStor 24F SCSI @ Slot %u IRQ%u",
671 config.slot, config.interrupt);
672 else if (config.subversion)
673 sprintf(buf, "UltraStor 34F SCSI @ Port %03X BIOS %05X IRQ%u",
674 config.port_address, (int)config.bios_segment,
675 config.interrupt);
676 else
677 sprintf(buf, "UltraStor 14F SCSI @ Port %03X BIOS %05X IRQ%u DMA%u",
678 config.port_address, (int)config.bios_segment,
679 config.interrupt, config.dma_channel);
680 return buf;
681}
682
/* Fill mscp->sglist from the command's scatterlist, converting each
   segment to an ISA bus address, and point the MSCP's transfer fields
   at the list.  Caller must have set mscp->sg beforehand. */
static inline void build_sg_list(struct mscp *mscp, struct scsi_cmnd *SCpnt)
{
	struct scatterlist *sg;
	long transfer_length = 0;
	int i, max;

	max = scsi_sg_count(SCpnt);
	scsi_for_each_sg(SCpnt, sg, max, i) {
		mscp->sglist[i].address = isa_page_to_bus(sg_page(sg)) + sg->offset;
		mscp->sglist[i].num_bytes = sg->length;
		transfer_length += sg->length;
	}
	mscp->number_of_sg_list = max;
	mscp->transfer_data = isa_virt_to_bus(mscp->sglist);
	/* ??? May not be necessary.  Docs are unclear as to whether transfer
	   length field is ignored or whether it should be set to the total
	   number of bytes of the transfer. */
	mscp->transfer_data_length = transfer_length;
}
702
703static int ultrastor_queuecommand_lck(struct scsi_cmnd *SCpnt,
704 void (*done) (struct scsi_cmnd *))
705{
706 struct mscp *my_mscp;
707#if ULTRASTOR_MAX_CMDS > 1
708 int mscp_index;
709#endif
710 unsigned int status;
711
712 /* Next test is for debugging; "can't happen" */
713 if ((config.mscp_free & ((1U << ULTRASTOR_MAX_CMDS) - 1)) == 0)
714 panic("ultrastor_queuecommand: no free MSCP\n");
715 mscp_index = find_and_clear_bit_16(&config.mscp_free);
716
717 /* Has the command been aborted? */
718 if (xchgb(0xff, &config.aborted[mscp_index]) != 0)
719 {
720 status = DID_ABORT << 16;
721 goto aborted;
722 }
723
724 my_mscp = &config.mscp[mscp_index];
725
726 *(unsigned char *)my_mscp = OP_SCSI | (DTD_SCSI << 3);
727
728 /* Tape drives don't work properly if the cache is used. The SCSI
729 READ command for a tape doesn't have a block offset, and the adapter
730 incorrectly assumes that all reads from the tape read the same
731 blocks. Results will depend on read buffer size and other disk
732 activity.
733
734 ??? Which other device types should never use the cache? */
735 my_mscp->ca = SCpnt->device->type != TYPE_TAPE;
736 my_mscp->target_id = SCpnt->device->id;
737 my_mscp->ch_no = 0;
738 my_mscp->lun = SCpnt->device->lun;
739 if (scsi_sg_count(SCpnt)) {
740 /* Set scatter/gather flag in SCSI command packet */
741 my_mscp->sg = TRUE;
742 build_sg_list(my_mscp, SCpnt);
743 } else {
744 /* Unset scatter/gather flag in SCSI command packet */
745 my_mscp->sg = FALSE;
746 my_mscp->transfer_data = isa_virt_to_bus(scsi_sglist(SCpnt));
747 my_mscp->transfer_data_length = scsi_bufflen(SCpnt);
748 }
749 my_mscp->command_link = 0; /*???*/
750 my_mscp->scsi_command_link_id = 0; /*???*/
751 my_mscp->length_of_sense_byte = SCSI_SENSE_BUFFERSIZE;
752 my_mscp->length_of_scsi_cdbs = SCpnt->cmd_len;
753 memcpy(my_mscp->scsi_cdbs, SCpnt->cmnd, my_mscp->length_of_scsi_cdbs);
754 my_mscp->adapter_status = 0;
755 my_mscp->target_status = 0;
756 my_mscp->sense_data = isa_virt_to_bus(&SCpnt->sense_buffer);
757 my_mscp->done = done;
758 my_mscp->SCint = SCpnt;
759 SCpnt->host_scribble = (unsigned char *)my_mscp;
760
761 /* Find free OGM slot. On 24F, look for OGM status byte == 0.
762 On 14F and 34F, wait for local interrupt pending flag to clear.
763
764 FIXME: now we are using new_eh we should punt here and let the
765 midlayer sort it out */
766
767retry:
768 if (config.slot)
769 while (inb(config.ogm_address - 1) != 0 && config.aborted[mscp_index] == 0xff)
770 barrier();
771
772 /* else??? */
773
774 while ((inb(LCL_DOORBELL_INTR(config.doorbell_address)) & (config.slot ? 2 : 1)) && config.aborted[mscp_index] == 0xff)
775 barrier();
776
777 /* To avoid race conditions, keep the code to write to the adapter
778 atomic. This simplifies the abort code. Right now the
779 scsi mid layer has the host_lock already held
780 */
781
782 if (inb(LCL_DOORBELL_INTR(config.doorbell_address)) & (config.slot ? 2 : 1))
783 goto retry;
784
785 status = xchgb(0, &config.aborted[mscp_index]);
786 if (status != 0xff) {
787
788#if ULTRASTOR_DEBUG & (UD_COMMAND | UD_ABORT)
789 printk("USx4F: queuecommand: aborted\n");
790#if ULTRASTOR_MAX_CMDS > 1
791 log_ultrastor_abort(&config, mscp_index);
792#endif
793#endif
794 status <<= 16;
795
796 aborted:
797 set_bit(mscp_index, &config.mscp_free);
798 /* If the driver queues commands, call the done proc here. Otherwise
799 return an error. */
800#if ULTRASTOR_MAX_CMDS > 1
801 SCpnt->result = status;
802 done(SCpnt);
803 return 0;
804#else
805 return status;
806#endif
807 }
808
809 /* Store pointer in OGM address bytes */
810 outl(isa_virt_to_bus(my_mscp), config.ogm_address);
811
812 /* Issue OGM interrupt */
813 if (config.slot) {
814 /* Write OGM command register on 24F */
815 outb(1, config.ogm_address - 1);
816 outb(0x2, LCL_DOORBELL_INTR(config.doorbell_address));
817 } else {
818 outb(0x1, LCL_DOORBELL_INTR(config.doorbell_address));
819 }
820
821#if (ULTRASTOR_DEBUG & UD_COMMAND)
822 printk("USx4F: queuecommand: returning\n");
823#endif
824
825 return 0;
826}
827
/* Generate ultrastor_queuecommand(): takes the host lock, then calls
   ultrastor_queuecommand_lck() above. */
static DEF_SCSI_QCMD(ultrastor_queuecommand)
830/* This code must deal with 2 cases:
831
832 1. The command has not been written to the OGM. In this case, set
833 the abort flag and return.
834
835 2. The command has been written to the OGM and is stuck somewhere in
836 the adapter.
837
838 2a. On a 24F, ask the adapter to abort the command. It will interrupt
839 when it does.
840
841 2b. Call the command's done procedure.
842
843 */
844
/*
 * eh_abort_handler.  Two situations must be handled:
 *  1. The command has not yet been written to the OGM: flag the slot
 *     aborted and let queuecommand fail it.
 *  2. The command is already in the adapter: on a 24F ask the adapter
 *     to abort it (interrupt follows); otherwise complete it locally.
 * Returns SUCCESS or FAILED per the SCSI EH contract.
 */
static int ultrastor_abort(struct scsi_cmnd *SCpnt)
{
#if ULTRASTOR_DEBUG & UD_ABORT
    char out[108];
    unsigned char icm_status = 0, ogm_status = 0;
    unsigned int icm_addr = 0, ogm_addr = 0;
#endif
    unsigned int mscp_index;
    unsigned char old_aborted;
    unsigned long flags;
    void (*done)(struct scsi_cmnd *);
    struct Scsi_Host *host = SCpnt->device->host;

    if(config.slot)
      return FAILED;	/* Do not attempt an abort for the 24f */

    /* Simple consistency checking */
    if(!SCpnt->host_scribble)
      return FAILED;

    /* host_scribble points at the command's MSCP (set in queuecommand);
       recover the slot index from it. */
    mscp_index = ((struct mscp *)SCpnt->host_scribble) - config.mscp;
    if (mscp_index >= ULTRASTOR_MAX_CMDS)
	panic("Ux4F aborting invalid MSCP");

#if ULTRASTOR_DEBUG & UD_ABORT
    /* 24F only (dead code here, since we bail above when config.slot
       is set): snapshot the OGM/ICM state and a hex dump of the slot's
       I/O ports for the debug printk further down. */
    if (config.slot)
      {
	int port0 = (config.slot << 12) | 0xc80;
	int i;
	unsigned long flags;

	spin_lock_irqsave(host->host_lock, flags);
	strcpy(out, "OGM %d:%x ICM %d:%x ports:  ");
	for (i = 0; i < 16; i++)
	  {
	    unsigned char p = inb(port0 + i);
	    out[28 + i * 3] = "0123456789abcdef"[p >> 4];
	    out[29 + i * 3] = "0123456789abcdef"[p & 15];
	    out[30 + i * 3] = ' ';
	  }
	out[28 + i * 3] = '\n';
	out[29 + i * 3] = 0;
	ogm_status = inb(port0 + 22);
	ogm_addr = (unsigned int)isa_bus_to_virt(inl(port0 + 23));
	icm_status = inb(port0 + 27);
	icm_addr = (unsigned int)isa_bus_to_virt(inl(port0 + 28));
	spin_unlock_irqrestore(host->host_lock, flags);
      }

    /* First check to see if an interrupt is pending.  I suspect the SiS
       chipset loses interrupts.  (I also suspect it mangles data, but
       one bug at a time... */
    if (config.slot ? inb(config.icm_address - 1) == 2 :
	(inb(SYS_DOORBELL_INTR(config.doorbell_address)) & 1))
      {
	printk("Ux4F: abort while completed command pending\n");

	spin_lock_irqsave(host->host_lock, flags);
	/* FIXME: Ewww... need to think about passing host around properly */
	ultrastor_interrupt(NULL);
	spin_unlock_irqrestore(host->host_lock, flags);
	return SUCCESS;
      }
#endif

    /* Atomically mark the slot aborted; the previous value tells us
       how far queuecommand got with this command. */
    old_aborted = xchgb(DID_ABORT, &config.aborted[mscp_index]);

    /* aborted == 0xff is the signal that queuecommand has not yet sent
       the command.  It will notice the new abort flag and fail. */
    if (old_aborted == 0xff)
	return SUCCESS;

    /* On 24F, send an abort MSCP request.  The adapter will interrupt
       and the interrupt handler will call done. */
    if (config.slot && inb(config.ogm_address - 1) == 0)
      {
	unsigned long flags;

	spin_lock_irqsave(host->host_lock, flags);
	outl(isa_virt_to_bus(&config.mscp[mscp_index]), config.ogm_address);
	udelay(8);
	outb(0x80, config.ogm_address - 1);
	outb(0x2, LCL_DOORBELL_INTR(config.doorbell_address));
#if ULTRASTOR_DEBUG & UD_ABORT
	log_ultrastor_abort(&config, mscp_index);
	printk(out, ogm_status, ogm_addr, icm_status, icm_addr);
#endif
	spin_unlock_irqrestore(host->host_lock, flags);
	/* FIXME: add a wait for the abort to complete */
	return SUCCESS;
      }

#if ULTRASTOR_DEBUG & UD_ABORT
    log_ultrastor_abort(&config, mscp_index);
#endif

    /* Can't request a graceful abort.  Either this is not a 24F or
       the OGM is busy.  Don't free the command -- the adapter might
       still be using it.  Setting SCint = 0 causes the interrupt
       handler to ignore the command. */

    /* FIXME - devices that implement soft resets will still be running
       the command after a bus reset.  We would probably rather leave
       the command in the queue.  The upper level code will automatically
       leave the command in the active state instead of requeueing it. ERY */

#if ULTRASTOR_DEBUG & UD_ABORT
    if (config.mscp[mscp_index].SCint != SCpnt)
	printk("abort: command mismatch, %p != %p\n",
	       config.mscp[mscp_index].SCint, SCpnt);
#endif
    if (config.mscp[mscp_index].SCint == NULL)
	return FAILED;

    if (config.mscp[mscp_index].SCint != SCpnt) panic("Bad abort");
    /* Detach the command from the slot so a late adapter completion is
       ignored, then finish it ourselves with DID_ABORT. */
    config.mscp[mscp_index].SCint = NULL;
    done = config.mscp[mscp_index].done;
    config.mscp[mscp_index].done = NULL;
    SCpnt->result = DID_ABORT << 16;

    /* Take the host lock to guard against scsi layer re-entry */
    done(SCpnt);

    /* Need to set a timeout here in case command never completes. */
    return SUCCESS;
}
971
/*
 * eh_host_reset_handler: reset the adapter (and SCSI bus) via the
 * doorbell register, then fail every outstanding command with
 * DID_RESET and mark all MSCP slots free.  Not supported on the 24F
 * (config.slot set) -- returns FAILED there.
 */
static int ultrastor_host_reset(struct scsi_cmnd * SCpnt)
{
    unsigned long flags;
    int i;
    struct Scsi_Host *host = SCpnt->device->host;

#if (ULTRASTOR_DEBUG & UD_RESET)
    printk("US14F: reset: called\n");
#endif

    if(config.slot)
	return FAILED;

    spin_lock_irqsave(host->host_lock, flags);
    /* Reset the adapter and SCSI bus.  The SCSI bus reset can be
       inhibited by clearing ultrastor_bus_reset before probe. */
    outb(0xc0, LCL_DOORBELL_INTR(config.doorbell_address));
    if (config.slot)
      {
	/* 24F: also clear both mailbox status bytes.  (Dead code here,
	   given the early return above when config.slot is set.) */
	outb(0, config.ogm_address - 1);
	outb(0, config.icm_address - 1);
      }

#if ULTRASTOR_MAX_CMDS == 1
    if (config.mscp_busy && config.mscp->done && config.mscp->SCint)
      {
	config.mscp->SCint->result = DID_RESET << 16;
	config.mscp->done(config.mscp->SCint);
      }
    config.mscp->SCint = 0;
#else
    /* Complete every in-flight command (free-bit clear) with DID_RESET
       and detach it from its MSCP slot. */
    for (i = 0; i < ULTRASTOR_MAX_CMDS; i++)
      {
	if (! (config.mscp_free & (1 << i)) &&
	    config.mscp[i].done && config.mscp[i].SCint)
	  {
	    config.mscp[i].SCint->result = DID_RESET << 16;
	    config.mscp[i].done(config.mscp[i].SCint);
	    config.mscp[i].done = NULL;
	  }
	config.mscp[i].SCint = NULL;
      }
#endif

    /* FIXME - if the device implements soft resets, then the command
       will still be running.  ERY

       Even bigger deal with new_eh!
     */

    /* Clear all abort flags and mark every MSCP slot available again. */
    memset((unsigned char *)config.aborted, 0, sizeof config.aborted);
#if ULTRASTOR_MAX_CMDS == 1
    config.mscp_busy = 0;
#else
    config.mscp_free = ~0;
#endif

    spin_unlock_irqrestore(host->host_lock, flags);
    return SUCCESS;

}
1033
1034int ultrastor_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1035 sector_t capacity, int * dkinfo)
1036{
1037 int size = capacity;
1038 unsigned int s = config.heads * config.sectors;
1039
1040 dkinfo[0] = config.heads;
1041 dkinfo[1] = config.sectors;
1042 dkinfo[2] = size / s; /* Ignore partial cylinders */
1043#if 0
1044 if (dkinfo[2] > 1024)
1045 dkinfo[2] = 1024;
1046#endif
1047 return 0;
1048}
1049
1050static void ultrastor_interrupt(void *dev_id)
1051{
1052 unsigned int status;
1053#if ULTRASTOR_MAX_CMDS > 1
1054 unsigned int mscp_index;
1055#endif
1056 struct mscp *mscp;
1057 void (*done) (struct scsi_cmnd *);
1058 struct scsi_cmnd *SCtmp;
1059
1060#if ULTRASTOR_MAX_CMDS == 1
1061 mscp = &config.mscp[0];
1062#else
1063 mscp = (struct mscp *)isa_bus_to_virt(inl(config.icm_address));
1064 mscp_index = mscp - config.mscp;
1065 if (mscp_index >= ULTRASTOR_MAX_CMDS) {
1066 printk("Ux4F interrupt: bad MSCP address %x\n", (unsigned int) mscp);
1067 /* A command has been lost. Reset and report an error
1068 for all commands. */
1069 ultrastor_host_reset(dev_id);
1070 return;
1071 }
1072#endif
1073
1074 /* Clean ICM slot (set ICMINT bit to 0) */
1075 if (config.slot) {
1076 unsigned char icm_status = inb(config.icm_address - 1);
1077#if ULTRASTOR_DEBUG & (UD_INTERRUPT|UD_ERROR|UD_ABORT)
1078 if (icm_status != 1 && icm_status != 2)
1079 printk("US24F: ICM status %x for MSCP %d (%x)\n", icm_status,
1080 mscp_index, (unsigned int) mscp);
1081#endif
1082 /* The manual says clear interrupt then write 0 to ICM status.
1083 This seems backwards, but I'll do it anyway. --jfc */
1084 outb(2, SYS_DOORBELL_INTR(config.doorbell_address));
1085 outb(0, config.icm_address - 1);
1086 if (icm_status == 4) {
1087 printk("UltraStor abort command failed\n");
1088 return;
1089 }
1090 if (icm_status == 3) {
1091 void (*done)(struct scsi_cmnd *) = mscp->done;
1092 if (done) {
1093 mscp->done = NULL;
1094 mscp->SCint->result = DID_ABORT << 16;
1095 done(mscp->SCint);
1096 }
1097 return;
1098 }
1099 } else {
1100 outb(1, SYS_DOORBELL_INTR(config.doorbell_address));
1101 }
1102
1103 SCtmp = mscp->SCint;
1104 mscp->SCint = NULL;
1105
1106 if (!SCtmp)
1107 {
1108#if ULTRASTOR_DEBUG & (UD_ABORT|UD_INTERRUPT)
1109 printk("MSCP %d (%x): no command\n", mscp_index, (unsigned int) mscp);
1110#endif
1111#if ULTRASTOR_MAX_CMDS == 1
1112 config.mscp_busy = FALSE;
1113#else
1114 set_bit(mscp_index, &config.mscp_free);
1115#endif
1116 config.aborted[mscp_index] = 0;
1117 return;
1118 }
1119
1120 /* Save done locally and zero before calling. This is needed as
1121 once we call done, we may get another command queued before this
1122 interrupt service routine can return. */
1123 done = mscp->done;
1124 mscp->done = NULL;
1125
1126 /* Let the higher levels know that we're done */
1127 switch (mscp->adapter_status)
1128 {
1129 case 0:
1130 status = DID_OK << 16;
1131 break;
1132 case 0x01: /* invalid command */
1133 case 0x02: /* invalid parameters */
1134 case 0x03: /* invalid data list */
1135 default:
1136 status = DID_ERROR << 16;
1137 break;
1138 case 0x84: /* SCSI bus abort */
1139 status = DID_ABORT << 16;
1140 break;
1141 case 0x91:
1142 status = DID_TIME_OUT << 16;
1143 break;
1144 }
1145
1146 SCtmp->result = status | mscp->target_status;
1147
1148 SCtmp->host_scribble = NULL;
1149
1150 /* Free up mscp block for next command */
1151#if ULTRASTOR_MAX_CMDS == 1
1152 config.mscp_busy = FALSE;
1153#else
1154 set_bit(mscp_index, &config.mscp_free);
1155#endif
1156
1157#if ULTRASTOR_DEBUG & (UD_ABORT|UD_INTERRUPT)
1158 if (config.aborted[mscp_index])
1159 printk("Ux4 interrupt: MSCP %d (%x) aborted = %d\n",
1160 mscp_index, (unsigned int) mscp, config.aborted[mscp_index]);
1161#endif
1162 config.aborted[mscp_index] = 0;
1163
1164 if (done)
1165 done(SCtmp);
1166 else
1167 printk("US14F: interrupt: unexpected interrupt\n");
1168
1169 if (config.slot ? inb(config.icm_address - 1) :
1170 (inb(SYS_DOORBELL_INTR(config.doorbell_address)) & 1))
1171#if (ULTRASTOR_DEBUG & UD_MULTI_CMD)
1172 printk("Ux4F: multiple commands completed\n");
1173#else
1174 ;
1175#endif
1176
1177#if (ULTRASTOR_DEBUG & UD_INTERRUPT)
1178 printk("USx4F: interrupt: returning\n");
1179#endif
1180}
1181
1182static irqreturn_t do_ultrastor_interrupt(int irq, void *dev_id)
1183{
1184 unsigned long flags;
1185 struct Scsi_Host *dev = dev_id;
1186
1187 spin_lock_irqsave(dev->host_lock, flags);
1188 ultrastor_interrupt(dev_id);
1189 spin_unlock_irqrestore(dev->host_lock, flags);
1190 return IRQ_HANDLED;
1191}
1192
1193MODULE_LICENSE("GPL");
1194
/*
 * Host template for the legacy detect/release registration scheme;
 * scsi_module.c (included below) registers it at module load.
 */
static struct scsi_host_template driver_template = {
	.name              	= "UltraStor 14F/24F/34F",
	.detect            	= ultrastor_detect,
	.release	   	= ultrastor_release,
	.info              	= ultrastor_info,
	.queuecommand      	= ultrastor_queuecommand,
	.eh_abort_handler  	= ultrastor_abort,
	.eh_host_reset_handler  = ultrastor_host_reset,
	.bios_param        	= ultrastor_biosparam,
	.can_queue         	= ULTRASTOR_MAX_CMDS,	/* one per MSCP slot */
	.sg_tablesize      	= ULTRASTOR_14F_MAX_SG,
	.cmd_per_lun       	= ULTRASTOR_MAX_CMDS_PER_LUN,
	.unchecked_isa_dma 	= 1,	/* ISA bus-master: needs bounce-able DMA */
	.use_clustering    	= ENABLE_CLUSTERING,
};
1210#include "scsi_module.c"
diff --git a/drivers/scsi/ultrastor.h b/drivers/scsi/ultrastor.h
deleted file mode 100644
index 165c18b5cf5f..000000000000
--- a/drivers/scsi/ultrastor.h
+++ /dev/null
@@ -1,80 +0,0 @@
1/*
2 * ultrastor.c (C) 1991 David B. Gentzel
3 * Low-level scsi driver for UltraStor 14F
4 * by David B. Gentzel, Whitfield Software Services, Carnegie, PA
5 * (gentzel@nova.enet.dec.com)
6 * scatter/gather added by Scott Taylor (n217cg@tamuts.tamu.edu)
7 * 24F support by John F. Carr (jfc@athena.mit.edu)
8 * John's work modified by Caleb Epstein (cae@jpmorgan.com) and
9 * Eric Youngdale (eric@tantalus.nrl.navy.mil).
10 * Thanks to UltraStor for providing the necessary documentation
11 */
12
13#ifndef _ULTRASTOR_H
14#define _ULTRASTOR_H
15
16static int ultrastor_detect(struct scsi_host_template *);
17static const char *ultrastor_info(struct Scsi_Host *shpnt);
18static int ultrastor_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
19static int ultrastor_abort(struct scsi_cmnd *);
20static int ultrastor_host_reset(struct scsi_cmnd *);
21static int ultrastor_biosparam(struct scsi_device *, struct block_device *,
22 sector_t, int *);
23
24
25#define ULTRASTOR_14F_MAX_SG 16
26#define ULTRASTOR_24F_MAX_SG 33
27
28#define ULTRASTOR_MAX_CMDS_PER_LUN 5
29#define ULTRASTOR_MAX_CMDS 16
30
31#define ULTRASTOR_24F_PORT 0xC80
32
33
34#ifdef ULTRASTOR_PRIVATE
35
36#define UD_ABORT 0x0001
37#define UD_COMMAND 0x0002
38#define UD_DETECT 0x0004
39#define UD_INTERRUPT 0x0008
40#define UD_RESET 0x0010
41#define UD_MULTI_CMD 0x0020
42#define UD_CSIR 0x0040
43#define UD_ERROR 0x0080
44
45/* #define PORT_OVERRIDE 0x330 */
46
47/* Values for the PRODUCT_ID ports for the 14F */
48#define US14F_PRODUCT_ID_0 0x56
49#define US14F_PRODUCT_ID_1 0x40 /* NOTE: Only upper nibble is used */
50
51#define US24F_PRODUCT_ID_0 0x56
52#define US24F_PRODUCT_ID_1 0x63
53#define US24F_PRODUCT_ID_2 0x02
54
55/* Subversion values */
56#define U14F 0
57#define U34F 1
58
59/* MSCP field values */
60
61/* Opcode */
62#define OP_HOST_ADAPTER 0x1
63#define OP_SCSI 0x2
64#define OP_RESET 0x4
65
66/* Date Transfer Direction */
67#define DTD_SCSI 0x0
68#define DTD_IN 0x1
69#define DTD_OUT 0x2
70#define DTD_NONE 0x3
71
72/* Host Adapter command subcodes */
73#define HA_CMD_INQUIRY 0x1
74#define HA_CMD_SELF_DIAG 0x2
75#define HA_CMD_READ_BUFF 0x3
76#define HA_CMD_WRITE_BUFF 0x4
77
78#endif
79
80#endif
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index deefab3a94d0..ec91bd07f00a 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -259,7 +259,7 @@ static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
259 struct virtio_scsi_cmd *cmd = buf; 259 struct virtio_scsi_cmd *cmd = buf;
260 260
261 if (cmd->comp) 261 if (cmd->comp)
262 complete_all(cmd->comp); 262 complete(cmd->comp);
263} 263}
264 264
265static void virtscsi_ctrl_done(struct virtqueue *vq) 265static void virtscsi_ctrl_done(struct virtqueue *vq)
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
deleted file mode 100644
index 409f959845c4..000000000000
--- a/drivers/scsi/wd7000.c
+++ /dev/null
@@ -1,1657 +0,0 @@
1/* $Id: $
2 * linux/drivers/scsi/wd7000.c
3 *
4 * Copyright (C) 1992 Thomas Wuensche
5 * closely related to the aha1542 driver from Tommy Thorn
6 * ( as close as different hardware allows on a lowlevel-driver :-) )
7 *
8 * Revised (and renamed) by John Boyd <boyd@cis.ohio-state.edu> to
9 * accommodate Eric Youngdale's modifications to scsi.c. Nov 1992.
10 *
11 * Additional changes to support scatter/gather. Dec. 1992. tw/jb
12 *
13 * No longer tries to reset SCSI bus at boot (it wasn't working anyway).
14 * Rewritten to support multiple host adapters.
15 * Miscellaneous cleanup.
16 * So far, still doesn't do reset or abort correctly, since I have no idea
17 * how to do them with this board (8^(. Jan 1994 jb
18 *
19 * This driver now supports both of the two standard configurations (per
20 * the 3.36 Owner's Manual, my latest reference) by the same method as
21 * before; namely, by looking for a BIOS signature. Thus, the location of
22 * the BIOS signature determines the board configuration. Until I have
23 * time to do something more flexible, users should stick to one of the
24 * following:
25 *
26 * Standard configuration for single-adapter systems:
27 * - BIOS at CE00h
28 * - I/O base address 350h
29 * - IRQ level 15
30 * - DMA channel 6
31 * Standard configuration for a second adapter in a system:
32 * - BIOS at C800h
33 * - I/O base address 330h
34 * - IRQ level 11
35 * - DMA channel 5
36 *
37 * Anyone who can recompile the kernel is welcome to add others as need
38 * arises, but unpredictable results may occur if there are conflicts.
39 * In any event, if there are multiple adapters in a system, they MUST
40 * use different I/O bases, IRQ levels, and DMA channels, since they will be
41 * indistinguishable (and in direct conflict) otherwise.
42 *
43 * As a point of information, the NO_OP command toggles the CMD_RDY bit
44 * of the status port, and this fact could be used as a test for the I/O
45 * base address (or more generally, board detection). There is an interrupt
46 * status port, so IRQ probing could also be done. I suppose the full
47 * DMA diagnostic could be used to detect the DMA channel being used. I
48 * haven't done any of this, though, because I think there's too much of
49 * a chance that such explorations could be destructive, if some other
50 * board's resources are used inadvertently. So, call me a wimp, but I
51 * don't want to try it. The only kind of exploration I trust is memory
52 * exploration, since it's more certain that reading memory won't be
53 * destructive.
54 *
55 * More to my liking would be a LILO boot command line specification, such
56 * as is used by the aha152x driver (and possibly others). I'll look into
57 * it, as I have time...
58 *
59 * I get mail occasionally from people who either are using or are
60 * considering using a WD7000 with Linux. There is a variety of
61 * nomenclature describing WD7000's. To the best of my knowledge, the
62 * following is a brief summary (from an old WD doc - I don't work for
63 * them or anything like that):
64 *
65 * WD7000-FASST2: This is a WD7000 board with the real-mode SST ROM BIOS
66 * installed. Last I heard, the BIOS was actually done by Columbia
67 * Data Products. The BIOS is only used by this driver (and thus
68 * by Linux) to identify the board; none of it can be executed under
69 * Linux.
70 *
71 * WD7000-ASC: This is the original adapter board, with or without BIOS.
72 * The board uses a WD33C93 or WD33C93A SBIC, which in turn is
73 * controlled by an onboard Z80 processor. The board interface
74 * visible to the host CPU is defined effectively by the Z80's
75 * firmware, and it is this firmware's revision level that is
76 * determined and reported by this driver. (The version of the
77 * on-board BIOS is of no interest whatsoever.) The host CPU has
78 * no access to the SBIC; hence the fact that it is a WD33C93 is
79 * also of no interest to this driver.
80 *
81 * WD7000-AX:
82 * WD7000-MX:
83 * WD7000-EX: These are newer versions of the WD7000-ASC. The -ASC is
84 * largely built from discrete components; these boards use more
85 * integration. The -AX is an ISA bus board (like the -ASC),
86 * the -MX is an MCA (i.e., PS/2) bus board), and the -EX is an
87 * EISA bus board.
88 *
89 * At the time of my documentation, the -?X boards were "future" products,
90 * and were not yet available. However, I vaguely recall that Thomas
91 * Wuensche had an -AX, so I believe at least it is supported by this
92 * driver. I have no personal knowledge of either -MX or -EX boards.
93 *
94 * P.S. Just recently, I've discovered (directly from WD and Future
95 * Domain) that all but the WD7000-EX have been out of production for
96 * two years now. FD has production rights to the 7000-EX, and are
97 * producing it under a new name, and with a new BIOS. If anyone has
98 * one of the FD boards, it would be nice to come up with a signature
99 * for it.
100 * J.B. Jan 1994.
101 *
102 *
103 * Revisions by Miroslav Zagorac <zaga@fly.cc.fer.hr>
104 *
105 * 08/24/1996.
106 *
107 * Enhancement for wd7000_detect function has been made, so you don't have
108 * to enter BIOS ROM address in initialisation data (see struct Config).
109 * We cannot detect IRQ, DMA and I/O base address for now, so we have to
110 * enter them as arguments while wd_7000 is detected. If someone has IRQ,
111 * DMA or I/O base address set to some other value, he can enter them in
112 * configuration without any problem. Also I wrote a function wd7000_setup,
113 * so now you can enter WD-7000 definition as kernel arguments,
114 * as in lilo.conf:
115 *
116 * append="wd7000=IRQ,DMA,IO"
117 *
118 * PS: If card BIOS ROM is disabled, function wd7000_detect now will recognize
119 * adapter, unlike the old one. Anyway, BIOS ROM from WD7000 adapter is
120 * useless for Linux. B^)
121 *
122 *
123 * 09/06/1996.
124 *
125 * Autodetecting of I/O base address from wd7000_detect function is removed,
126 * some little bugs removed, etc...
127 *
128 * Thanks to Roger Scott for driver debugging.
129 *
130 * 06/07/1997
131 *
132 * Added support for /proc file system (/proc/scsi/wd7000/[0...] files).
133 * Now, driver can handle hard disks with capacity >1GB.
134 *
135 * 01/15/1998
136 *
137 * Added support for BUS_ON and BUS_OFF parameters in config line.
138 * Miscellaneous cleanup.
139 *
140 * 03/01/1998
141 *
142 * WD7000 driver now work on kernels >= 2.1.x
143 *
144 *
145 * 12/31/2001 - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
146 *
147 * use host->host_lock, not io_request_lock, cleanups
148 *
149 * 2002/10/04 - Alan Cox <alan@lxorguk.ukuu.org.uk>
150 *
151 * Use dev_id for interrupts, kill __func__ pasting
152 * Add a lock for the scb pool, clean up all other cli/sti usage stuff
153 * Use the adapter lock for the other places we had the cli's
154 *
155 * 2002/10/06 - Alan Cox <alan@lxorguk.ukuu.org.uk>
156 *
157 * Switch to new style error handling
158 * Clean up delay to udelay, and yielding sleeps
159 * Make host reset actually reset the card
160 * Make everything static
161 *
162 * 2003/02/12 - Christoph Hellwig <hch@infradead.org>
163 *
164 * Cleaned up host template definition
165 * Removed now obsolete wd7000.h
166 */
167
168#include <linux/delay.h>
169#include <linux/module.h>
170#include <linux/interrupt.h>
171#include <linux/kernel.h>
172#include <linux/types.h>
173#include <linux/string.h>
174#include <linux/spinlock.h>
175#include <linux/ioport.h>
176#include <linux/proc_fs.h>
177#include <linux/blkdev.h>
178#include <linux/init.h>
179#include <linux/stat.h>
180#include <linux/io.h>
181
182#include <asm/dma.h>
183
184#include <scsi/scsi.h>
185#include <scsi/scsi_cmnd.h>
186#include <scsi/scsi_device.h>
187#include <scsi/scsi_host.h>
188#include <scsi/scsicam.h>
189
190
191#undef WD7000_DEBUG /* general debug */
192#ifdef WD7000_DEBUG
193#define dprintk printk
194#else
195#define dprintk no_printk
196#endif
197
198/*
199 * Mailbox structure sizes.
200 * I prefer to keep the number of ICMBs much larger than the number of
201 * OGMBs. OGMBs are used very quickly by the driver to start one or
202 * more commands, while ICMBs are used by the host adapter per command.
203 */
204#define OGMB_CNT 16
205#define ICMB_CNT 32
206
207/*
208 * Scb's are shared by all active adapters. So, if they all become busy,
209 * callers may be made to wait in alloc_scbs for them to free. That can
210 * be avoided by setting MAX_SCBS to NUM_CONFIG * WD7000_Q. If you'd
211 * rather conserve memory, use a smaller number (> 0, of course) - things
212 * will should still work OK.
213 */
214#define MAX_SCBS 32
215
216/*
217 * In this version, sg_tablesize now defaults to WD7000_SG, and will
218 * be set to SG_NONE for older boards. This is the reverse of the
219 * previous default, and was changed so that the driver-level
220 * scsi_host_template would reflect the driver's support for scatter/
221 * gather.
222 *
223 * Also, it has been reported that boards at Revision 6 support scatter/
224 * gather, so the new definition of an "older" board has been changed
225 * accordingly.
226 */
227#define WD7000_Q 16
228#define WD7000_SG 16
229
230
231/*
232 * WD7000-specific mailbox structure
233 *
234 */
235typedef volatile struct mailbox {
236 unchar status;
237 unchar scbptr[3]; /* SCSI-style - MSB first (big endian) */
238} Mailbox;
239
240/*
241 * This structure should contain all per-adapter global data. I.e., any
242 * new global per-adapter data should put in here.
243 */
244typedef struct adapter {
245 struct Scsi_Host *sh; /* Pointer to Scsi_Host structure */
246 int iobase; /* This adapter's I/O base address */
247 int irq; /* This adapter's IRQ level */
248 int dma; /* This adapter's DMA channel */
249 int int_counter; /* This adapter's interrupt counter */
250 int bus_on; /* This adapter's BUS_ON time */
251 int bus_off; /* This adapter's BUS_OFF time */
252 struct { /* This adapter's mailboxes */
253 Mailbox ogmb[OGMB_CNT]; /* Outgoing mailboxes */
254 Mailbox icmb[ICMB_CNT]; /* Incoming mailboxes */
255 } mb;
256 int next_ogmb; /* to reduce contention at mailboxes */
257 unchar control; /* shadows CONTROL port value */
258 unchar rev1, rev2; /* filled in by wd7000_revision */
259} Adapter;
260
261/*
262 * (linear) base address for ROM BIOS
263 */
264static const long wd7000_biosaddr[] = {
265 0xc0000, 0xc2000, 0xc4000, 0xc6000, 0xc8000, 0xca000, 0xcc000, 0xce000,
266 0xd0000, 0xd2000, 0xd4000, 0xd6000, 0xd8000, 0xda000, 0xdc000, 0xde000
267};
268#define NUM_ADDRS ARRAY_SIZE(wd7000_biosaddr)
269
270static const unsigned short wd7000_iobase[] = {
271 0x0300, 0x0308, 0x0310, 0x0318, 0x0320, 0x0328, 0x0330, 0x0338,
272 0x0340, 0x0348, 0x0350, 0x0358, 0x0360, 0x0368, 0x0370, 0x0378,
273 0x0380, 0x0388, 0x0390, 0x0398, 0x03a0, 0x03a8, 0x03b0, 0x03b8,
274 0x03c0, 0x03c8, 0x03d0, 0x03d8, 0x03e0, 0x03e8, 0x03f0, 0x03f8
275};
276#define NUM_IOPORTS ARRAY_SIZE(wd7000_iobase)
277
278static const short wd7000_irq[] = { 3, 4, 5, 7, 9, 10, 11, 12, 14, 15 };
279#define NUM_IRQS ARRAY_SIZE(wd7000_irq)
280
281static const short wd7000_dma[] = { 5, 6, 7 };
282#define NUM_DMAS ARRAY_SIZE(wd7000_dma)
283
284/*
285 * The following is set up by wd7000_detect, and used thereafter for
286 * proc and other global ookups
287 */
288
289#define UNITS 8
290static struct Scsi_Host *wd7000_host[UNITS];
291
292#define BUS_ON 64 /* x 125ns = 8000ns (BIOS default) */
293#define BUS_OFF 15 /* x 125ns = 1875ns (BIOS default) */
294
295/*
296 * Standard Adapter Configurations - used by wd7000_detect
297 */
298typedef struct {
299 short irq; /* IRQ level */
300 short dma; /* DMA channel */
301 unsigned iobase; /* I/O base address */
302 short bus_on; /* Time that WD7000 spends on the AT-bus when */
303 /* transferring data. BIOS default is 8000ns. */
304 short bus_off; /* Time that WD7000 spends OFF THE BUS after */
305 /* while it is transferring data. */
306 /* BIOS default is 1875ns */
307} Config;
308
309/*
310 * Add here your configuration...
311 */
312static Config configs[] = {
313 {15, 6, 0x350, BUS_ON, BUS_OFF}, /* defaults for single adapter */
314 {11, 5, 0x320, BUS_ON, BUS_OFF}, /* defaults for second adapter */
315 {7, 6, 0x350, BUS_ON, BUS_OFF}, /* My configuration (Zaga) */
316 {-1, -1, 0x0, BUS_ON, BUS_OFF} /* Empty slot */
317};
318#define NUM_CONFIGS ARRAY_SIZE(configs)
319
320/*
321 * The following list defines strings to look for in the BIOS that identify
322 * it as the WD7000-FASST2 SST BIOS. I suspect that something should be
323 * added for the Future Domain version.
324 */
325typedef struct signature {
326 const char *sig; /* String to look for */
327 unsigned long ofs; /* offset from BIOS base address */
328 unsigned len; /* length of string */
329} Signature;
330
331static const Signature signatures[] = {
332 {"SSTBIOS", 0x0000d, 7} /* "SSTBIOS" @ offset 0x0000d */
333};
334#define NUM_SIGNATURES ARRAY_SIZE(signatures)
335
336
337/*
338 * I/O Port Offsets and Bit Definitions
339 * 4 addresses are used. Those not defined here are reserved.
340 */
341#define ASC_STAT 0 /* Status, Read */
342#define ASC_COMMAND 0 /* Command, Write */
343#define ASC_INTR_STAT 1 /* Interrupt Status, Read */
344#define ASC_INTR_ACK 1 /* Acknowledge, Write */
345#define ASC_CONTROL 2 /* Control, Write */
346
347/*
348 * ASC Status Port
349 */
350#define INT_IM 0x80 /* Interrupt Image Flag */
351#define CMD_RDY 0x40 /* Command Port Ready */
352#define CMD_REJ 0x20 /* Command Port Byte Rejected */
353#define ASC_INIT 0x10 /* ASC Initialized Flag */
354#define ASC_STATMASK 0xf0 /* The lower 4 Bytes are reserved */
355
356/*
357 * COMMAND opcodes
358 *
359 * Unfortunately, I have no idea how to properly use some of these commands,
360 * as the OEM manual does not make it clear. I have not been able to use
361 * enable/disable unsolicited interrupts or the reset commands with any
362 * discernible effect whatsoever. I think they may be related to certain
363 * ICB commands, but again, the OEM manual doesn't make that clear.
364 */
365#define NO_OP 0 /* NO-OP toggles CMD_RDY bit in ASC_STAT */
366#define INITIALIZATION 1 /* initialization (10 bytes) */
367#define DISABLE_UNS_INTR 2 /* disable unsolicited interrupts */
368#define ENABLE_UNS_INTR 3 /* enable unsolicited interrupts */
369#define INTR_ON_FREE_OGMB 4 /* interrupt on free OGMB */
370#define SOFT_RESET 5 /* SCSI bus soft reset */
371#define HARD_RESET_ACK 6 /* SCSI bus hard reset acknowledge */
372#define START_OGMB 0x80 /* start command in OGMB (n) */
373#define SCAN_OGMBS 0xc0 /* start multiple commands, signature (n) */
374 /* where (n) = lower 6 bits */
375/*
376 * For INITIALIZATION:
377 */
/* 10-byte block written to the command port for opcode INITIALIZATION. */
typedef struct initCmd {
	unchar op;		/* command opcode (= 1) */
	unchar ID;		/* Adapter's SCSI ID */
	unchar bus_on;		/* Bus on time, x 125ns (see below) */
	unchar bus_off;		/* Bus off time, "" "" */
	unchar rsvd;		/* Reserved */
	unchar mailboxes[3];	/* Address of Mailboxes, MSB first */
	unchar ogmbs;		/* Number of outgoing MBs, max 64, 0,1 = 1 */
	unchar icmbs;		/* Number of incoming MBs, "" "" */
} InitCmd;
388
389/*
390 * Interrupt Status Port - also returns diagnostic codes at ASC reset
391 *
392 * if msb is zero, the lower bits are diagnostic status
393 * Diagnostics:
394 * 01 No diagnostic error occurred
395 * 02 RAM failure
396 * 03 FIFO R/W failed
397 * 04 SBIC register read/write failed
398 * 05 Initialization D-FF failed
399 * 06 Host IRQ D-FF failed
400 * 07 ROM checksum error
401 * Interrupt status (bitwise):
402 * 10NNNNNN outgoing mailbox NNNNNN is free
403 * 11NNNNNN incoming mailbox NNNNNN needs service
404 */
405#define MB_INTR 0xC0 /* Mailbox Service possible/required */
406#define IMB_INTR 0x40 /* 1 Incoming / 0 Outgoing */
407#define MB_MASK 0x3f /* mask for mailbox number */
408
409/*
410 * CONTROL port bits
411 */
412#define INT_EN 0x08 /* Interrupt Enable */
413#define DMA_EN 0x04 /* DMA Enable */
414#define SCSI_RES 0x02 /* SCSI Reset */
415#define ASC_RES 0x01 /* ASC Reset */
416
417/*
418 * Driver data structures:
419 * - mb and scbs are required for interfacing with the host adapter.
420 * An SCB has extra fields not visible to the adapter; mb's
421 * _cannot_ do this, since the adapter assumes they are contiguous in
422 * memory, 4 bytes each, with ICMBs following OGMBs, and uses this fact
423 * to access them.
424 * - An icb is for host-only (non-SCSI) commands. ICBs are 16 bytes each;
425 * the additional bytes are used only by the driver.
426 * - For now, a pool of SCBs are kept in global storage by this driver,
427 * and are allocated and freed as needed.
428 *
429 * The 7000-FASST2 marks OGMBs empty as soon as it has _started_ a command,
430 * not when it has finished. Since the SCB must be around for completion,
431 * problems arise when SCBs correspond to OGMBs, which may be reallocated
432 * earlier (or delayed unnecessarily until a command completes).
433 * Mailboxes are used as transient data structures, simply for
434 * carrying SCB addresses to/from the 7000-FASST2.
435 *
436 * Note also since SCBs are not "permanently" associated with mailboxes,
437 * there is no need to keep a global list of scsi_cmnd pointers indexed
438 * by OGMB. Again, SCBs reference their scsi_cmnds directly, so mailbox
439 * indices need not be involved.
440 */
441
442/*
443 * WD7000-specific scatter/gather element structure
444 */
/* One scatter/gather element: 24-bit length and 24-bit bus address. */
typedef struct sgb {
	unchar len[3];		/* segment length, MSB first */
	unchar ptr[3];		/* Also SCSI-style - MSB first */
} Sgb;
449
/*
 * Command Control Block.  The fields up to the "end of hardware SCB"
 * marker are the adapter-visible layout; everything after it is
 * driver-private bookkeeping the hardware never sees.
 */
typedef struct scb {		/* Command Control Block 5.4.1 */
	unchar op;		/* Command Control Block Operation Code:
				 * op=0: outbound data transfer, length is checked
				 * op=1: inbound data transfer, length is checked */
	unchar idlun;		/* op=0,2:Target Id, op=1:Initiator Id,
				 * low bits: Logical Unit Number */
	unchar cdb[12];		/* SCSI Command Block */
	volatile unchar status;	/* SCSI Return Status */
	volatile unchar vue;	/* Vendor Unique Error Code */
	unchar maxlen[3];	/* Maximum Data Transfer Length */
	unchar dataptr[3];	/* SCSI Data Block Pointer */
	unchar linkptr[3];	/* Next Command Link Pointer */
	unchar direc;		/* Transfer Direction */
	unchar reserved2[6];	/* SCSI Command Descriptor Block */
				/* end of hardware SCB */
	struct scsi_cmnd *SCpnt;/* scsi_cmnd using this SCB */
	Sgb sgb[WD7000_SG];	/* Scatter/gather list for this SCB */
	Adapter *host;		/* host adapter */
	struct scb *next;	/* for lists of scbs */
} Scb;
470
471/*
472 * This driver is written to allow host-only commands to be executed.
473 * These use a 16-byte block called an ICB. The format is extended by the
474 * driver to 18 bytes, to support the status returned in the ICMB and
475 * an execution phase code.
476 *
477 * There are other formats besides these; these are the ones I've tried
478 * to use. Formats for some of the defined ICB opcodes are not defined
479 * (notably, get/set unsolicited interrupt status) in my copy of the OEM
480 * manual, and others are ambiguous/hard to follow.
481 */
482#define ICB_OP_MASK 0x80 /* distinguishes scbs from icbs */
483#define ICB_OP_OPEN_RBUF 0x80 /* open receive buffer */
484#define ICB_OP_RECV_CMD 0x81 /* receive command from initiator */
485#define ICB_OP_RECV_DATA 0x82 /* receive data from initiator */
486#define ICB_OP_RECV_SDATA 0x83 /* receive data with status from init. */
487#define ICB_OP_SEND_DATA 0x84 /* send data with status to initiator */
488#define ICB_OP_SEND_STAT 0x86 /* send command status to initiator */
489 /* 0x87 is reserved */
490#define ICB_OP_READ_INIT 0x88 /* read initialization bytes */
491#define ICB_OP_READ_ID 0x89 /* read adapter's SCSI ID */
492#define ICB_OP_SET_UMASK 0x8A /* set unsolicited interrupt mask */
493#define ICB_OP_GET_UMASK 0x8B /* read unsolicited interrupt mask */
494#define ICB_OP_GET_REVISION 0x8C /* read firmware revision level */
495#define ICB_OP_DIAGNOSTICS 0x8D /* execute diagnostics */
496#define ICB_OP_SET_EPARMS 0x8E /* set execution parameters */
497#define ICB_OP_GET_EPARMS 0x8F /* read execution parameters */
498
/* ICB_OP_RECV_CMD / RECV_DATA / RECV_SDATA layout (target-mode receive). */
typedef struct icbRecvCmd {
	unchar op;
	unchar IDlun;		/* Initiator SCSI ID/lun */
	unchar len[3];		/* command buffer length */
	unchar ptr[3];		/* command buffer address */
	unchar rsvd[7];		/* reserved */
	volatile unchar vue;	/* vendor-unique error code */
	volatile unchar status;	/* returned (icmb) status */
	volatile unchar phase;	/* used by interrupt handler */
} IcbRecvCmd;

/* ICB_OP_SEND_STAT layout: report completion status to an initiator. */
typedef struct icbSendStat {
	unchar op;
	unchar IDlun;		/* Target SCSI ID/lun */
	unchar stat;		/* (outgoing) completion status byte 1 */
	unchar rsvd[12];	/* reserved */
	volatile unchar vue;	/* vendor-unique error code */
	volatile unchar status;	/* returned (icmb) status */
	volatile unchar phase;	/* used by interrupt handler */
} IcbSendStat;

/* ICB_OP_GET_REVISION layout: firmware revision comes back in-place. */
typedef struct icbRevLvl {
	unchar op;
	volatile unchar primary;	/* primary revision level (returned) */
	volatile unchar secondary;	/* secondary revision level (returned) */
	unchar rsvd[12];	/* reserved */
	volatile unchar vue;	/* vendor-unique error code */
	volatile unchar status;	/* returned (icmb) status */
	volatile unchar phase;	/* used by interrupt handler */
} IcbRevLvl;

/* ICB_OP_SET/GET_UMASK layout - format undocumented in the OEM manual. */
typedef struct icbUnsMask {	/* I'm totally guessing here */
	unchar op;
	volatile unchar mask[14];	/* mask bits */
#if 0
	unchar rsvd[12];	/* reserved */
#endif
	volatile unchar vue;	/* vendor-unique error code */
	volatile unchar status;	/* returned (icmb) status */
	volatile unchar phase;	/* used by interrupt handler */
} IcbUnsMask;

/* ICB_OP_DIAGNOSTICS layout: run a self-test using the given buffer. */
typedef struct icbDiag {
	unchar op;
	unchar type;		/* diagnostics type code (0-3) */
	unchar len[3];		/* buffer length */
	unchar ptr[3];		/* buffer address */
	unchar rsvd[7];		/* reserved */
	volatile unchar vue;	/* vendor-unique error code */
	volatile unchar status;	/* returned (icmb) status */
	volatile unchar phase;	/* used by interrupt handler */
} IcbDiag;

#define ICB_DIAG_POWERUP 0	/* Power-up diags only */
#define ICB_DIAG_WALKING 1	/* walking 1's pattern */
#define ICB_DIAG_DMA 2		/* DMA - system memory diags */
#define ICB_DIAG_FULL 3		/* do both 1 & 2 */

/* ICB_OP_SET/GET_EPARMS layout: read or write execution parameters. */
typedef struct icbParms {
	unchar op;
	unchar rsvd1;		/* reserved */
	unchar len[3];		/* parms buffer length */
	unchar ptr[3];		/* parms buffer address */
	unchar idx[2];		/* index (MSB-LSB) */
	unchar rsvd2[5];	/* reserved */
	volatile unchar vue;	/* vendor-unique error code */
	volatile unchar status;	/* returned (icmb) status */
	volatile unchar phase;	/* used by interrupt handler */
} IcbParms;

/* Generic view: opcode plus 14 format-specific bytes plus the common tail. */
typedef struct icbAny {
	unchar op;
	unchar data[14];	/* format-specific data */
	volatile unchar vue;	/* vendor-unique error code */
	volatile unchar status;	/* returned (icmb) status */
	volatile unchar phase;	/* used by interrupt handler */
} IcbAny;

/* All ICB formats share the 16-byte hardware block + 2 driver bytes. */
typedef union icb {
	unchar op;		/* ICB opcode */
	IcbRecvCmd recv_cmd;	/* format for receive command */
	IcbSendStat send_stat;	/* format for send status */
	IcbRevLvl rev_lvl;	/* format for get revision level */
	IcbDiag diag;		/* format for execute diagnostics */
	IcbParms eparms;	/* format for get/set exec parms */
	IcbAny icb;		/* generic format */
	unchar data[18];
} Icb;
587
#ifdef MODULE
/* Module-load equivalent of the "wd7000=" boot option string. */
static char *wd7000;
module_param(wd7000, charp, 0);
#endif

/*
 * Driver SCB structure pool.
 *
 * The SCBs declared here are shared by all host adapters; hence, this
 * structure is not part of the Adapter structure.
 */
static Scb scbs[MAX_SCBS];
static Scb *scbfree;		/* free list */
static int freescbs = MAX_SCBS;	/* free list counter */
static spinlock_t scbpool_lock;	/* guards the scb free list and count */
603
604/*
605 * END of data/declarations - code follows.
606 */
607static void __init setup_error(char *mesg, int *ints)
608{
609 if (ints[0] == 3)
610 printk(KERN_ERR "wd7000_setup: \"wd7000=%d,%d,0x%x\" -> %s\n", ints[1], ints[2], ints[3], mesg);
611 else if (ints[0] == 4)
612 printk(KERN_ERR "wd7000_setup: \"wd7000=%d,%d,0x%x,%d\" -> %s\n", ints[1], ints[2], ints[3], ints[4], mesg);
613 else
614 printk(KERN_ERR "wd7000_setup: \"wd7000=%d,%d,0x%x,%d,%d\" -> %s\n", ints[1], ints[2], ints[3], ints[4], ints[5], mesg);
615}
616
617
618/*
619 * Note: You can now set these options from the kernel's "command line".
620 * The syntax is:
621 *
622 * wd7000=<IRQ>,<DMA>,<IO>[,<BUS_ON>[,<BUS_OFF>]]
623 *
624 * , where BUS_ON and BUS_OFF are in nanoseconds. BIOS default values
625 * are 8000ns for BUS_ON and 1875ns for BUS_OFF.
626 * eg:
627 * wd7000=7,6,0x350
628 *
629 * will configure the driver for a WD-7000 controller
 * using IRQ 7 with a DMA channel 6, at IO base address 0x350.
631 */
632static int __init wd7000_setup(char *str)
633{
634 static short wd7000_card_num; /* .bss will zero this */
635 short i;
636 int ints[6];
637
638 (void) get_options(str, ARRAY_SIZE(ints), ints);
639
640 if (wd7000_card_num >= NUM_CONFIGS) {
641 printk(KERN_ERR "%s: Too many \"wd7000=\" configurations in " "command line!\n", __func__);
642 return 0;
643 }
644
645 if ((ints[0] < 3) || (ints[0] > 5)) {
646 printk(KERN_ERR "%s: Error in command line! " "Usage: wd7000=<IRQ>,<DMA>,IO>[,<BUS_ON>" "[,<BUS_OFF>]]\n", __func__);
647 } else {
648 for (i = 0; i < NUM_IRQS; i++)
649 if (ints[1] == wd7000_irq[i])
650 break;
651
652 if (i == NUM_IRQS) {
653 setup_error("invalid IRQ.", ints);
654 return 0;
655 } else
656 configs[wd7000_card_num].irq = ints[1];
657
658 for (i = 0; i < NUM_DMAS; i++)
659 if (ints[2] == wd7000_dma[i])
660 break;
661
662 if (i == NUM_DMAS) {
663 setup_error("invalid DMA channel.", ints);
664 return 0;
665 } else
666 configs[wd7000_card_num].dma = ints[2];
667
668 for (i = 0; i < NUM_IOPORTS; i++)
669 if (ints[3] == wd7000_iobase[i])
670 break;
671
672 if (i == NUM_IOPORTS) {
673 setup_error("invalid I/O base address.", ints);
674 return 0;
675 } else
676 configs[wd7000_card_num].iobase = ints[3];
677
678 if (ints[0] > 3) {
679 if ((ints[4] < 500) || (ints[4] > 31875)) {
680 setup_error("BUS_ON value is out of range (500" " to 31875 nanoseconds)!", ints);
681 configs[wd7000_card_num].bus_on = BUS_ON;
682 } else
683 configs[wd7000_card_num].bus_on = ints[4] / 125;
684 } else
685 configs[wd7000_card_num].bus_on = BUS_ON;
686
687 if (ints[0] > 4) {
688 if ((ints[5] < 500) || (ints[5] > 31875)) {
689 setup_error("BUS_OFF value is out of range (500" " to 31875 nanoseconds)!", ints);
690 configs[wd7000_card_num].bus_off = BUS_OFF;
691 } else
692 configs[wd7000_card_num].bus_off = ints[5] / 125;
693 } else
694 configs[wd7000_card_num].bus_off = BUS_OFF;
695
696 if (wd7000_card_num) {
697 for (i = 0; i < (wd7000_card_num - 1); i++) {
698 int j = i + 1;
699
700 for (; j < wd7000_card_num; j++)
701 if (configs[i].irq == configs[j].irq) {
702 setup_error("duplicated IRQ!", ints);
703 return 0;
704 }
705 if (configs[i].dma == configs[j].dma) {
706 setup_error("duplicated DMA " "channel!", ints);
707 return 0;
708 }
709 if (configs[i].iobase == configs[j].iobase) {
710 setup_error("duplicated I/O " "base address!", ints);
711 return 0;
712 }
713 }
714 }
715
716 dprintk(KERN_DEBUG "wd7000_setup: IRQ=%d, DMA=%d, I/O=0x%x, "
717 "BUS_ON=%dns, BUS_OFF=%dns\n", configs[wd7000_card_num].irq, configs[wd7000_card_num].dma, configs[wd7000_card_num].iobase, configs[wd7000_card_num].bus_on * 125, configs[wd7000_card_num].bus_off * 125);
718
719 wd7000_card_num++;
720 }
721 return 1;
722}
723
724__setup("wd7000=", wd7000_setup);
725
726static inline void any2scsi(unchar * scsi, int any)
727{
728 *scsi++ = (unsigned)any >> 16;
729 *scsi++ = (unsigned)any >> 8;
730 *scsi++ = any;
731}
732
733static inline int scsi2int(unchar * scsi)
734{
735 return (scsi[0] << 16) | (scsi[1] << 8) | scsi[2];
736}
737
/*
 * Set the interrupt-enable bit in the shadow control byte and write it
 * to the board; host->control must always mirror the last value written
 * to ASC_CONTROL, since the port is write-only from our side.
 */
static inline void wd7000_enable_intr(Adapter * host)
{
	host->control |= INT_EN;
	outb(host->control, host->iobase + ASC_CONTROL);
}
743
744
/*
 * Enable DMA on the board (DMA_EN in the shadowed control register) and
 * put the ISA DMA controller channel into cascade mode so the adapter
 * can master the bus itself.
 */
static inline void wd7000_enable_dma(Adapter * host)
{
	unsigned long flags;
	host->control |= DMA_EN;
	outb(host->control, host->iobase + ASC_CONTROL);

	/* The ISA DMA controller registers are shared - take the DMA lock. */
	flags = claim_dma_lock();
	set_dma_mode(host->dma, DMA_MODE_CASCADE);
	enable_dma(host->dma);
	release_dma_lock(flags);

}
757
758
759#define WAITnexttimeout 200 /* 2 seconds */
760
761static inline short WAIT(unsigned port, unsigned mask, unsigned allof, unsigned noneof)
762{
763 unsigned WAITbits;
764 unsigned long WAITtimeout = jiffies + WAITnexttimeout;
765
766 while (time_before_eq(jiffies, WAITtimeout)) {
767 WAITbits = inb(port) & mask;
768
769 if (((WAITbits & allof) == allof) && ((WAITbits & noneof) == 0))
770 return (0);
771 }
772
773 return (1);
774}
775
776
/*
 * Feed a `len`-byte command to the board's command port, one byte at a
 * time.  Each byte waits for CMD_RDY first and is re-sent for as long
 * as the board flags it rejected (CMD_REJ).  Returns 1 on success, 0 if
 * the port never became ready at all.
 */
static inline int command_out(Adapter * host, unchar * cmd, int len)
{
	if (!WAIT(host->iobase + ASC_STAT, ASC_STATMASK, CMD_RDY, 0)) {
		while (len--) {
			do {
				outb(*cmd, host->iobase + ASC_COMMAND);
				/* wait for the board to consume the byte */
				WAIT(host->iobase + ASC_STAT, ASC_STATMASK, CMD_RDY, 0);
			} while (inb(host->iobase + ASC_STAT) & CMD_REJ);

			cmd++;
		}

		return (1);
	}

	printk(KERN_WARNING "wd7000 command_out: WAIT failed(%d)\n", len + 1);

	return (0);
}
796
797
798/*
799 * This version of alloc_scbs is in preparation for supporting multiple
800 * commands per lun and command chaining, by queueing pending commands.
801 * We will need to allocate Scbs in blocks since they will wait to be
802 * executed so there is the possibility of deadlock otherwise.
803 * Also, to keep larger requests from being starved by smaller requests,
804 * we limit access to this routine with an internal busy flag, so that
805 * the satisfiability of a request is not dependent on the size of the
806 * request.
807 */
/*
 * Take `needed` Scbs off the shared free list, busy-waiting (up to
 * ~2 seconds per attempt) for enough to become available.  Returns the
 * first Scb removed; only the last removed Scb's `next` is cleared.
 *
 * host->host_lock is dropped while waiting and re-acquired on the
 * success path, so the caller's lock state is preserved on success.
 *
 * NOTE(review): on timeout this returns NULL *without* re-acquiring
 * host->host_lock, and the caller (wd7000_queuecommand_lck) neither
 * checks for NULL nor restores the lock - verify before relying on
 * the timeout path.
 */
static inline Scb *alloc_scbs(struct Scsi_Host *host, int needed)
{
	Scb *scb, *p = NULL;
	unsigned long flags;
	unsigned long timeout = jiffies + WAITnexttimeout;
	unsigned long now;
	int i;

	if (needed <= 0)
		return (NULL);	/* sanity check */

	spin_unlock_irq(host->host_lock);

      retry:
	while (freescbs < needed) {
		timeout = jiffies + WAITnexttimeout;
		do {
			/* FIXME: can we actually just yield here ?? */
			for (now = jiffies; now == jiffies;)
				cpu_relax();	/* wait a jiffy */
		} while (freescbs < needed && time_before_eq(jiffies, timeout));
		/*
		 * If we get here with enough free Scbs, we can take them.
		 * Otherwise, we timed out and didn't get enough.
		 */
		if (freescbs < needed) {
			printk(KERN_ERR "wd7000: can't get enough free SCBs.\n");
			return (NULL);
		}
	}

	/* Take the lock, then check we didn't get beaten, if so try again */
	spin_lock_irqsave(&scbpool_lock, flags);
	if (freescbs < needed) {
		spin_unlock_irqrestore(&scbpool_lock, flags);
		goto retry;
	}

	/* Unlink `needed` Scbs from the head of the free list. */
	scb = scbfree;
	freescbs -= needed;
	for (i = 0; i < needed; i++) {
		p = scbfree;
		scbfree = p->next;
	}
	p->next = NULL;	/* terminate only the last Scb taken */

	spin_unlock_irqrestore(&scbpool_lock, flags);

	spin_lock_irq(host->host_lock);
	return (scb);
}
859
860
861static inline void free_scb(Scb * scb)
862{
863 unsigned long flags;
864
865 spin_lock_irqsave(&scbpool_lock, flags);
866
867 memset(scb, 0, sizeof(Scb));
868 scb->next = scbfree;
869 scbfree = scb;
870 freescbs++;
871
872 spin_unlock_irqrestore(&scbpool_lock, flags);
873}
874
875
876static inline void init_scbs(void)
877{
878 int i;
879
880 spin_lock_init(&scbpool_lock);
881
882 /* This is only ever called before the SCB pool is active */
883
884 scbfree = &(scbs[0]);
885 memset(scbs, 0, sizeof(scbs));
886 for (i = 0; i < MAX_SCBS - 1; i++) {
887 scbs[i].next = &(scbs[i + 1]);
888 scbs[i].SCpnt = NULL;
889 }
890 scbs[MAX_SCBS - 1].next = NULL;
891 scbs[MAX_SCBS - 1].SCpnt = NULL;
892}
893
894
static int mail_out(Adapter * host, Scb * scbptr)
/*
 * Note: this can also be used for ICBs; just cast to the parm type.
 *
 * Post an Scb to the next free outgoing mailbox (round-robin scan
 * starting at host->next_ogmb) and kick the adapter with a START_OGMB
 * command.  Returns 1 if a mailbox was claimed and the command issued,
 * 0 if every OGMB was busy - the caller is expected to retry.
 */
{
	int i, ogmb;
	unsigned long flags;
	unchar start_ogmb;
	Mailbox *ogmbs = host->mb.ogmb;
	int *next_ogmb = &(host->next_ogmb);

	dprintk("wd7000_mail_out: 0x%06lx", (long) scbptr);

	/* We first look for a free outgoing mailbox */
	spin_lock_irqsave(host->sh->host_lock, flags);
	ogmb = *next_ogmb;
	for (i = 0; i < OGMB_CNT; i++) {
		if (ogmbs[ogmb].status == 0) {
			dprintk(" using OGMB 0x%x", ogmb);
			/* claim the mailbox while still holding the lock */
			ogmbs[ogmb].status = 1;
			any2scsi((unchar *) ogmbs[ogmb].scbptr, (int) scbptr);

			*next_ogmb = (ogmb + 1) % OGMB_CNT;
			break;
		} else
			ogmb = (ogmb + 1) % OGMB_CNT;
	}
	spin_unlock_irqrestore(host->sh->host_lock, flags);

	dprintk(", scb is 0x%06lx", (long) scbptr);

	if (i >= OGMB_CNT) {
		/*
		 * Alternatively, we might issue the "interrupt on free OGMB",
		 * and sleep, but it must be ensured that it isn't the init
		 * task running.  Instead, this version assumes that the caller
		 * will be persistent, and try again.  Since it's the adapter
		 * that marks OGMB's free, waiting even with interrupts off
		 * should work, since they are freed very quickly in most cases.
		 */
		dprintk(", no free OGMBs.\n");
		return (0);
	}

	wd7000_enable_intr(host);

	/* START_OGMB has the mailbox number in its low 6 bits. */
	start_ogmb = START_OGMB | ogmb;
	command_out(host, &start_ogmb, 1);

	dprintk(", awaiting interrupt.\n");

	return (1);
}
948
949
950static int make_code(unsigned hosterr, unsigned scsierr)
951{
952#ifdef WD7000_DEBUG
953 int in_error = hosterr;
954#endif
955
956 switch ((hosterr >> 8) & 0xff) {
957 case 0: /* Reserved */
958 hosterr = DID_ERROR;
959 break;
960 case 1: /* Command Complete, no errors */
961 hosterr = DID_OK;
962 break;
963 case 2: /* Command complete, error logged in scb status (scsierr) */
964 hosterr = DID_OK;
965 break;
966 case 4: /* Command failed to complete - timeout */
967 hosterr = DID_TIME_OUT;
968 break;
969 case 5: /* Command terminated; Bus reset by external device */
970 hosterr = DID_RESET;
971 break;
972 case 6: /* Unexpected Command Received w/ host as target */
973 hosterr = DID_BAD_TARGET;
974 break;
975 case 80: /* Unexpected Reselection */
976 case 81: /* Unexpected Selection */
977 hosterr = DID_BAD_INTR;
978 break;
979 case 82: /* Abort Command Message */
980 hosterr = DID_ABORT;
981 break;
982 case 83: /* SCSI Bus Software Reset */
983 case 84: /* SCSI Bus Hardware Reset */
984 hosterr = DID_RESET;
985 break;
986 default: /* Reserved */
987 hosterr = DID_ERROR;
988 }
989#ifdef WD7000_DEBUG
990 if (scsierr || hosterr)
991 dprintk("\nSCSI command error: SCSI 0x%02x host 0x%04x return %d\n", scsierr, in_error, hosterr);
992#endif
993 return (scsierr | (hosterr << 16));
994}
995
996#define wd7000_intr_ack(host) outb (0, host->iobase + ASC_INTR_ACK)
997
998
/*
 * Interrupt handler.  Classifies the interrupt from the status ports:
 *  - phantom interrupts (INT_IM clear) are dropped;
 *  - "outgoing mailbox now free" needs no action here;
 *  - an incoming-mailbox interrupt carries the bus address of a
 *    completed Scb/Icb; for an Icb we record status and clear `phase`
 *    (pollers wait on that), for an Scb we build the result code and,
 *    once all its Scbs are accounted for, complete the scsi_cmnd.
 * All paths acknowledge the interrupt before returning.
 */
static irqreturn_t wd7000_intr(int irq, void *dev_id)
{
	Adapter *host = (Adapter *) dev_id;
	int flag, icmb, errstatus, icmb_status;
	int host_error, scsi_error;
	Scb *scb;		/* for SCSI commands */
	IcbAny *icb;		/* for host commands */
	struct scsi_cmnd *SCpnt;
	Mailbox *icmbs = host->mb.icmb;
	unsigned long flags;

	spin_lock_irqsave(host->sh->host_lock, flags);
	host->int_counter++;

	dprintk("wd7000_intr: irq = %d, host = 0x%06lx\n", irq, (long) host);

	flag = inb(host->iobase + ASC_INTR_STAT);

	dprintk("wd7000_intr: intr stat = 0x%02x\n", flag);

	if (!(inb(host->iobase + ASC_STAT) & INT_IM)) {
		/* NB: these are _very_ possible if IRQ 15 is being used, since
		 * it's the "garbage collector" on the 2nd 8259 PIC. Specifically,
		 * any interrupt signal into the 8259 which can't be identified
		 * comes out as 7 from the 8259, which is 15 to the host. Thus, it
		 * is a good thing the WD7000 has an interrupt status port, so we
		 * can sort these out. Otherwise, electrical noise and other such
		 * problems would be indistinguishable from valid interrupts...
		 */
		dprintk("wd7000_intr: phantom interrupt...\n");
		goto ack;
	}

	/* Not a mailbox interrupt at all - nothing to service. */
	if (!(flag & MB_INTR))
		goto ack;

	/* The interrupt is for a mailbox */
	if (!(flag & IMB_INTR)) {
		dprintk("wd7000_intr: free outgoing mailbox\n");
		/*
		 * If sleep_on() and the "interrupt on free OGMB" command are
		 * used in mail_out(), wake_up() should correspondingly be called
		 * here.  For now, we don't need to do anything special.
		 */
		goto ack;
	}

	/* The interrupt is for an incoming mailbox */
	icmb = flag & MB_MASK;
	icmb_status = icmbs[icmb].status;
	if (icmb_status & 0x80) {	/* unsolicited - result in ICMB */
		dprintk("wd7000_intr: unsolicited interrupt 0x%02x\n", icmb_status);
		goto ack;
	}

	/* Aaaargh! (Zaga) - translate the ICMB's bus address back into a
	 * kernel-virtual Scb pointer, then release the mailbox slot. */
	scb = isa_bus_to_virt(scsi2int((unchar *) icmbs[icmb].scbptr));
	icmbs[icmb].status = 0;
	if (scb->op & ICB_OP_MASK) {	/* an ICB is done */
		icb = (IcbAny *) scb;
		icb->status = icmb_status;
		icb->phase = 0;	/* pollers spin on phase becoming 0 */
		goto ack;
	}

	SCpnt = scb->SCpnt;
	if (--(SCpnt->SCp.phase) <= 0) {	/* all scbs are done */
		host_error = scb->vue | (icmb_status << 8);
		scsi_error = scb->status;
		errstatus = make_code(host_error, scsi_error);
		SCpnt->result = errstatus;

		free_scb(scb);

		SCpnt->scsi_done(SCpnt);
	}

      ack:
	dprintk("wd7000_intr: return from interrupt handler\n");
	wd7000_intr_ack(host);

	spin_unlock_irqrestore(host->sh->host_lock, flags);
	return IRQ_HANDLED;
}
1083
/*
 * Build an Scb for a mid-layer command and mail it to the adapter.
 * With more than one scatter/gather segment the S/G command format is
 * used (op 1, dataptr pointing at the Sgb list); otherwise a plain
 * transfer (op 0).  Completion arrives through wd7000_intr(), which
 * calls scsi_done.
 *
 * NOTE(review): alloc_scbs() can return NULL on timeout, which would
 * be dereferenced unchecked here - and on that path alloc_scbs() also
 * leaves host_lock released.  Confirm before exercising the pool-
 * exhaustion path.
 */
static int wd7000_queuecommand_lck(struct scsi_cmnd *SCpnt,
		void (*done)(struct scsi_cmnd *))
{
	Scb *scb;
	Sgb *sgb;
	unchar *cdb = (unchar *) SCpnt->cmnd;
	unchar idlun;
	short cdblen;
	int nseg;
	Adapter *host = (Adapter *) SCpnt->device->host->hostdata;

	cdblen = SCpnt->cmd_len;
	/* target id in bits 5-7, lun in the low 3 bits */
	idlun = ((SCpnt->device->id << 5) & 0xe0) | (SCpnt->device->lun & 7);
	SCpnt->scsi_done = done;
	SCpnt->SCp.phase = 1;	/* outstanding-Scb count, decremented in the ISR */
	scb = alloc_scbs(SCpnt->device->host, 1);
	scb->idlun = idlun;
	memcpy(scb->cdb, cdb, cdblen);
	scb->direc = 0x40;	/* Disable direction check */

	scb->SCpnt = SCpnt;	/* so we can find stuff later */
	SCpnt->host_scribble = (unchar *) scb;
	scb->host = host;

	nseg = scsi_sg_count(SCpnt);
	if (nseg > 1) {
		struct scatterlist *sg;
		unsigned i;

		dprintk("Using scatter/gather with %d elements.\n", nseg);

		sgb = scb->sgb;
		scb->op = 1;	/* scatter/gather transfer */
		any2scsi(scb->dataptr, (int) sgb);
		any2scsi(scb->maxlen, nseg * sizeof(Sgb));

		scsi_for_each_sg(SCpnt, sg, nseg, i) {
			any2scsi(sgb[i].ptr, isa_page_to_bus(sg_page(sg)) + sg->offset);
			any2scsi(sgb[i].len, sg->length);
		}
	} else {
		scb->op = 0;	/* single data block */
		if (nseg) {
			struct scatterlist *sg = scsi_sglist(SCpnt);
			any2scsi(scb->dataptr, isa_page_to_bus(sg_page(sg)) + sg->offset);
		}
		any2scsi(scb->maxlen, scsi_bufflen(SCpnt));
	}

	/* FIXME: drop lock and yield here ? */

	/* mail_out() fails only while every OGMB is busy; the adapter
	 * frees them quickly, so this spin is normally very short. */
	while (!mail_out(host, scb))
		cpu_relax();	/* keep trying */

	return 0;
}
1140
1141static DEF_SCSI_QCMD(wd7000_queuecommand)
1142
/*
 * Run the board's self-test (`code` is one of the ICB_DIAG_* types) via
 * a DIAGNOSTICS ICB and wait up to 2 seconds for the interrupt handler
 * to clear icb.phase.  Returns 1 on success, 0 on timeout or failure.
 *
 * Uses static icb/buf, so at most one invocation may be in flight;
 * it is only called during adapter init, where that holds.
 */
static int wd7000_diagnostics(Adapter * host, int code)
{
	static IcbDiag icb = { ICB_OP_DIAGNOSTICS };
	static unchar buf[256];
	unsigned long timeout;

	icb.type = code;
	any2scsi(icb.len, sizeof(buf));
	any2scsi(icb.ptr, (int) &buf);
	icb.phase = 1;	/* cleared by wd7000_intr() on completion */
	/*
	 * This routine is only called at init, so there should be OGMBs
	 * available.  I'm assuming so here.  If this is going to
	 * fail, I can just let the timeout catch the failure.
	 */
	mail_out(host, (struct scb *) &icb);
	timeout = jiffies + WAITnexttimeout;	/* wait up to 2 seconds */
	while (icb.phase && time_before(jiffies, timeout)) {
		cpu_relax();	/* wait for completion */
		barrier();
	}

	if (icb.phase) {
		printk("wd7000_diagnostics: timed out.\n");
		return (0);
	}
	if (make_code(icb.vue | (icb.status << 8), 0)) {
		printk("wd7000_diagnostics: failed (0x%02x,0x%02x)\n", icb.vue, icb.status);
		return (0);
	}

	return (1);
}
1176
1177
/*
 * Reset and re-initialize the ASC (not the SCSI bus): pulse ASC_RES,
 * wait for the command port, decode the power-up diagnostic code from
 * the interrupt-status port (1 = OK), then download the INITIALIZATION
 * command describing our mailbox layout.  Returns 0 on success, -1 on
 * any failure.
 */
static int wd7000_adapter_reset(Adapter * host)
{
	InitCmd init_cmd = {
		INITIALIZATION,
		7,		/* the adapter's own SCSI ID */
		host->bus_on,
		host->bus_off,
		0,
		{0, 0, 0},	/* mailbox address - filled in below */
		OGMB_CNT,
		ICMB_CNT
	};
	int diag;
	/*
	 * Reset the adapter - only.  The SCSI bus was initialized at power-up,
	 * and we need to do this just so we control the mailboxes, etc.
	 */
	outb(ASC_RES, host->iobase + ASC_CONTROL);
	udelay(40);		/* reset pulse: this is 40us, only need 25us */
	outb(0, host->iobase + ASC_CONTROL);
	host->control = 0;	/* this must always shadow ASC_CONTROL */

	if (WAIT(host->iobase + ASC_STAT, ASC_STATMASK, CMD_RDY, 0)) {
		printk(KERN_ERR "wd7000_init: WAIT timed out.\n");
		return -1;	/* -1 = not ok */
	}

	/* After reset the intr-status port returns a diagnostic code. */
	if ((diag = inb(host->iobase + ASC_INTR_STAT)) != 1) {
		printk("wd7000_init: ");

		switch (diag) {
		case 2:
			printk(KERN_ERR "RAM failure.\n");
			break;
		case 3:
			printk(KERN_ERR "FIFO R/W failed\n");
			break;
		case 4:
			printk(KERN_ERR "SBIC register R/W failed\n");
			break;
		case 5:
			printk(KERN_ERR "Initialization D-FF failed.\n");
			break;
		case 6:
			printk(KERN_ERR "Host IRQ D-FF failed.\n");
			break;
		case 7:
			printk(KERN_ERR "ROM checksum error.\n");
			break;
		default:
			printk(KERN_ERR "diagnostic code 0x%02Xh received.\n", diag);
		}
		return -1;
	}
	/* Clear mailboxes */
	memset(&(host->mb), 0, sizeof(host->mb));

	/* Execute init command */
	any2scsi((unchar *) & (init_cmd.mailboxes), (int) &(host->mb));
	if (!command_out(host, (unchar *) & init_cmd, sizeof(init_cmd))) {
		printk(KERN_ERR "wd7000_adapter_reset: adapter initialization failed.\n");
		return -1;
	}

	/* ASC_INIT appears in the status port once the init completes. */
	if (WAIT(host->iobase + ASC_STAT, ASC_STATMASK, ASC_INIT, 0)) {
		printk("wd7000_adapter_reset: WAIT timed out.\n");
		return -1;
	}
	return 0;
}
1248
1249static int wd7000_init(Adapter * host)
1250{
1251 if (wd7000_adapter_reset(host) == -1)
1252 return 0;
1253
1254
1255 if (request_irq(host->irq, wd7000_intr, 0, "wd7000", host)) {
1256 printk("wd7000_init: can't get IRQ %d.\n", host->irq);
1257 return (0);
1258 }
1259 if (request_dma(host->dma, "wd7000")) {
1260 printk("wd7000_init: can't get DMA channel %d.\n", host->dma);
1261 free_irq(host->irq, host);
1262 return (0);
1263 }
1264 wd7000_enable_dma(host);
1265 wd7000_enable_intr(host);
1266
1267 if (!wd7000_diagnostics(host, ICB_DIAG_FULL)) {
1268 free_dma(host->dma);
1269 free_irq(host->irq, NULL);
1270 return (0);
1271 }
1272
1273 return (1);
1274}
1275
1276
/*
 * Query the firmware revision via a GET_REVISION ICB and store it in
 * host->rev1/rev2.  Called only from detection; on failure the revision
 * simply remains 0.0, which disables scatter/gather.
 *
 * NOTE(review): unlike wd7000_diagnostics(), this completion wait has
 * no timeout - a board that never answers would hang detection.
 */
static void wd7000_revision(Adapter * host)
{
	static IcbRevLvl icb = { ICB_OP_GET_REVISION };

	icb.phase = 1;	/* cleared by wd7000_intr() on completion */
	/*
	 * Like diagnostics, this is only done at init time, in fact, from
	 * wd7000_detect, so there should be OGMBs available.  If it fails,
	 * the only damage will be that the revision will show up as 0.0,
	 * which in turn means that scatter/gather will be disabled.
	 */
	mail_out(host, (struct scb *) &icb);
	while (icb.phase) {
		cpu_relax();	/* wait for completion */
		barrier();
	}
	host->rev1 = icb.primary;
	host->rev2 = icb.secondary;
}
1296
1297
/*
 * /proc write handler.  The input is accepted (and logged when
 * debugging) but otherwise ignored - the driver currently has no
 * runtime-tunable settings.
 */
static int wd7000_set_info(struct Scsi_Host *host, char *buffer, int length)
{
	dprintk("Buffer = <%.*s>, length = %d\n", length, buffer, length);

	dprintk("Sorry, this function is currently out of order...\n");

	return (length);
}
1308
1309
1310static int wd7000_show_info(struct seq_file *m, struct Scsi_Host *host)
1311{
1312 Adapter *adapter = (Adapter *)host->hostdata;
1313 unsigned long flags;
1314#ifdef WD7000_DEBUG
1315 Mailbox *ogmbs, *icmbs;
1316 short count;
1317#endif
1318
1319 spin_lock_irqsave(host->host_lock, flags);
1320 seq_printf(m, "Host scsi%d: Western Digital WD-7000 (rev %d.%d)\n", host->host_no, adapter->rev1, adapter->rev2);
1321 seq_printf(m, " IO base: 0x%x\n", adapter->iobase);
1322 seq_printf(m, " IRQ: %d\n", adapter->irq);
1323 seq_printf(m, " DMA channel: %d\n", adapter->dma);
1324 seq_printf(m, " Interrupts: %d\n", adapter->int_counter);
1325 seq_printf(m, " BUS_ON time: %d nanoseconds\n", adapter->bus_on * 125);
1326 seq_printf(m, " BUS_OFF time: %d nanoseconds\n", adapter->bus_off * 125);
1327
1328#ifdef WD7000_DEBUG
1329 ogmbs = adapter->mb.ogmb;
1330 icmbs = adapter->mb.icmb;
1331
1332 seq_printf(m, "\nControl port value: 0x%x\n", adapter->control);
1333 seq_puts(m, "Incoming mailbox:\n");
1334 seq_printf(m, " size: %d\n", ICMB_CNT);
1335 seq_puts(m, " queued messages: ");
1336
1337 for (i = count = 0; i < ICMB_CNT; i++)
1338 if (icmbs[i].status) {
1339 count++;
1340 seq_printf(m, "0x%x ", i);
1341 }
1342
1343 seq_puts(m, count ? "\n" : "none\n");
1344
1345 seq_puts(m, "Outgoing mailbox:\n");
1346 seq_printf(m, " size: %d\n", OGMB_CNT);
1347 seq_printf(m, " next message: 0x%x\n", adapter->next_ogmb);
1348 seq_puts(m, " queued messages: ");
1349
1350 for (i = count = 0; i < OGMB_CNT; i++)
1351 if (ogmbs[i].status) {
1352 count++;
1353 seq_printf(m, "0x%x ", i);
1354 }
1355
1356 seq_puts(m, count ? "\n" : "none\n");
1357#endif
1358
1359 spin_unlock_irqrestore(host->host_lock, flags);
1360
1361 return 0;
1362}
1363
1364
1365/*
1366 * Returns the number of adapters this driver is supporting.
1367 *
1368 * The source for hosts.c says to wait to call scsi_register until 100%
1369 * sure about an adapter. We need to do it a little sooner here; we
1370 * need the storage set up by scsi_register before wd7000_init, and
1371 * changing the location of an Adapter structure is more trouble than
1372 * calling scsi_unregister.
1373 *
1374 */
1375
1376static __init int wd7000_detect(struct scsi_host_template *tpnt)
1377{
1378 short present = 0, biosaddr_ptr, sig_ptr, i, pass;
1379 short biosptr[NUM_CONFIGS];
1380 unsigned iobase;
1381 Adapter *host = NULL;
1382 struct Scsi_Host *sh;
1383 int unit = 0;
1384
1385 dprintk("wd7000_detect: started\n");
1386
1387#ifdef MODULE
1388 if (wd7000)
1389 wd7000_setup(wd7000);
1390#endif
1391
1392 for (i = 0; i < UNITS; wd7000_host[i++] = NULL);
1393 for (i = 0; i < NUM_CONFIGS; biosptr[i++] = -1);
1394
1395 tpnt->proc_name = "wd7000";
1396 tpnt->show_info = &wd7000_show_info;
1397 tpnt->write_info = wd7000_set_info;
1398
1399 /*
1400 * Set up SCB free list, which is shared by all adapters
1401 */
1402 init_scbs();
1403
1404 for (pass = 0; pass < NUM_CONFIGS; pass++) {
1405 /*
1406 * First, search for BIOS SIGNATURE...
1407 */
1408 for (biosaddr_ptr = 0; biosaddr_ptr < NUM_ADDRS; biosaddr_ptr++)
1409 for (sig_ptr = 0; sig_ptr < NUM_SIGNATURES; sig_ptr++) {
1410 for (i = 0; i < pass; i++)
1411 if (biosptr[i] == biosaddr_ptr)
1412 break;
1413
1414 if (i == pass) {
1415 void __iomem *biosaddr = ioremap(wd7000_biosaddr[biosaddr_ptr] + signatures[sig_ptr].ofs,
1416 signatures[sig_ptr].len);
1417 short bios_match = 1;
1418
1419 if (biosaddr)
1420 bios_match = check_signature(biosaddr, signatures[sig_ptr].sig, signatures[sig_ptr].len);
1421
1422 iounmap(biosaddr);
1423
1424 if (bios_match)
1425 goto bios_matched;
1426 }
1427 }
1428
1429 bios_matched:
1430 /*
1431 * BIOS SIGNATURE has been found.
1432 */
1433#ifdef WD7000_DEBUG
1434 dprintk("wd7000_detect: pass %d\n", pass + 1);
1435
1436 if (biosaddr_ptr == NUM_ADDRS)
1437 dprintk("WD-7000 SST BIOS not detected...\n");
1438 else
1439 dprintk("WD-7000 SST BIOS detected at 0x%lx: checking...\n", wd7000_biosaddr[biosaddr_ptr]);
1440#endif
1441
1442 if (configs[pass].irq < 0)
1443 continue;
1444
1445 if (unit == UNITS)
1446 continue;
1447
1448 iobase = configs[pass].iobase;
1449
1450 dprintk("wd7000_detect: check IO 0x%x region...\n", iobase);
1451
1452 if (request_region(iobase, 4, "wd7000")) {
1453
1454 dprintk("wd7000_detect: ASC reset (IO 0x%x) ...", iobase);
1455 /*
1456 * ASC reset...
1457 */
1458 outb(ASC_RES, iobase + ASC_CONTROL);
1459 msleep(10);
1460 outb(0, iobase + ASC_CONTROL);
1461
1462 if (WAIT(iobase + ASC_STAT, ASC_STATMASK, CMD_RDY, 0)) {
1463 dprintk("failed!\n");
1464 goto err_release;
1465 } else
1466 dprintk("ok!\n");
1467
1468 if (inb(iobase + ASC_INTR_STAT) == 1) {
1469 /*
1470 * We register here, to get a pointer to the extra space,
1471 * which we'll use as the Adapter structure (host) for
1472 * this adapter. It is located just after the registered
1473 * Scsi_Host structure (sh), and is located by the empty
1474 * array hostdata.
1475 */
1476 sh = scsi_register(tpnt, sizeof(Adapter));
1477 if (sh == NULL)
1478 goto err_release;
1479
1480 host = (Adapter *) sh->hostdata;
1481
1482 dprintk("wd7000_detect: adapter allocated at 0x%x\n", (int) host);
1483 memset(host, 0, sizeof(Adapter));
1484
1485 host->irq = configs[pass].irq;
1486 host->dma = configs[pass].dma;
1487 host->iobase = iobase;
1488 host->int_counter = 0;
1489 host->bus_on = configs[pass].bus_on;
1490 host->bus_off = configs[pass].bus_off;
1491 host->sh = wd7000_host[unit] = sh;
1492 unit++;
1493
1494 dprintk("wd7000_detect: Trying init WD-7000 card at IO " "0x%x, IRQ %d, DMA %d...\n", host->iobase, host->irq, host->dma);
1495
1496 if (!wd7000_init(host)) /* Initialization failed */
1497 goto err_unregister;
1498
1499 /*
1500 * OK from here - we'll use this adapter/configuration.
1501 */
1502 wd7000_revision(host); /* important for scatter/gather */
1503
1504 /*
1505 * For boards before rev 6.0, scatter/gather isn't supported.
1506 */
1507 if (host->rev1 < 6)
1508 sh->sg_tablesize = 1;
1509
1510 present++; /* count it */
1511
1512 if (biosaddr_ptr != NUM_ADDRS)
1513 biosptr[pass] = biosaddr_ptr;
1514
1515 printk(KERN_INFO "Western Digital WD-7000 (rev %d.%d) ", host->rev1, host->rev2);
1516 printk("using IO 0x%x, IRQ %d, DMA %d.\n", host->iobase, host->irq, host->dma);
1517 printk(" BUS_ON time: %dns, BUS_OFF time: %dns\n", host->bus_on * 125, host->bus_off * 125);
1518 }
1519 } else
1520 dprintk("wd7000_detect: IO 0x%x region already allocated!\n", iobase);
1521
1522 continue;
1523
1524 err_unregister:
1525 scsi_unregister(sh);
1526 err_release:
1527 release_region(iobase, 4);
1528
1529 }
1530
1531 if (!present)
1532 printk("Failed initialization of WD-7000 SCSI card!\n");
1533
1534 return (present);
1535}
1536
1537static int wd7000_release(struct Scsi_Host *shost)
1538{
1539 if (shost->irq)
1540 free_irq(shost->irq, NULL);
1541 if (shost->io_port && shost->n_io_port)
1542 release_region(shost->io_port, shost->n_io_port);
1543 scsi_unregister(shost);
1544 return 0;
1545}
1546
#if 0
/*
 * I have absolutely NO idea how to do an abort with the WD7000...
 *
 * NOTE(review): dead code, compiled out with #if 0 (it also uses the
 * obsolete Scsi_Cmnd type).  If ever revived it would service a pending
 * but undelivered interrupt before giving up; either way it always
 * reports FAILED.
 */
static int wd7000_abort(Scsi_Cmnd * SCpnt)
{
	Adapter *host = (Adapter *) SCpnt->device->host->hostdata;

	/* An asserted interrupt bit with no handler run means we lost one. */
	if (inb(host->iobase + ASC_STAT) & INT_IM) {
		printk("wd7000_abort: lost interrupt\n");
		wd7000_intr_handle(host->irq, NULL, NULL);
		return FAILED;
	}
	return FAILED;
}
#endif
1563
1564/*
1565 * Last resort. Reinitialize the board.
1566 */
1567
1568static int wd7000_host_reset(struct scsi_cmnd *SCpnt)
1569{
1570 Adapter *host = (Adapter *) SCpnt->device->host->hostdata;
1571
1572 spin_lock_irq(SCpnt->device->host->host_lock);
1573
1574 if (wd7000_adapter_reset(host) < 0) {
1575 spin_unlock_irq(SCpnt->device->host->host_lock);
1576 return FAILED;
1577 }
1578
1579 wd7000_enable_intr(host);
1580
1581 spin_unlock_irq(SCpnt->device->host->host_lock);
1582 return SUCCESS;
1583}
1584
1585/*
1586 * This was borrowed directly from aha1542.c. (Zaga)
1587 */
1588
1589static int wd7000_biosparam(struct scsi_device *sdev,
1590 struct block_device *bdev, sector_t capacity, int *ip)
1591{
1592 char b[BDEVNAME_SIZE];
1593
1594 dprintk("wd7000_biosparam: dev=%s, size=%llu, ",
1595 bdevname(bdev, b), (u64)capacity);
1596 (void)b; /* unused var warning? */
1597
1598 /*
1599 * try default translation
1600 */
1601 ip[0] = 64;
1602 ip[1] = 32;
1603 ip[2] = capacity >> 11;
1604
1605 /*
1606 * for disks >1GB do some guessing
1607 */
1608 if (ip[2] >= 1024) {
1609 int info[3];
1610
1611 /*
1612 * try to figure out the geometry from the partition table
1613 */
1614 if ((scsicam_bios_param(bdev, capacity, info) < 0) || !(((info[0] == 64) && (info[1] == 32)) || ((info[0] == 255) && (info[1] == 63)))) {
1615 printk("wd7000_biosparam: unable to verify geometry for disk with >1GB.\n" " using extended translation.\n");
1616
1617 ip[0] = 255;
1618 ip[1] = 63;
1619 ip[2] = (unsigned long) capacity / (255 * 63);
1620 } else {
1621 ip[0] = info[0];
1622 ip[1] = info[1];
1623 ip[2] = info[2];
1624
1625 if (info[0] == 255)
1626 printk(KERN_INFO "%s: current partition table is " "using extended translation.\n", __func__);
1627 }
1628 }
1629
1630 dprintk("bios geometry: head=%d, sec=%d, cyl=%d\n", ip[0], ip[1], ip[2]);
1631 dprintk("WARNING: check, if the bios geometry is correct.\n");
1632
1633 return (0);
1634}
1635
/* Module metadata exposed via modinfo. */
MODULE_AUTHOR("Thomas Wuensche, John Boyd, Miroslav Zagorac");
MODULE_DESCRIPTION("Driver for the WD7000 series ISA controllers");
MODULE_LICENSE("GPL");
1639
/*
 * SCSI host template; the legacy detect/release entry points are driven
 * by the scsi_module.c glue included at the end of this file.
 */
static struct scsi_host_template driver_template = {
	.proc_name = "wd7000",	/* name under /proc/scsi */
	.show_info = wd7000_show_info,
	.write_info = wd7000_set_info,
	.name = "Western Digital WD-7000",
	.detect = wd7000_detect,	/* legacy probe of all configs */
	.release = wd7000_release,
	.queuecommand = wd7000_queuecommand,
	.eh_host_reset_handler = wd7000_host_reset,
	.bios_param = wd7000_biosparam,
	.can_queue = WD7000_Q,
	.this_id = 7,		/* initiator SCSI ID */
	.sg_tablesize = WD7000_SG,	/* reduced to 1 at detect time for rev < 6 boards */
	.unchecked_isa_dma = 1,	/* ISA card: bounce buffers below 16MB needed */
	.use_clustering = ENABLE_CLUSTERING,
};
1656
1657#include "scsi_module.c"
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
index dd8de82cf5b5..9fba9dd33544 100644
--- a/include/linux/t10-pi.h
+++ b/include/linux/t10-pi.h
@@ -5,6 +5,26 @@
5#include <linux/blkdev.h> 5#include <linux/blkdev.h>
6 6
7/* 7/*
8 * A T10 PI-capable target device can be formatted with different
9 * protection schemes. Currently 0 through 3 are defined:
10 *
11 * Type 0 is regular (unprotected) I/O
12 *
13 * Type 1 defines the contents of the guard and reference tags
14 *
15 * Type 2 defines the contents of the guard and reference tags and
16 * uses 32-byte commands to seed the latter
17 *
18 * Type 3 defines the contents of the guard tag only
19 */
20enum t10_dif_type {
21 T10_PI_TYPE0_PROTECTION = 0x0,
22 T10_PI_TYPE1_PROTECTION = 0x1,
23 T10_PI_TYPE2_PROTECTION = 0x2,
24 T10_PI_TYPE3_PROTECTION = 0x3,
25};
26
27/*
8 * T10 Protection Information tuple. 28 * T10 Protection Information tuple.
9 */ 29 */
10struct t10_pi_tuple { 30struct t10_pi_tuple {
diff --git a/include/uapi/scsi/cxlflash_ioctl.h b/include/uapi/scsi/cxlflash_ioctl.h
index 2302f3ce5f86..6bf1f8a022b1 100644
--- a/include/uapi/scsi/cxlflash_ioctl.h
+++ b/include/uapi/scsi/cxlflash_ioctl.h
@@ -39,19 +39,28 @@ struct dk_cxlflash_hdr {
39 * at this time, this provides future flexibility. 39 * at this time, this provides future flexibility.
40 */ 40 */
41#define DK_CXLFLASH_ALL_PORTS_ACTIVE 0x0000000000000001ULL 41#define DK_CXLFLASH_ALL_PORTS_ACTIVE 0x0000000000000001ULL
42#define DK_CXLFLASH_APP_CLOSE_ADAP_FD 0x0000000000000002ULL
42 43
43/* 44/*
44 * Notes: 45 * General Notes:
45 * ----- 46 * -------------
46 * The 'context_id' field of all ioctl structures contains the context 47 * The 'context_id' field of all ioctl structures contains the context
47 * identifier for a context in the lower 32-bits (upper 32-bits are not 48 * identifier for a context in the lower 32-bits (upper 32-bits are not
48 * to be used when identifying a context to the AFU). That said, the value 49 * to be used when identifying a context to the AFU). That said, the value
49 * in its entirety (all 64-bits) is to be treated as an opaque cookie and 50 * in its entirety (all 64-bits) is to be treated as an opaque cookie and
50 * should be presented as such when issuing ioctls. 51 * should be presented as such when issuing ioctls.
52 */
53
54/*
55 * DK_CXLFLASH_ATTACH Notes:
56 * ------------------------
57 * Read/write access permissions are specified via the O_RDONLY, O_WRONLY,
58 * and O_RDWR flags defined in the fcntl.h header file.
51 * 59 *
52 * For DK_CXLFLASH_ATTACH ioctl, user specifies read/write access 60 * A valid adapter file descriptor (fd >= 0) is only returned on the initial
53 * permissions via the O_RDONLY, O_WRONLY, and O_RDWR flags defined in 61 * attach (successful) of a context. When a context is shared(reused), the user
54 * the fcntl.h header file. 62 * is expected to already 'know' the adapter file descriptor associated with the
63 * context.
55 */ 64 */
56#define DK_CXLFLASH_ATTACH_REUSE_CONTEXT 0x8000000000000000ULL 65#define DK_CXLFLASH_ATTACH_REUSE_CONTEXT 0x8000000000000000ULL
57 66