author	Bradley Grove <bgrove@attotech.com>	2013-08-23 10:35:45 -0400
committer	James Bottomley <JBottomley@Parallels.com>	2013-09-03 10:27:58 -0400
commit	26780d9e12edf45c0b98315de272b1feff5a8e93 (patch)
tree	2a5a00f53fa007277c9f92ef8c99b85d99d3b633 /drivers/scsi/esas2r
parent	127be355285d14b483da0478a33302a680204144 (diff)
[SCSI] esas2r: ATTO Technology ExpressSAS 6G SAS/SATA RAID Adapter Driver
This is a new driver for ATTO Technology's ExpressSAS series of hardware RAID adapters. It supports the following adapters:
- ExpressSAS R60F
- ExpressSAS R680
- ExpressSAS R608
- ExpressSAS R644
Signed-off-by: Bradley Grove <bgrove@attotech.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Diffstat (limited to 'drivers/scsi/esas2r')
-rw-r--r--	drivers/scsi/esas2r/Kconfig	| 5
-rw-r--r--	drivers/scsi/esas2r/Makefile	| 5
-rw-r--r--	drivers/scsi/esas2r/atioctl.h	| 1254
-rw-r--r--	drivers/scsi/esas2r/atvda.h	| 1319
-rw-r--r--	drivers/scsi/esas2r/esas2r.h	| 1441
-rw-r--r--	drivers/scsi/esas2r/esas2r_disc.c	| 1189
-rw-r--r--	drivers/scsi/esas2r/esas2r_flash.c	| 1512
-rw-r--r--	drivers/scsi/esas2r/esas2r_init.c	| 1773
-rw-r--r--	drivers/scsi/esas2r/esas2r_int.c	| 941
-rw-r--r--	drivers/scsi/esas2r/esas2r_io.c	| 880
-rw-r--r--	drivers/scsi/esas2r/esas2r_ioctl.c	| 2110
-rw-r--r--	drivers/scsi/esas2r/esas2r_log.c	| 254
-rw-r--r--	drivers/scsi/esas2r/esas2r_log.h	| 118
-rw-r--r--	drivers/scsi/esas2r/esas2r_main.c	| 2032
-rw-r--r--	drivers/scsi/esas2r/esas2r_targdb.c	| 306
-rw-r--r--	drivers/scsi/esas2r/esas2r_vda.c	| 521
16 files changed, 15660 insertions, 0 deletions
diff --git a/drivers/scsi/esas2r/Kconfig b/drivers/scsi/esas2r/Kconfig
new file mode 100644
index 000000000000..78fdbfd9b4b7
--- /dev/null
+++ b/drivers/scsi/esas2r/Kconfig
@@ -0,0 +1,5 @@
config SCSI_ESAS2R
	tristate "ATTO Technology's ExpressSAS RAID adapter driver"
	depends on PCI && SCSI
	---help---
	  This driver supports the ATTO ExpressSAS R6xx SAS/SATA RAID controllers.
diff --git a/drivers/scsi/esas2r/Makefile b/drivers/scsi/esas2r/Makefile
new file mode 100644
index 000000000000..c77160b8c8bd
--- /dev/null
+++ b/drivers/scsi/esas2r/Makefile
@@ -0,0 +1,5 @@
obj-$(CONFIG_SCSI_ESAS2R) += esas2r.o

esas2r-objs := esas2r_log.o esas2r_disc.o esas2r_flash.o esas2r_init.o \
	   esas2r_int.o esas2r_io.o esas2r_ioctl.o esas2r_targdb.o \
	   esas2r_vda.o esas2r_main.o
diff --git a/drivers/scsi/esas2r/atioctl.h b/drivers/scsi/esas2r/atioctl.h
new file mode 100644
index 000000000000..4aca3d52c851
--- /dev/null
+++ b/drivers/scsi/esas2r/atioctl.h
@@ -0,0 +1,1254 @@
/* linux/drivers/scsi/esas2r/atioctl.h
 * ATTO IOCTL Handling
 *
 * Copyright (c) 2001-2013 ATTO Technology, Inc.
 * (mailto:linuxdrivers@attotech.com)
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/

#include "atvda.h"

#ifndef ATIOCTL_H
#define ATIOCTL_H

#define EXPRESS_IOCTL_SIGNATURE "Express"
#define EXPRESS_IOCTL_SIGNATURE_SIZE 8

/* structure definitions for IOCTLs */

struct __packed atto_express_ioctl_header {
	u8 signature[EXPRESS_IOCTL_SIGNATURE_SIZE];
	u8 return_code;

#define IOCTL_SUCCESS 0
#define IOCTL_ERR_INVCMD 101
#define IOCTL_INIT_FAILED 102
#define IOCTL_NOT_IMPLEMENTED 103
#define IOCTL_BAD_CHANNEL 104
#define IOCTL_TARGET_OVERRUN 105
#define IOCTL_TARGET_NOT_ENABLED 106
#define IOCTL_BAD_FLASH_IMGTYPE 107
#define IOCTL_OUT_OF_RESOURCES 108
#define IOCTL_GENERAL_ERROR 109
#define IOCTL_INVALID_PARAM 110

	u8 channel;
	u8 retries;
	u8 pad[5];
};

/*
 * NOTE - if channel == 0xFF, the request is
 * handled on the adapter it came in on.
 */
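
/*
 * Illustrative sketch (editorial addition, not part of the ATTO API
 * itself): a management application would typically prepare the header
 * like this before issuing one of the EXPRESS_IOCTL_XXX codes defined
 * at the end of this file.  The buffer name is hypothetical.
 *
 *	struct atto_express_ioctl req;
 *
 *	memset(&req, 0, sizeof(req));
 *	memcpy(req.header.signature, EXPRESS_IOCTL_SIGNATURE,
 *	       EXPRESS_IOCTL_SIGNATURE_SIZE);
 *	req.header.channel = 0xFF;	(handle on the receiving adapter)
 */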
#define MAX_NODE_NAMES 256

struct __packed atto_firmware_rw_request {
	u8 function;
	#define FUNC_FW_DOWNLOAD 0x09
	#define FUNC_FW_UPLOAD 0x12

	u8 img_type;
	#define FW_IMG_FW 0x01
	#define FW_IMG_BIOS 0x02
	#define FW_IMG_NVR 0x03
	#define FW_IMG_RAW 0x04
	#define FW_IMG_FM_API 0x05
	#define FW_IMG_FS_API 0x06

	u8 pad[2];
	u32 img_offset;
	u32 img_size;
	u8 image[0x80000];
};

struct __packed atto_param_rw_request {
	u16 code;
	char data_buffer[512];
};

#define MAX_CHANNEL 256

struct __packed atto_channel_list {
	u32 num_channels;
	u8 channel[MAX_CHANNEL];
};

struct __packed atto_channel_info {
	u8 major_rev;
	u8 minor_rev;
	u8 IRQ;
	u8 revision_id;
	u8 pci_bus;
	u8 pci_dev_func;
	u8 core_rev;
	u8 host_no;
	u16 device_id;
	u16 vendor_id;
	u16 ven_dev_id;
	u8 pad[3];
	u32 hbaapi_rev;
};

/*
 * CSMI control codes
 * class independent
 */
#define CSMI_CC_GET_DRVR_INFO 1
#define CSMI_CC_GET_CNTLR_CFG 2
#define CSMI_CC_GET_CNTLR_STS 3
#define CSMI_CC_FW_DOWNLOAD 4

/* RAID class */
#define CSMI_CC_GET_RAID_INFO 10
#define CSMI_CC_GET_RAID_CFG 11

/* HBA class */
#define CSMI_CC_GET_PHY_INFO 20
#define CSMI_CC_SET_PHY_INFO 21
#define CSMI_CC_GET_LINK_ERRORS 22
#define CSMI_CC_SMP_PASSTHRU 23
#define CSMI_CC_SSP_PASSTHRU 24
#define CSMI_CC_STP_PASSTHRU 25
#define CSMI_CC_GET_SATA_SIG 26
#define CSMI_CC_GET_SCSI_ADDR 27
#define CSMI_CC_GET_DEV_ADDR 28
#define CSMI_CC_TASK_MGT 29
#define CSMI_CC_GET_CONN_INFO 30

/* PHY class */
#define CSMI_CC_PHY_CTRL 60

/*
 * CSMI status codes
 * class independent
 */
#define CSMI_STS_SUCCESS 0
#define CSMI_STS_FAILED 1
#define CSMI_STS_BAD_CTRL_CODE 2
#define CSMI_STS_INV_PARAM 3
#define CSMI_STS_WRITE_ATTEMPTED 4

/* RAID class */
#define CSMI_STS_INV_RAID_SET 1000

/* HBA class */
#define CSMI_STS_PHY_CHANGED CSMI_STS_SUCCESS
#define CSMI_STS_PHY_UNCHANGEABLE 2000
#define CSMI_STS_INV_LINK_RATE 2001
#define CSMI_STS_INV_PHY 2002
#define CSMI_STS_INV_PHY_FOR_PORT 2003
#define CSMI_STS_PHY_UNSELECTABLE 2004
#define CSMI_STS_SELECT_PHY_OR_PORT 2005
#define CSMI_STS_INV_PORT 2006
#define CSMI_STS_PORT_UNSELECTABLE 2007
#define CSMI_STS_CONNECTION_FAILED 2008
#define CSMI_STS_NO_SATA_DEV 2009
#define CSMI_STS_NO_SATA_SIGNATURE 2010
#define CSMI_STS_SCSI_EMULATION 2011
#define CSMI_STS_NOT_AN_END_DEV 2012
#define CSMI_STS_NO_SCSI_ADDR 2013
#define CSMI_STS_NO_DEV_ADDR 2014

/* CSMI class independent structures */
struct atto_csmi_get_driver_info {
	char name[81];
	char description[81];
	u16 major_rev;
	u16 minor_rev;
	u16 build_rev;
	u16 release_rev;
	u16 csmi_major_rev;
	u16 csmi_minor_rev;
	#define CSMI_MAJOR_REV_0_81 0
	#define CSMI_MINOR_REV_0_81 81

	#define CSMI_MAJOR_REV CSMI_MAJOR_REV_0_81
	#define CSMI_MINOR_REV CSMI_MINOR_REV_0_81
};

struct atto_csmi_get_pci_bus_addr {
	u8 bus_num;
	u8 device_num;
	u8 function_num;
	u8 reserved;
};

struct atto_csmi_get_cntlr_cfg {
	u32 base_io_addr;

	struct {
		u32 base_memaddr_lo;
		u32 base_memaddr_hi;
	};

	u32 board_id;
	u16 slot_num;
	#define CSMI_SLOT_NUM_UNKNOWN 0xFFFF

	u8 cntlr_class;
	#define CSMI_CNTLR_CLASS_HBA 5

	u8 io_bus_type;
	#define CSMI_BUS_TYPE_PCI 3
	#define CSMI_BUS_TYPE_PCMCIA 4

	union {
		struct atto_csmi_get_pci_bus_addr pci_addr;
		u8 reserved[32];
	};

	char serial_num[81];
	u16 major_rev;
	u16 minor_rev;
	u16 build_rev;
	u16 release_rev;
	u16 bios_major_rev;
	u16 bios_minor_rev;
	u16 bios_build_rev;
	u16 bios_release_rev;
	u32 cntlr_flags;
	#define CSMI_CNTLRF_SAS_HBA 0x00000001
	#define CSMI_CNTLRF_SAS_RAID 0x00000002
	#define CSMI_CNTLRF_SATA_HBA 0x00000004
	#define CSMI_CNTLRF_SATA_RAID 0x00000008
	#define CSMI_CNTLRF_FWD_SUPPORT 0x00010000
	#define CSMI_CNTLRF_FWD_ONLINE 0x00020000
	#define CSMI_CNTLRF_FWD_SRESET 0x00040000
	#define CSMI_CNTLRF_FWD_HRESET 0x00080000
	#define CSMI_CNTLRF_FWD_RROM 0x00100000

	u16 rrom_major_rev;
	u16 rrom_minor_rev;
	u16 rrom_build_rev;
	u16 rrom_release_rev;
	u16 rrom_biosmajor_rev;
	u16 rrom_biosminor_rev;
	u16 rrom_biosbuild_rev;
	u16 rrom_biosrelease_rev;
	u8 reserved2[7];
};

struct atto_csmi_get_cntlr_sts {
	u32 status;
	#define CSMI_CNTLR_STS_GOOD 1
	#define CSMI_CNTLR_STS_FAILED 2
	#define CSMI_CNTLR_STS_OFFLINE 3
	#define CSMI_CNTLR_STS_POWEROFF 4

	u32 offline_reason;
	#define CSMI_OFFLINE_NO_REASON 0
	#define CSMI_OFFLINE_INITIALIZING 1
	#define CSMI_OFFLINE_BUS_DEGRADED 2
	#define CSMI_OFFLINE_BUS_FAILURE 3

	u8 reserved[28];
};

struct atto_csmi_fw_download {
	u32 buffer_len;
	u32 download_flags;
	#define CSMI_FWDF_VALIDATE 0x00000001
	#define CSMI_FWDF_SOFT_RESET 0x00000002
	#define CSMI_FWDF_HARD_RESET 0x00000004

	u8 reserved[32];
	u16 status;
	#define CSMI_FWD_STS_SUCCESS 0
	#define CSMI_FWD_STS_FAILED 1
	#define CSMI_FWD_STS_USING_RROM 2
	#define CSMI_FWD_STS_REJECT 3
	#define CSMI_FWD_STS_DOWNREV 4

	u16 severity;
	#define CSMI_FWD_SEV_INFO 0
	#define CSMI_FWD_SEV_WARNING 1
	#define CSMI_FWD_SEV_ERROR 2
	#define CSMI_FWD_SEV_FATAL 3

};

/* CSMI RAID class structures */
struct atto_csmi_get_raid_info {
	u32 num_raid_sets;
	u32 max_drivesper_set;
	u8 reserved[92];
};

struct atto_csmi_raid_drives {
	char model[40];
	char firmware[8];
	char serial_num[40];
	u8 sas_addr[8];
	u8 lun[8];
	u8 drive_sts;
	#define CSMI_DRV_STS_OK 0
	#define CSMI_DRV_STS_REBUILDING 1
	#define CSMI_DRV_STS_FAILED 2
	#define CSMI_DRV_STS_DEGRADED 3

	u8 drive_usage;
	#define CSMI_DRV_USE_NOT_USED 0
	#define CSMI_DRV_USE_MEMBER 1
	#define CSMI_DRV_USE_SPARE 2

	u8 reserved[30]; /* spec says 22 */
};

struct atto_csmi_get_raid_cfg {
	u32 raid_set_index;
	u32 capacity;
	u32 stripe_size;
	u8 raid_type;
	u8 status;
	u8 information;
	u8 drive_cnt;
	u8 reserved[20];

	struct atto_csmi_raid_drives drives[1];
};

/* CSMI HBA class structures */
struct atto_csmi_phy_entity {
	u8 ident_frame[0x1C];
	u8 port_id;
	u8 neg_link_rate;
	u8 min_link_rate;
	u8 max_link_rate;
	u8 phy_change_cnt;
	u8 auto_discover;
	#define CSMI_DISC_NOT_SUPPORTED 0x00
	#define CSMI_DISC_NOT_STARTED 0x01
	#define CSMI_DISC_IN_PROGRESS 0x02
	#define CSMI_DISC_COMPLETE 0x03
	#define CSMI_DISC_ERROR 0x04

	u8 reserved[2];
	u8 attach_ident_frame[0x1C];
};

struct atto_csmi_get_phy_info {
	u8 number_of_phys;
	u8 reserved[3];
	struct atto_csmi_phy_entity
		phy[32];
};

struct atto_csmi_set_phy_info {
	u8 phy_id;
	u8 neg_link_rate;
	#define CSMI_NEG_RATE_NEGOTIATE 0x00
	#define CSMI_NEG_RATE_PHY_DIS 0x01

	u8 prog_minlink_rate;
	u8 prog_maxlink_rate;
	u8 signal_class;
	#define CSMI_SIG_CLASS_UNKNOWN 0x00
	#define CSMI_SIG_CLASS_DIRECT 0x01
	#define CSMI_SIG_CLASS_SERVER 0x02
	#define CSMI_SIG_CLASS_ENCLOSURE 0x03

	u8 reserved[3];
};

struct atto_csmi_get_link_errors {
	u8 phy_id;
	u8 reset_cnts;
	#define CSMI_RESET_CNTS_NO 0x00
	#define CSMI_RESET_CNTS_YES 0x01

	u8 reserved[2];
	u32 inv_dw_cnt;
	u32 disp_err_cnt;
	u32 loss_ofdw_sync_cnt;
	u32 phy_reseterr_cnt;

	/*
	 * The following field has been added by ATTO for ease of
	 * implementation of additional statistics. Drivers must validate
	 * the length of the IOCTL payload prior to filling them in so CSMI
	 * compliant applications function correctly.
	 */

	u32 crc_err_cnt;
};
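
/*
 * Hedged sketch of the validation described in the comment above (the
 * names below are hypothetical): the extra counter is only filled in
 * when the caller's payload is large enough to hold it.
 *
 *	if (payload_len >= sizeof(struct atto_csmi_get_link_errors))
 *		le->crc_err_cnt = hw_crc_err_cnt;
 */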

struct atto_csmi_smp_passthru {
	u8 phy_id;
	u8 port_id;
	u8 conn_rate;
	u8 reserved;
	u8 dest_sas_addr[8];
	u32 req_len;
	u8 smp_req[1020];
	u8 conn_sts;
	u8 reserved2[3];
	u32 rsp_len;
	u8 smp_rsp[1020];
};

struct atto_csmi_ssp_passthru_sts {
	u8 conn_sts;
	u8 reserved[3];
	u8 data_present;
	u8 status;
	u16 rsp_length;
	u8 rsp[256];
	u32 data_bytes;
};

struct atto_csmi_ssp_passthru {
	u8 phy_id;
	u8 port_id;
	u8 conn_rate;
	u8 reserved;
	u8 dest_sas_addr[8];
	u8 lun[8];
	u8 cdb_len;
	u8 add_cdb_len;
	u8 reserved2[2];
	u8 cdb[16];
	u32 flags;
	#define CSMI_SSPF_DD_READ 0x00000001
	#define CSMI_SSPF_DD_WRITE 0x00000002
	#define CSMI_SSPF_DD_UNSPECIFIED 0x00000004
	#define CSMI_SSPF_TA_SIMPLE 0x00000000
	#define CSMI_SSPF_TA_HEAD_OF_Q 0x00000010
	#define CSMI_SSPF_TA_ORDERED 0x00000020
	#define CSMI_SSPF_TA_ACA 0x00000040

	u8 add_cdb[24];
	u32 data_len;

	struct atto_csmi_ssp_passthru_sts sts;
};

struct atto_csmi_stp_passthru_sts {
	u8 conn_sts;
	u8 reserved[3];
	u8 sts_fis[20];
	u32 scr[16];
	u32 data_bytes;
};

struct atto_csmi_stp_passthru {
	u8 phy_id;
	u8 port_id;
	u8 conn_rate;
	u8 reserved;
	u8 dest_sas_addr[8];
	u8 reserved2[4];
	u8 command_fis[20];
	u32 flags;
	#define CSMI_STPF_DD_READ 0x00000001
	#define CSMI_STPF_DD_WRITE 0x00000002
	#define CSMI_STPF_DD_UNSPECIFIED 0x00000004
	#define CSMI_STPF_PIO 0x00000010
	#define CSMI_STPF_DMA 0x00000020
	#define CSMI_STPF_PACKET 0x00000040
	#define CSMI_STPF_DMA_QUEUED 0x00000080
	#define CSMI_STPF_EXECUTE_DIAG 0x00000100
	#define CSMI_STPF_RESET_DEVICE 0x00000200

	u32 data_len;

	struct atto_csmi_stp_passthru_sts sts;
};

struct atto_csmi_get_sata_sig {
	u8 phy_id;
	u8 reserved[3];
	u8 reg_dth_fis[20];
};

struct atto_csmi_get_scsi_addr {
	u8 sas_addr[8];
	u8 sas_lun[8];
	u8 host_index;
	u8 path_id;
	u8 target_id;
	u8 lun;
};

struct atto_csmi_get_dev_addr {
	u8 host_index;
	u8 path_id;
	u8 target_id;
	u8 lun;
	u8 sas_addr[8];
	u8 sas_lun[8];
};

struct atto_csmi_task_mgmt {
	u8 host_index;
	u8 path_id;
	u8 target_id;
	u8 lun;
	u32 flags;
	#define CSMI_TMF_TASK_IU 0x00000001
	#define CSMI_TMF_HARD_RST 0x00000002
	#define CSMI_TMF_SUPPRESS_RSLT 0x00000004

	u32 queue_tag;
	u32 reserved;
	u8 task_mgt_func;
	u8 reserved2[7];
	u32 information;
	#define CSMI_TM_INFO_TEST 1
	#define CSMI_TM_INFO_EXCEEDED 2
	#define CSMI_TM_INFO_DEMAND 3
	#define CSMI_TM_INFO_TRIGGER 4

	struct atto_csmi_ssp_passthru_sts sts;

};

struct atto_csmi_get_conn_info {
	u32 pinout;
	#define CSMI_CON_UNKNOWN 0x00000001
	#define CSMI_CON_SFF_8482 0x00000002
	#define CSMI_CON_SFF_8470_LANE_1 0x00000100
	#define CSMI_CON_SFF_8470_LANE_2 0x00000200
	#define CSMI_CON_SFF_8470_LANE_3 0x00000400
	#define CSMI_CON_SFF_8470_LANE_4 0x00000800
	#define CSMI_CON_SFF_8484_LANE_1 0x00010000
	#define CSMI_CON_SFF_8484_LANE_2 0x00020000
	#define CSMI_CON_SFF_8484_LANE_3 0x00040000
	#define CSMI_CON_SFF_8484_LANE_4 0x00080000

	u8 connector[16];
	u8 location;
	#define CSMI_CON_INTERNAL 0x02
	#define CSMI_CON_EXTERNAL 0x04
	#define CSMI_CON_SWITCHABLE 0x08
	#define CSMI_CON_AUTO 0x10

	u8 reserved[15];
};

/* CSMI PHY class structures */
struct atto_csmi_character {
	u8 type_flags;
	#define CSMI_CTF_POS_DISP 0x01
	#define CSMI_CTF_NEG_DISP 0x02
	#define CSMI_CTF_CTRL_CHAR 0x04

	u8 value;
};

struct atto_csmi_pc_ctrl {
	u8 type;
	#define CSMI_PC_TYPE_UNDEFINED 0x00
	#define CSMI_PC_TYPE_SATA 0x01
	#define CSMI_PC_TYPE_SAS 0x02
	u8 rate;
	u8 reserved[6];
	u32 vendor_unique[8];
	u32 tx_flags;
	#define CSMI_PC_TXF_PREEMP_DIS 0x00000001

	signed char tx_amplitude;
	signed char tx_preemphasis;
	signed char tx_slew_rate;
	signed char tx_reserved[13];
	u8 tx_vendor_unique[64];
	u32 rx_flags;
	#define CSMI_PC_RXF_EQ_DIS 0x00000001

	signed char rx_threshold;
	signed char rx_equalization_gain;
	signed char rx_reserved[14];
	u8 rx_vendor_unique[64];
	u32 pattern_flags;
	#define CSMI_PC_PATF_FIXED 0x00000001
	#define CSMI_PC_PATF_DIS_SCR 0x00000002
	#define CSMI_PC_PATF_DIS_ALIGN 0x00000004
	#define CSMI_PC_PATF_DIS_SSC 0x00000008

	u8 fixed_pattern;
	#define CSMI_PC_FP_CJPAT 0x00000001
	#define CSMI_PC_FP_ALIGN 0x00000002

	u8 user_pattern_len;
	u8 pattern_reserved[6];

	struct atto_csmi_character user_pattern_buffer[16];
};

struct atto_csmi_phy_ctrl {
	u32 function;
	#define CSMI_PC_FUNC_GET_SETUP 0x00000100

	u8 phy_id;
	u16 len_of_cntl;
	u8 num_of_cntls;
	u8 reserved[4];
	u32 link_flags;
	#define CSMI_PHY_ACTIVATE_CTRL 0x00000001
	#define CSMI_PHY_UPD_SPINUP_RATE 0x00000002
	#define CSMI_PHY_AUTO_COMWAKE 0x00000004

	u8 spinup_rate;
	u8 link_reserved[7];
	u32 vendor_unique[8];

	struct atto_csmi_pc_ctrl control[1];
};

union atto_ioctl_csmi {
	struct atto_csmi_get_driver_info drvr_info;
	struct atto_csmi_get_cntlr_cfg cntlr_cfg;
	struct atto_csmi_get_cntlr_sts cntlr_sts;
	struct atto_csmi_fw_download fw_dwnld;
	struct atto_csmi_get_raid_info raid_info;
	struct atto_csmi_get_raid_cfg raid_cfg;
	struct atto_csmi_get_phy_info get_phy_info;
	struct atto_csmi_set_phy_info set_phy_info;
	struct atto_csmi_get_link_errors link_errs;
	struct atto_csmi_smp_passthru smp_pass_thru;
	struct atto_csmi_ssp_passthru ssp_pass_thru;
	struct atto_csmi_stp_passthru stp_pass_thru;
	struct atto_csmi_task_mgmt tsk_mgt;
	struct atto_csmi_get_sata_sig sata_sig;
	struct atto_csmi_get_scsi_addr scsi_addr;
	struct atto_csmi_get_dev_addr dev_addr;
	struct atto_csmi_get_conn_info conn_info[32];
	struct atto_csmi_phy_ctrl phy_ctrl;
};

struct atto_csmi {
	u32 control_code;
	u32 status;
	union atto_ioctl_csmi data;
};

struct atto_module_info {
	void *adapter;
	void *pci_dev;
	void *scsi_host;
	unsigned short host_no;
	union {
		struct {
			u64 node_name;
			u64 port_name;
		};
		u64 sas_addr;
	};
};

#define ATTO_FUNC_GET_ADAP_INFO 0x00
#define ATTO_VER_GET_ADAP_INFO0 0
#define ATTO_VER_GET_ADAP_INFO ATTO_VER_GET_ADAP_INFO0

struct __packed atto_hba_get_adapter_info {

	struct {
		u16 vendor_id;
		u16 device_id;
		u16 ss_vendor_id;
		u16 ss_device_id;
		u8 class_code[3];
		u8 rev_id;
		u8 bus_num;
		u8 dev_num;
		u8 func_num;
		u8 link_width_max;
		u8 link_width_curr;
		#define ATTO_GAI_PCILW_UNKNOWN 0x00

		u8 link_speed_max;
		u8 link_speed_curr;
		#define ATTO_GAI_PCILS_UNKNOWN 0x00
		#define ATTO_GAI_PCILS_GEN1 0x01
		#define ATTO_GAI_PCILS_GEN2 0x02
		#define ATTO_GAI_PCILS_GEN3 0x03

		u8 interrupt_mode;
		#define ATTO_GAI_PCIIM_UNKNOWN 0x00
		#define ATTO_GAI_PCIIM_LEGACY 0x01
		#define ATTO_GAI_PCIIM_MSI 0x02
		#define ATTO_GAI_PCIIM_MSIX 0x03

		u8 msi_vector_cnt;
		u8 reserved[19];
	} pci;

	u8 adap_type;
	#define ATTO_GAI_AT_EPCIU320 0x00
	#define ATTO_GAI_AT_ESASRAID 0x01
	#define ATTO_GAI_AT_ESASRAID2 0x02
	#define ATTO_GAI_AT_ESASHBA 0x03
	#define ATTO_GAI_AT_ESASHBA2 0x04
	#define ATTO_GAI_AT_CELERITY 0x05
	#define ATTO_GAI_AT_CELERITY8 0x06
	#define ATTO_GAI_AT_FASTFRAME 0x07
	#define ATTO_GAI_AT_ESASHBA3 0x08
	#define ATTO_GAI_AT_CELERITY16 0x09
	#define ATTO_GAI_AT_TLSASHBA 0x0A
	#define ATTO_GAI_AT_ESASHBA4 0x0B

	u8 adap_flags;
	#define ATTO_GAI_AF_DEGRADED 0x01
	#define ATTO_GAI_AF_SPT_SUPP 0x02
	#define ATTO_GAI_AF_DEVADDR_SUPP 0x04
	#define ATTO_GAI_AF_PHYCTRL_SUPP 0x08
	#define ATTO_GAI_AF_TEST_SUPP 0x10
	#define ATTO_GAI_AF_DIAG_SUPP 0x20
	#define ATTO_GAI_AF_VIRT_SES 0x40
	#define ATTO_GAI_AF_CONN_CTRL 0x80

	u8 num_ports;
	u8 num_phys;
	u8 drvr_rev_major;
	u8 drvr_rev_minor;
	u8 drvr_revsub_minor;
	u8 drvr_rev_build;
	char drvr_rev_ascii[16];
	char drvr_name[32];
	char firmware_rev[16];
	char flash_rev[16];
	char model_name_short[16];
	char model_name[32];
	u32 num_targets;
	u32 num_targsper_bus;
	u32 num_lunsper_targ;
	u8 num_busses;
	u8 num_connectors;
	u8 adap_flags2;
	#define ATTO_GAI_AF2_FCOE_SUPP 0x01
	#define ATTO_GAI_AF2_NIC_SUPP 0x02
	#define ATTO_GAI_AF2_LOCATE_SUPP 0x04
	#define ATTO_GAI_AF2_ADAP_CTRL_SUPP 0x08
	#define ATTO_GAI_AF2_DEV_INFO_SUPP 0x10
	#define ATTO_GAI_AF2_NPIV_SUPP 0x20
	#define ATTO_GAI_AF2_MP_SUPP 0x40

	u8 num_temp_sensors;
	u32 num_targets_backend;
	u32 tunnel_flags;
	#define ATTO_GAI_TF_MEM_RW 0x00000001
	#define ATTO_GAI_TF_TRACE 0x00000002
	#define ATTO_GAI_TF_SCSI_PASS_THRU 0x00000004
	#define ATTO_GAI_TF_GET_DEV_ADDR 0x00000008
	#define ATTO_GAI_TF_PHY_CTRL 0x00000010
	#define ATTO_GAI_TF_CONN_CTRL 0x00000020
	#define ATTO_GAI_TF_GET_DEV_INFO 0x00000040

	u8 reserved3[0x138];
};

#define ATTO_FUNC_GET_ADAP_ADDR 0x01
#define ATTO_VER_GET_ADAP_ADDR0 0
#define ATTO_VER_GET_ADAP_ADDR ATTO_VER_GET_ADAP_ADDR0

struct __packed atto_hba_get_adapter_address {

	u8 addr_type;
	#define ATTO_GAA_AT_PORT 0x00
	#define ATTO_GAA_AT_NODE 0x01
	#define ATTO_GAA_AT_CURR_MAC 0x02
	#define ATTO_GAA_AT_PERM_MAC 0x03
	#define ATTO_GAA_AT_VNIC 0x04

	u8 port_id;
	u16 addr_len;
	u8 address[256];
};

#define ATTO_FUNC_MEM_RW 0x02
#define ATTO_VER_MEM_RW0 0
#define ATTO_VER_MEM_RW ATTO_VER_MEM_RW0

struct __packed atto_hba_memory_read_write {
	u8 mem_func;
	u8 mem_type;
	union {
		u8 pci_index;
		u8 i2c_dev;
	};
	u8 i2c_status;
	u32 length;
	u64 address;
	u8 reserved[48];

};

#define ATTO_FUNC_TRACE 0x03
#define ATTO_VER_TRACE0 0
#define ATTO_VER_TRACE1 1
#define ATTO_VER_TRACE ATTO_VER_TRACE1

struct __packed atto_hba_trace {
	u8 trace_func;
	#define ATTO_TRC_TF_GET_INFO 0x00
	#define ATTO_TRC_TF_ENABLE 0x01
	#define ATTO_TRC_TF_DISABLE 0x02
	#define ATTO_TRC_TF_SET_MASK 0x03
	#define ATTO_TRC_TF_UPLOAD 0x04
	#define ATTO_TRC_TF_RESET 0x05

	u8 trace_type;
	#define ATTO_TRC_TT_DRIVER 0x00
	#define ATTO_TRC_TT_FWCOREDUMP 0x01

	u8 reserved[2];
	u32 current_offset;
	u32 total_length;
	u32 trace_mask;
	u8 reserved2[48];
};

#define ATTO_FUNC_SCSI_PASS_THRU 0x04
#define ATTO_VER_SCSI_PASS_THRU0 0
#define ATTO_VER_SCSI_PASS_THRU ATTO_VER_SCSI_PASS_THRU0

struct __packed atto_hba_scsi_pass_thru {
	u8 cdb[32];
	u8 cdb_length;
	u8 req_status;
	#define ATTO_SPT_RS_SUCCESS 0x00
	#define ATTO_SPT_RS_FAILED 0x01
	#define ATTO_SPT_RS_OVERRUN 0x02
	#define ATTO_SPT_RS_UNDERRUN 0x03
	#define ATTO_SPT_RS_NO_DEVICE 0x04
	#define ATTO_SPT_RS_NO_LUN 0x05
	#define ATTO_SPT_RS_TIMEOUT 0x06
	#define ATTO_SPT_RS_BUS_RESET 0x07
	#define ATTO_SPT_RS_ABORTED 0x08
	#define ATTO_SPT_RS_BUSY 0x09
	#define ATTO_SPT_RS_DEGRADED 0x0A

	u8 scsi_status;
	u8 sense_length;
	u32 flags;
	#define ATTO_SPTF_DATA_IN 0x00000001
	#define ATTO_SPTF_DATA_OUT 0x00000002
	#define ATTO_SPTF_SIMPLE_Q 0x00000004
	#define ATTO_SPTF_HEAD_OF_Q 0x00000008
	#define ATTO_SPTF_ORDERED_Q 0x00000010

	u32 timeout;
	u32 target_id;
	u8 lun[8];
	u32 residual_length;
	u8 sense_data[0xFC];
	u8 reserved[0x28];
};

#define ATTO_FUNC_GET_DEV_ADDR 0x05
#define ATTO_VER_GET_DEV_ADDR0 0
#define ATTO_VER_GET_DEV_ADDR ATTO_VER_GET_DEV_ADDR0

struct __packed atto_hba_get_device_address {
	u8 addr_type;
	#define ATTO_GDA_AT_PORT 0x00
	#define ATTO_GDA_AT_NODE 0x01
	#define ATTO_GDA_AT_MAC 0x02
	#define ATTO_GDA_AT_PORTID 0x03
	#define ATTO_GDA_AT_UNIQUE 0x04

	u8 reserved;
	u16 addr_len;
	u32 target_id;
	u8 address[256];
};

/* The following functions are supported by firmware but do not have any
 * associated driver structures
 */
#define ATTO_FUNC_PHY_CTRL 0x06
#define ATTO_FUNC_CONN_CTRL 0x0C
#define ATTO_FUNC_ADAP_CTRL 0x0E
#define ATTO_VER_ADAP_CTRL0 0
#define ATTO_VER_ADAP_CTRL ATTO_VER_ADAP_CTRL0

struct __packed atto_hba_adap_ctrl {
	u8 adap_func;
	#define ATTO_AC_AF_HARD_RST 0x00
	#define ATTO_AC_AF_GET_STATE 0x01
	#define ATTO_AC_AF_GET_TEMP 0x02

	u8 adap_state;
	#define ATTO_AC_AS_UNKNOWN 0x00
	#define ATTO_AC_AS_OK 0x01
	#define ATTO_AC_AS_RST_SCHED 0x02
	#define ATTO_AC_AS_RST_IN_PROG 0x03
	#define ATTO_AC_AS_RST_DISC 0x04
	#define ATTO_AC_AS_DEGRADED 0x05
	#define ATTO_AC_AS_DISABLED 0x06
	#define ATTO_AC_AS_TEMP 0x07

	u8 reserved[2];

	union {
		struct {
			u8 temp_sensor;
			u8 temp_state;

			#define ATTO_AC_TS_UNSUPP 0x00
			#define ATTO_AC_TS_UNKNOWN 0x01
			#define ATTO_AC_TS_INIT_FAILED 0x02
			#define ATTO_AC_TS_NORMAL 0x03
			#define ATTO_AC_TS_OUT_OF_RANGE 0x04
			#define ATTO_AC_TS_FAULT 0x05

			signed short temp_value;
			signed short temp_lower_lim;
			signed short temp_upper_lim;
			char temp_desc[32];
			u8 reserved2[20];
		};
	};
};

#define ATTO_FUNC_GET_DEV_INFO 0x0F
#define ATTO_VER_GET_DEV_INFO0 0
#define ATTO_VER_GET_DEV_INFO ATTO_VER_GET_DEV_INFO0

struct __packed atto_hba_sas_device_info {

	#define ATTO_SDI_MAX_PHYS_WIDE_PORT 16

	u8 phy_id[ATTO_SDI_MAX_PHYS_WIDE_PORT]; /* IDs of parent exp/adapt */
	#define ATTO_SDI_PHY_ID_INV ATTO_SAS_PHY_ID_INV
	u32 exp_target_id;
	u32 sas_port_mask;
	u8 sas_level;
	#define ATTO_SDI_SAS_LVL_INV 0xFF

	u8 slot_num;
	#define ATTO_SDI_SLOT_NUM_INV ATTO_SLOT_NUM_INV

	u8 dev_type;
	#define ATTO_SDI_DT_END_DEVICE 0
	#define ATTO_SDI_DT_EXPANDER 1
	#define ATTO_SDI_DT_PORT_MULT 2

	u8 ini_flags;
	u8 tgt_flags;
	u8 link_rate; /* SMP_RATE_XXX */
	u8 loc_flags;
	#define ATTO_SDI_LF_DIRECT 0x01
	#define ATTO_SDI_LF_EXPANDER 0x02
	#define ATTO_SDI_LF_PORT_MULT 0x04
	u8 pm_port;
	u8 reserved[0x60];
};

union atto_hba_device_info {
	struct atto_hba_sas_device_info sas_dev_info;
};

struct __packed atto_hba_get_device_info {
	u32 target_id;
	u8 info_type;
	#define ATTO_GDI_IT_UNKNOWN 0x00
	#define ATTO_GDI_IT_SAS 0x01
	#define ATTO_GDI_IT_FC 0x02
	#define ATTO_GDI_IT_FCOE 0x03

	u8 reserved[11];
	union atto_hba_device_info dev_info;
};

struct atto_ioctl {
	u8 version;
	u8 function; /* ATTO_FUNC_XXX */
	u8 status;
#define ATTO_STS_SUCCESS 0x00
#define ATTO_STS_FAILED 0x01
#define ATTO_STS_INV_VERSION 0x02
#define ATTO_STS_OUT_OF_RSRC 0x03
#define ATTO_STS_INV_FUNC 0x04
#define ATTO_STS_UNSUPPORTED 0x05
#define ATTO_STS_INV_ADAPTER 0x06
#define ATTO_STS_INV_DRVR_VER 0x07
#define ATTO_STS_INV_PARAM 0x08
#define ATTO_STS_TIMEOUT 0x09
#define ATTO_STS_NOT_APPL 0x0A
#define ATTO_STS_DEGRADED 0x0B

	u8 flags;
	#define HBAF_TUNNEL 0x01

	u32 data_length;
	u8 reserved2[56];

	union {
		u8 byte[1];
		struct atto_hba_get_adapter_info get_adap_info;
		struct atto_hba_get_adapter_address get_adap_addr;
		struct atto_hba_scsi_pass_thru scsi_pass_thru;
		struct atto_hba_get_device_address get_dev_addr;
		struct atto_hba_adap_ctrl adap_ctrl;
		struct atto_hba_get_device_info get_dev_info;
		struct atto_hba_trace trace;
	} data;

};
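
/*
 * Minimal sketch (assumed usage, not taken from this driver's sources):
 * requesting adapter information through the generic container above.
 *
 *	struct atto_ioctl hba;
 *
 *	memset(&hba, 0, sizeof(hba));
 *	hba.version = ATTO_VER_GET_ADAP_INFO;
 *	hba.function = ATTO_FUNC_GET_ADAP_INFO;
 *	hba.data_length = sizeof(hba.data.get_adap_info);
 *	(submit, then check hba.status == ATTO_STS_SUCCESS)
 */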

struct __packed atto_ioctl_vda_scsi_cmd {

	#define ATTO_VDA_SCSI_VER0 0
	#define ATTO_VDA_SCSI_VER ATTO_VDA_SCSI_VER0

	u8 cdb[16];
	u32 flags;
	u32 data_length;
	u32 residual_length;
	u16 target_id;
	u8 sense_len;
	u8 scsi_stat;
	u8 reserved[8];
	u8 sense_data[80];
};

struct __packed atto_ioctl_vda_flash_cmd {

	#define ATTO_VDA_FLASH_VER0 0
	#define ATTO_VDA_FLASH_VER ATTO_VDA_FLASH_VER0

	u32 flash_addr;
	u32 data_length;
	u8 sub_func;
	u8 reserved[15];

	union {
		struct {
			u32 flash_size;
			u32 page_size;
			u8 prod_info[32];
		} info;

		struct {
			char file_name[16]; /* 8.3 fname, NULL term, wc=* */
			u32 file_size;
		} file;
	} data;

};

struct __packed atto_ioctl_vda_diag_cmd {

	#define ATTO_VDA_DIAG_VER0 0
	#define ATTO_VDA_DIAG_VER ATTO_VDA_DIAG_VER0

	u64 local_addr;
	u32 data_length;
	u8 sub_func;
	u8 flags;
	u8 reserved[3];
};

struct __packed atto_ioctl_vda_cli_cmd {

	#define ATTO_VDA_CLI_VER0 0
	#define ATTO_VDA_CLI_VER ATTO_VDA_CLI_VER0

	u32 cmd_rsp_len;
};

struct __packed atto_ioctl_vda_smp_cmd {

	#define ATTO_VDA_SMP_VER0 0
	#define ATTO_VDA_SMP_VER ATTO_VDA_SMP_VER0

	u64 dest;
	u32 cmd_rsp_len;
};

struct __packed atto_ioctl_vda_cfg_cmd {

	#define ATTO_VDA_CFG_VER0 0
	#define ATTO_VDA_CFG_VER ATTO_VDA_CFG_VER0

	u32 data_length;
	u8 cfg_func;
	u8 reserved[11];

	union {
		u8 bytes[112];
		struct atto_vda_cfg_init init;
	} data;

};

struct __packed atto_ioctl_vda_mgt_cmd {

	#define ATTO_VDA_MGT_VER0 0
	#define ATTO_VDA_MGT_VER ATTO_VDA_MGT_VER0

	u8 mgt_func;
	u8 scan_generation;
	u16 dev_index;
	u32 data_length;
	u8 reserved[8];
	union {
		u8 bytes[112];
		struct atto_vda_devinfo dev_info;
		struct atto_vda_grp_info grp_info;
		struct atto_vdapart_info part_info;
		struct atto_vda_dh_info dh_info;
		struct atto_vda_metrics_info metrics_info;
		struct atto_vda_schedule_info sched_info;
		struct atto_vda_n_vcache_info nvcache_info;
		struct atto_vda_buzzer_info buzzer_info;
		struct atto_vda_adapter_info adapter_info;
		struct atto_vda_temp_info temp_info;
		struct atto_vda_fan_info fan_info;
	} data;
};

struct __packed atto_ioctl_vda_gsv_cmd {

	#define ATTO_VDA_GSV_VER0 0
	#define ATTO_VDA_GSV_VER ATTO_VDA_GSV_VER0

	u8 rsp_len;
	u8 reserved[7];
	u8 version_info[1];
	#define ATTO_VDA_VER_UNSUPPORTED 0xFF

};

struct __packed atto_ioctl_vda {
	u8 version;
	u8 function; /* VDA_FUNC_XXXX */
	u8 status; /* ATTO_STS_XXX */
	u8 vda_status; /* RS_XXX (if status == ATTO_STS_SUCCESS) */
	u32 data_length;
	u8 reserved[8];

	union {
		struct atto_ioctl_vda_scsi_cmd scsi;
		struct atto_ioctl_vda_flash_cmd flash;
		struct atto_ioctl_vda_diag_cmd diag;
		struct atto_ioctl_vda_cli_cmd cli;
		struct atto_ioctl_vda_smp_cmd smp;
		struct atto_ioctl_vda_cfg_cmd cfg;
		struct atto_ioctl_vda_mgt_cmd mgt;
		struct atto_ioctl_vda_gsv_cmd gsv;
		u8 cmd_info[256];
	} cmd;

	union {
		u8 data[1];
		struct atto_vda_devinfo2 dev_info2;
	} data;

};

struct __packed atto_ioctl_smp {
	u8 version;
	#define ATTO_SMP_VERSION0 0
	#define ATTO_SMP_VERSION1 1
	#define ATTO_SMP_VERSION2 2
	#define ATTO_SMP_VERSION ATTO_SMP_VERSION2

	u8 function;
#define ATTO_SMP_FUNC_DISC_SMP 0x00
#define ATTO_SMP_FUNC_DISC_TARG 0x01
#define ATTO_SMP_FUNC_SEND_CMD 0x02
#define ATTO_SMP_FUNC_DISC_TARG_DIRECT 0x03
#define ATTO_SMP_FUNC_SEND_CMD_DIRECT 0x04
#define ATTO_SMP_FUNC_DISC_SMP_DIRECT 0x05

	u8 status; /* ATTO_STS_XXX */
	u8 smp_status; /* if status == ATTO_STS_SUCCESS */
	#define ATTO_SMP_STS_SUCCESS 0x00
	#define ATTO_SMP_STS_FAILURE 0x01
	#define ATTO_SMP_STS_RESCAN 0x02
	#define ATTO_SMP_STS_NOT_FOUND 0x03

	u16 target_id;
	u8 phy_id;
	u8 dev_index;
	u64 smp_sas_addr;
	u64 targ_sas_addr;
	u32 req_length;
	u32 rsp_length;
	u8 flags;
	#define ATTO_SMPF_ROOT_EXP 0x01 /* expander direct attached */

	u8 reserved[31];

	union {
		u8 byte[1];
		u32 dword[1];
	} data;

};

struct __packed atto_express_ioctl {
	struct atto_express_ioctl_header header;

	union {
		struct atto_firmware_rw_request fwrw;
		struct atto_param_rw_request prw;
		struct atto_channel_list chanlist;
		struct atto_channel_info chaninfo;
		struct atto_ioctl ioctl_hba;
		struct atto_module_info modinfo;
		struct atto_ioctl_vda ioctl_vda;
		struct atto_ioctl_smp ioctl_smp;
		struct atto_csmi csmi;

	} data;
};

/* The struct associated with the code is listed after the definition */
#define EXPRESS_IOCTL_MIN 0x4500
#define EXPRESS_IOCTL_RW_FIRMWARE 0x4500 /* FIRMWARERW */
#define EXPRESS_IOCTL_READ_PARAMS 0x4501 /* PARAMRW */
#define EXPRESS_IOCTL_WRITE_PARAMS 0x4502 /* PARAMRW */
#define EXPRESS_IOCTL_FC_API 0x4503 /* internal */
#define EXPRESS_IOCTL_GET_CHANNELS 0x4504 /* CHANNELLIST */
#define EXPRESS_IOCTL_CHAN_INFO 0x4505 /* CHANNELINFO */
#define EXPRESS_IOCTL_DEFAULT_PARAMS 0x4506 /* PARAMRW */
#define EXPRESS_ADDR_MEMORY 0x4507 /* MEMADDR */
#define EXPRESS_RW_MEMORY 0x4508 /* MEMRW */
#define EXPRESS_TSDK_DUMP 0x4509 /* TSDKDUMP */
#define EXPRESS_IOCTL_SMP 0x450A /* IOCTL_SMP */
#define EXPRESS_CSMI 0x450B /* CSMI */
#define EXPRESS_IOCTL_HBA 0x450C /* IOCTL_HBA */
#define EXPRESS_IOCTL_VDA 0x450D /* IOCTL_VDA */
#define EXPRESS_IOCTL_GET_ID 0x450E /* GET_ID */
#define EXPRESS_IOCTL_GET_MOD_INFO 0x450F /* MODULE_INFO */
#define EXPRESS_IOCTL_MAX 0x450F

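/*
 * Usage sketch (assumptions: an open handle to the adapter's character
 * device and a request prepared as in the header example above; the fd
 * name is hypothetical):
 *
 *	if (ioctl(adapter_fd, EXPRESS_IOCTL_GET_CHANNELS, &req) == 0 &&
 *	    req.header.return_code == IOCTL_SUCCESS)
 *		(req.data.chanlist now holds the channel list)
 */
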
#endif
diff --git a/drivers/scsi/esas2r/atvda.h b/drivers/scsi/esas2r/atvda.h
new file mode 100644
index 000000000000..5fc1f991d24e
--- /dev/null
+++ b/drivers/scsi/esas2r/atvda.h
@@ -0,0 +1,1319 @@
/* linux/drivers/scsi/esas2r/atvda.h
 * ATTO VDA interface definitions
 *
 * Copyright (c) 2001-2013 ATTO Technology, Inc.
 * (mailto:linuxdrivers@attotech.com)
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/


#ifndef ATVDA_H
#define ATVDA_H

struct __packed atto_dev_addr {
	u64 dev_port;
	u64 hba_port;
	u8 lun;
	u8 flags;
	#define VDA_DEVADDRF_SATA 0x01
	#define VDA_DEVADDRF_SSD 0x02
	u8 link_speed; /* VDALINKSPEED_xxx */
	u8 pad[1];
};

/* dev_addr2 was added for 64-bit alignment */

struct __packed atto_dev_addr2 {
	u64 dev_port;
	u64 hba_port;
	u8 lun;
	u8 flags;
	u8 link_speed;
	u8 pad[5];
};

struct __packed atto_vda_sge {
	u32 length;
	u64 address;
};


/* VDA request function codes */

#define VDA_FUNC_SCSI 0x00
#define VDA_FUNC_FLASH 0x01
#define VDA_FUNC_DIAG 0x02
#define VDA_FUNC_AE 0x03
#define VDA_FUNC_CLI 0x04
#define VDA_FUNC_IOCTL 0x05
#define VDA_FUNC_CFG 0x06
#define VDA_FUNC_MGT 0x07
#define VDA_FUNC_GSV 0x08


/* VDA request status values. for host driver considerations, values for
 * SCSI requests start at zero. other requests may use these values as well. */

#define RS_SUCCESS 0x00 /*! successful completion */
#define RS_INV_FUNC 0x01 /*! invalid command function */
#define RS_BUSY 0x02 /*! insufficient resources */
#define RS_SEL 0x03 /*! no target at target_id */
#define RS_NO_LUN 0x04 /*! invalid LUN */
#define RS_TIMEOUT 0x05 /*! request timeout */
#define RS_OVERRUN 0x06 /*! data overrun */
#define RS_UNDERRUN 0x07 /*! data underrun */
#define RS_SCSI_ERROR 0x08 /*! SCSI error occurred */
#define RS_ABORTED 0x0A /*! command aborted */
#define RS_RESID_MISM 0x0B /*! residual length incorrect */
#define RS_TM_FAILED 0x0C /*! task management failed */
#define RS_RESET 0x0D /*! aborted due to bus reset */
#define RS_ERR_DMA_SG 0x0E /*! error reading SG list */
#define RS_ERR_DMA_DATA 0x0F /*! error transferring data */
#define RS_UNSUPPORTED 0x10 /*! unsupported request */
#define RS_SEL2 0x70 /*! internal generated RS_SEL */
#define RS_VDA_BASE 0x80 /*! base of VDA-specific errors */
#define RS_MGT_BASE 0x80 /*! base of VDA management errors */
#define RS_SCAN_FAIL (RS_MGT_BASE + 0x00)
#define RS_DEV_INVALID (RS_MGT_BASE + 0x01)
#define RS_DEV_ASSIGNED (RS_MGT_BASE + 0x02)
#define RS_DEV_REMOVE (RS_MGT_BASE + 0x03)
#define RS_DEV_LOST (RS_MGT_BASE + 0x04)
#define RS_SCAN_GEN (RS_MGT_BASE + 0x05)
#define RS_GRP_INVALID (RS_MGT_BASE + 0x08)
#define RS_GRP_EXISTS (RS_MGT_BASE + 0x09)
#define RS_GRP_LIMIT (RS_MGT_BASE + 0x0A)
#define RS_GRP_INTLV (RS_MGT_BASE + 0x0B)
#define RS_GRP_SPAN (RS_MGT_BASE + 0x0C)
#define RS_GRP_TYPE (RS_MGT_BASE + 0x0D)
#define RS_GRP_MEMBERS (RS_MGT_BASE + 0x0E)
#define RS_GRP_COMMIT (RS_MGT_BASE + 0x0F)
#define RS_GRP_REBUILD (RS_MGT_BASE + 0x10)
#define RS_GRP_REBUILD_TYPE (RS_MGT_BASE + 0x11)
#define RS_GRP_BLOCK_SIZE (RS_MGT_BASE + 0x12)
#define RS_CFG_SAVE (RS_MGT_BASE + 0x14)
#define RS_PART_LAST (RS_MGT_BASE + 0x18)
#define RS_ELEM_INVALID (RS_MGT_BASE + 0x19)
#define RS_PART_MAPPED (RS_MGT_BASE + 0x1A)
#define RS_PART_TARGET (RS_MGT_BASE + 0x1B)
#define RS_PART_LUN (RS_MGT_BASE + 0x1C)
#define RS_PART_DUP (RS_MGT_BASE + 0x1D)
#define RS_PART_NOMAP (RS_MGT_BASE + 0x1E)
#define RS_PART_MAX (RS_MGT_BASE + 0x1F)
#define RS_PART_CAP (RS_MGT_BASE + 0x20)
#define RS_PART_STATE (RS_MGT_BASE + 0x21)
#define RS_TEST_IN_PROG (RS_MGT_BASE + 0x22)
#define RS_METRICS_ERROR (RS_MGT_BASE + 0x23)
#define RS_HS_ERROR (RS_MGT_BASE + 0x24)
#define RS_NO_METRICS_TEST (RS_MGT_BASE + 0x25)
#define RS_BAD_PARAM (RS_MGT_BASE + 0x26)
#define RS_GRP_MEMBER_SIZE (RS_MGT_BASE + 0x27)
#define RS_FLS_BASE 0xB0 /*! base of VDA flash errors */
#define RS_FLS_ERR_AREA (RS_FLS_BASE + 0x00)
#define RS_FLS_ERR_BUSY (RS_FLS_BASE + 0x01)
#define RS_FLS_ERR_RANGE (RS_FLS_BASE + 0x02)
#define RS_FLS_ERR_BEGIN (RS_FLS_BASE + 0x03)
#define RS_FLS_ERR_CHECK (RS_FLS_BASE + 0x04)
#define RS_FLS_ERR_FAIL (RS_FLS_BASE + 0x05)
#define RS_FLS_ERR_RSRC (RS_FLS_BASE + 0x06)
#define RS_FLS_ERR_NOFILE (RS_FLS_BASE + 0x07)
#define RS_FLS_ERR_FSIZE (RS_FLS_BASE + 0x08)
#define RS_CFG_BASE 0xC0 /*! base of VDA configuration errors */
#define RS_CFG_ERR_BUSY (RS_CFG_BASE + 0)
#define RS_CFG_ERR_SGE (RS_CFG_BASE + 1)
#define RS_CFG_ERR_DATE (RS_CFG_BASE + 2)
#define RS_CFG_ERR_TIME (RS_CFG_BASE + 3)
#define RS_DEGRADED 0xFB /*! degraded mode */
#define RS_CLI_INTERNAL 0xFC /*! VDA CLI internal error */
#define RS_VDA_INTERNAL 0xFD /*! catch-all */
#define RS_PENDING 0xFE /*! pending, not started */
#define RS_STARTED 0xFF /*! started */

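/*
 * For instance (editorial note), a completion code can be classified
 * with simple range checks against the bases defined above:
 *
 *	if (rs == RS_SUCCESS)
 *		(done)
 *	else if (rs >= RS_VDA_BASE)
 *		(VDA management/flash/configuration error)
 *	else
 *		(SCSI-level error, including RS_SEL2)
 */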

/* flash request subfunctions. these are used in both the IOCTL and the
 * driver-firmware interface (VDA_FUNC_FLASH). */

#define VDA_FLASH_BEGINW 0x00
#define VDA_FLASH_READ 0x01
#define VDA_FLASH_WRITE 0x02
#define VDA_FLASH_COMMIT 0x03
#define VDA_FLASH_CANCEL 0x04
#define VDA_FLASH_INFO 0x05
#define VDA_FLASH_FREAD 0x06
#define VDA_FLASH_FWRITE 0x07
#define VDA_FLASH_FINFO 0x08


/* IOCTL request subfunctions. these identify the payload type for
 * VDA_FUNC_IOCTL.
 */

#define VDA_IOCTL_HBA 0x00
#define VDA_IOCTL_CSMI 0x01
#define VDA_IOCTL_SMP 0x02

struct __packed atto_vda_devinfo {
	struct atto_dev_addr dev_addr;
	u8 vendor_id[8];
	u8 product_id[16];
	u8 revision[4];
	u64 capacity;
	u32 block_size;
	u8 dev_type;

	union {
		u8 dev_status;
		#define VDADEVSTAT_INVALID 0x00
		#define VDADEVSTAT_CORRUPT VDADEVSTAT_INVALID
		#define VDADEVSTAT_ASSIGNED 0x01
		#define VDADEVSTAT_SPARE 0x02
		#define VDADEVSTAT_UNAVAIL 0x03
		#define VDADEVSTAT_PT_MAINT 0x04
		#define VDADEVSTAT_LCLSPARE 0x05
		#define VDADEVSTAT_UNUSEABLE 0x06
		#define VDADEVSTAT_AVAIL 0xFF

		u8 op_ctrl;
		#define VDA_DEV_OP_CTRL_START 0x01
		#define VDA_DEV_OP_CTRL_HALT 0x02
		#define VDA_DEV_OP_CTRL_RESUME 0x03
		#define VDA_DEV_OP_CTRL_CANCEL 0x04
	};

	u8 member_state;
	#define VDAMBRSTATE_ONLINE 0x00
	#define VDAMBRSTATE_DEGRADED 0x01
	#define VDAMBRSTATE_UNAVAIL 0x02
	#define VDAMBRSTATE_FAULTED 0x03
	#define VDAMBRSTATE_MISREAD 0x04
	#define VDAMBRSTATE_INCOMPAT 0x05

	u8 operation;
	#define VDAOP_NONE 0x00
	#define VDAOP_REBUILD 0x01
	#define VDAOP_ERASE 0x02
	#define VDAOP_PATTERN 0x03
	#define VDAOP_CONVERSION 0x04
	#define VDAOP_FULL_INIT 0x05
	#define VDAOP_QUICK_INIT 0x06
	#define VDAOP_SECT_SCAN 0x07
	#define VDAOP_SECT_SCAN_PARITY 0x08
	#define VDAOP_SECT_SCAN_PARITY_FIX 0x09
	#define VDAOP_RECOV_REBUILD 0x0A

	u8 op_status;
	#define VDAOPSTAT_OK 0x00
	#define VDAOPSTAT_FAULTED 0x01
	#define VDAOPSTAT_HALTED 0x02
	#define VDAOPSTAT_INT 0x03

	u8 progress; /* 0 - 100% */
	u16 ses_dev_index;
	#define VDASESDI_INVALID 0xFFFF

	u8 serial_no[32];

	union {
		u16 target_id;
		#define VDATGTID_INVALID 0xFFFF

		u16 features_mask;
	};

	u16 lun;
	u16 features;
	#define VDADEVFEAT_ENC_SERV 0x0001
	#define VDADEVFEAT_IDENT 0x0002
	#define VDADEVFEAT_DH_SUPP 0x0004
	#define VDADEVFEAT_PHYS_ID 0x0008

	u8 ses_element_id;
	u8 link_speed;
	#define VDALINKSPEED_UNKNOWN 0x00
	#define VDALINKSPEED_1GB 0x01
	#define VDALINKSPEED_1_5GB 0x02
	#define VDALINKSPEED_2GB 0x03
	#define VDALINKSPEED_3GB 0x04
	#define VDALINKSPEED_4GB 0x05
	#define VDALINKSPEED_6GB 0x06
	#define VDALINKSPEED_8GB 0x07

	u16 phys_target_id;
	u8 reserved[2];
};


/*! struct atto_vda_devinfo2 is a replacement for atto_vda_devinfo. it
 * extends beyond the 0x70 bytes allowed in atto_vda_mgmt_req; therefore,
 * the entire structure is DMAed between the firmware and host buffer and
 * the data will always be in little endian format.
 */

struct __packed atto_vda_devinfo2 {
	struct atto_dev_addr dev_addr;
	u8 vendor_id[8];
	u8 product_id[16];
	u8 revision[4];
	u64 capacity;
	u32 block_size;
	u8 dev_type;
	u8 dev_status;
	u8 member_state;
	u8 operation;
	u8 op_status;
	u8 progress;
	u16 ses_dev_index;
	u8 serial_no[32];
	union {
		u16 target_id;
		u16 features_mask;
	};

	u16 lun;
	u16 features;
	u8 ses_element_id;
	u8 link_speed;
	u16 phys_target_id;
	u8 reserved[2];

/* This is where fields specific to struct atto_vda_devinfo2 begin. Note
 * that the structure version started at one so applications that unionize this
 * structure with atto_vda_dev_info can differentiate them if desired.
 */

	u8 version;
	#define VDADEVINFO_VERSION0 0x00
	#define VDADEVINFO_VERSION1 0x01
	#define VDADEVINFO_VERSION2 0x02
	#define VDADEVINFO_VERSION3 0x03
	#define VDADEVINFO_VERSION VDADEVINFO_VERSION3

	u8 reserved2[3];

	/* sector scanning fields */

	u32 ss_curr_errors;
	u64 ss_curr_scanned;
	u32 ss_curr_recvrd;
	u32 ss_scan_length;
	u32 ss_total_errors;
	u32 ss_total_recvrd;
	u32 ss_num_scans;

	/* grp_name was added in version 2 of this structure. */

	char grp_name[15];
	u8 reserved3[4];

	/* dev_addr_list was added in version 3 of this structure. */

	u8 num_dev_addr;
	struct atto_dev_addr2 dev_addr_list[8];
};
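
/*
 * Per the comment above, an application that unionizes the two layouts
 * can use the version byte to tell them apart (editorial sketch, the
 * pointer name is hypothetical):
 *
 *	if (p->version != 0)
 *		(the extended atto_vda_devinfo2 fields are valid)
 */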


struct __packed atto_vda_grp_info {
	u8 grp_index;
	#define VDA_MAX_RAID_GROUPS 32

	char grp_name[15];
	u64 capacity;
	u32 block_size;
	u32 interleave;
	u8 type;
	#define VDA_GRP_TYPE_RAID0 0
	#define VDA_GRP_TYPE_RAID1 1
	#define VDA_GRP_TYPE_RAID4 4
	#define VDA_GRP_TYPE_RAID5 5
	#define VDA_GRP_TYPE_RAID6 6
	#define VDA_GRP_TYPE_RAID10 10
	#define VDA_GRP_TYPE_RAID40 40
	#define VDA_GRP_TYPE_RAID50 50
	#define VDA_GRP_TYPE_RAID60 60
	#define VDA_GRP_TYPE_DVRAID_HS 252
	#define VDA_GRP_TYPE_DVRAID_NOHS 253
	#define VDA_GRP_TYPE_JBOD 254
	#define VDA_GRP_TYPE_SPARE 255

	union {
		u8 status;
		#define VDA_GRP_STAT_INVALID 0x00
		#define VDA_GRP_STAT_NEW 0x01
		#define VDA_GRP_STAT_WAITING 0x02
		#define VDA_GRP_STAT_ONLINE 0x03
		#define VDA_GRP_STAT_DEGRADED 0x04
		#define VDA_GRP_STAT_OFFLINE 0x05
		#define VDA_GRP_STAT_DELETED 0x06
		#define VDA_GRP_STAT_RECOV_BASIC 0x07
		#define VDA_GRP_STAT_RECOV_EXTREME 0x08

		u8 op_ctrl;
		#define VDA_GRP_OP_CTRL_START 0x01
		#define VDA_GRP_OP_CTRL_HALT 0x02
		#define VDA_GRP_OP_CTRL_RESUME 0x03
		#define VDA_GRP_OP_CTRL_CANCEL 0x04
	};

	u8 rebuild_state;
	#define VDA_RBLD_NONE 0x00
	#define VDA_RBLD_REBUILD 0x01
	#define VDA_RBLD_ERASE 0x02
	#define VDA_RBLD_PATTERN 0x03
	#define VDA_RBLD_CONV 0x04
	#define VDA_RBLD_FULL_INIT 0x05
	#define VDA_RBLD_QUICK_INIT 0x06
	#define VDA_RBLD_SECT_SCAN 0x07
	#define VDA_RBLD_SECT_SCAN_PARITY 0x08
	#define VDA_RBLD_SECT_SCAN_PARITY_FIX 0x09
	#define VDA_RBLD_RECOV_REBUILD 0x0A
	#define VDA_RBLD_RECOV_BASIC 0x0B
	#define VDA_RBLD_RECOV_EXTREME 0x0C

	u8 span_depth;
	u8 progress;
	u8 mirror_width;
	u8 stripe_width;
	u8 member_cnt;

	union {
		u16 members[32];
		#define VDA_MEMBER_MISSING 0xFFFF
		#define VDA_MEMBER_NEW 0xFFFE
		u16 features_mask;
	};

	u16 features;
	#define VDA_GRP_FEAT_HOTSWAP 0x0001
	#define VDA_GRP_FEAT_SPDRD_MASK 0x0006
	#define VDA_GRP_FEAT_SPDRD_DIS 0x0000
	#define VDA_GRP_FEAT_SPDRD_ENB 0x0002
	#define VDA_GRP_FEAT_SPDRD_AUTO 0x0004
	#define VDA_GRP_FEAT_IDENT 0x0008
	#define VDA_GRP_FEAT_RBLDPRI_MASK 0x0030
	#define VDA_GRP_FEAT_RBLDPRI_LOW 0x0010
	#define VDA_GRP_FEAT_RBLDPRI_SAME 0x0020
	#define VDA_GRP_FEAT_RBLDPRI_HIGH 0x0030
	#define VDA_GRP_FEAT_WRITE_CACHE 0x0040
	#define VDA_GRP_FEAT_RBLD_RESUME 0x0080
	#define VDA_GRP_FEAT_SECT_RESUME 0x0100
	#define VDA_GRP_FEAT_INIT_RESUME 0x0200
	#define VDA_GRP_FEAT_SSD 0x0400
	#define VDA_GRP_FEAT_BOOT_DEV 0x0800

	/*
	 * for backward compatibility, a prefetch value of zero means the
	 * setting is ignored/unsupported. therefore, the firmware supported
	 * 0-6 values are incremented to 1-7.
	 */

	u8 prefetch;
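	/*
	 * e.g. a stored prefetch of 3 corresponds to firmware setting 2,
	 * while 0 still means unsupported/ignored (editorial example).
	 */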
	u8 op_status;
	#define VDAGRPOPSTAT_MASK 0x0F
	#define VDAGRPOPSTAT_INVALID 0x00
	#define VDAGRPOPSTAT_OK 0x01
	#define VDAGRPOPSTAT_FAULTED 0x02
	#define VDAGRPOPSTAT_HALTED 0x03
	#define VDAGRPOPSTAT_INT 0x04
	#define VDAGRPOPPROC_MASK 0xF0
	#define VDAGRPOPPROC_STARTABLE 0x10
	#define VDAGRPOPPROC_CANCELABLE 0x20
	#define VDAGRPOPPROC_RESUMABLE 0x40
	#define VDAGRPOPPROC_HALTABLE 0x80
	u8 over_provision;
	u8 reserved[3];

};


struct __packed atto_vdapart_info {
	u8 part_no;
	#define VDA_MAX_PARTITIONS 128

	char grp_name[15];
	u64 part_size;
	u64 start_lba;
	u32 block_size;
	u16 target_id;
	u8 LUN;
	char serial_no[41];
	u8 features;
	#define VDAPI_FEAT_WRITE_CACHE 0x01

	u8 reserved[7];
};


struct __packed atto_vda_dh_info {
	u8 req_type;
	#define VDADH_RQTYPE_CACHE 0x01
	#define VDADH_RQTYPE_FETCH 0x02
	#define VDADH_RQTYPE_SET_STAT 0x03
	#define VDADH_RQTYPE_GET_STAT 0x04

	u8 req_qual;
	#define VDADH_RQQUAL_SMART 0x01
	#define VDADH_RQQUAL_MEDDEF 0x02
	#define VDADH_RQQUAL_INFOEXC 0x04

	u8 num_smart_attribs;
	u8 status;
	#define VDADH_STAT_DISABLE 0x00
	#define VDADH_STAT_ENABLE 0x01

	u32 med_defect_cnt;
	u32 info_exc_cnt;
	u8 smart_status;
	#define VDADH_SMARTSTAT_OK 0x00
	#define VDADH_SMARTSTAT_ERR 0x01

	u8 reserved[35];
	struct atto_vda_sge sge[1];
};


struct __packed atto_vda_dh_smart {
	u8 attrib_id;
	u8 current_val;
	u8 worst;
	u8 threshold;
	u8 raw_data[6];
	u8 raw_attrib_status;
	#define VDADHSM_RAWSTAT_PREFAIL_WARRANTY 0x01
	#define VDADHSM_RAWSTAT_ONLINE_COLLECTION 0x02
	#define VDADHSM_RAWSTAT_PERFORMANCE_ATTR 0x04
	#define VDADHSM_RAWSTAT_ERROR_RATE_ATTR 0x08
	#define VDADHSM_RAWSTAT_EVENT_COUNT_ATTR 0x10
	#define VDADHSM_RAWSTAT_SELF_PRESERVING_ATTR 0x20

	u8 calc_attrib_status;
	#define VDADHSM_CALCSTAT_UNKNOWN 0x00
	#define VDADHSM_CALCSTAT_GOOD 0x01
	#define VDADHSM_CALCSTAT_PREFAIL 0x02
	#define VDADHSM_CALCSTAT_OLDAGE 0x03

	u8 reserved[4];
};


struct __packed atto_vda_metrics_info {
	u8 data_version;
	#define VDAMET_VERSION0 0x00
	#define VDAMET_VERSION VDAMET_VERSION0

	u8 metrics_action;
	#define VDAMET_METACT_NONE 0x00
	#define VDAMET_METACT_START 0x01
	#define VDAMET_METACT_STOP 0x02
	#define VDAMET_METACT_RETRIEVE 0x03
	#define VDAMET_METACT_CLEAR 0x04

	u8 test_action;
	#define VDAMET_TSTACT_NONE 0x00
	#define VDAMET_TSTACT_STRT_INIT 0x01
	#define VDAMET_TSTACT_STRT_READ 0x02
	#define VDAMET_TSTACT_STRT_VERIFY 0x03
	#define VDAMET_TSTACT_STRT_INIT_VERIFY 0x04
	#define VDAMET_TSTACT_STOP 0x05

	u8 num_dev_indexes;
	#define VDAMET_ALL_DEVICES 0xFF

	u16 dev_indexes[32];
	u8 reserved[12];
	struct atto_vda_sge sge[1];
};


struct __packed atto_vda_metrics_data {
	u16 dev_index;
	u16 length;
	#define VDAMD_LEN_LAST 0x8000
	#define VDAMD_LEN_MASK 0x0FFF

	u32 flags;
	#define VDAMDF_RUN 0x00000007
	#define VDAMDF_RUN_READ 0x00000001
	#define VDAMDF_RUN_WRITE 0x00000002
	#define VDAMDF_RUN_ALL 0x00000004
	#define VDAMDF_READ 0x00000010
	#define VDAMDF_WRITE 0x00000020
	#define VDAMDF_ALL 0x00000040
	#define VDAMDF_DRIVETEST 0x40000000
	#define VDAMDF_NEW 0x80000000

	u64 total_read_data;
	u64 total_write_data;
	u64 total_read_io;
	u64 total_write_io;
	u64 read_start_time;
	u64 read_stop_time;
	u64 write_start_time;
	u64 write_stop_time;
	u64 read_maxio_time;
	u64 write_maxio_time;
	u64 read_totalio_time;
	u64 write_totalio_time;
	u64 read_total_errs;
	u64 write_total_errs;
	u64 read_recvrd_errs;
	u64 write_recvrd_errs;
	u64 miscompares;
};


struct __packed atto_vda_schedule_info {
	u8 schedule_type;
	#define VDASI_SCHTYPE_ONETIME 0x01
	#define VDASI_SCHTYPE_DAILY 0x02
	#define VDASI_SCHTYPE_WEEKLY 0x03

	u8 operation;
	#define VDASI_OP_NONE 0x00
	#define VDASI_OP_CREATE 0x01
	#define VDASI_OP_CANCEL 0x02

	u8 hour;
	u8 minute;
	u8 day;
	#define VDASI_DAY_NONE 0x00

	u8 progress;
	#define VDASI_PROG_NONE 0xFF

	u8 event_type;
	#define VDASI_EVTTYPE_SECT_SCAN 0x01
	#define VDASI_EVTTYPE_SECT_SCAN_PARITY 0x02
	#define VDASI_EVTTYPE_SECT_SCAN_PARITY_FIX 0x03

	u8 recurrences;
	#define VDASI_RECUR_FOREVER 0x00

	u32 id;
	#define VDASI_ID_NONE 0x00

	char grp_name[15];
	u8 reserved[85];
};


struct __packed atto_vda_n_vcache_info {
	u8 super_cap_status;
	#define VDANVCI_SUPERCAP_NOT_PRESENT 0x00
	#define VDANVCI_SUPERCAP_FULLY_CHARGED 0x01
	#define VDANVCI_SUPERCAP_NOT_CHARGED 0x02

	u8 nvcache_module_status;
	#define VDANVCI_NVCACHEMODULE_NOT_PRESENT 0x00
	#define VDANVCI_NVCACHEMODULE_PRESENT 0x01

	u8 protection_mode;
	#define VDANVCI_PROTMODE_HI_PROTECT 0x00
	#define VDANVCI_PROTMODE_HI_PERFORM 0x01

	u8 reserved[109];
};


struct __packed atto_vda_buzzer_info {
	u8 status;
	#define VDABUZZI_BUZZER_OFF 0x00
	#define VDABUZZI_BUZZER_ON 0x01
	#define VDABUZZI_BUZZER_LAST 0x02

	u8 reserved[3];
	u32 duration;
	#define VDABUZZI_DURATION_INDEFINITE 0xffffffff

	u8 reserved2[104];
};


struct __packed atto_vda_adapter_info {
	u8 version;
	#define VDAADAPINFO_VERSION0 0x00
	#define VDAADAPINFO_VERSION VDAADAPINFO_VERSION0

	u8 reserved;
	signed short utc_offset;
	u32 utc_time;
	u32 features;
	#define VDA_ADAP_FEAT_IDENT 0x0001
	#define VDA_ADAP_FEAT_BUZZ_ERR 0x0002
	#define VDA_ADAP_FEAT_UTC_TIME 0x0004

	u32 valid_features;
	char active_config[33];
	u8 temp_count;
	u8 fan_count;
	u8 reserved3[61];
};


struct __packed atto_vda_temp_info {
	u8 temp_index;
	u8 max_op_temp;
	u8 min_op_temp;
	u8 op_temp_warn;
	u8 temperature;
	u8 type;
	#define VDA_TEMP_TYPE_CPU 1

	u8 reserved[106];
};


struct __packed atto_vda_fan_info {
	u8 fan_index;
	u8 status;
	#define VDA_FAN_STAT_UNKNOWN 0
	#define VDA_FAN_STAT_NORMAL 1
	#define VDA_FAN_STAT_FAIL 2

	u16 crit_threshold;
	u16 warn_threshold;
	u16 speed;
	u8 reserved[104];
};


/* VDA management commands */

#define VDAMGT_DEV_SCAN 0x00
#define VDAMGT_DEV_INFO 0x01
#define VDAMGT_DEV_CLEAN 0x02
#define VDAMGT_DEV_IDENTIFY 0x03
#define VDAMGT_DEV_IDENTSTOP 0x04
#define VDAMGT_DEV_PT_INFO 0x05
#define VDAMGT_DEV_FEATURES 0x06
#define VDAMGT_DEV_PT_FEATURES 0x07
#define VDAMGT_DEV_HEALTH_REQ 0x08
#define VDAMGT_DEV_METRICS 0x09
#define VDAMGT_DEV_INFO2 0x0A
#define VDAMGT_DEV_OPERATION 0x0B
#define VDAMGT_DEV_INFO2_BYADDR 0x0C
#define VDAMGT_GRP_INFO 0x10
#define VDAMGT_GRP_CREATE 0x11
#define VDAMGT_GRP_DELETE 0x12
#define VDAMGT_ADD_STORAGE 0x13
#define VDAMGT_MEMBER_ADD 0x14
#define VDAMGT_GRP_COMMIT 0x15
#define VDAMGT_GRP_REBUILD 0x16
#define VDAMGT_GRP_COMMIT_INIT 0x17
#define VDAMGT_QUICK_RAID 0x18
#define VDAMGT_GRP_FEATURES 0x19
#define VDAMGT_GRP_COMMIT_INIT_AUTOMAP 0x1A
#define VDAMGT_QUICK_RAID_INIT_AUTOMAP 0x1B
#define VDAMGT_GRP_OPERATION 0x1C
#define VDAMGT_CFG_SAVE 0x20
#define VDAMGT_LAST_ERROR 0x21
#define VDAMGT_ADAP_INFO 0x22
#define VDAMGT_ADAP_FEATURES 0x23
#define VDAMGT_TEMP_INFO 0x24
#define VDAMGT_FAN_INFO 0x25
#define VDAMGT_PART_INFO 0x30
#define VDAMGT_PART_MAP 0x31
#define VDAMGT_PART_UNMAP 0x32
#define VDAMGT_PART_AUTOMAP 0x33
#define VDAMGT_PART_SPLIT 0x34
#define VDAMGT_PART_MERGE 0x35
#define VDAMGT_SPARE_LIST 0x40
#define VDAMGT_SPARE_ADD 0x41
#define VDAMGT_SPARE_REMOVE 0x42
#define VDAMGT_LOCAL_SPARE_ADD 0x43
#define VDAMGT_SCHEDULE_EVENT 0x50
#define VDAMGT_SCHEDULE_INFO 0x51
#define VDAMGT_NVCACHE_INFO 0x60
#define VDAMGT_NVCACHE_SET 0x61
#define VDAMGT_BUZZER_INFO 0x70
#define VDAMGT_BUZZER_SET 0x71

765struct __packed atto_vda_ae_hdr {
766 u8 bylength;
767 u8 byflags;
768 #define VDAAE_HDRF_EVENT_ACK 0x01
769
770 u8 byversion;
771 #define VDAAE_HDR_VER_0 0
772
773 u8 bytype;
774 #define VDAAE_HDR_TYPE_RAID 1
775 #define VDAAE_HDR_TYPE_LU 2
776 #define VDAAE_HDR_TYPE_DISK 3
777 #define VDAAE_HDR_TYPE_RESET 4
778 #define VDAAE_HDR_TYPE_LOG_INFO 5
779 #define VDAAE_HDR_TYPE_LOG_WARN 6
780 #define VDAAE_HDR_TYPE_LOG_CRIT 7
781 #define VDAAE_HDR_TYPE_LOG_FAIL 8
782 #define VDAAE_HDR_TYPE_NVC 9
783 #define VDAAE_HDR_TYPE_TLG_INFO 10
784 #define VDAAE_HDR_TYPE_TLG_WARN 11
785 #define VDAAE_HDR_TYPE_TLG_CRIT 12
786 #define VDAAE_HDR_TYPE_PWRMGT 13
787 #define VDAAE_HDR_TYPE_MUTE 14
788 #define VDAAE_HDR_TYPE_DEV 15
789};
790
791
792struct __packed atto_vda_ae_raid {
793 struct atto_vda_ae_hdr hdr;
794 u32 dwflags;
795 #define VDAAE_GROUP_STATE 0x00000001
796 #define VDAAE_RBLD_STATE 0x00000002
797 #define VDAAE_RBLD_PROG 0x00000004
798 #define VDAAE_MEMBER_CHG 0x00000008
799 #define VDAAE_PART_CHG 0x00000010
800 #define VDAAE_MEM_STATE_CHG 0x00000020
801
802 u8 bygroup_state;
803 #define VDAAE_RAID_INVALID 0
804 #define VDAAE_RAID_NEW 1
805 #define VDAAE_RAID_WAITING 2
806 #define VDAAE_RAID_ONLINE 3
807 #define VDAAE_RAID_DEGRADED 4
808 #define VDAAE_RAID_OFFLINE 5
809 #define VDAAE_RAID_DELETED 6
810 #define VDAAE_RAID_BASIC 7
811 #define VDAAE_RAID_EXTREME 8
812 #define VDAAE_RAID_UNKNOWN 9
813
814 u8 byrebuild_state;
815 #define VDAAE_RBLD_NONE 0
816 #define VDAAE_RBLD_REBUILD 1
817 #define VDAAE_RBLD_ERASE 2
818 #define VDAAE_RBLD_PATTERN 3
819 #define VDAAE_RBLD_CONV 4
820 #define VDAAE_RBLD_FULL_INIT 5
821 #define VDAAE_RBLD_QUICK_INIT 6
822 #define VDAAE_RBLD_SECT_SCAN 7
823 #define VDAAE_RBLD_SECT_SCAN_PARITY 8
824 #define VDAAE_RBLD_SECT_SCAN_PARITY_FIX 9
825 #define VDAAE_RBLD_RECOV_REBUILD 10
826 #define VDAAE_RBLD_UNKNOWN 11
827
828 u8 byrebuild_progress;
829 u8 op_status;
830 #define VDAAE_GRPOPSTAT_MASK 0x0F
831 #define VDAAE_GRPOPSTAT_INVALID 0x00
832 #define VDAAE_GRPOPSTAT_OK 0x01
833 #define VDAAE_GRPOPSTAT_FAULTED 0x02
834 #define VDAAE_GRPOPSTAT_HALTED 0x03
835 #define VDAAE_GRPOPSTAT_INT 0x04
836 #define VDAAE_GRPOPPROC_MASK 0xF0
837 #define VDAAE_GRPOPPROC_STARTABLE 0x10
838 #define VDAAE_GRPOPPROC_CANCELABLE 0x20
839 #define VDAAE_GRPOPPROC_RESUMABLE 0x40
840 #define VDAAE_GRPOPPROC_HALTABLE 0x80
841 char acname[15];
842 u8 byreserved;
843 u8 byreserved2[0x80 - 0x1C];
844};
845
846
847struct __packed atto_vda_ae_lu_tgt_lun {
848 u16 wtarget_id;
849 u8 bylun;
850 u8 byreserved;
851};
852
853
854struct __packed atto_vda_ae_lu_tgt_lun_raid {
855 u16 wtarget_id;
856 u8 bylun;
857 u8 byreserved;
858 u32 dwinterleave;
859 u32 dwblock_size;
860};
861
862
863struct __packed atto_vda_ae_lu {
864 struct atto_vda_ae_hdr hdr;
865 u32 dwevent;
866 #define VDAAE_LU_DISC 0x00000001
867 #define VDAAE_LU_LOST 0x00000002
868 #define VDAAE_LU_STATE 0x00000004
869 #define VDAAE_LU_PASSTHROUGH 0x10000000
870 #define VDAAE_LU_PHYS_ID 0x20000000
871
872 u8 bystate;
873 #define VDAAE_LU_UNDEFINED 0
874 #define VDAAE_LU_NOT_PRESENT 1
875 #define VDAAE_LU_OFFLINE 2
876 #define VDAAE_LU_ONLINE 3
877 #define VDAAE_LU_DEGRADED 4
878 #define VDAAE_LU_FACTORY_DISABLED 5
879 #define VDAAE_LU_DELETED 6
880 #define VDAAE_LU_BUSSCAN 7
881 #define VDAAE_LU_UNKNOWN 8
882
883 u8 byreserved;
884 u16 wphys_target_id;
885
886 union {
887 struct atto_vda_ae_lu_tgt_lun tgtlun;
888 struct atto_vda_ae_lu_tgt_lun_raid tgtlun_raid;
889 } id;
890};
891
892
893struct __packed atto_vda_ae_disk {
894 struct atto_vda_ae_hdr hdr;
895};
896
897
898#define VDAAE_LOG_STRSZ 64
899
900struct __packed atto_vda_ae_log {
901 struct atto_vda_ae_hdr hdr;
902 char aclog_ascii[VDAAE_LOG_STRSZ];
903};
904
905
906#define VDAAE_TLG_STRSZ 56
907
908struct __packed atto_vda_ae_timestamp_log {
909 struct atto_vda_ae_hdr hdr;
910 u32 dwtimestamp;
911 char aclog_ascii[VDAAE_TLG_STRSZ];
912};
913
914
915struct __packed atto_vda_ae_nvc {
916 struct atto_vda_ae_hdr hdr;
917};
918
919
920struct __packed atto_vda_ae_dev {
921 struct atto_vda_ae_hdr hdr;
922 struct atto_dev_addr devaddr;
923};
924
925
926union atto_vda_ae {
927 struct atto_vda_ae_hdr hdr;
928 struct atto_vda_ae_disk disk;
929 struct atto_vda_ae_lu lu;
930 struct atto_vda_ae_raid raid;
931 struct atto_vda_ae_log log;
932 struct atto_vda_ae_timestamp_log tslog;
933 struct atto_vda_ae_nvc nvcache;
934 struct atto_vda_ae_dev dev;
935};
936
937
938struct __packed atto_vda_date_and_time {
939 u8 flags;
940 #define VDA_DT_DAY_MASK 0x07
941 #define VDA_DT_DAY_NONE 0x00
942 #define VDA_DT_DAY_SUN 0x01
943 #define VDA_DT_DAY_MON 0x02
944 #define VDA_DT_DAY_TUE 0x03
945 #define VDA_DT_DAY_WED 0x04
946 #define VDA_DT_DAY_THU 0x05
947 #define VDA_DT_DAY_FRI 0x06
948 #define VDA_DT_DAY_SAT 0x07
949 #define VDA_DT_PM 0x40
950 #define VDA_DT_MILITARY 0x80
951
952 u8 seconds;
953 u8 minutes;
954 u8 hours;
955 u8 day;
956 u8 month;
957 u16 year;
958};
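
/*
 * Illustrative encoding (editor's note, not part of the patch): 14:30:00
 * on a Wednesday in 24-hour format would be flags = VDA_DT_DAY_WED |
 * VDA_DT_MILITARY, hours = 14, minutes = 30, seconds = 0.
 */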
959
960#define SGE_LEN_LIMIT 0x003FFFFF /*! mask of segment length */
961#define SGE_LEN_MAX 0x003FF000 /*! maximum segment length */
962#define SGE_LAST 0x01000000 /*! last entry */
963#define SGE_ADDR_64 0x04000000 /*! 64-bit addressing flag */
964#define SGE_CHAIN 0x80000000 /*! chain descriptor flag */
965#define SGE_CHAIN_LEN 0x0000FFFF /*! mask of length in chain entries */
966#define SGE_CHAIN_SZ 0x00FF0000 /*! mask of size of chained buffer */
967
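/*
 * Illustrative sketch (editor's note, not part of the patch): composing the
 * control/length dword of a 64-bit SGE from the flags above. The helper
 * name is hypothetical.
 */
static inline u32 example_sge_control(u32 byte_count, bool is_last)
{
	u32 ctl = byte_count & SGE_LEN_LIMIT;	/* length in the low bits */

	ctl |= SGE_ADDR_64;			/* 64-bit addressing */
	if (is_last)
		ctl |= SGE_LAST;		/* final entry in the list */
	return ctl;
}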
968
969struct __packed atto_vda_cfg_init {
970 struct atto_vda_date_and_time date_time;
971 u32 sgl_page_size;
972 u32 vda_version;
973 u32 fw_version;
974 u32 fw_build;
975 u32 fw_release;
976 u32 epoch_time;
977 u32 ioctl_tunnel;
978 #define VDA_ITF_MEM_RW 0x00000001
979 #define VDA_ITF_TRACE 0x00000002
980 #define VDA_ITF_SCSI_PASS_THRU 0x00000004
981 #define VDA_ITF_GET_DEV_ADDR 0x00000008
982 #define VDA_ITF_PHY_CTRL 0x00000010
983 #define VDA_ITF_CONN_CTRL 0x00000020
984 #define VDA_ITF_GET_DEV_INFO 0x00000040
985
986 u32 num_targets_backend;
987 u8 reserved[0x48];
988};
989
990
991/* configuration commands */
992
993#define VDA_CFG_INIT 0x00
994#define VDA_CFG_GET_INIT 0x01
995#define VDA_CFG_GET_INIT2 0x02
996
997
998/*! physical region descriptor (PRD) aka scatter/gather entry */
999
1000struct __packed atto_physical_region_description {
1001 u64 address;
1002 u32 ctl_len;
1003 #define PRD_LEN_LIMIT 0x003FFFFF
1004 #define PRD_LEN_MAX 0x003FF000
1005 #define PRD_NXT_PRD_CNT 0x0000007F
1006 #define PRD_CHAIN 0x01000000
1007 #define PRD_DATA 0x00000000
1008 #define PRD_INT_SEL 0xF0000000
1009 #define PRD_INT_SEL_F0 0x00000000
1010 #define PRD_INT_SEL_F1 0x40000000
1011 #define PRD_INT_SEL_F2 0x80000000
1012 #define PRD_INT_SEL_F3 0xc0000000
1013 #define PRD_INT_SEL_SRAM 0x10000000
1014 #define PRD_INT_SEL_PBSR 0x20000000
1015
1016};
1017
1018/* Request types. NOTE that ALL requests have the same layout for the first
1019 * few bytes.
1020 */
1021struct __packed atto_vda_req_header {
1022 u32 length;
1023 u8 function;
1024 u8 variable1;
1025 u8 chain_offset;
1026 u8 sg_list_offset;
1027 u32 handle;
1028};
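
/*
 * Illustrative sketch (editor's note, not part of the patch): because every
 * request type shares this header layout, any request can be inspected
 * generically before dispatching on its function code.
 */
static inline u8 example_req_function(const void *req)
{
	return ((const struct atto_vda_req_header *)req)->function;
}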
1029
1030
1031#define FCP_CDB_SIZE 16
1032
1033struct __packed atto_vda_scsi_req {
1034 u32 length;
1035 u8 function; /* VDA_FUNC_SCSI */
1036 u8 sense_len;
1037 u8 chain_offset;
1038 u8 sg_list_offset;
1039 u32 handle;
1040 u32 flags;
1041 #define FCP_CMND_LUN_MASK 0x000000FF
1042 #define FCP_CMND_TA_MASK 0x00000700
1043 #define FCP_CMND_TA_SIMPL_Q 0x00000000
1044 #define FCP_CMND_TA_HEAD_Q 0x00000100
1045 #define FCP_CMND_TA_ORDRD_Q 0x00000200
1046 #define FCP_CMND_TA_ACA 0x00000400
1047 #define FCP_CMND_PRI_MASK 0x00007800
1048 #define FCP_CMND_TM_MASK 0x00FF0000
1049 #define FCP_CMND_ATS 0x00020000
1050 #define FCP_CMND_CTS 0x00040000
1051 #define FCP_CMND_LRS 0x00100000
1052 #define FCP_CMND_TRS 0x00200000
1053 #define FCP_CMND_CLA 0x00400000
1054 #define FCP_CMND_TRM 0x00800000
1055 #define FCP_CMND_DATA_DIR 0x03000000
1056 #define FCP_CMND_WRD 0x01000000
1057 #define FCP_CMND_RDD 0x02000000
1058
1059 u8 cdb[FCP_CDB_SIZE];
1060 union {
1061 struct __packed {
1062 u64 ppsense_buf;
1063 u16 target_id;
1064 u8 iblk_cnt_prd;
1065 u8 reserved;
1066 };
1067
1068 struct atto_physical_region_description sense_buff_prd;
1069 };
1070
1071 union {
1072 struct atto_vda_sge sge[1];
1073
1074 u32 abort_handle;
1075 u32 dwords[245];
1076 struct atto_physical_region_description prd[1];
1077 } u;
1078};
1079
1080
1081struct __packed atto_vda_flash_req {
1082 u32 length;
1083 u8 function; /* VDA_FUNC_FLASH */
1084 u8 sub_func;
1085 u8 chain_offset;
1086 u8 sg_list_offset;
1087 u32 handle;
1088 u32 flash_addr;
1089 u8 checksum;
1090 u8 rsvd[3];
1091
1092 union {
1093 struct {
1094 char file_name[16]; /* 8.3 fname, NULL term, wc=* */
1095 struct atto_vda_sge sge[1];
1096 } file;
1097
1098 struct atto_vda_sge sge[1];
1099 struct atto_physical_region_description prde[2];
1100 } data;
1101};
1102
1103
1104struct __packed atto_vda_diag_req {
1105 u32 length;
1106 u8 function; /* VDA_FUNC_DIAG */
1107 u8 sub_func;
1108 #define VDA_DIAG_STATUS 0x00
1109 #define VDA_DIAG_RESET 0x01
1110 #define VDA_DIAG_PAUSE 0x02
1111 #define VDA_DIAG_RESUME 0x03
1112 #define VDA_DIAG_READ 0x04
1113 #define VDA_DIAG_WRITE 0x05
1114
1115 u8 chain_offset;
1116 u8 sg_list_offset;
1117 u32 handle;
1118 u32 rsvd;
1119 u64 local_addr;
1120 struct atto_vda_sge sge[1];
1121};
1122
1123
1124struct __packed atto_vda_ae_req {
1125 u32 length;
1126 u8 function; /* VDA_FUNC_AE */
1127 u8 reserved1;
1128 u8 chain_offset;
1129 u8 sg_list_offset;
1130 u32 handle;
1131
1132 union {
1133 struct atto_vda_sge sge[1];
1134 struct atto_physical_region_description prde[1];
1135 };
1136};
1137
1138
1139struct __packed atto_vda_cli_req {
1140 u32 length;
1141 u8 function; /* VDA_FUNC_CLI */
1142 u8 reserved1;
1143 u8 chain_offset;
1144 u8 sg_list_offset;
1145 u32 handle;
1146 u32 cmd_rsp_len;
1147 struct atto_vda_sge sge[1];
1148};
1149
1150
1151struct __packed atto_vda_ioctl_req {
1152 u32 length;
1153 u8 function; /* VDA_FUNC_IOCTL */
1154 u8 sub_func;
1155 u8 chain_offset;
1156 u8 sg_list_offset;
1157 u32 handle;
1158
1159 union {
1160 struct atto_vda_sge reserved_sge;
1161 struct atto_physical_region_description reserved_prde;
1162 };
1163
1164 union {
1165 struct {
1166 u32 ctrl_code;
1167 u16 target_id;
1168 u8 lun;
1169 u8 reserved;
1170 } csmi;
1171 };
1172
1173 union {
1174 struct atto_vda_sge sge[1];
1175 struct atto_physical_region_description prde[1];
1176 };
1177};
1178
1179
1180struct __packed atto_vda_cfg_req {
1181 u32 length;
1182 u8 function; /* VDA_FUNC_CFG */
1183 u8 sub_func;
1184 u8 rsvd1;
1185 u8 sg_list_offset;
1186 u32 handle;
1187
1188 union {
1189 u8 bytes[116];
1190 struct atto_vda_cfg_init init;
1191 struct atto_vda_sge sge;
1192 struct atto_physical_region_description prde;
1193 } data;
1194};
1195
1196
1197struct __packed atto_vda_mgmt_req {
1198 u32 length;
1199 u8 function; /* VDA_FUNC_MGT */
1200 u8 mgt_func;
1201 u8 chain_offset;
1202 u8 sg_list_offset;
1203 u32 handle;
1204 u8 scan_generation;
1205 u8 payld_sglst_offset;
1206 u16 dev_index;
1207 u32 payld_length;
1208 u32 pad;
1209 union {
1210 struct atto_vda_sge sge[2];
1211 struct atto_physical_region_description prde[2];
1212 };
1213 struct atto_vda_sge payld_sge[1];
1214};
1215
1216
1217union atto_vda_req {
1218 struct atto_vda_scsi_req scsi;
1219 struct atto_vda_flash_req flash;
1220 struct atto_vda_diag_req diag;
1221 struct atto_vda_ae_req ae;
1222 struct atto_vda_cli_req cli;
1223 struct atto_vda_ioctl_req ioctl;
1224 struct atto_vda_cfg_req cfg;
1225 struct atto_vda_mgmt_req mgt;
1226 u8 bytes[1024];
1227};
1228
1229/* Outbound response structures */
1230
1231struct __packed atto_vda_scsi_rsp {
1232 u8 scsi_stat;
1233 u8 sense_len;
1234 u8 rsvd[2];
1235 u32 residual_length;
1236};
1237
1238struct __packed atto_vda_flash_rsp {
1239 u32 file_size;
1240};
1241
1242struct __packed atto_vda_ae_rsp {
1243 u32 length;
1244};
1245
1246struct __packed atto_vda_cli_rsp {
1247 u32 cmd_rsp_len;
1248};
1249
1250struct __packed atto_vda_ioctl_rsp {
1251 union {
1252 struct {
1253 u32 csmi_status;
1254 u16 target_id;
1255 u8 lun;
1256 u8 reserved;
1257 } csmi;
1258 };
1259};
1260
1261struct __packed atto_vda_cfg_rsp {
1262 u16 vda_version;
1263 u16 fw_release;
1264 u32 fw_build;
1265};
1266
1267struct __packed atto_vda_mgmt_rsp {
1268 u32 length;
1269 u16 dev_index;
1270 u8 scan_generation;
1271};
1272
1273union atto_vda_func_rsp {
1274 struct atto_vda_scsi_rsp scsi_rsp;
1275 struct atto_vda_flash_rsp flash_rsp;
1276 struct atto_vda_ae_rsp ae_rsp;
1277 struct atto_vda_cli_rsp cli_rsp;
1278 struct atto_vda_ioctl_rsp ioctl_rsp;
1279 struct atto_vda_cfg_rsp cfg_rsp;
1280 struct atto_vda_mgmt_rsp mgt_rsp;
1281 u32 dwords[2];
1282};
1283
1284struct __packed atto_vda_ob_rsp {
1285 u32 handle;
1286 u8 req_stat;
1287 u8 rsvd[3];
1288
1289 union atto_vda_func_rsp
1290 func_rsp;
1291};
1292
1293struct __packed atto_vda_ae_data {
1294 u8 event_data[256];
1295};
1296
1297struct __packed atto_vda_mgmt_data {
1298 union {
1299 u8 bytes[112];
1300 struct atto_vda_devinfo dev_info;
1301 struct atto_vda_grp_info grp_info;
1302 struct atto_vdapart_info part_info;
1303 struct atto_vda_dh_info dev_health_info;
1304 struct atto_vda_metrics_info metrics_info;
1305 struct atto_vda_schedule_info sched_info;
1306 struct atto_vda_n_vcache_info nvcache_info;
1307 struct atto_vda_buzzer_info buzzer_info;
1308 } data;
1309};
1310
1311union atto_vda_rsp_data {
1312 struct atto_vda_ae_data ae_data;
1313 struct atto_vda_mgmt_data mgt_data;
1314 u8 sense_data[252];
1315 #define SENSE_DATA_SZ 252
1316 u8 bytes[256];
1317};
1318
1319#endif
diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h
new file mode 100644
index 000000000000..0838e265e0b9
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r.h
@@ -0,0 +1,1441 @@
1/*
2 * linux/drivers/scsi/esas2r/esas2r.h
3 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
6 * (mailto:linuxdrivers@attotech.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * NO WARRANTY
19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 * solely responsible for determining the appropriateness of using and
24 * distributing the Program and assumes all risks associated with its
25 * exercise of rights under this Agreement, including but not limited to
26 * the risks and costs of program errors, damage to or loss of data,
27 * programs or equipment, and unavailability or interruption of operations.
28 *
29 * DISCLAIMER OF LIABILITY
30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37 *
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
41 * USA.
42 */
43
44#include <linux/kernel.h>
45#include <linux/delay.h>
46#include <linux/pci.h>
47#include <linux/proc_fs.h>
48#include <linux/workqueue.h>
49#include <linux/interrupt.h>
50#include <linux/module.h>
51#include <linux/vmalloc.h>
52#include <scsi/scsi.h>
53#include <scsi/scsi_host.h>
54#include <scsi/scsi_cmnd.h>
55#include <scsi/scsi_device.h>
56#include <scsi/scsi_eh.h>
57#include <scsi/scsi_tcq.h>
58
59#include "esas2r_log.h"
60#include "atioctl.h"
61#include "atvda.h"
62
63#ifndef ESAS2R_H
64#define ESAS2R_H
65
66/* Global Variables */
67extern struct esas2r_adapter *esas2r_adapters[];
68extern u8 *esas2r_buffered_ioctl;
69extern dma_addr_t esas2r_buffered_ioctl_addr;
70extern u32 esas2r_buffered_ioctl_size;
71extern struct pci_dev *esas2r_buffered_ioctl_pcid;
72#define SGL_PG_SZ_MIN 64
73#define SGL_PG_SZ_MAX 1024
74extern int sgl_page_size;
75#define NUM_SGL_MIN 8
76#define NUM_SGL_MAX 2048
77extern int num_sg_lists;
78#define NUM_REQ_MIN 4
79#define NUM_REQ_MAX 256
80extern int num_requests;
81#define NUM_AE_MIN 2
82#define NUM_AE_MAX 8
83extern int num_ae_requests;
84extern int cmd_per_lun;
85extern int can_queue;
86extern int esas2r_max_sectors;
87extern int sg_tablesize;
88extern int interrupt_mode;
89extern int num_io_requests;
90
91/* Macro definitions */
92#define ESAS2R_MAX_ID 255
93#define MAX_ADAPTERS 32
94#define ESAS2R_DRVR_NAME "esas2r"
95#define ESAS2R_LONGNAME "ATTO ExpressSAS 6GB RAID Adapter"
96#define ESAS2R_MAX_DEVICES 32
97#define ATTONODE_NAME "ATTONode"
98#define ESAS2R_MAJOR_REV 1
99#define ESAS2R_MINOR_REV 00
100#define ESAS2R_VERSION_STR DEFINED_NUM_TO_STR(ESAS2R_MAJOR_REV) "." \
101 DEFINED_NUM_TO_STR(ESAS2R_MINOR_REV)
102#define ESAS2R_COPYRIGHT_YEARS "2001-2013"
103#define ESAS2R_DEFAULT_SGL_PAGE_SIZE 384
104#define ESAS2R_DEFAULT_CMD_PER_LUN 64
105#define ESAS2R_DEFAULT_NUM_SG_LISTS 1024
106#define DEFINED_NUM_TO_STR(num) NUM_TO_STR(num)
107#define NUM_TO_STR(num) #num
108
109#define ESAS2R_SGL_ALIGN 16
110#define ESAS2R_LIST_ALIGN 16
111#define ESAS2R_LIST_EXTRA ESAS2R_NUM_EXTRA
112#define ESAS2R_DATA_BUF_LEN 256
113#define ESAS2R_DEFAULT_TMO 5000
114#define ESAS2R_DISC_BUF_LEN 512
115#define ESAS2R_FWCOREDUMP_SZ 0x80000
116#define ESAS2R_NUM_PHYS 8
117#define ESAS2R_TARG_ID_INV 0xFFFF
118#define ESAS2R_INT_STS_MASK MU_INTSTAT_MASK
119#define ESAS2R_INT_ENB_MASK MU_INTSTAT_MASK
120#define ESAS2R_INT_DIS_MASK 0
121#define ESAS2R_MAX_TARGETS 256
122#define ESAS2R_KOBJ_NAME_LEN 20
123
124/* u16 (WORD) component macros */
125#define LOBYTE(w) ((u8)(u16)(w))
126#define HIBYTE(w) ((u8)(((u16)(w)) >> 8))
127#define MAKEWORD(lo, hi) ((u16)((u8)(lo) | ((u16)(u8)(hi) << 8)))
128
129/* u32 (DWORD) component macros */
130#define LOWORD(d) ((u16)(u32)(d))
131#define HIWORD(d) ((u16)(((u32)(d)) >> 16))
132#define MAKEDWORD(lo, hi) ((u32)((u16)(lo) | ((u32)(u16)(hi) << 16)))
133
134/* macro to get the lowest nonzero bit of a value */
135#define LOBIT(x) ((x) & (0 - (x)))
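/* e.g. MAKEWORD(0x34, 0x12) == 0x1234 and LOBIT(0x0058) == 0x0008 */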
136
137/* These functions are provided to access the chip's control registers.
138 * The register is specified by its byte offset from the register base
139 * for the adapter.
140 */
141#define esas2r_read_register_dword(a, reg) \
142 readl((void __iomem *)a->regs + (reg) + MW_REG_OFFSET_HWREG)
143
144#define esas2r_write_register_dword(a, reg, data) \
145 writel(data, (void __iomem *)(a->regs + (reg) + MW_REG_OFFSET_HWREG))
146
147#define esas2r_flush_register_dword(a, r) esas2r_read_register_dword(a, r)
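
/*
 * Illustrative sketch (editor's note, not part of the patch): a generic
 * read-then-write-back acknowledge built from the accessors above.
 * Hypothetical helper; requires the full struct esas2r_adapter definition.
 */
static inline u32 example_ack_register(struct esas2r_adapter *a, u32 reg)
{
	u32 val = esas2r_read_register_dword(a, reg);

	if (val)
		esas2r_write_register_dword(a, reg, val);
	return val;
}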
148
149/* This function is provided to access the chip's data window. The
150 * register is specified by its byte offset from the window base
151 * for the adapter.
152 */
153#define esas2r_read_data_byte(a, reg) \
154 readb((void __iomem *)a->data_window + (reg))
155
156/* ATTO vendor and device IDs */
157#define ATTO_VENDOR_ID 0x117C
158#define ATTO_DID_INTEL_IOP348 0x002C
159#define ATTO_DID_MV_88RC9580 0x0049
160#define ATTO_DID_MV_88RC9580TS 0x0066
161#define ATTO_DID_MV_88RC9580TSE 0x0067
162#define ATTO_DID_MV_88RC9580TL 0x0068
163
164/* ATTO subsystem device IDs */
165#define ATTO_SSDID_TBT 0x4000
166#define ATTO_TSSC_3808 0x4066
167#define ATTO_TSSC_3808E 0x4067
168#define ATTO_TLSH_1068 0x4068
169#define ATTO_ESAS_R680 0x0049
170#define ATTO_ESAS_R608 0x004A
171#define ATTO_ESAS_R60F 0x004B
172#define ATTO_ESAS_R6F0 0x004C
173#define ATTO_ESAS_R644 0x004D
174#define ATTO_ESAS_R648 0x004E
175
176/*
177 * flash definitions & structures
178 * define the code types
179 */
180#define FBT_CPYR 0xAA00
181#define FBT_SETUP 0xAA02
182#define FBT_FLASH_VER 0xAA04
183
184/* offsets to various locations in flash */
185#define FLS_OFFSET_BOOT (u32)(0x00700000)
186#define FLS_OFFSET_NVR (u32)(0x007C0000)
187#define FLS_OFFSET_CPYR FLS_OFFSET_NVR
188#define FLS_LENGTH_BOOT (FLS_OFFSET_CPYR - FLS_OFFSET_BOOT)
189#define FLS_BLOCK_SIZE (u32)(0x00020000)
190#define FI_NVR_2KB 0x0800
191#define FI_NVR_8KB 0x2000
192#define FM_BUF_SZ 0x800
193
194/*
195 * marvell frey (88R9580) register definitions
196 * chip revision identifiers
197 */
198#define MVR_FREY_B2 0xB2
199
200/*
201 * memory window definitions. window 0 is the data window with definitions
202 * of MW_DATA_XXX. window 1 is the register window with definitions of
203 * MW_REG_XXX.
204 */
205#define MW_REG_WINDOW_SIZE (u32)(0x00040000)
206#define MW_REG_OFFSET_HWREG (u32)(0x00000000)
207#define MW_REG_OFFSET_PCI (u32)(0x00008000)
208#define MW_REG_PCI_HWREG_DELTA (MW_REG_OFFSET_PCI - MW_REG_OFFSET_HWREG)
209#define MW_DATA_WINDOW_SIZE (u32)(0x00020000)
210#define MW_DATA_ADDR_SER_FLASH (u32)(0xEC000000)
211#define MW_DATA_ADDR_SRAM (u32)(0xF4000000)
212#define MW_DATA_ADDR_PAR_FLASH (u32)(0xFC000000)
213
214/*
215 * the following registers are for the communication
216 * list interface (AKA message unit (MU))
217 */
218#define MU_IN_LIST_ADDR_LO (u32)(0x00004000)
219#define MU_IN_LIST_ADDR_HI (u32)(0x00004004)
220
221#define MU_IN_LIST_WRITE (u32)(0x00004018)
222 #define MU_ILW_TOGGLE (u32)(0x00004000)
223
224#define MU_IN_LIST_READ (u32)(0x0000401C)
225 #define MU_ILR_TOGGLE (u32)(0x00004000)
226 #define MU_ILIC_LIST (u32)(0x0000000F)
227 #define MU_ILIC_LIST_F0 (u32)(0x00000000)
228 #define MU_ILIC_DEST (u32)(0x00000F00)
229 #define MU_ILIC_DEST_DDR (u32)(0x00000200)
230#define MU_IN_LIST_IFC_CONFIG (u32)(0x00004028)
231
232#define MU_IN_LIST_CONFIG (u32)(0x0000402C)
233 #define MU_ILC_ENABLE (u32)(0x00000001)
234 #define MU_ILC_ENTRY_MASK (u32)(0x000000F0)
235 #define MU_ILC_ENTRY_4_DW (u32)(0x00000020)
236 #define MU_ILC_DYNAMIC_SRC (u32)(0x00008000)
237 #define MU_ILC_NUMBER_MASK (u32)(0x7FFF0000)
238 #define MU_ILC_NUMBER_SHIFT 16
239
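/*
 * Illustrative sketch (editor's note, not part of the patch): an inbound
 * list configuration value built from the fields above for a dynamic-source
 * list of 4-dword entries. Hypothetical helper.
 */
static inline u32 example_in_list_config(u32 n_entries)
{
	return MU_ILC_ENABLE | MU_ILC_ENTRY_4_DW | MU_ILC_DYNAMIC_SRC |
	       ((n_entries << MU_ILC_NUMBER_SHIFT) & MU_ILC_NUMBER_MASK);
}
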
240#define MU_OUT_LIST_ADDR_LO (u32)(0x00004050)
241#define MU_OUT_LIST_ADDR_HI (u32)(0x00004054)
242
243#define MU_OUT_LIST_COPY_PTR_LO (u32)(0x00004058)
244#define MU_OUT_LIST_COPY_PTR_HI (u32)(0x0000405C)
245
246#define MU_OUT_LIST_WRITE (u32)(0x00004068)
247 #define MU_OLW_TOGGLE (u32)(0x00004000)
248
249#define MU_OUT_LIST_COPY (u32)(0x0000406C)
250 #define MU_OLC_TOGGLE (u32)(0x00004000)
251 #define MU_OLC_WRT_PTR (u32)(0x00003FFF)
252
253#define MU_OUT_LIST_IFC_CONFIG (u32)(0x00004078)
254 #define MU_OLIC_LIST (u32)(0x0000000F)
255 #define MU_OLIC_LIST_F0 (u32)(0x00000000)
256 #define MU_OLIC_SOURCE (u32)(0x00000F00)
257 #define MU_OLIC_SOURCE_DDR (u32)(0x00000200)
258
259#define MU_OUT_LIST_CONFIG (u32)(0x0000407C)
260 #define MU_OLC_ENABLE (u32)(0x00000001)
261 #define MU_OLC_ENTRY_MASK (u32)(0x000000F0)
262 #define MU_OLC_ENTRY_4_DW (u32)(0x00000020)
263 #define MU_OLC_NUMBER_MASK (u32)(0x7FFF0000)
264 #define MU_OLC_NUMBER_SHIFT 16
265
266#define MU_OUT_LIST_INT_STAT (u32)(0x00004088)
267 #define MU_OLIS_INT (u32)(0x00000001)
268
269#define MU_OUT_LIST_INT_MASK (u32)(0x0000408C)
270 #define MU_OLIS_MASK (u32)(0x00000001)
271
272/*
273 * the maximum size of the communication lists is two greater than the
274 * maximum number of VDA requests. the extra entries prevent queue overflow.
275 */
276#define ESAS2R_MAX_NUM_REQS 256
277#define ESAS2R_NUM_EXTRA 2
278#define ESAS2R_MAX_COMM_LIST_SIZE (ESAS2R_MAX_NUM_REQS + ESAS2R_NUM_EXTRA)
279
280/*
281 * the following registers are for the CPU interface
282 */
283#define MU_CTL_STATUS_IN (u32)(0x00010108)
284 #define MU_CTL_IN_FULL_RST (u32)(0x00000020)
285#define MU_CTL_STATUS_IN_B2 (u32)(0x00010130)
286 #define MU_CTL_IN_FULL_RST2 (u32)(0x80000000)
287#define MU_DOORBELL_IN (u32)(0x00010460)
288 #define DRBL_RESET_BUS (u32)(0x00000002)
289 #define DRBL_PAUSE_AE (u32)(0x00000004)
290 #define DRBL_RESUME_AE (u32)(0x00000008)
291 #define DRBL_MSG_IFC_DOWN (u32)(0x00000010)
292 #define DRBL_FLASH_REQ (u32)(0x00000020)
293 #define DRBL_FLASH_DONE (u32)(0x00000040)
294 #define DRBL_FORCE_INT (u32)(0x00000080)
295 #define DRBL_MSG_IFC_INIT (u32)(0x00000100)
296 #define DRBL_POWER_DOWN (u32)(0x00000200)
297 #define DRBL_DRV_VER_1 (u32)(0x00010000)
298 #define DRBL_DRV_VER DRBL_DRV_VER_1
299#define MU_DOORBELL_IN_ENB (u32)(0x00010464)
300#define MU_DOORBELL_OUT (u32)(0x00010480)
301 #define DRBL_PANIC_REASON_MASK (u32)(0x00F00000)
302 #define DRBL_UNUSED_HANDLER (u32)(0x00100000)
303 #define DRBL_UNDEF_INSTR (u32)(0x00200000)
304 #define DRBL_PREFETCH_ABORT (u32)(0x00300000)
305 #define DRBL_DATA_ABORT (u32)(0x00400000)
306 #define DRBL_JUMP_TO_ZERO (u32)(0x00500000)
307 #define DRBL_FW_RESET (u32)(0x00080000)
308 #define DRBL_FW_VER_MSK (u32)(0x00070000)
309 #define DRBL_FW_VER_0 (u32)(0x00000000)
310 #define DRBL_FW_VER_1 (u32)(0x00010000)
311 #define DRBL_FW_VER DRBL_FW_VER_1
312#define MU_DOORBELL_OUT_ENB (u32)(0x00010484)
313 #define DRBL_ENB_MASK (u32)(0x00F803FF)
314#define MU_INT_STATUS_OUT (u32)(0x00010200)
315 #define MU_INTSTAT_POST_OUT (u32)(0x00000010)
316 #define MU_INTSTAT_DRBL_IN (u32)(0x00000100)
317 #define MU_INTSTAT_DRBL (u32)(0x00001000)
318 #define MU_INTSTAT_MASK (u32)(0x00001010)
319#define MU_INT_MASK_OUT (u32)(0x0001020C)
320
321/* PCI express registers accessed via window 1 */
322#define MVR_PCI_WIN1_REMAP (u32)(0x00008438)
323 #define MVRPW1R_ENABLE (u32)(0x00000001)
324
325
326/* structures */
327
328/* inbound list dynamic source entry */
329struct esas2r_inbound_list_source_entry {
330 u64 address;
331 u32 length;
332 #define HWILSE_INTERFACE_F0 0x00000000
333 u32 reserved;
334};
335
336/* PCI data structure in expansion ROM images */
337struct __packed esas2r_boot_header {
338 char signature[4];
339 u16 vendor_id;
340 u16 device_id;
341 u16 VPD;
342 u16 struct_length;
343 u8 struct_revision;
344 u8 class_code[3];
345 u16 image_length;
346 u16 code_revision;
347 u8 code_type;
348 #define CODE_TYPE_PC 0
349 #define CODE_TYPE_OPEN 1
350 #define CODE_TYPE_EFI 3
351 u8 indicator;
352 #define INDICATOR_LAST 0x80
353 u8 reserved[2];
354};
355
356struct __packed esas2r_boot_image {
357 u16 signature;
358 u8 reserved[22];
359 u16 header_offset;
360 u16 pnp_offset;
361};
362
363struct __packed esas2r_pc_image {
364 u16 signature;
365 u8 length;
366 u8 entry_point[3];
367 u8 checksum;
368 u16 image_end;
369 u16 min_size;
370 u8 rom_flags;
371 u8 reserved[12];
372 u16 header_offset;
373 u16 pnp_offset;
374 struct esas2r_boot_header boot_image;
375};
376
377struct __packed esas2r_efi_image {
378 u16 signature;
379 u16 length;
380 u32 efi_signature;
381 #define EFI_ROM_SIG 0x00000EF1
382 u16 image_type;
383 #define EFI_IMAGE_APP 10
384 #define EFI_IMAGE_BSD 11
385 #define EFI_IMAGE_RTD 12
386 u16 machine_type;
387 #define EFI_MACHINE_IA32 0x014c
388 #define EFI_MACHINE_IA64 0x0200
389 #define EFI_MACHINE_X64 0x8664
390 #define EFI_MACHINE_EBC 0x0EBC
391 u16 compression;
392 #define EFI_UNCOMPRESSED 0x0000
393 #define EFI_COMPRESSED 0x0001
394 u8 reserved[8];
395 u16 efi_offset;
396 u16 header_offset;
397 u16 reserved2;
398 struct esas2r_boot_header boot_image;
399};
400
401struct esas2r_adapter;
402struct esas2r_sg_context;
403struct esas2r_request;
404
405typedef void (*RQCALLBK) (struct esas2r_adapter *a,
406 struct esas2r_request *rq);
407typedef bool (*RQBUILDSGL) (struct esas2r_adapter *a,
408 struct esas2r_sg_context *sgc);
409
410struct esas2r_component_header {
411 u8 img_type;
412 #define CH_IT_FW 0x00
413 #define CH_IT_NVR 0x01
414 #define CH_IT_BIOS 0x02
415 #define CH_IT_MAC 0x03
416 #define CH_IT_CFG 0x04
417 #define CH_IT_EFI 0x05
418 u8 status;
419 #define CH_STAT_PENDING 0xff
420 #define CH_STAT_FAILED 0x00
421 #define CH_STAT_SUCCESS 0x01
422 #define CH_STAT_RETRY 0x02
423 #define CH_STAT_INVALID 0x03
424 u8 pad[2];
425 u32 version;
426 u32 length;
427 u32 image_offset;
428};
429
430#define FI_REL_VER_SZ 16
431
432struct esas2r_flash_img_v0 {
433 u8 fi_version;
434 #define FI_VERSION_0 00
435 u8 status;
436 u8 adap_typ;
437 u8 action;
438 u32 length;
439 u16 checksum;
440 u16 driver_error;
441 u16 flags;
442 u16 num_comps;
443 #define FI_NUM_COMPS_V0 5
444 u8 rel_version[FI_REL_VER_SZ];
445 struct esas2r_component_header cmp_hdr[FI_NUM_COMPS_V0];
446 u8 scratch_buf[FM_BUF_SZ];
447};
448
449struct esas2r_flash_img {
450 u8 fi_version;
451 #define FI_VERSION_1 01
452 u8 status;
453 #define FI_STAT_SUCCESS 0x00
454 #define FI_STAT_FAILED 0x01
455 #define FI_STAT_REBOOT 0x02
456 #define FI_STAT_ADAPTYP 0x03
457 #define FI_STAT_INVALID 0x04
458 #define FI_STAT_CHKSUM 0x05
459 #define FI_STAT_LENGTH 0x06
460 #define FI_STAT_UNKNOWN 0x07
461 #define FI_STAT_IMG_VER 0x08
462 #define FI_STAT_BUSY 0x09
463 #define FI_STAT_DUAL 0x0A
464 #define FI_STAT_MISSING 0x0B
465 #define FI_STAT_UNSUPP 0x0C
466 #define FI_STAT_ERASE 0x0D
467 #define FI_STAT_FLASH 0x0E
468 #define FI_STAT_DEGRADED 0x0F
469 u8 adap_typ;
470 #define FI_AT_UNKNWN 0xFF
471 #define FI_AT_SUN_LAKE 0x0B
472 #define FI_AT_MV_9580 0x0F
473 u8 action;
474 #define FI_ACT_DOWN 0x00
475 #define FI_ACT_UP 0x01
476 #define FI_ACT_UPSZ 0x02
477 #define FI_ACT_MAX 0x02
478 #define FI_ACT_DOWN1 0x80
479 u32 length;
480 u16 checksum;
481 u16 driver_error;
482 u16 flags;
483 #define FI_FLG_NVR_DEF 0x0001
484 u16 num_comps;
485 #define FI_NUM_COMPS_V1 6
486 u8 rel_version[FI_REL_VER_SZ];
487 struct esas2r_component_header cmp_hdr[FI_NUM_COMPS_V1];
488 u8 scratch_buf[FM_BUF_SZ];
489};
490
491/* definitions for flash script (FS) commands */
492struct esas2r_ioctlfs_command {
493 u8 command;
494 #define ESAS2R_FS_CMD_ERASE 0
495 #define ESAS2R_FS_CMD_READ 1
496 #define ESAS2R_FS_CMD_BEGINW 2
497 #define ESAS2R_FS_CMD_WRITE 3
498 #define ESAS2R_FS_CMD_COMMIT 4
499 #define ESAS2R_FS_CMD_CANCEL 5
500 u8 checksum;
501 u8 reserved[2];
502 u32 flash_addr;
503 u32 length;
504 u32 image_offset;
505};
506
507struct esas2r_ioctl_fs {
508 u8 version;
509 #define ESAS2R_FS_VER 0
510 u8 status;
511 u8 driver_error;
512 u8 adap_type;
513 #define ESAS2R_FS_AT_ESASRAID2 3
514 #define ESAS2R_FS_AT_TSSASRAID2 4
515 #define ESAS2R_FS_AT_TSSASRAID2E 5
516 #define ESAS2R_FS_AT_TLSASHBA 6
517 u8 driver_ver;
518 u8 reserved[11];
519 struct esas2r_ioctlfs_command command;
520 u8 data[1];
521};
522
523struct esas2r_sas_nvram {
524 u8 signature[4];
525 u8 version;
526 #define SASNVR_VERSION_0 0x00
527 #define SASNVR_VERSION SASNVR_VERSION_0
528 u8 checksum;
529 #define SASNVR_CKSUM_SEED 0x5A
530 u8 max_lun_for_target;
531 u8 pci_latency;
532 #define SASNVR_PCILAT_DIS 0x00
533 #define SASNVR_PCILAT_MIN 0x10
534 #define SASNVR_PCILAT_MAX 0xF8
535 u8 options1;
536 #define SASNVR1_BOOT_DRVR 0x01
537 #define SASNVR1_BOOT_SCAN 0x02
538 #define SASNVR1_DIS_PCI_MWI 0x04
539 #define SASNVR1_FORCE_ORD_Q 0x08
540 #define SASNVR1_CACHELINE_0 0x10
541 #define SASNVR1_DIS_DEVSORT 0x20
542 #define SASNVR1_PWR_MGT_EN 0x40
543 #define SASNVR1_WIDEPORT 0x80
544 u8 options2;
545 #define SASNVR2_SINGLE_BUS 0x01
546 #define SASNVR2_SLOT_BIND 0x02
547 #define SASNVR2_EXP_PROG 0x04
548 #define SASNVR2_CMDTHR_LUN 0x08
549 #define SASNVR2_HEARTBEAT 0x10
550 #define SASNVR2_INT_CONNECT 0x20
551 #define SASNVR2_SW_MUX_CTRL 0x40
552 #define SASNVR2_DISABLE_NCQ 0x80
553 u8 int_coalescing;
554 #define SASNVR_COAL_DIS 0x00
555 #define SASNVR_COAL_LOW 0x01
556 #define SASNVR_COAL_MED 0x02
557 #define SASNVR_COAL_HI 0x03
558 u8 cmd_throttle;
559 #define SASNVR_CMDTHR_NONE 0x00
560 u8 dev_wait_time;
561 u8 dev_wait_count;
562 u8 spin_up_delay;
563 #define SASNVR_SPINUP_MAX 0x14
564 u8 ssp_align_rate;
565 u8 sas_addr[8];
566 u8 phy_speed[16];
567 #define SASNVR_SPEED_AUTO 0x00
568 #define SASNVR_SPEED_1_5GB 0x01
569 #define SASNVR_SPEED_3GB 0x02
570 #define SASNVR_SPEED_6GB 0x03
571 #define SASNVR_SPEED_12GB 0x04
572 u8 phy_mux[16];
573 #define SASNVR_MUX_DISABLED 0x00
574 #define SASNVR_MUX_1_5GB 0x01
575 #define SASNVR_MUX_3GB 0x02
576 #define SASNVR_MUX_6GB 0x03
577 u8 phy_flags[16];
578 #define SASNVR_PHF_DISABLED 0x01
579 #define SASNVR_PHF_RD_ONLY 0x02
580 u8 sort_type;
581 #define SASNVR_SORT_SAS_ADDR 0x00
582 #define SASNVR_SORT_H308_CONN 0x01
583 #define SASNVR_SORT_PHY_ID 0x02
584 #define SASNVR_SORT_SLOT_ID 0x03
585 u8 dpm_reqcmd_lmt;
586 u8 dpm_stndby_time;
587 u8 dpm_active_time;
588 u8 phy_target_id[16];
589 #define SASNVR_PTI_DISABLED 0xFF
590 u8 virt_ses_mode;
591 #define SASNVR_VSMH_DISABLED 0x00
592 u8 read_write_mode;
593 #define SASNVR_RWM_DEFAULT 0x00
594 u8 link_down_to;
595 u8 reserved[0xA1];
596};
597
598typedef u32 (*PGETPHYSADDR) (struct esas2r_sg_context *sgc, u64 *addr);
599
600struct esas2r_sg_context {
601 struct esas2r_adapter *adapter;
602 struct esas2r_request *first_req;
603 u32 length;
604 u8 *cur_offset;
605 PGETPHYSADDR get_phys_addr;
606 union {
607 struct {
608 struct atto_vda_sge *curr;
609 struct atto_vda_sge *last;
610 struct atto_vda_sge *limit;
611 struct atto_vda_sge *chain;
612 } a64;
613 struct {
614 struct atto_physical_region_description *curr;
615 struct atto_physical_region_description *chain;
616 u32 sgl_max_cnt;
617 u32 sge_cnt;
618 } prd;
619 } sge;
620 struct scatterlist *cur_sgel;
621 u8 *exp_offset;
622 int num_sgel;
623 int sgel_count;
624};
625
626struct esas2r_target {
627 u8 flags;
628 #define TF_PASS_THRU 0x01
629 #define TF_USED 0x02
630 u8 new_target_state;
631 u8 target_state;
632 u8 buffered_target_state;
633#define TS_NOT_PRESENT 0x00
634#define TS_PRESENT 0x05
635#define TS_LUN_CHANGE 0x06
636#define TS_INVALID 0xFF
637 u32 block_size;
638 u32 inter_block;
639 u32 inter_byte;
640 u16 virt_targ_id;
641 u16 phys_targ_id;
642 u8 identifier_len;
643 u64 sas_addr;
644 u8 identifier[60];
645 struct atto_vda_ae_lu lu_event;
646};
647
648struct esas2r_request {
649 struct list_head comp_list;
650 struct list_head req_list;
651 union atto_vda_req *vrq;
652 struct esas2r_mem_desc *vrq_md;
653 union {
654 void *data_buf;
655 union atto_vda_rsp_data *vda_rsp_data;
656 };
657 u8 *sense_buf;
658 struct list_head sg_table_head;
659 struct esas2r_mem_desc *sg_table;
660 u32 timeout;
661 #define RQ_TIMEOUT_S1 0xFFFFFFFF
662 #define RQ_TIMEOUT_S2 0xFFFFFFFE
663 #define RQ_MAX_TIMEOUT 0xFFFFFFFD
664 u16 target_id;
665 u8 req_type;
666 #define RT_INI_REQ 0x01
667 #define RT_DISC_REQ 0x02
668 u8 sense_len;
669 union atto_vda_func_rsp func_rsp;
670 RQCALLBK comp_cb;
671 RQCALLBK interrupt_cb;
672 void *interrupt_cx;
673 u8 flags;
674 #define RF_1ST_IBLK_BASE 0x04
675 #define RF_FAILURE_OK 0x08
676 u8 req_stat;
677 u16 vda_req_sz;
678 #define RQ_SIZE_DEFAULT 0
679 u64 lba;
680 RQCALLBK aux_req_cb;
681 void *aux_req_cx;
682 u32 blk_len;
683 u32 max_blk_len;
684 union {
685 struct scsi_cmnd *cmd;
686 u8 *task_management_status_ptr;
687 };
688};
689
690struct esas2r_flash_context {
691 struct esas2r_flash_img *fi;
692 RQCALLBK interrupt_cb;
693 u8 *sgc_offset;
694 u8 *scratch;
695 u32 fi_hdr_len;
696 u8 task;
697 #define FMTSK_ERASE_BOOT 0
698 #define FMTSK_WRTBIOS 1
699 #define FMTSK_READBIOS 2
700 #define FMTSK_WRTMAC 3
701 #define FMTSK_READMAC 4
702 #define FMTSK_WRTEFI 5
703 #define FMTSK_READEFI 6
704 #define FMTSK_WRTCFG 7
705 #define FMTSK_READCFG 8
706 u8 func;
707 u16 num_comps;
708 u32 cmp_len;
709 u32 flsh_addr;
710 u32 curr_len;
711 u8 comp_typ;
712 struct esas2r_sg_context sgc;
713};
714
715struct esas2r_disc_context {
716 u8 disc_evt;
717 #define DCDE_DEV_CHANGE 0x01
718 #define DCDE_DEV_SCAN 0x02
719 u8 state;
720 #define DCS_DEV_RMV 0x00
721 #define DCS_DEV_ADD 0x01
722 #define DCS_BLOCK_DEV_SCAN 0x02
723 #define DCS_RAID_GRP_INFO 0x03
724 #define DCS_PART_INFO 0x04
725 #define DCS_PT_DEV_INFO 0x05
726 #define DCS_PT_DEV_ADDR 0x06
727 #define DCS_DISC_DONE 0xFF
728 u16 flags;
729 #define DCF_DEV_CHANGE 0x0001
730 #define DCF_DEV_SCAN 0x0002
731 #define DCF_POLLED 0x8000
732 u32 interleave;
733 u32 block_size;
734 u16 dev_ix;
735 u8 part_num;
736 u8 raid_grp_ix;
737 char raid_grp_name[16];
738 struct esas2r_target *curr_targ;
739 u16 curr_virt_id;
740 u16 curr_phys_id;
741 u8 scan_gen;
742 u8 dev_addr_type;
743 u64 sas_addr;
744};
745
746struct esas2r_mem_desc {
747 struct list_head next_desc;
748 void *virt_addr;
749 u64 phys_addr;
750 void *pad;
751 void *esas2r_data;
752 u32 esas2r_param;
753 u32 size;
754};
755
756enum fw_event_type {
757 fw_event_null,
758 fw_event_lun_change,
759 fw_event_present,
760 fw_event_not_present,
761 fw_event_vda_ae
762};
763
764struct esas2r_vda_ae {
765 u32 signature;
766#define ESAS2R_VDA_EVENT_SIG 0x4154544F
767 u8 bus_number;
768 u8 devfn;
769 u8 pad[2];
770 union atto_vda_ae vda_ae;
771};
772
773struct esas2r_fw_event_work {
774 struct list_head list;
775 struct delayed_work work;
776 struct esas2r_adapter *a;
777 enum fw_event_type type;
778 u8 data[sizeof(struct esas2r_vda_ae)];
779};
780
781enum state {
782 FW_INVALID_ST,
783 FW_STATUS_ST,
784 FW_COMMAND_ST
785};
786
787struct esas2r_firmware {
788 enum state state;
789 struct esas2r_flash_img header;
790 u8 *data;
791 u64 phys;
792 int orig_len;
793 void *header_buff;
794 u64 header_buff_phys;
795};
796
797struct esas2r_adapter {
798 struct esas2r_target targetdb[ESAS2R_MAX_TARGETS];
799 struct esas2r_target *targetdb_end;
800 unsigned char *regs;
801 unsigned char *data_window;
802 u32 volatile flags;
803 #define AF_PORT_CHANGE (u32)(0x00000001)
804 #define AF_CHPRST_NEEDED (u32)(0x00000004)
805 #define AF_CHPRST_PENDING (u32)(0x00000008)
806 #define AF_CHPRST_DETECTED (u32)(0x00000010)
807 #define AF_BUSRST_NEEDED (u32)(0x00000020)
808 #define AF_BUSRST_PENDING (u32)(0x00000040)
809 #define AF_BUSRST_DETECTED (u32)(0x00000080)
810 #define AF_DISABLED (u32)(0x00000100)
811 #define AF_FLASH_LOCK (u32)(0x00000200)
812 #define AF_OS_RESET (u32)(0x00002000)
813 #define AF_FLASHING (u32)(0x00004000)
814 #define AF_POWER_MGT (u32)(0x00008000)
815 #define AF_NVR_VALID (u32)(0x00010000)
816 #define AF_DEGRADED_MODE (u32)(0x00020000)
817 #define AF_DISC_PENDING (u32)(0x00040000)
818 #define AF_TASKLET_SCHEDULED (u32)(0x00080000)
819 #define AF_HEARTBEAT (u32)(0x00200000)
820 #define AF_HEARTBEAT_ENB (u32)(0x00400000)
821 #define AF_NOT_PRESENT (u32)(0x00800000)
822 #define AF_CHPRST_STARTED (u32)(0x01000000)
823 #define AF_FIRST_INIT (u32)(0x02000000)
824 #define AF_POWER_DOWN (u32)(0x04000000)
825 #define AF_DISC_IN_PROG (u32)(0x08000000)
826 #define AF_COMM_LIST_TOGGLE (u32)(0x10000000)
827 #define AF_LEGACY_SGE_MODE (u32)(0x20000000)
828 #define AF_DISC_POLLED (u32)(0x40000000)
829 u32 volatile flags2;
830 #define AF2_SERIAL_FLASH (u32)(0x00000001)
831 #define AF2_DEV_SCAN (u32)(0x00000002)
832 #define AF2_DEV_CNT_OK (u32)(0x00000004)
833 #define AF2_COREDUMP_AVAIL (u32)(0x00000008)
834 #define AF2_COREDUMP_SAVED (u32)(0x00000010)
835 #define AF2_VDA_POWER_DOWN (u32)(0x00000100)
836 #define AF2_THUNDERLINK (u32)(0x00000200)
837 #define AF2_THUNDERBOLT (u32)(0x00000400)
838 #define AF2_INIT_DONE (u32)(0x00000800)
839 #define AF2_INT_PENDING (u32)(0x00001000)
840 #define AF2_TIMER_TICK (u32)(0x00002000)
841 #define AF2_IRQ_CLAIMED (u32)(0x00004000)
842 #define AF2_MSI_ENABLED (u32)(0x00008000)
843 atomic_t disable_cnt;
844 atomic_t dis_ints_cnt;
845 u32 int_stat;
846 u32 int_mask;
847 u32 volatile *outbound_copy;
848 struct list_head avail_request;
849 spinlock_t request_lock;
850 spinlock_t sg_list_lock;
851 spinlock_t queue_lock;
852 spinlock_t mem_lock;
853 struct list_head free_sg_list_head;
854 struct esas2r_mem_desc *sg_list_mds;
855 struct list_head active_list;
856 struct list_head defer_list;
857 struct esas2r_request **req_table;
858 union {
859 u16 prev_dev_cnt;
860 u32 heartbeat_time;
861 #define ESAS2R_HEARTBEAT_TIME (3000)
862 };
863 u32 chip_uptime;
864 #define ESAS2R_CHP_UPTIME_MAX (60000)
865 #define ESAS2R_CHP_UPTIME_CNT (20000)
866 u64 uncached_phys;
867 u8 *uncached;
868 struct esas2r_sas_nvram *nvram;
869 struct esas2r_request general_req;
870 u8 init_msg;
871 #define ESAS2R_INIT_MSG_START 1
872 #define ESAS2R_INIT_MSG_INIT 2
873 #define ESAS2R_INIT_MSG_GET_INIT 3
874 #define ESAS2R_INIT_MSG_REINIT 4
875 u16 cmd_ref_no;
876 u32 fw_version;
877 u32 fw_build;
878 u32 chip_init_time;
879 #define ESAS2R_CHPRST_TIME (180000)
880 #define ESAS2R_CHPRST_WAIT_TIME (2000)
881 u32 last_tick_time;
882 u32 window_base;
883 RQBUILDSGL build_sgl;
884 struct esas2r_request *first_ae_req;
885 u32 list_size;
886 u32 last_write;
887 u32 last_read;
888 u16 max_vdareq_size;
889 u16 disc_wait_cnt;
890 struct esas2r_mem_desc inbound_list_md;
891 struct esas2r_mem_desc outbound_list_md;
892 struct esas2r_disc_context disc_ctx;
893 u8 *disc_buffer;
894 u32 disc_start_time;
895 u32 disc_wait_time;
896 u32 flash_ver;
897 char flash_rev[16];
898 char fw_rev[16];
899 char image_type[16];
900 struct esas2r_flash_context flash_context;
901 u32 num_targets_backend;
902 u32 ioctl_tunnel;
903 struct tasklet_struct tasklet;
904 struct pci_dev *pcid;
905 struct Scsi_Host *host;
906 unsigned int index;
907 char name[32];
908 struct timer_list timer;
909 struct esas2r_firmware firmware;
910 wait_queue_head_t nvram_waiter;
911 int nvram_command_done;
912 wait_queue_head_t fm_api_waiter;
913 int fm_api_command_done;
914 wait_queue_head_t vda_waiter;
915 int vda_command_done;
916 u8 *vda_buffer;
917 u64 ppvda_buffer;
918#define VDA_BUFFER_HEADER_SZ (offsetof(struct atto_ioctl_vda, data))
919#define VDA_MAX_BUFFER_SIZE (0x40000 + VDA_BUFFER_HEADER_SZ)
920 wait_queue_head_t fs_api_waiter;
921 int fs_api_command_done;
922 u64 ppfs_api_buffer;
923 u8 *fs_api_buffer;
924 u32 fs_api_buffer_size;
925 wait_queue_head_t buffered_ioctl_waiter;
926 int buffered_ioctl_done;
927 int uncached_size;
928 struct workqueue_struct *fw_event_q;
929 struct list_head fw_event_list;
930 spinlock_t fw_event_lock;
931 u8 fw_events_off; /* if '1', then ignore events */
932 char fw_event_q_name[ESAS2R_KOBJ_NAME_LEN];
933 /*
934 * intr_mode stores the interrupt mode currently being used by this
935 * adapter. it defaults to the interrupt_mode module parameter, but
936 * changes when the adapter cannot actually operate in the requested
937 * mode.
938 */
939 int intr_mode;
940#define INTR_MODE_LEGACY 0
941#define INTR_MODE_MSI 1
942#define INTR_MODE_MSIX 2
943 struct esas2r_sg_context fm_api_sgc;
944 u8 *save_offset;
945 struct list_head vrq_mds_head;
946 struct esas2r_mem_desc *vrq_mds;
947 int num_vrqs;
948 struct semaphore fm_api_semaphore;
949 struct semaphore fs_api_semaphore;
950 struct semaphore nvram_semaphore;
951 struct atto_ioctl *local_atto_ioctl;
952 u8 fw_coredump_buff[ESAS2R_FWCOREDUMP_SZ];
953 unsigned int sysfs_fw_created:1;
954 unsigned int sysfs_fs_created:1;
955 unsigned int sysfs_vda_created:1;
956 unsigned int sysfs_hw_created:1;
957 unsigned int sysfs_live_nvram_created:1;
958 unsigned int sysfs_default_nvram_created:1;
959};
960
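/*
 * Illustrative sketch (editor's note, not part of the patch): one way
 * intr_mode could be derived from the interrupt_mode module parameter,
 * falling back to legacy INTx when MSI cannot be enabled. Hypothetical
 * helper; the real policy lives in the driver's init code.
 */
static inline int example_pick_intr_mode(struct esas2r_adapter *a, int wanted)
{
	if (wanted == INTR_MODE_MSI && pci_enable_msi(a->pcid) == 0)
		return INTR_MODE_MSI;
	return INTR_MODE_LEGACY;	/* requested mode unavailable */
}
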
961/*
962 * Function Declarations
963 * SCSI functions
964 */
965int esas2r_release(struct Scsi_Host *);
966const char *esas2r_info(struct Scsi_Host *);
967int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
968 struct esas2r_sas_nvram *data);
969int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg);
970int esas2r_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
971u8 handle_hba_ioctl(struct esas2r_adapter *a,
972 struct atto_ioctl *ioctl_hba);
973int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd);
974int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh);
975int esas2r_slave_alloc(struct scsi_device *dev);
976int esas2r_slave_configure(struct scsi_device *dev);
977void esas2r_slave_destroy(struct scsi_device *dev);
978int esas2r_change_queue_depth(struct scsi_device *dev, int depth, int reason);
979int esas2r_change_queue_type(struct scsi_device *dev, int type);
980long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
981
982/* SCSI error handler (eh) functions */
983int esas2r_eh_abort(struct scsi_cmnd *cmd);
984int esas2r_device_reset(struct scsi_cmnd *cmd);
985int esas2r_host_reset(struct scsi_cmnd *cmd);
986int esas2r_bus_reset(struct scsi_cmnd *cmd);
987int esas2r_target_reset(struct scsi_cmnd *cmd);
988
989/* Internal functions */
990int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
991 int index);
992int esas2r_cleanup(struct Scsi_Host *host);
993int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count);
994int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off,
995 int count);
996int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count);
997int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
998 int count);
999int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count);
1000int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
1001 int count);
1002void esas2r_adapter_tasklet(unsigned long context);
1003irqreturn_t esas2r_interrupt(int irq, void *dev_id);
1004irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id);
1005void esas2r_kickoff_timer(struct esas2r_adapter *a);
1006int esas2r_suspend(struct pci_dev *pcid, pm_message_t state);
1007int esas2r_resume(struct pci_dev *pcid);
1008void esas2r_fw_event_off(struct esas2r_adapter *a);
1009void esas2r_fw_event_on(struct esas2r_adapter *a);
1010bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
1011 struct esas2r_sas_nvram *nvram);
1012void esas2r_nvram_get_defaults(struct esas2r_adapter *a,
1013 struct esas2r_sas_nvram *nvram);
1014void esas2r_complete_request_cb(struct esas2r_adapter *a,
1015 struct esas2r_request *rq);
1016void esas2r_reset_detected(struct esas2r_adapter *a);
1017void esas2r_target_state_changed(struct esas2r_adapter *ha, u16 targ_id,
1018 u8 state);
1019int esas2r_req_status_to_error(u8 req_stat);
1020void esas2r_kill_adapter(int i);
1021void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq);
1022struct esas2r_request *esas2r_alloc_request(struct esas2r_adapter *a);
1023u32 esas2r_get_uncached_size(struct esas2r_adapter *a);
1024bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
1025 void **uncached_area);
1026bool esas2r_check_adapter(struct esas2r_adapter *a);
1027bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll);
1028void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq);
1029bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
1030 struct esas2r_request *rqaux, u8 task_mgt_func);
1031void esas2r_do_tasklet_tasks(struct esas2r_adapter *a);
1032void esas2r_adapter_interrupt(struct esas2r_adapter *a);
1033void esas2r_do_deferred_processes(struct esas2r_adapter *a);
1034void esas2r_reset_bus(struct esas2r_adapter *a);
1035void esas2r_reset_adapter(struct esas2r_adapter *a);
1036void esas2r_timer_tick(struct esas2r_adapter *a);
1037const char *esas2r_get_model_name(struct esas2r_adapter *a);
1038const char *esas2r_get_model_name_short(struct esas2r_adapter *a);
1039u32 esas2r_stall_execution(struct esas2r_adapter *a, u32 start_time,
1040 u32 *delay);
1041void esas2r_build_flash_req(struct esas2r_adapter *a,
1042 struct esas2r_request *rq,
1043 u8 sub_func,
1044 u8 cksum,
1045 u32 addr,
1046 u32 length);
1047void esas2r_build_mgt_req(struct esas2r_adapter *a,
1048 struct esas2r_request *rq,
1049 u8 sub_func,
1050 u8 scan_gen,
1051 u16 dev_index,
1052 u32 length,
1053 void *data);
1054void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq);
1055void esas2r_build_cli_req(struct esas2r_adapter *a,
1056 struct esas2r_request *rq,
1057 u32 length,
1058 u32 cmd_rsp_len);
1059void esas2r_build_ioctl_req(struct esas2r_adapter *a,
1060 struct esas2r_request *rq,
1061 u32 length,
1062 u8 sub_func);
1063void esas2r_build_cfg_req(struct esas2r_adapter *a,
1064 struct esas2r_request *rq,
1065 u8 sub_func,
1066 u32 length,
1067 void *data);
1068void esas2r_power_down(struct esas2r_adapter *a);
1069bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll);
1070void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq);
1071u32 esas2r_map_data_window(struct esas2r_adapter *a, u32 addr_lo);
1072bool esas2r_process_fs_ioctl(struct esas2r_adapter *a,
1073 struct esas2r_ioctl_fs *fs,
1074 struct esas2r_request *rq,
1075 struct esas2r_sg_context *sgc);
1076bool esas2r_read_flash_block(struct esas2r_adapter *a, void *to, u32 from,
1077 u32 size);
1078bool esas2r_read_mem_block(struct esas2r_adapter *a, void *to, u32 from,
1079 u32 size);
1080bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi,
1081 struct esas2r_request *rq, struct esas2r_sg_context *sgc);
1082void esas2r_force_interrupt(struct esas2r_adapter *a);
1083void esas2r_local_start_request(struct esas2r_adapter *a,
1084 struct esas2r_request *rq);
1085void esas2r_process_adapter_reset(struct esas2r_adapter *a);
1086void esas2r_complete_request(struct esas2r_adapter *a,
1087 struct esas2r_request *rq);
1088void esas2r_dummy_complete(struct esas2r_adapter *a,
1089 struct esas2r_request *rq);
1090void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq);
1091void esas2r_start_vda_request(struct esas2r_adapter *a,
1092 struct esas2r_request *rq);
1093bool esas2r_read_flash_rev(struct esas2r_adapter *a);
1094bool esas2r_read_image_type(struct esas2r_adapter *a);
1095bool esas2r_nvram_read_direct(struct esas2r_adapter *a);
1096bool esas2r_nvram_validate(struct esas2r_adapter *a);
1097void esas2r_nvram_set_defaults(struct esas2r_adapter *a);
1098bool esas2r_print_flash_rev(struct esas2r_adapter *a);
1099void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt);
1100bool esas2r_init_msgs(struct esas2r_adapter *a);
1101bool esas2r_is_adapter_present(struct esas2r_adapter *a);
1102void esas2r_nuxi_mgt_data(u8 function, void *data);
1103void esas2r_nuxi_cfg_data(u8 function, void *data);
1104void esas2r_nuxi_ae_data(union atto_vda_ae *ae);
1105void esas2r_reset_chip(struct esas2r_adapter *a);
1106void esas2r_log_request_failure(struct esas2r_adapter *a,
1107 struct esas2r_request *rq);
1108void esas2r_polled_interrupt(struct esas2r_adapter *a);
1109bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq,
1110 u8 status);
1111bool esas2r_build_sg_list_sge(struct esas2r_adapter *a,
1112 struct esas2r_sg_context *sgc);
1113bool esas2r_build_sg_list_prd(struct esas2r_adapter *a,
1114 struct esas2r_sg_context *sgc);
1115void esas2r_targ_db_initialize(struct esas2r_adapter *a);
1116void esas2r_targ_db_remove_all(struct esas2r_adapter *a, bool notify);
1117void esas2r_targ_db_report_changes(struct esas2r_adapter *a);
1118struct esas2r_target *esas2r_targ_db_add_raid(struct esas2r_adapter *a,
1119 struct esas2r_disc_context *dc);
1120struct esas2r_target *esas2r_targ_db_add_pthru(struct esas2r_adapter *a,
1121 struct esas2r_disc_context *dc,
1122 u8 *ident,
1123 u8 ident_len);
1124void esas2r_targ_db_remove(struct esas2r_adapter *a, struct esas2r_target *t);
1125struct esas2r_target *esas2r_targ_db_find_by_sas_addr(struct esas2r_adapter *a,
1126 u64 *sas_addr);
1127struct esas2r_target *esas2r_targ_db_find_by_ident(struct esas2r_adapter *a,
1128 void *identifier,
1129 u8 ident_len);
1130u16 esas2r_targ_db_find_next_present(struct esas2r_adapter *a, u16 target_id);
1131struct esas2r_target *esas2r_targ_db_find_by_virt_id(struct esas2r_adapter *a,
1132 u16 virt_id);
1133u16 esas2r_targ_db_get_tgt_cnt(struct esas2r_adapter *a);
1134void esas2r_disc_initialize(struct esas2r_adapter *a);
1135void esas2r_disc_start_waiting(struct esas2r_adapter *a);
1136void esas2r_disc_check_for_work(struct esas2r_adapter *a);
1137void esas2r_disc_check_complete(struct esas2r_adapter *a);
1138void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt);
1139bool esas2r_disc_start_port(struct esas2r_adapter *a);
1140void esas2r_disc_local_start_request(struct esas2r_adapter *a,
1141 struct esas2r_request *rq);
1142bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str);
1143bool esas2r_process_vda_ioctl(struct esas2r_adapter *a,
1144 struct atto_ioctl_vda *vi,
1145 struct esas2r_request *rq,
1146 struct esas2r_sg_context *sgc);
1147void esas2r_queue_fw_event(struct esas2r_adapter *a,
1148 enum fw_event_type type,
1149 void *data,
1150 int data_sz);
1151
1152/* Inline functions */
1153static inline u32 esas2r_lock_set_flags(volatile u32 *flags, u32 bits)
1154{
1155 return test_and_set_bit(ilog2(bits), (volatile unsigned long *)flags);
1156}
1157
1158static inline u32 esas2r_lock_clear_flags(volatile u32 *flags, u32 bits)
1159{
1160 return test_and_clear_bit(ilog2(bits),
1161 (volatile unsigned long *)flags);
1162}
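
/*
 * Illustrative usage (editor's note, not part of the patch): the helpers
 * above operate on a single flag bit and return its previous state, so
 * they can double as a simple try-lock. Hypothetical helper.
 */
static inline bool example_try_flash_lock(struct esas2r_adapter *a)
{
	/* fails if another path already holds AF_FLASH_LOCK */
	return esas2r_lock_set_flags(&a->flags, AF_FLASH_LOCK) == 0;
}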
1163
1164/* Allocate a chip scatter/gather list entry */
1165static inline struct esas2r_mem_desc *esas2r_alloc_sgl(struct esas2r_adapter *a)
1166{
1167 unsigned long flags;
1168 struct list_head *sgl;
1169 struct esas2r_mem_desc *result = NULL;
1170
1171 spin_lock_irqsave(&a->sg_list_lock, flags);
1172 if (likely(!list_empty(&a->free_sg_list_head))) {
1173 sgl = a->free_sg_list_head.next;
1174 result = list_entry(sgl, struct esas2r_mem_desc, next_desc);
1175 list_del_init(sgl);
1176 }
1177 spin_unlock_irqrestore(&a->sg_list_lock, flags);
1178
1179 return result;
1180}
1181
1182/* Initialize a scatter/gather context */
1183static inline void esas2r_sgc_init(struct esas2r_sg_context *sgc,
1184 struct esas2r_adapter *a,
1185 struct esas2r_request *rq,
1186 struct atto_vda_sge *first)
1187{
1188 sgc->adapter = a;
1189 sgc->first_req = rq;
1190
1191 /*
1192 * set the limit pointer such that an SGE pointer above this value
1193 * would be the first one to overflow the SGL.
1194 */
1195 sgc->sge.a64.limit = (struct atto_vda_sge *)((u8 *)rq->vrq
1196 + (sizeof(union
1197 atto_vda_req) /
1198 8)
1199 - sizeof(struct
1200 atto_vda_sge));
1201 if (first) {
1202 sgc->sge.a64.last =
1203 sgc->sge.a64.curr = first;
1204 rq->vrq->scsi.sg_list_offset = (u8)
1205 ((u8 *)first -
1206 (u8 *)rq->vrq);
1207 } else {
1208 sgc->sge.a64.last =
1209 sgc->sge.a64.curr = &rq->vrq->scsi.u.sge[0];
1210 rq->vrq->scsi.sg_list_offset =
1211 (u8)offsetof(struct atto_vda_scsi_req, u.sge);
1212 }
1213 sgc->sge.a64.chain = NULL;
1214}
1215
1216static inline void esas2r_rq_init_request(struct esas2r_request *rq,
1217 struct esas2r_adapter *a)
1218{
1219 union atto_vda_req *vrq = rq->vrq;
1220 u32 handle;
1221
1222 INIT_LIST_HEAD(&rq->sg_table_head);
1223 rq->data_buf = (void *)(vrq + 1);
1224 rq->interrupt_cb = NULL;
1225 rq->comp_cb = esas2r_complete_request_cb;
1226 rq->flags = 0;
1227 rq->timeout = 0;
1228 rq->req_stat = RS_PENDING;
1229 rq->req_type = RT_INI_REQ;
1230
1231 /* clear the outbound response */
1232 rq->func_rsp.dwords[0] = 0;
1233 rq->func_rsp.dwords[1] = 0;
1234
1235	/*
1236	 * Clear the size of the VDA request.  esas2r_build_sg_list() only
1237	 * allows the size of the request to grow; some management requests
1238	 * go through it twice, and the second pass sets a smaller request
1239	 * size.  If nothing modifies it, it defaults to the size of the
1240	 * entire VDA request.
1241	 */
1242 rq->vda_req_sz = RQ_SIZE_DEFAULT;
1243
1244 /* req_table entry should be NULL at this point - if not, halt */
1245
1246 if (a->req_table[LOWORD(vrq->scsi.handle)])
1247 esas2r_bugon();
1248
1249 /* fill in the table for this handle so we can get back to the
1250 * request.
1251 */
1252 a->req_table[LOWORD(vrq->scsi.handle)] = rq;
1253
1254 /*
1255 * add a reference number to the handle to make it unique (until it
1256 * wraps of course) while preserving the upper word
1257 */
1258
1259 handle = be32_to_cpu(vrq->scsi.handle) & 0xFFFF0000;
1260 vrq->scsi.handle = cpu_to_be32(handle + a->cmd_ref_no++);
1261
1262 /*
1263 * the following formats a SCSI request. the caller can override as
1264 * necessary. clear_vda_request can be called to clear the VDA
1265 * request for another type of request.
1266 */
1267 vrq->scsi.function = VDA_FUNC_SCSI;
1268 vrq->scsi.sense_len = SENSE_DATA_SZ;
1269
1270 /* clear out sg_list_offset and chain_offset */
1271 vrq->scsi.sg_list_offset = 0;
1272 vrq->scsi.chain_offset = 0;
1273 vrq->scsi.flags = 0;
1274 vrq->scsi.reserved = 0;
1275
1276 /* set the sense buffer to be the data payload buffer */
1277 vrq->scsi.ppsense_buf
1278 = cpu_to_le64(rq->vrq_md->phys_addr +
1279 sizeof(union atto_vda_req));
1280}
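For illustration, the table fill above implies this lookup for recovering a request from a completed handle (the helper name is hypothetical; the completion paths index req_table the same way):

	/* Hypothetical helper: map a raw VDA handle back to its request
	 * via the low word, mirroring the req_table fill above. */
	static inline struct esas2r_request *
	example_rq_from_handle(struct esas2r_adapter *a, u32 raw_handle)
	{
		return a->req_table[LOWORD(raw_handle)];
	}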
1281
1282static inline void esas2r_rq_free_sg_lists(struct esas2r_request *rq,
1283 struct esas2r_adapter *a)
1284{
1285 unsigned long flags;
1286
1287 if (list_empty(&rq->sg_table_head))
1288 return;
1289
1290 spin_lock_irqsave(&a->sg_list_lock, flags);
1291 list_splice_tail_init(&rq->sg_table_head, &a->free_sg_list_head);
1292 spin_unlock_irqrestore(&a->sg_list_lock, flags);
1293}
1294
1295static inline void esas2r_rq_destroy_request(struct esas2r_request *rq,
1296 struct esas2r_adapter *a)
1297
1298{
1299 esas2r_rq_free_sg_lists(rq, a);
1300 a->req_table[LOWORD(rq->vrq->scsi.handle)] = NULL;
1301 rq->data_buf = NULL;
1302}
1303
1304static inline bool esas2r_is_tasklet_pending(struct esas2r_adapter *a)
1305{
1306 return (a->flags & (AF_BUSRST_NEEDED | AF_BUSRST_DETECTED
1307 | AF_CHPRST_NEEDED | AF_CHPRST_DETECTED
1308 | AF_PORT_CHANGE))
1309 ? true : false;
1310}
1311
1312/*
1313 * Build the scatter/gather list for an I/O request according to the
1314 * specifications placed in the esas2r_sg_context. The caller must initialize
1315 * struct esas2r_sg_context prior to the initial call by calling
1316 * esas2r_sgc_init()
1317 */
1318static inline bool esas2r_build_sg_list(struct esas2r_adapter *a,
1319 struct esas2r_request *rq,
1320 struct esas2r_sg_context *sgc)
1321{
1322 if (unlikely(le32_to_cpu(rq->vrq->scsi.length) == 0))
1323 return true;
1324
1325 return (*a->build_sgl)(a, sgc);
1326}
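A condensed usage sketch, modeled on esas2r_disc_passthru_dev_addr() later in this patch; the get_phys_addr callback is the discovery one defined there, and the transfer length is a placeholder:

	/* Illustrative call sequence: initialize the context, then build
	 * the list, destroying the request if the build fails. */
	static bool example_build(struct esas2r_adapter *a,
				  struct esas2r_request *rq)
	{
		struct esas2r_sg_context sgc;

		sgc.cur_offset = NULL;
		sgc.get_phys_addr = (PGETPHYSADDR)esas2r_disc_get_phys_addr;
		sgc.length = 512;	/* placeholder length */

		esas2r_sgc_init(&sgc, a, rq, rq->vrq->ioctl.sge);

		if (!esas2r_build_sg_list(a, rq, &sgc)) {
			esas2r_rq_destroy_request(rq, a);
			return false;
		}
		return true;
	}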
1327
1328static inline void esas2r_disable_chip_interrupts(struct esas2r_adapter *a)
1329{
1330 if (atomic_inc_return(&a->dis_ints_cnt) == 1)
1331 esas2r_write_register_dword(a, MU_INT_MASK_OUT,
1332 ESAS2R_INT_DIS_MASK);
1333}
1334
1335static inline void esas2r_enable_chip_interrupts(struct esas2r_adapter *a)
1336{
1337 if (atomic_dec_return(&a->dis_ints_cnt) == 0)
1338 esas2r_write_register_dword(a, MU_INT_MASK_OUT,
1339 ESAS2R_INT_ENB_MASK);
1340}
1341
1342/* Schedule a TASKLET to perform non-interrupt tasks that may require delays
1343 * or long completion times.
1344 */
1345static inline void esas2r_schedule_tasklet(struct esas2r_adapter *a)
1346{
1347	/* make sure we don't schedule twice; the helper returns the
1348	 * previous state of the bit, so zero means we were first */
1349	if (!esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED))
1350		tasklet_hi_schedule(&a->tasklet);
1351}
1352
1353static inline void esas2r_enable_heartbeat(struct esas2r_adapter *a)
1354{
1355 if (!(a->flags & (AF_DEGRADED_MODE | AF_CHPRST_PENDING))
1356 && (a->nvram->options2 & SASNVR2_HEARTBEAT))
1357 esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT_ENB);
1358 else
1359 esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB);
1360}
1361
1362static inline void esas2r_disable_heartbeat(struct esas2r_adapter *a)
1363{
1364 esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB);
1365 esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT);
1366}
1367
1368/* Set the initial state for resetting the adapter on the next pass through
1369 * esas2r_do_deferred.
1370 */
1371static inline void esas2r_local_reset_adapter(struct esas2r_adapter *a)
1372{
1373 esas2r_disable_heartbeat(a);
1374
1375 esas2r_lock_set_flags(&a->flags, AF_CHPRST_NEEDED);
1376 esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING);
1377 esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING);
1378}
1379
1380/* See if an interrupt is pending on the adapter. */
1381static inline bool esas2r_adapter_interrupt_pending(struct esas2r_adapter *a)
1382{
1383 u32 intstat;
1384
1385 if (a->int_mask == 0)
1386 return false;
1387
1388 intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
1389
1390 if ((intstat & a->int_mask) == 0)
1391 return false;
1392
1393 esas2r_disable_chip_interrupts(a);
1394
1395 a->int_stat = intstat;
1396 a->int_mask = 0;
1397
1398 return true;
1399}
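A sketch of the polling pattern this helper supports (the real consumers include esas2r_polled_interrupt(), invoked elsewhere in this patch; handling details are omitted):

	/* Illustrative only: after a true return, a->int_stat holds the
	 * latched status and chip interrupts are masked until re-enabled. */
	static void example_poll_once(struct esas2r_adapter *a)
	{
		if (!esas2r_adapter_interrupt_pending(a))
			return;

		/* ... process a->int_stat here ... */

		esas2r_enable_chip_interrupts(a);
	}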
1400
1401static inline u16 esas2r_targ_get_id(struct esas2r_target *t,
1402 struct esas2r_adapter *a)
1403{
1404 return (u16)(uintptr_t)(t - a->targetdb);
1405}
1406
1407/* Build and start an asynchronous event request */
1408static inline void esas2r_start_ae_request(struct esas2r_adapter *a,
1409 struct esas2r_request *rq)
1410{
1411 unsigned long flags;
1412
1413 esas2r_build_ae_req(a, rq);
1414
1415 spin_lock_irqsave(&a->queue_lock, flags);
1416 esas2r_start_vda_request(a, rq);
1417 spin_unlock_irqrestore(&a->queue_lock, flags);
1418}
1419
1420static inline void esas2r_comp_list_drain(struct esas2r_adapter *a,
1421 struct list_head *comp_list)
1422{
1423 struct esas2r_request *rq;
1424 struct list_head *element, *next;
1425
1426 list_for_each_safe(element, next, comp_list) {
1427 rq = list_entry(element, struct esas2r_request, comp_list);
1428 list_del_init(element);
1429 esas2r_complete_request(a, rq);
1430 }
1431}
1432
1433/* sysfs handlers */
1434extern struct bin_attribute bin_attr_fw;
1435extern struct bin_attribute bin_attr_fs;
1436extern struct bin_attribute bin_attr_vda;
1437extern struct bin_attribute bin_attr_hw;
1438extern struct bin_attribute bin_attr_live_nvram;
1439extern struct bin_attribute bin_attr_default_nvram;
1440
1441#endif /* ESAS2R_H */
diff --git a/drivers/scsi/esas2r/esas2r_disc.c b/drivers/scsi/esas2r/esas2r_disc.c
new file mode 100644
index 000000000000..dec6c334ce3e
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_disc.c
@@ -0,0 +1,1189 @@
1/*
2 * linux/drivers/scsi/esas2r/esas2r_disc.c
3 * esas2r device discovery routines
4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
6 * (mailto:linuxdrivers@attotech.com)
7 */
8/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
9/*
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * NO WARRANTY
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
29 *
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38 *
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
42 */
43/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
44
45#include "esas2r.h"
46
47/* Miscellaneous internal discovery routines */
48static void esas2r_disc_abort(struct esas2r_adapter *a,
49 struct esas2r_request *rq);
50static bool esas2r_disc_continue(struct esas2r_adapter *a,
51 struct esas2r_request *rq);
52static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a);
53static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr);
54static bool esas2r_disc_start_request(struct esas2r_adapter *a,
55 struct esas2r_request *rq);
56
57/* Internal discovery routines that process the states */
58static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
59 struct esas2r_request *rq);
60static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
61 struct esas2r_request *rq);
62static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
63 struct esas2r_request *rq);
64static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
65 struct esas2r_request *rq);
66static bool esas2r_disc_part_info(struct esas2r_adapter *a,
67 struct esas2r_request *rq);
68static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
69 struct esas2r_request *rq);
70static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
71 struct esas2r_request *rq);
72static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
73 struct esas2r_request *rq);
74static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
75 struct esas2r_request *rq);
76static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
77 struct esas2r_request *rq);
78static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
79 struct esas2r_request *rq);
80static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
81 struct esas2r_request *rq);
82
83void esas2r_disc_initialize(struct esas2r_adapter *a)
84{
85 struct esas2r_sas_nvram *nvr = a->nvram;
86
87 esas2r_trace_enter();
88
89 esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
90 esas2r_lock_clear_flags(&a->flags2, AF2_DEV_SCAN);
91 esas2r_lock_clear_flags(&a->flags2, AF2_DEV_CNT_OK);
92
93 a->disc_start_time = jiffies_to_msecs(jiffies);
94 a->disc_wait_time = nvr->dev_wait_time * 1000;
95 a->disc_wait_cnt = nvr->dev_wait_count;
96
97 if (a->disc_wait_cnt > ESAS2R_MAX_TARGETS)
98 a->disc_wait_cnt = ESAS2R_MAX_TARGETS;
99
100	/*
101	 * If we are doing chip reset or power management processing, always
102	 * wait for devices.  Use the NVRAM device count if it is greater
103	 * than the previously discovered device count.
104	 */
105
106 esas2r_hdebug("starting discovery...");
107
108 a->general_req.interrupt_cx = NULL;
109
110 if (a->flags & (AF_CHPRST_DETECTED | AF_POWER_MGT)) {
111 if (a->prev_dev_cnt == 0) {
112 /* Don't bother waiting if there is nothing to wait
113 * for.
114 */
115 a->disc_wait_time = 0;
116 } else {
117 /*
118 * Set the device wait count to what was previously
119 * found. We don't care if the user only configured
120 * a time because we know the exact count to wait for.
121 * There is no need to honor the user's wishes to
122 * always wait the full time.
123 */
124 a->disc_wait_cnt = a->prev_dev_cnt;
125
126 /*
127 * bump the minimum wait time to 15 seconds since the
128 * default is 3 (system boot or the boot driver usually
129 * buys us more time).
130 */
131 if (a->disc_wait_time < 15000)
132 a->disc_wait_time = 15000;
133 }
134 }
135
136 esas2r_trace("disc wait count: %d", a->disc_wait_cnt);
137 esas2r_trace("disc wait time: %d", a->disc_wait_time);
138
139 if (a->disc_wait_time == 0)
140 esas2r_disc_check_complete(a);
141
142 esas2r_trace_exit();
143}
144
145void esas2r_disc_start_waiting(struct esas2r_adapter *a)
146{
147 unsigned long flags;
148
149 spin_lock_irqsave(&a->mem_lock, flags);
150
151 if (a->disc_ctx.disc_evt)
152 esas2r_disc_start_port(a);
153
154 spin_unlock_irqrestore(&a->mem_lock, flags);
155}
156
157void esas2r_disc_check_for_work(struct esas2r_adapter *a)
158{
159 struct esas2r_request *rq = &a->general_req;
160
161 /* service any pending interrupts first */
162
163 esas2r_polled_interrupt(a);
164
165 /*
166 * now, interrupt processing may have queued up a discovery event. go
167 * see if we have one to start. we couldn't start it in the ISR since
168 * polled discovery would cause a deadlock.
169 */
170
171 esas2r_disc_start_waiting(a);
172
173 if (rq->interrupt_cx == NULL)
174 return;
175
176 if (rq->req_stat == RS_STARTED
177 && rq->timeout <= RQ_MAX_TIMEOUT) {
178 /* wait for the current discovery request to complete. */
179 esas2r_wait_request(a, rq);
180
181 if (rq->req_stat == RS_TIMEOUT) {
182 esas2r_disc_abort(a, rq);
183 esas2r_local_reset_adapter(a);
184 return;
185 }
186 }
187
188 if (rq->req_stat == RS_PENDING
189 || rq->req_stat == RS_STARTED)
190 return;
191
192 esas2r_disc_continue(a, rq);
193}
194
195void esas2r_disc_check_complete(struct esas2r_adapter *a)
196{
197 unsigned long flags;
198
199 esas2r_trace_enter();
200
201 /* check to see if we should be waiting for devices */
202 if (a->disc_wait_time) {
203 u32 currtime = jiffies_to_msecs(jiffies);
204 u32 time = currtime - a->disc_start_time;
205
206 /*
207 * Wait until the device wait time is exhausted or the device
208 * wait count is satisfied.
209 */
210 if (time < a->disc_wait_time
211 && (esas2r_targ_db_get_tgt_cnt(a) < a->disc_wait_cnt
212 || a->disc_wait_cnt == 0)) {
213 /* After three seconds of waiting, schedule a scan. */
214			/* a zero return means the bit was newly set */
215			if (time >= 3000
216			    && !esas2r_lock_set_flags(&a->flags2,
217						      AF2_DEV_SCAN)) {
218 spin_lock_irqsave(&a->mem_lock, flags);
219 esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
220 spin_unlock_irqrestore(&a->mem_lock, flags);
221 }
222
223 esas2r_trace_exit();
224 return;
225 }
226
227 /*
228 * We are done waiting...we think. Adjust the wait time to
229 * consume events after the count is met.
230 */
231	/* adjust the wait time only once; zero means newly set */
232	if (!esas2r_lock_set_flags(&a->flags2, AF2_DEV_CNT_OK))
233 a->disc_wait_time = time + 3000;
234
235 /* If we haven't done a full scan yet, do it now. */
236		/* zero return means the scan flag was newly set */
237		if (!esas2r_lock_set_flags(&a->flags2,
238					   AF2_DEV_SCAN)) {
239 spin_lock_irqsave(&a->mem_lock, flags);
240 esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
241 spin_unlock_irqrestore(&a->mem_lock, flags);
242
243 esas2r_trace_exit();
244 return;
245 }
246
247 /*
248 * Now, if there is still time left to consume events, continue
249 * waiting.
250 */
251 if (time < a->disc_wait_time) {
252 esas2r_trace_exit();
253 return;
254 }
255 } else {
256		/* zero return means the scan flag was newly set */
257		if (!esas2r_lock_set_flags(&a->flags2,
258					   AF2_DEV_SCAN)) {
259 spin_lock_irqsave(&a->mem_lock, flags);
260 esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
261 spin_unlock_irqrestore(&a->mem_lock, flags);
262 }
263 }
264
265 /* We want to stop waiting for devices. */
266 a->disc_wait_time = 0;
267
268 if ((a->flags & AF_DISC_POLLED)
269 && (a->flags & AF_DISC_IN_PROG)) {
270 /*
271 * Polled discovery is still pending so continue the active
272 * discovery until it is done. At that point, we will stop
273 * polled discovery and transition to interrupt driven
274 * discovery.
275 */
276 } else {
277 /*
278 * Done waiting for devices. Note that we get here immediately
279 * after deferred waiting completes because that is interrupt
280		 * driven; i.e., there is no transition.
281 */
282 esas2r_disc_fix_curr_requests(a);
283 esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);
284
285 /*
286 * We have deferred target state changes until now because we
287 * don't want to report any removals (due to the first arrival)
288 * until the device wait time expires.
289 */
290 esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE);
291 }
292
293 esas2r_trace_exit();
294}
295
296void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt)
297{
298 struct esas2r_disc_context *dc = &a->disc_ctx;
299
300 esas2r_trace_enter();
301
302 esas2r_trace("disc_event: %d", disc_evt);
303
304 /* Initialize the discovery context */
305 dc->disc_evt |= disc_evt;
306
307 /*
308 * Don't start discovery before or during polled discovery. if we did,
309 * we would have a deadlock if we are in the ISR already.
310 */
311 if (!(a->flags & (AF_CHPRST_PENDING | AF_DISC_POLLED)))
312 esas2r_disc_start_port(a);
313
314 esas2r_trace_exit();
315}
316
317bool esas2r_disc_start_port(struct esas2r_adapter *a)
318{
319 struct esas2r_request *rq = &a->general_req;
320 struct esas2r_disc_context *dc = &a->disc_ctx;
321 bool ret;
322
323 esas2r_trace_enter();
324
325 if (a->flags & AF_DISC_IN_PROG) {
326 esas2r_trace_exit();
327
328 return false;
329 }
330
331 /* If there is a discovery waiting, process it. */
332 if (dc->disc_evt) {
333 if ((a->flags & AF_DISC_POLLED)
334 && a->disc_wait_time == 0) {
335 /*
336 * We are doing polled discovery, but we no longer want
337 * to wait for devices. Stop polled discovery and
338 * transition to interrupt driven discovery.
339 */
340
341 esas2r_trace_exit();
342
343 return false;
344 }
345 } else {
346 /* Discovery is complete. */
347
348 esas2r_hdebug("disc done");
349
350 esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE);
351
352 esas2r_trace_exit();
353
354 return false;
355 }
356
357 /* Handle the discovery context */
358 esas2r_trace("disc_evt: %d", dc->disc_evt);
359 esas2r_lock_set_flags(&a->flags, AF_DISC_IN_PROG);
360 dc->flags = 0;
361
362 if (a->flags & AF_DISC_POLLED)
363 dc->flags |= DCF_POLLED;
364
365 rq->interrupt_cx = dc;
366 rq->req_stat = RS_SUCCESS;
367
368 /* Decode the event code */
369 if (dc->disc_evt & DCDE_DEV_SCAN) {
370 dc->disc_evt &= ~DCDE_DEV_SCAN;
371
372 dc->flags |= DCF_DEV_SCAN;
373 dc->state = DCS_BLOCK_DEV_SCAN;
374 } else if (dc->disc_evt & DCDE_DEV_CHANGE) {
375 dc->disc_evt &= ~DCDE_DEV_CHANGE;
376
377 dc->flags |= DCF_DEV_CHANGE;
378 dc->state = DCS_DEV_RMV;
379 }
380
381 /* Continue interrupt driven discovery */
382 if (!(a->flags & AF_DISC_POLLED))
383 ret = esas2r_disc_continue(a, rq);
384 else
385 ret = true;
386
387 esas2r_trace_exit();
388
389 return ret;
390}
391
392static bool esas2r_disc_continue(struct esas2r_adapter *a,
393 struct esas2r_request *rq)
394{
395 struct esas2r_disc_context *dc =
396 (struct esas2r_disc_context *)rq->interrupt_cx;
397 bool rslt;
398
399 /* Device discovery/removal */
400 while (dc->flags & (DCF_DEV_CHANGE | DCF_DEV_SCAN)) {
401 rslt = false;
402
403 switch (dc->state) {
404 case DCS_DEV_RMV:
405
406 rslt = esas2r_disc_dev_remove(a, rq);
407 break;
408
409 case DCS_DEV_ADD:
410
411 rslt = esas2r_disc_dev_add(a, rq);
412 break;
413
414 case DCS_BLOCK_DEV_SCAN:
415
416 rslt = esas2r_disc_block_dev_scan(a, rq);
417 break;
418
419 case DCS_RAID_GRP_INFO:
420
421 rslt = esas2r_disc_raid_grp_info(a, rq);
422 break;
423
424 case DCS_PART_INFO:
425
426 rslt = esas2r_disc_part_info(a, rq);
427 break;
428
429 case DCS_PT_DEV_INFO:
430
431 rslt = esas2r_disc_passthru_dev_info(a, rq);
432 break;
433 case DCS_PT_DEV_ADDR:
434
435 rslt = esas2r_disc_passthru_dev_addr(a, rq);
436 break;
437 case DCS_DISC_DONE:
438
439 dc->flags &= ~(DCF_DEV_CHANGE | DCF_DEV_SCAN);
440 break;
441
442 default:
443
444 esas2r_bugon();
445 dc->state = DCS_DISC_DONE;
446 break;
447 }
448
449 if (rslt)
450 return true;
451 }
452
453 /* Discovery is done...for now. */
454 rq->interrupt_cx = NULL;
455
456 if (!(a->flags & AF_DISC_PENDING))
457 esas2r_disc_fix_curr_requests(a);
458
459 esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
460
461 /* Start the next discovery. */
462 return esas2r_disc_start_port(a);
463}
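For orientation, the state flow driven by the switch above, reconstructed from the handlers in this file:

	/*
	 * DCS_BLOCK_DEV_SCAN -> DCS_RAID_GRP_INFO
	 * DCS_RAID_GRP_INFO  -> DCS_PART_INFO     (group online/degraded)
	 *                    -> DCS_PT_DEV_INFO   (group query failed)
	 *                    -> DCS_DISC_DONE     (groups exhausted)
	 * DCS_PART_INFO      -> DCS_RAID_GRP_INFO (next group)
	 * DCS_PT_DEV_INFO    -> DCS_PT_DEV_ADDR   (physical ID present)
	 *                    -> DCS_DISC_DONE     (device query failed)
	 * DCS_PT_DEV_ADDR    -> DCS_PT_DEV_INFO   (scan) or DCS_DEV_ADD (change)
	 * DCS_DEV_RMV        -> DCS_DEV_ADD
	 * DCS_DEV_ADD        -> DCS_PT_DEV_ADDR   (passthrough) or DCS_DISC_DONE
	 */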
464
465static bool esas2r_disc_start_request(struct esas2r_adapter *a,
466 struct esas2r_request *rq)
467{
468 unsigned long flags;
469
470 /* Set the timeout to a minimum value. */
471 if (rq->timeout < ESAS2R_DEFAULT_TMO)
472 rq->timeout = ESAS2R_DEFAULT_TMO;
473
474 /*
475 * Override the request type to distinguish discovery requests. If we
476 * end up deferring the request, esas2r_disc_local_start_request()
477 * will be called to restart it.
478 */
479 rq->req_type = RT_DISC_REQ;
480
481 spin_lock_irqsave(&a->queue_lock, flags);
482
483 if (!(a->flags & (AF_CHPRST_PENDING | AF_FLASHING)))
484 esas2r_disc_local_start_request(a, rq);
485 else
486 list_add_tail(&rq->req_list, &a->defer_list);
487
488 spin_unlock_irqrestore(&a->queue_lock, flags);
489
490 return true;
491}
492
493void esas2r_disc_local_start_request(struct esas2r_adapter *a,
494 struct esas2r_request *rq)
495{
496 esas2r_trace_enter();
497
498 list_add_tail(&rq->req_list, &a->active_list);
499
500 esas2r_start_vda_request(a, rq);
501
502 esas2r_trace_exit();
503
504 return;
505}
506
507static void esas2r_disc_abort(struct esas2r_adapter *a,
508 struct esas2r_request *rq)
509{
510 struct esas2r_disc_context *dc =
511 (struct esas2r_disc_context *)rq->interrupt_cx;
512
513 esas2r_trace_enter();
514
515 /* abort the current discovery */
516
517 dc->state = DCS_DISC_DONE;
518
519 esas2r_trace_exit();
520}
521
522static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
523 struct esas2r_request *rq)
524{
525 struct esas2r_disc_context *dc =
526 (struct esas2r_disc_context *)rq->interrupt_cx;
527 bool rslt;
528
529 esas2r_trace_enter();
530
531 esas2r_rq_init_request(rq, a);
532
533 esas2r_build_mgt_req(a,
534 rq,
535 VDAMGT_DEV_SCAN,
536 0,
537 0,
538 0,
539 NULL);
540
541 rq->comp_cb = esas2r_disc_block_dev_scan_cb;
542
543 rq->timeout = 30000;
544 rq->interrupt_cx = dc;
545
546 rslt = esas2r_disc_start_request(a, rq);
547
548 esas2r_trace_exit();
549
550 return rslt;
551}
552
553static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
554 struct esas2r_request *rq)
555{
556 struct esas2r_disc_context *dc =
557 (struct esas2r_disc_context *)rq->interrupt_cx;
558 unsigned long flags;
559
560 esas2r_trace_enter();
561
562 spin_lock_irqsave(&a->mem_lock, flags);
563
564 if (rq->req_stat == RS_SUCCESS)
565 dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
566
567 dc->state = DCS_RAID_GRP_INFO;
568 dc->raid_grp_ix = 0;
569
570 esas2r_rq_destroy_request(rq, a);
571
572 /* continue discovery if it's interrupt driven */
573
574 if (!(dc->flags & DCF_POLLED))
575 esas2r_disc_continue(a, rq);
576
577 spin_unlock_irqrestore(&a->mem_lock, flags);
578
579 esas2r_trace_exit();
580}
581
582static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
583 struct esas2r_request *rq)
584{
585 struct esas2r_disc_context *dc =
586 (struct esas2r_disc_context *)rq->interrupt_cx;
587 bool rslt;
588 struct atto_vda_grp_info *grpinfo;
589
590 esas2r_trace_enter();
591
592 esas2r_trace("raid_group_idx: %d", dc->raid_grp_ix);
593
594 if (dc->raid_grp_ix >= VDA_MAX_RAID_GROUPS) {
595 dc->state = DCS_DISC_DONE;
596
597 esas2r_trace_exit();
598
599 return false;
600 }
601
602 esas2r_rq_init_request(rq, a);
603
604 grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;
605
606 memset(grpinfo, 0, sizeof(struct atto_vda_grp_info));
607
608 esas2r_build_mgt_req(a,
609 rq,
610 VDAMGT_GRP_INFO,
611 dc->scan_gen,
612 0,
613 sizeof(struct atto_vda_grp_info),
614 NULL);
615
616 grpinfo->grp_index = dc->raid_grp_ix;
617
618 rq->comp_cb = esas2r_disc_raid_grp_info_cb;
619
620 rq->interrupt_cx = dc;
621
622 rslt = esas2r_disc_start_request(a, rq);
623
624 esas2r_trace_exit();
625
626 return rslt;
627}
628
629static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
630 struct esas2r_request *rq)
631{
632 struct esas2r_disc_context *dc =
633 (struct esas2r_disc_context *)rq->interrupt_cx;
634 unsigned long flags;
635 struct atto_vda_grp_info *grpinfo;
636
637 esas2r_trace_enter();
638
639 spin_lock_irqsave(&a->mem_lock, flags);
640
641 if (rq->req_stat == RS_SCAN_GEN) {
642 dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
643 dc->raid_grp_ix = 0;
644 goto done;
645 }
646
647 if (rq->req_stat == RS_SUCCESS) {
648 grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;
649
650 if (grpinfo->status != VDA_GRP_STAT_ONLINE
651 && grpinfo->status != VDA_GRP_STAT_DEGRADED) {
652 /* go to the next group. */
653
654 dc->raid_grp_ix++;
655 } else {
656 memcpy(&dc->raid_grp_name[0],
657 &grpinfo->grp_name[0],
658 sizeof(grpinfo->grp_name));
659
660 dc->interleave = le32_to_cpu(grpinfo->interleave);
661 dc->block_size = le32_to_cpu(grpinfo->block_size);
662
663 dc->state = DCS_PART_INFO;
664 dc->part_num = 0;
665 }
666 } else {
667		if (rq->req_stat != RS_GRP_INVALID) {
668 esas2r_log(ESAS2R_LOG_WARN,
669 "A request for RAID group info failed - "
670 "returned with %x",
671 rq->req_stat);
672 }
673
674 dc->dev_ix = 0;
675 dc->state = DCS_PT_DEV_INFO;
676 }
677
678done:
679
680 esas2r_rq_destroy_request(rq, a);
681
682 /* continue discovery if it's interrupt driven */
683
684 if (!(dc->flags & DCF_POLLED))
685 esas2r_disc_continue(a, rq);
686
687 spin_unlock_irqrestore(&a->mem_lock, flags);
688
689 esas2r_trace_exit();
690}
691
692static bool esas2r_disc_part_info(struct esas2r_adapter *a,
693 struct esas2r_request *rq)
694{
695 struct esas2r_disc_context *dc =
696 (struct esas2r_disc_context *)rq->interrupt_cx;
697 bool rslt;
698 struct atto_vdapart_info *partinfo;
699
700 esas2r_trace_enter();
701
702 esas2r_trace("part_num: %d", dc->part_num);
703
704 if (dc->part_num >= VDA_MAX_PARTITIONS) {
705 dc->state = DCS_RAID_GRP_INFO;
706 dc->raid_grp_ix++;
707
708 esas2r_trace_exit();
709
710 return false;
711 }
712
713 esas2r_rq_init_request(rq, a);
714
715 partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;
716
717 memset(partinfo, 0, sizeof(struct atto_vdapart_info));
718
719 esas2r_build_mgt_req(a,
720 rq,
721 VDAMGT_PART_INFO,
722 dc->scan_gen,
723 0,
724 sizeof(struct atto_vdapart_info),
725 NULL);
726
727 partinfo->part_no = dc->part_num;
728
729 memcpy(&partinfo->grp_name[0],
730 &dc->raid_grp_name[0],
731 sizeof(partinfo->grp_name));
732
733 rq->comp_cb = esas2r_disc_part_info_cb;
734
735 rq->interrupt_cx = dc;
736
737 rslt = esas2r_disc_start_request(a, rq);
738
739 esas2r_trace_exit();
740
741 return rslt;
742}
743
744static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
745 struct esas2r_request *rq)
746{
747 struct esas2r_disc_context *dc =
748 (struct esas2r_disc_context *)rq->interrupt_cx;
749 unsigned long flags;
750 struct atto_vdapart_info *partinfo;
751
752 esas2r_trace_enter();
753
754 spin_lock_irqsave(&a->mem_lock, flags);
755
756 if (rq->req_stat == RS_SCAN_GEN) {
757 dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
758 dc->raid_grp_ix = 0;
759 dc->state = DCS_RAID_GRP_INFO;
760 } else if (rq->req_stat == RS_SUCCESS) {
761 partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;
762
763 dc->part_num = partinfo->part_no;
764
765 dc->curr_virt_id = le16_to_cpu(partinfo->target_id);
766
767 esas2r_targ_db_add_raid(a, dc);
768
769 dc->part_num++;
770 } else {
771		if (rq->req_stat != RS_PART_LAST) {
772 esas2r_log(ESAS2R_LOG_WARN,
773 "A request for RAID group partition info "
774 "failed - status:%d", rq->req_stat);
775 }
776
777 dc->state = DCS_RAID_GRP_INFO;
778 dc->raid_grp_ix++;
779 }
780
781 esas2r_rq_destroy_request(rq, a);
782
783 /* continue discovery if it's interrupt driven */
784
785 if (!(dc->flags & DCF_POLLED))
786 esas2r_disc_continue(a, rq);
787
788 spin_unlock_irqrestore(&a->mem_lock, flags);
789
790 esas2r_trace_exit();
791}
792
793static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
794 struct esas2r_request *rq)
795{
796 struct esas2r_disc_context *dc =
797 (struct esas2r_disc_context *)rq->interrupt_cx;
798 bool rslt;
799 struct atto_vda_devinfo *devinfo;
800
801 esas2r_trace_enter();
802
803 esas2r_trace("dev_ix: %d", dc->dev_ix);
804
805 esas2r_rq_init_request(rq, a);
806
807 devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;
808
809 memset(devinfo, 0, sizeof(struct atto_vda_devinfo));
810
811 esas2r_build_mgt_req(a,
812 rq,
813 VDAMGT_DEV_PT_INFO,
814 dc->scan_gen,
815 dc->dev_ix,
816 sizeof(struct atto_vda_devinfo),
817 NULL);
818
819 rq->comp_cb = esas2r_disc_passthru_dev_info_cb;
820
821 rq->interrupt_cx = dc;
822
823 rslt = esas2r_disc_start_request(a, rq);
824
825 esas2r_trace_exit();
826
827 return rslt;
828}
829
830static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
831 struct esas2r_request *rq)
832{
833 struct esas2r_disc_context *dc =
834 (struct esas2r_disc_context *)rq->interrupt_cx;
835 unsigned long flags;
836 struct atto_vda_devinfo *devinfo;
837
838 esas2r_trace_enter();
839
840 spin_lock_irqsave(&a->mem_lock, flags);
841
842 if (rq->req_stat == RS_SCAN_GEN) {
843 dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
844 dc->dev_ix = 0;
845 dc->state = DCS_PT_DEV_INFO;
846 } else if (rq->req_stat == RS_SUCCESS) {
847 devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;
848
849 dc->dev_ix = le16_to_cpu(rq->func_rsp.mgt_rsp.dev_index);
850
851 dc->curr_virt_id = le16_to_cpu(devinfo->target_id);
852
853 if (le16_to_cpu(devinfo->features) & VDADEVFEAT_PHYS_ID) {
854 dc->curr_phys_id =
855 le16_to_cpu(devinfo->phys_target_id);
856 dc->dev_addr_type = ATTO_GDA_AT_PORT;
857 dc->state = DCS_PT_DEV_ADDR;
858
859 esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
860 esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
861 } else {
862 dc->dev_ix++;
863 }
864 } else {
865		if (rq->req_stat != RS_DEV_INVALID) {
866 esas2r_log(ESAS2R_LOG_WARN,
867 "A request for device information failed - "
868 "status:%d", rq->req_stat);
869 }
870
871 dc->state = DCS_DISC_DONE;
872 }
873
874 esas2r_rq_destroy_request(rq, a);
875
876 /* continue discovery if it's interrupt driven */
877
878 if (!(dc->flags & DCF_POLLED))
879 esas2r_disc_continue(a, rq);
880
881 spin_unlock_irqrestore(&a->mem_lock, flags);
882
883 esas2r_trace_exit();
884}
885
886static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
887 struct esas2r_request *rq)
888{
889 struct esas2r_disc_context *dc =
890 (struct esas2r_disc_context *)rq->interrupt_cx;
891 bool rslt;
892 struct atto_ioctl *hi;
893 struct esas2r_sg_context sgc;
894
895 esas2r_trace_enter();
896
897 esas2r_rq_init_request(rq, a);
898
899 /* format the request. */
900
901 sgc.cur_offset = NULL;
902 sgc.get_phys_addr = (PGETPHYSADDR)esas2r_disc_get_phys_addr;
903 sgc.length = offsetof(struct atto_ioctl, data)
904 + sizeof(struct atto_hba_get_device_address);
905
906 esas2r_sgc_init(&sgc, a, rq, rq->vrq->ioctl.sge);
907
908 esas2r_build_ioctl_req(a, rq, sgc.length, VDA_IOCTL_HBA);
909
910 if (!esas2r_build_sg_list(a, rq, &sgc)) {
911 esas2r_rq_destroy_request(rq, a);
912
913 esas2r_trace_exit();
914
915 return false;
916 }
917
918 rq->comp_cb = esas2r_disc_passthru_dev_addr_cb;
919
920 rq->interrupt_cx = dc;
921
922 /* format the IOCTL data. */
923
924 hi = (struct atto_ioctl *)a->disc_buffer;
925
926 memset(a->disc_buffer, 0, ESAS2R_DISC_BUF_LEN);
927
928 hi->version = ATTO_VER_GET_DEV_ADDR0;
929 hi->function = ATTO_FUNC_GET_DEV_ADDR;
930 hi->flags = HBAF_TUNNEL;
931
932 hi->data.get_dev_addr.target_id = le32_to_cpu(dc->curr_phys_id);
933 hi->data.get_dev_addr.addr_type = dc->dev_addr_type;
934
935 /* start it up. */
936
937 rslt = esas2r_disc_start_request(a, rq);
938
939 esas2r_trace_exit();
940
941 return rslt;
942}
943
944static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
945 struct esas2r_request *rq)
946{
947 struct esas2r_disc_context *dc =
948 (struct esas2r_disc_context *)rq->interrupt_cx;
949 struct esas2r_target *t = NULL;
950 unsigned long flags;
951 struct atto_ioctl *hi;
952 u16 addrlen;
953
954 esas2r_trace_enter();
955
956 spin_lock_irqsave(&a->mem_lock, flags);
957
958 hi = (struct atto_ioctl *)a->disc_buffer;
959
960 if (rq->req_stat == RS_SUCCESS
961 && hi->status == ATTO_STS_SUCCESS) {
962 addrlen = le16_to_cpu(hi->data.get_dev_addr.addr_len);
963
964 if (dc->dev_addr_type == ATTO_GDA_AT_PORT) {
965 if (addrlen == sizeof(u64))
966 memcpy(&dc->sas_addr,
967 &hi->data.get_dev_addr.address[0],
968 addrlen);
969 else
970 memset(&dc->sas_addr, 0, sizeof(dc->sas_addr));
971
972 /* Get the unique identifier. */
973 dc->dev_addr_type = ATTO_GDA_AT_UNIQUE;
974
975 goto next_dev_addr;
976 } else {
977 /* Add the pass through target. */
978 if (HIBYTE(addrlen) == 0) {
979 t = esas2r_targ_db_add_pthru(a,
980 dc,
981 &hi->data.
982 get_dev_addr.
983 address[0],
984 (u8)hi->data.
985 get_dev_addr.
986 addr_len);
987
988 if (t)
989 memcpy(&t->sas_addr, &dc->sas_addr,
990 sizeof(t->sas_addr));
991 } else {
992 /* getting the back end data failed */
993
994 esas2r_log(ESAS2R_LOG_WARN,
995 "an error occurred retrieving the "
996 "back end data (%s:%d)",
997 __func__,
998 __LINE__);
999 }
1000 }
1001 } else {
1002 /* getting the back end data failed */
1003
1004 esas2r_log(ESAS2R_LOG_WARN,
1005 "an error occurred retrieving the back end data - "
1006 "rq->req_stat:%d hi->status:%d",
1007 rq->req_stat, hi->status);
1008 }
1009
1010 /* proceed to the next device. */
1011
1012 if (dc->flags & DCF_DEV_SCAN) {
1013 dc->dev_ix++;
1014 dc->state = DCS_PT_DEV_INFO;
1015 } else if (dc->flags & DCF_DEV_CHANGE) {
1016 dc->curr_targ++;
1017 dc->state = DCS_DEV_ADD;
1018 } else {
1019 esas2r_bugon();
1020 }
1021
1022next_dev_addr:
1023 esas2r_rq_destroy_request(rq, a);
1024
1025 /* continue discovery if it's interrupt driven */
1026
1027 if (!(dc->flags & DCF_POLLED))
1028 esas2r_disc_continue(a, rq);
1029
1030 spin_unlock_irqrestore(&a->mem_lock, flags);
1031
1032 esas2r_trace_exit();
1033}
1034
1035static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr)
1036{
1037 struct esas2r_adapter *a = sgc->adapter;
1038
1039 if (sgc->length > ESAS2R_DISC_BUF_LEN)
1040 esas2r_bugon();
1041
1042 *addr = a->uncached_phys
1043 + (u64)((u8 *)a->disc_buffer - a->uncached);
1044
1045 return sgc->length;
1046}
1047
1048static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
1049 struct esas2r_request *rq)
1050{
1051 struct esas2r_disc_context *dc =
1052 (struct esas2r_disc_context *)rq->interrupt_cx;
1053 struct esas2r_target *t;
1054 struct esas2r_target *t2;
1055
1056 esas2r_trace_enter();
1057
1058 /* process removals. */
1059
1060 for (t = a->targetdb; t < a->targetdb_end; t++) {
1061 if (t->new_target_state != TS_NOT_PRESENT)
1062 continue;
1063
1064 t->new_target_state = TS_INVALID;
1065
1066 /* remove the right target! */
1067
1068 t2 =
1069 esas2r_targ_db_find_by_virt_id(a,
1070 esas2r_targ_get_id(t,
1071 a));
1072
1073 if (t2)
1074 esas2r_targ_db_remove(a, t2);
1075 }
1076
1077 /* removals complete. process arrivals. */
1078
1079 dc->state = DCS_DEV_ADD;
1080 dc->curr_targ = a->targetdb;
1081
1082 esas2r_trace_exit();
1083
1084 return false;
1085}
1086
1087static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
1088 struct esas2r_request *rq)
1089{
1090 struct esas2r_disc_context *dc =
1091 (struct esas2r_disc_context *)rq->interrupt_cx;
1092 struct esas2r_target *t = dc->curr_targ;
1093
1094 if (t >= a->targetdb_end) {
1095 /* done processing state changes. */
1096
1097 dc->state = DCS_DISC_DONE;
1098 } else if (t->new_target_state == TS_PRESENT) {
1099 struct atto_vda_ae_lu *luevt = &t->lu_event;
1100
1101 esas2r_trace_enter();
1102
1103 /* clear this now in case more events come in. */
1104
1105 t->new_target_state = TS_INVALID;
1106
1107 /* setup the discovery context for adding this device. */
1108
1109 dc->curr_virt_id = esas2r_targ_get_id(t, a);
1110
1111 if ((luevt->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id)
1112 + sizeof(struct atto_vda_ae_lu_tgt_lun_raid))
1113 && !(luevt->dwevent & VDAAE_LU_PASSTHROUGH)) {
1114 dc->block_size = luevt->id.tgtlun_raid.dwblock_size;
1115 dc->interleave = luevt->id.tgtlun_raid.dwinterleave;
1116 } else {
1117 dc->block_size = 0;
1118 dc->interleave = 0;
1119 }
1120
1121 /* determine the device type being added. */
1122
1123 if (luevt->dwevent & VDAAE_LU_PASSTHROUGH) {
1124 if (luevt->dwevent & VDAAE_LU_PHYS_ID) {
1125 dc->state = DCS_PT_DEV_ADDR;
1126 dc->dev_addr_type = ATTO_GDA_AT_PORT;
1127 dc->curr_phys_id = luevt->wphys_target_id;
1128 } else {
1129 esas2r_log(ESAS2R_LOG_WARN,
1130 "luevt->dwevent does not have the "
1131 "VDAAE_LU_PHYS_ID bit set (%s:%d)",
1132 __func__, __LINE__);
1133 }
1134 } else {
1135 dc->raid_grp_name[0] = 0;
1136
1137 esas2r_targ_db_add_raid(a, dc);
1138 }
1139
1140 esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
1141 esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
1142 esas2r_trace("dwevent: %d", luevt->dwevent);
1143
1144 esas2r_trace_exit();
1145 }
1146
1147 if (dc->state == DCS_DEV_ADD) {
1148 /* go to the next device. */
1149
1150 dc->curr_targ++;
1151 }
1152
1153 return false;
1154}
1155
1156/*
1157 * When discovery is done, find all requests on defer queue and
1158 * test if they need to be modified. If a target is no longer present
1159 * then complete the request with RS_SEL. Otherwise, update the
1160 * target_id since after a hibernate it can be a different value.
1161 * VDA does not make passthrough target IDs persistent.
1162 */
1163static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a)
1164{
1165 unsigned long flags;
1166 struct esas2r_target *t;
1167 struct esas2r_request *rq;
1168 struct list_head *element;
1169
1170 /* update virt_targ_id in any outstanding esas2r_requests */
1171
1172 spin_lock_irqsave(&a->queue_lock, flags);
1173
1174 list_for_each(element, &a->defer_list) {
1175 rq = list_entry(element, struct esas2r_request, req_list);
1176 if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
1177 t = a->targetdb + rq->target_id;
1178
1179 if (t->target_state == TS_PRESENT)
1180 rq->vrq->scsi.target_id = le16_to_cpu(
1181 t->virt_targ_id);
1182 else
1183 rq->req_stat = RS_SEL;
1184 }
1185
1186 }
1187
1188 spin_unlock_irqrestore(&a->queue_lock, flags);
1189}
diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c
new file mode 100644
index 000000000000..8582929b1fef
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_flash.c
@@ -0,0 +1,1512 @@
1
2/*
3 * linux/drivers/scsi/esas2r/esas2r_flash.c
4 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
5 *
6 * Copyright (c) 2001-2013 ATTO Technology, Inc.
7 * (mailto:linuxdrivers@attotech.com)
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * NO WARRANTY
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
29 *
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38 *
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
42 * USA.
43 */
44
45#include "esas2r.h"
46
47/* local macro defs */
48#define esas2r_nvramcalc_cksum(n) \
49 (esas2r_calc_byte_cksum((u8 *)(n), sizeof(struct esas2r_sas_nvram), \
50 SASNVR_CKSUM_SEED))
51#define esas2r_nvramcalc_xor_cksum(n) \
52 (esas2r_calc_byte_xor_cksum((u8 *)(n), \
53 sizeof(struct esas2r_sas_nvram), 0))
54
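Both wrappers checksum the entire NVRAM structure; a minimal sketch of invoking them (the helper is hypothetical, and the pass/fail convention belongs to the NVRAM read/write paths elsewhere in the driver):

	/* Hypothetical helper: compute both sums over an NVRAM image. */
	static void example_nvram_sums(struct esas2r_sas_nvram *n,
				       u8 *sum, u8 *xor_sum)
	{
		*sum = esas2r_nvramcalc_cksum(n);
		*xor_sum = esas2r_nvramcalc_xor_cksum(n);
	}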
55#define ESAS2R_FS_DRVR_VER 2
56
57static struct esas2r_sas_nvram default_sas_nvram = {
58 { 'E', 'S', 'A', 'S' }, /* signature */
59 SASNVR_VERSION, /* version */
60 0, /* checksum */
61 31, /* max_lun_for_target */
62 SASNVR_PCILAT_MAX, /* pci_latency */
63 SASNVR1_BOOT_DRVR, /* options1 */
64 SASNVR2_HEARTBEAT | SASNVR2_SINGLE_BUS /* options2 */
65 | SASNVR2_SW_MUX_CTRL,
66 SASNVR_COAL_DIS, /* int_coalescing */
67 SASNVR_CMDTHR_NONE, /* cmd_throttle */
68 3, /* dev_wait_time */
69 1, /* dev_wait_count */
70 0, /* spin_up_delay */
71 0, /* ssp_align_rate */
72 { 0x50, 0x01, 0x08, 0x60, /* sas_addr */
73 0x00, 0x00, 0x00, 0x00 },
74 { SASNVR_SPEED_AUTO }, /* phy_speed */
75 { SASNVR_MUX_DISABLED }, /* SAS multiplexing */
76 { 0 }, /* phy_flags */
77 SASNVR_SORT_SAS_ADDR, /* sort_type */
78 3, /* dpm_reqcmd_lmt */
79 3, /* dpm_stndby_time */
80 0, /* dpm_active_time */
81 { 0 }, /* phy_target_id */
82 SASNVR_VSMH_DISABLED, /* virt_ses_mode */
83 SASNVR_RWM_DEFAULT, /* read_write_mode */
84 0, /* link down timeout */
85 { 0 } /* reserved */
86};
87
88static u8 cmd_to_fls_func[] = {
89 0xFF,
90 VDA_FLASH_READ,
91 VDA_FLASH_BEGINW,
92 VDA_FLASH_WRITE,
93 VDA_FLASH_COMMIT,
94 VDA_FLASH_CANCEL
95};
96
97static u8 esas2r_calc_byte_xor_cksum(u8 *addr, u32 len, u8 seed)
98{
99 u32 cksum = seed;
100 u8 *p = (u8 *)&cksum;
101
102 while (len) {
103 if (((uintptr_t)addr & 3) == 0)
104 break;
105
106 cksum = cksum ^ *addr;
107 addr++;
108 len--;
109 }
110 while (len >= sizeof(u32)) {
111 cksum = cksum ^ *(u32 *)addr;
112 addr += 4;
113 len -= 4;
114 }
115 while (len--) {
116 cksum = cksum ^ *addr;
117 addr++;
118 }
119 return p[0] ^ p[1] ^ p[2] ^ p[3];
120}
121
122static u8 esas2r_calc_byte_cksum(void *addr, u32 len, u8 seed)
123{
124 u8 *p = (u8 *)addr;
125 u8 cksum = seed;
126
127 while (len--)
128 cksum = cksum + p[len];
129 return cksum;
130}
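The dword-folding routine above reduces to a plain bytewise XOR, since the final p[0] ^ p[1] ^ p[2] ^ p[3] collapses all four accumulator lanes; a reference form for comparison (illustrative, not part of the driver):

	/* Reference form: bytewise XOR with seed; equivalent to
	 * esas2r_calc_byte_xor_cksum() because XOR is commutative. */
	static u8 example_xor_cksum_ref(const u8 *p, u32 len, u8 seed)
	{
		u8 cksum = seed;

		while (len--)
			cksum ^= *p++;
		return cksum;
	}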
131
132/* Interrupt callback to process FM API write requests. */
133static void esas2r_fmapi_callback(struct esas2r_adapter *a,
134 struct esas2r_request *rq)
135{
136 struct atto_vda_flash_req *vrq = &rq->vrq->flash;
137 struct esas2r_flash_context *fc =
138 (struct esas2r_flash_context *)rq->interrupt_cx;
139
140 if (rq->req_stat == RS_SUCCESS) {
141 /* Last request was successful. See what to do now. */
142 switch (vrq->sub_func) {
143 case VDA_FLASH_BEGINW:
144 if (fc->sgc.cur_offset == NULL)
145 goto commit;
146
147 vrq->sub_func = VDA_FLASH_WRITE;
148 rq->req_stat = RS_PENDING;
149 break;
150
151 case VDA_FLASH_WRITE:
152commit:
153 vrq->sub_func = VDA_FLASH_COMMIT;
154 rq->req_stat = RS_PENDING;
155 rq->interrupt_cb = fc->interrupt_cb;
156 break;
157
158 default:
159 break;
160 }
161 }
162
163 if (rq->req_stat != RS_PENDING)
164 /*
165 * All done. call the real callback to complete the FM API
166 * request. We should only get here if a BEGINW or WRITE
167 * operation failed.
168 */
169 (*fc->interrupt_cb)(a, rq);
170}
171
172/*
173 * Build a flash request based on the flash context. The request status
174 * is filled in on an error.
175 */
176static void build_flash_msg(struct esas2r_adapter *a,
177 struct esas2r_request *rq)
178{
179 struct esas2r_flash_context *fc =
180 (struct esas2r_flash_context *)rq->interrupt_cx;
181 struct esas2r_sg_context *sgc = &fc->sgc;
182 u8 cksum = 0;
183
184 /* calculate the checksum */
185 if (fc->func == VDA_FLASH_BEGINW) {
186 if (sgc->cur_offset)
187 cksum = esas2r_calc_byte_xor_cksum(sgc->cur_offset,
188 sgc->length,
189 0);
190 rq->interrupt_cb = esas2r_fmapi_callback;
191 } else {
192 rq->interrupt_cb = fc->interrupt_cb;
193 }
194 esas2r_build_flash_req(a,
195 rq,
196 fc->func,
197 cksum,
198 fc->flsh_addr,
199 sgc->length);
200
201 esas2r_rq_free_sg_lists(rq, a);
202
203 /*
204 * remember the length we asked for. we have to keep track of
205 * the current amount done so we know how much to compare when
206 * doing the verification phase.
207 */
208 fc->curr_len = fc->sgc.length;
209
210 if (sgc->cur_offset) {
211 /* setup the S/G context to build the S/G table */
212 esas2r_sgc_init(sgc, a, rq, &rq->vrq->flash.data.sge[0]);
213
214 if (!esas2r_build_sg_list(a, rq, sgc)) {
215 rq->req_stat = RS_BUSY;
216 return;
217 }
218 } else {
219 fc->sgc.length = 0;
220 }
221
222 /* update the flsh_addr to the next one to write to */
223 fc->flsh_addr += fc->curr_len;
224}
225
226/* determine the method to process the flash request */
227static bool load_image(struct esas2r_adapter *a, struct esas2r_request *rq)
228{
229 /*
230 * assume we have more to do. if we return with the status set to
231 * RS_PENDING, FM API tasks will continue.
232 */
233 rq->req_stat = RS_PENDING;
234 if (a->flags & AF_DEGRADED_MODE)
235		/* not supported for now */;
236 else
237 build_flash_msg(a, rq);
238
239 return rq->req_stat == RS_PENDING;
240}
241
242/* boot image fixer uppers called before downloading the image. */
243static void fix_bios(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
244{
245 struct esas2r_component_header *ch = &fi->cmp_hdr[CH_IT_BIOS];
246 struct esas2r_pc_image *pi;
247 struct esas2r_boot_header *bh;
248
249 pi = (struct esas2r_pc_image *)((u8 *)fi + ch->image_offset);
250 bh =
251 (struct esas2r_boot_header *)((u8 *)pi +
252 le16_to_cpu(pi->header_offset));
253 bh->device_id = cpu_to_le16(a->pcid->device);
254
255 /* Recalculate the checksum in the PNP header if there */
256 if (pi->pnp_offset) {
257 u8 *pnp_header_bytes =
258 ((u8 *)pi + le16_to_cpu(pi->pnp_offset));
259
260 /* Identifier - dword that starts at byte 10 */
261 *((u32 *)&pnp_header_bytes[10]) =
262 cpu_to_le32(MAKEDWORD(a->pcid->subsystem_vendor,
263 a->pcid->subsystem_device));
264
265 /* Checksum - byte 9 */
266 pnp_header_bytes[9] -= esas2r_calc_byte_cksum(pnp_header_bytes,
267 32, 0);
268 }
269
270 /* Recalculate the checksum needed by the PC */
271 pi->checksum = pi->checksum -
272 esas2r_calc_byte_cksum((u8 *)pi, ch->length, 0);
273}
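A worked note on the two checksum adjustments above, assuming the usual option-ROM convention that all bytes of a valid image sum to zero mod 256:

	/*
	 * If S is the byte sum of the image after the IDs were patched,
	 * replacing the stored checksum C with C - S changes the total
	 * by -S, restoring a zero sum mod 256.  The PnP branch applies
	 * the same idea to its 32-byte header via the byte at offset 9.
	 */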
274
275static void fix_efi(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
276{
277 struct esas2r_component_header *ch = &fi->cmp_hdr[CH_IT_EFI];
278 u32 len = ch->length;
279 u32 offset = ch->image_offset;
280 struct esas2r_efi_image *ei;
281 struct esas2r_boot_header *bh;
282
283 while (len) {
284 u32 thislen;
285
286 ei = (struct esas2r_efi_image *)((u8 *)fi + offset);
287 bh = (struct esas2r_boot_header *)((u8 *)ei +
288 le16_to_cpu(
289 ei->header_offset));
290 bh->device_id = cpu_to_le16(a->pcid->device);
291 thislen = (u32)le16_to_cpu(bh->image_length) * 512;
292
293 if (thislen > len)
294 break;
295
296 len -= thislen;
297 offset += thislen;
298 }
299}
300
301/* Complete a FM API request with the specified status. */
302static bool complete_fmapi_req(struct esas2r_adapter *a,
303 struct esas2r_request *rq, u8 fi_stat)
304{
305 struct esas2r_flash_context *fc =
306 (struct esas2r_flash_context *)rq->interrupt_cx;
307 struct esas2r_flash_img *fi = fc->fi;
308
309 fi->status = fi_stat;
310 fi->driver_error = rq->req_stat;
311 rq->interrupt_cb = NULL;
312 rq->req_stat = RS_SUCCESS;
313
314 if (fi_stat != FI_STAT_IMG_VER)
315 memset(fc->scratch, 0, FM_BUF_SZ);
316
317 esas2r_enable_heartbeat(a);
318 esas2r_lock_clear_flags(&a->flags, AF_FLASH_LOCK);
319 return false;
320}
321
322/* Process each phase of the flash download process. */
323static void fw_download_proc(struct esas2r_adapter *a,
324 struct esas2r_request *rq)
325{
326 struct esas2r_flash_context *fc =
327 (struct esas2r_flash_context *)rq->interrupt_cx;
328 struct esas2r_flash_img *fi = fc->fi;
329 struct esas2r_component_header *ch;
330 u32 len;
331 u8 *p, *q;
332
333 /* If the previous operation failed, just return. */
334 if (rq->req_stat != RS_SUCCESS)
335 goto error;
336
337 /*
338 * If an upload just completed and the compare length is non-zero,
339 * then we just read back part of the image we just wrote. verify the
340 * section and continue reading until the entire image is verified.
341 */
342 if (fc->func == VDA_FLASH_READ
343 && fc->cmp_len) {
344 ch = &fi->cmp_hdr[fc->comp_typ];
345
346 p = fc->scratch;
347 q = (u8 *)fi /* start of the whole gob */
348 + ch->image_offset /* start of the current image */
349 + ch->length /* end of the current image */
350 - fc->cmp_len; /* where we are now */
351
352 /*
353 * NOTE - curr_len is the exact count of bytes for the read
354		 * even when the end is read and it's not a full buffer
355 */
356 for (len = fc->curr_len; len; len--)
357 if (*p++ != *q++)
358 goto error;
359
360 fc->cmp_len -= fc->curr_len; /* # left to compare */
361
362 /* Update fc and determine the length for the next upload */
363 if (fc->cmp_len > FM_BUF_SZ)
364 fc->sgc.length = FM_BUF_SZ;
365 else
366 fc->sgc.length = fc->cmp_len;
367
368 fc->sgc.cur_offset = fc->sgc_offset +
369 ((u8 *)fc->scratch - (u8 *)fi);
370 }
371
372 /*
373	 * This code uses a 'while' statement since the next component may
374	 * have a length of zero; some components are optional.  At the
375	 * end of this 'while' we set up the length for the next request,
376	 * so sgc.length may again be zero.
377 */
378 while (fc->sgc.length == 0) {
379 ch = &fi->cmp_hdr[fc->comp_typ];
380
381 switch (fc->task) {
382 case FMTSK_ERASE_BOOT:
383 /* the BIOS image is written next */
384 ch = &fi->cmp_hdr[CH_IT_BIOS];
385 if (ch->length == 0)
386 goto no_bios;
387
388 fc->task = FMTSK_WRTBIOS;
389 fc->func = VDA_FLASH_BEGINW;
390 fc->comp_typ = CH_IT_BIOS;
391 fc->flsh_addr = FLS_OFFSET_BOOT;
392 fc->sgc.length = ch->length;
393 fc->sgc.cur_offset = fc->sgc_offset +
394 ch->image_offset;
395 break;
396
397 case FMTSK_WRTBIOS:
398 /*
399 * The BIOS image has been written - read it and
400 * verify it
401 */
402 fc->task = FMTSK_READBIOS;
403 fc->func = VDA_FLASH_READ;
404 fc->flsh_addr = FLS_OFFSET_BOOT;
405 fc->cmp_len = ch->length;
406 fc->sgc.length = FM_BUF_SZ;
407 fc->sgc.cur_offset = fc->sgc_offset
408 + ((u8 *)fc->scratch -
409 (u8 *)fi);
410 break;
411
412 case FMTSK_READBIOS:
413no_bios:
414 /*
415 * Mark the component header status for the image
416 * completed
417 */
418 ch->status = CH_STAT_SUCCESS;
419
420 /* The MAC image is written next */
421 ch = &fi->cmp_hdr[CH_IT_MAC];
422 if (ch->length == 0)
423 goto no_mac;
424
425 fc->task = FMTSK_WRTMAC;
426 fc->func = VDA_FLASH_BEGINW;
427 fc->comp_typ = CH_IT_MAC;
428 fc->flsh_addr = FLS_OFFSET_BOOT
429 + fi->cmp_hdr[CH_IT_BIOS].length;
430 fc->sgc.length = ch->length;
431 fc->sgc.cur_offset = fc->sgc_offset +
432 ch->image_offset;
433 break;
434
435 case FMTSK_WRTMAC:
436 /* The MAC image has been written - read and verify */
437 fc->task = FMTSK_READMAC;
438 fc->func = VDA_FLASH_READ;
439 fc->flsh_addr -= ch->length;
440 fc->cmp_len = ch->length;
441 fc->sgc.length = FM_BUF_SZ;
442 fc->sgc.cur_offset = fc->sgc_offset
443 + ((u8 *)fc->scratch -
444 (u8 *)fi);
445 break;
446
447 case FMTSK_READMAC:
448no_mac:
449 /*
450 * Mark the component header status for the image
451 * completed
452 */
453 ch->status = CH_STAT_SUCCESS;
454
455 /* The EFI image is written next */
456 ch = &fi->cmp_hdr[CH_IT_EFI];
457 if (ch->length == 0)
458 goto no_efi;
459
460 fc->task = FMTSK_WRTEFI;
461 fc->func = VDA_FLASH_BEGINW;
462 fc->comp_typ = CH_IT_EFI;
463 fc->flsh_addr = FLS_OFFSET_BOOT
464 + fi->cmp_hdr[CH_IT_BIOS].length
465 + fi->cmp_hdr[CH_IT_MAC].length;
466 fc->sgc.length = ch->length;
467 fc->sgc.cur_offset = fc->sgc_offset +
468 ch->image_offset;
469 break;
470
471 case FMTSK_WRTEFI:
472 /* The EFI image has been written - read and verify */
473 fc->task = FMTSK_READEFI;
474 fc->func = VDA_FLASH_READ;
475 fc->flsh_addr -= ch->length;
476 fc->cmp_len = ch->length;
477 fc->sgc.length = FM_BUF_SZ;
478 fc->sgc.cur_offset = fc->sgc_offset
479 + ((u8 *)fc->scratch -
480 (u8 *)fi);
481 break;
482
483 case FMTSK_READEFI:
484no_efi:
485 /*
486 * Mark the component header status for the image
487 * completed
488 */
489 ch->status = CH_STAT_SUCCESS;
490
491 /* The CFG image is written next */
492 ch = &fi->cmp_hdr[CH_IT_CFG];
493
494 if (ch->length == 0)
495 goto no_cfg;
496 fc->task = FMTSK_WRTCFG;
497 fc->func = VDA_FLASH_BEGINW;
498 fc->comp_typ = CH_IT_CFG;
499 fc->flsh_addr = FLS_OFFSET_CPYR - ch->length;
500 fc->sgc.length = ch->length;
501 fc->sgc.cur_offset = fc->sgc_offset +
502 ch->image_offset;
503 break;
504
505 case FMTSK_WRTCFG:
506 /* The CFG image has been written - read and verify */
507 fc->task = FMTSK_READCFG;
508 fc->func = VDA_FLASH_READ;
509 fc->flsh_addr = FLS_OFFSET_CPYR - ch->length;
510 fc->cmp_len = ch->length;
511 fc->sgc.length = FM_BUF_SZ;
512 fc->sgc.cur_offset = fc->sgc_offset
513 + ((u8 *)fc->scratch -
514 (u8 *)fi);
515 break;
516
517 case FMTSK_READCFG:
518no_cfg:
519 /*
520 * Mark the component header status for the image
521 * completed
522 */
523 ch->status = CH_STAT_SUCCESS;
524
525 /*
526 * The download is complete. If in degraded mode,
527 * attempt a chip reset.
528 */
529 if (a->flags & AF_DEGRADED_MODE)
530 esas2r_local_reset_adapter(a);
531
532 a->flash_ver = fi->cmp_hdr[CH_IT_BIOS].version;
533 esas2r_print_flash_rev(a);
534
535 /* Update the type of boot image on the card */
536 memcpy(a->image_type, fi->rel_version,
537 sizeof(fi->rel_version));
538 complete_fmapi_req(a, rq, FI_STAT_SUCCESS);
539 return;
540 }
541
542 /* If verifying, don't try reading more than what's there */
543 if (fc->func == VDA_FLASH_READ
544 && fc->sgc.length > fc->cmp_len)
545 fc->sgc.length = fc->cmp_len;
546 }
547
548 /* Build the request to perform the next action */
549 if (!load_image(a, rq)) {
550error:
551 if (fc->comp_typ < fi->num_comps) {
552 ch = &fi->cmp_hdr[fc->comp_typ];
553 ch->status = CH_STAT_FAILED;
554 }
555
556 complete_fmapi_req(a, rq, FI_STAT_FAILED);
557 }
558}
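For orientation, the task sequence driven by the switch above: each boot image is written, then read back and compared, with zero-length components skipped through the no_* labels:

	/*
	 * FMTSK_ERASE_BOOT -> FMTSK_WRTBIOS -> FMTSK_READBIOS
	 *                  -> FMTSK_WRTMAC  -> FMTSK_READMAC
	 *                  -> FMTSK_WRTEFI  -> FMTSK_READEFI
	 *                  -> FMTSK_WRTCFG  -> FMTSK_READCFG -> complete
	 */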
559
560/* Determine the flash image adap_typ for this adapter */
561static u8 get_fi_adap_type(struct esas2r_adapter *a)
562{
563 u8 type;
564
565 /* use the device ID to get the correct adap_typ for this HBA */
566 switch (a->pcid->device) {
567 case ATTO_DID_INTEL_IOP348:
568 type = FI_AT_SUN_LAKE;
569 break;
570
571 case ATTO_DID_MV_88RC9580:
572 case ATTO_DID_MV_88RC9580TS:
573 case ATTO_DID_MV_88RC9580TSE:
574 case ATTO_DID_MV_88RC9580TL:
575 type = FI_AT_MV_9580;
576 break;
577
578 default:
579 type = FI_AT_UNKNWN;
580 break;
581 }
582
583 return type;
584}
585
586/* Size of config + copyright + flash_ver images, 0 for failure. */
587static u32 chk_cfg(u8 *cfg, u32 length, u32 *flash_ver)
588{
589 u16 *pw = (u16 *)cfg - 1;
590 u32 sz = 0;
591 u32 len = length;
592
593 if (len == 0)
594 len = FM_BUF_SZ;
595
596 if (flash_ver)
597 *flash_ver = 0;
598
599 while (true) {
600 u16 type;
601 u16 size;
602
603 type = le16_to_cpu(*pw--);
604 size = le16_to_cpu(*pw--);
605
606 if (type != FBT_CPYR
607 && type != FBT_SETUP
608 && type != FBT_FLASH_VER)
609 break;
610
611 if (type == FBT_FLASH_VER
612 && flash_ver)
613 *flash_ver = le32_to_cpu(*(u32 *)(pw - 1));
614
615 sz += size + (2 * sizeof(u16));
616 pw -= size / sizeof(u16);
617
618 if (sz > len - (2 * sizeof(u16)))
619 break;
620 }
621
622 /* See if we are comparing the size to the specified length */
623 if (length && sz != length)
624 return 0;
625
626 return sz;
627}
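The loop above walks backward from the end of the config region through (type, size) word pairs; a hypothetical view of one trailing record, inferred from the pointer arithmetic rather than from a published format:

	/* Inferred layout (illustrative only): the type word sits nearest
	 * the end of the region, preceded by the size word and payload. */
	struct example_cfg_record_tail {
		/* u8 data[size]; payload bytes precede these fields */
		u16 size;	/* payload size in bytes */
		u16 type;	/* FBT_CPYR, FBT_SETUP or FBT_FLASH_VER */
	};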
628
629/* Verify that the boot image is valid */
630static u8 chk_boot(u8 *boot_img, u32 length)
631{
632 struct esas2r_boot_image *bi = (struct esas2r_boot_image *)boot_img;
633 u16 hdroffset = le16_to_cpu(bi->header_offset);
634 struct esas2r_boot_header *bh;
635
636 if (bi->signature != le16_to_cpu(0xaa55)
637 || (long)hdroffset >
638 (long)(65536L - sizeof(struct esas2r_boot_header))
639 || (hdroffset & 3)
640 || (hdroffset < sizeof(struct esas2r_boot_image))
641 || ((u32)hdroffset + sizeof(struct esas2r_boot_header) > length))
642 return 0xff;
643
644 bh = (struct esas2r_boot_header *)((char *)bi + hdroffset);
645
646 if (bh->signature[0] != 'P'
647 || bh->signature[1] != 'C'
648 || bh->signature[2] != 'I'
649 || bh->signature[3] != 'R'
650 || le16_to_cpu(bh->struct_length) <
651 (u16)sizeof(struct esas2r_boot_header)
652 || bh->class_code[2] != 0x01
653 || bh->class_code[1] != 0x04
654 || bh->class_code[0] != 0x00
655 || (bh->code_type != CODE_TYPE_PC
656 && bh->code_type != CODE_TYPE_OPEN
657 && bh->code_type != CODE_TYPE_EFI))
658 return 0xff;
659
660 return bh->code_type;
661}
662
663/* The sum of all the WORDS of the image */
664static u16 calc_fi_checksum(struct esas2r_flash_context *fc)
665{
666 struct esas2r_flash_img *fi = fc->fi;
667 u16 cksum;
668 u32 len;
669 u16 *pw;
670
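	/*
	 * Sum the 16-bit words that follow the flash image header; verify_fi
	 * requires this sum to match the checksum stored in the header.
	 */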
671 for (len = (fi->length - fc->fi_hdr_len) / 2,
672 pw = (u16 *)((u8 *)fi + fc->fi_hdr_len),
673 cksum = 0;
674 len;
675 len--, pw++)
676 cksum = cksum + le16_to_cpu(*pw);
677
678 return cksum;
679}
680
681/*
682 * Verify the flash image structure. The following verifications will
683 * be performed:
684 * 1) verify the fi_version is correct
685 * 2) verify the checksum of the entire image.
686 * 3) validate the adap_typ, action and length fields.
687 *  4) validate each component header.  check the img_type and
688 *     length fields
689 *  5) validate each component image.  validate signatures and
690 * local checksums
691 */
692static bool verify_fi(struct esas2r_adapter *a,
693 struct esas2r_flash_context *fc)
694{
695 struct esas2r_flash_img *fi = fc->fi;
696 u8 type;
697 bool imgerr;
698 u16 i;
699 u32 len;
700 struct esas2r_component_header *ch;
701
702	/* Verify the length - length must be even since we do a word checksum */
703 len = fi->length;
704
705 if ((len & 1)
706 || len < fc->fi_hdr_len) {
707 fi->status = FI_STAT_LENGTH;
708 return false;
709 }
710
711 /* Get adapter type and verify type in flash image */
712 type = get_fi_adap_type(a);
713 if ((type == FI_AT_UNKNWN) || (fi->adap_typ != type)) {
714 fi->status = FI_STAT_ADAPTYP;
715 return false;
716 }
717
718 /*
719 * Loop through each component and verify the img_type and length
720 * fields.  Keep a running count of the sizes so we can verify the
721 * total size against the sum of the component sizes.
722 */
723 imgerr = false;
724
725 for (i = 0, len = 0, ch = fi->cmp_hdr;
726 i < fi->num_comps;
727 i++, ch++) {
728 bool cmperr = false;
729
730 /*
731 * Verify that the component header has the same index as the
732 * image type. The headers must be ordered correctly
733 */
734 if (i != ch->img_type) {
735 imgerr = true;
736 ch->status = CH_STAT_INVALID;
737 continue;
738 }
739
740 switch (ch->img_type) {
741 case CH_IT_BIOS:
742 type = CODE_TYPE_PC;
743 break;
744
745 case CH_IT_MAC:
746 type = CODE_TYPE_OPEN;
747 break;
748
749 case CH_IT_EFI:
750 type = CODE_TYPE_EFI;
751 break;
752 }
753
754 switch (ch->img_type) {
755 case CH_IT_FW:
756 case CH_IT_NVR:
757 break;
758
759 case CH_IT_BIOS:
760 case CH_IT_MAC:
761 case CH_IT_EFI:
762 if (ch->length & 0x1ff)
763 cmperr = true;
764
765 /* Test if component image is present */
766 if (ch->length == 0)
767 break;
768
769 /* Image is present - verify the image */
770 if (chk_boot((u8 *)fi + ch->image_offset, ch->length)
771 != type)
772 cmperr = true;
773
774 break;
775
776 case CH_IT_CFG:
777
778 /* Test if component image is present */
779 if (ch->length == 0) {
780 cmperr = true;
781 break;
782 }
783
784 /* Image is present - verify the image */
785 if (!chk_cfg((u8 *)fi + ch->image_offset + ch->length,
786 ch->length, NULL))
787 cmperr = true;
788
789 break;
790
791 default:
792
793 fi->status = FI_STAT_UNKNOWN;
794 return false;
795 }
796
797 if (cmperr) {
798 imgerr = true;
799 ch->status = CH_STAT_INVALID;
800 } else {
801 ch->status = CH_STAT_PENDING;
802 len += ch->length;
803 }
804 }
805
806 if (imgerr) {
807 fi->status = FI_STAT_MISSING;
808 return false;
809 }
810
811 /* Compare fi->length to the sum of ch->length fields */
812 if (len != fi->length - fc->fi_hdr_len) {
813 fi->status = FI_STAT_LENGTH;
814 return false;
815 }
816
817	/* Compute the checksum - it must match the value stored in the image */
818 if (fi->checksum != calc_fi_checksum(fc)) {
819 fi->status = FI_STAT_CHKSUM;
820 return false;
821 }
822
823 return true;
824}
825
826/* Fill in the FS IOCTL response data from a completed request. */
827static void esas2r_complete_fs_ioctl(struct esas2r_adapter *a,
828 struct esas2r_request *rq)
829{
830 struct esas2r_ioctl_fs *fs =
831 (struct esas2r_ioctl_fs *)rq->interrupt_cx;
832
833 if (rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
834 esas2r_enable_heartbeat(a);
835
836 fs->driver_error = rq->req_stat;
837
838 if (fs->driver_error == RS_SUCCESS)
839 fs->status = ATTO_STS_SUCCESS;
840 else
841 fs->status = ATTO_STS_FAILED;
842}
843
844/* Prepare an FS IOCTL request to be sent to the firmware. */
845bool esas2r_process_fs_ioctl(struct esas2r_adapter *a,
846 struct esas2r_ioctl_fs *fs,
847 struct esas2r_request *rq,
848 struct esas2r_sg_context *sgc)
849{
850 u8 cmdcnt = (u8)ARRAY_SIZE(cmd_to_fls_func);
851 struct esas2r_ioctlfs_command *fsc = &fs->command;
852 u8 func = 0;
853 u32 datalen;
854
855 fs->status = ATTO_STS_FAILED;
856 fs->driver_error = RS_PENDING;
857
858 if (fs->version > ESAS2R_FS_VER) {
859 fs->status = ATTO_STS_INV_VERSION;
860 return false;
861 }
862
863	if (fsc->command >= cmdcnt
864	    || (func = cmd_to_fls_func[fsc->command]) == 0xFF) {
865 fs->status = ATTO_STS_INV_FUNC;
866 return false;
867 }
868
869 if (fsc->command != ESAS2R_FS_CMD_CANCEL) {
870 if ((a->pcid->device != ATTO_DID_MV_88RC9580
871 || fs->adap_type != ESAS2R_FS_AT_ESASRAID2)
872 && (a->pcid->device != ATTO_DID_MV_88RC9580TS
873 || fs->adap_type != ESAS2R_FS_AT_TSSASRAID2)
874 && (a->pcid->device != ATTO_DID_MV_88RC9580TSE
875 || fs->adap_type != ESAS2R_FS_AT_TSSASRAID2E)
876 && (a->pcid->device != ATTO_DID_MV_88RC9580TL
877 || fs->adap_type != ESAS2R_FS_AT_TLSASHBA)) {
878 fs->status = ATTO_STS_INV_ADAPTER;
879 return false;
880 }
881
882 if (fs->driver_ver > ESAS2R_FS_DRVR_VER) {
883 fs->status = ATTO_STS_INV_DRVR_VER;
884 return false;
885 }
886 }
887
888 if (a->flags & AF_DEGRADED_MODE) {
889 fs->status = ATTO_STS_DEGRADED;
890 return false;
891 }
892
893 rq->interrupt_cb = esas2r_complete_fs_ioctl;
894 rq->interrupt_cx = fs;
895 datalen = le32_to_cpu(fsc->length);
896 esas2r_build_flash_req(a,
897 rq,
898 func,
899 fsc->checksum,
900 le32_to_cpu(fsc->flash_addr),
901 datalen);
902
903 if (func == VDA_FLASH_WRITE
904 || func == VDA_FLASH_READ) {
905 if (datalen == 0) {
906 fs->status = ATTO_STS_INV_FUNC;
907 return false;
908 }
909
910 esas2r_sgc_init(sgc, a, rq, rq->vrq->flash.data.sge);
911 sgc->length = datalen;
912
913 if (!esas2r_build_sg_list(a, rq, sgc)) {
914 fs->status = ATTO_STS_OUT_OF_RSRC;
915 return false;
916 }
917 }
918
919 if (func == VDA_FLASH_COMMIT)
920 esas2r_disable_heartbeat(a);
921
922 esas2r_start_request(a, rq);
923
924 return true;
925}
926
927static bool esas2r_flash_access(struct esas2r_adapter *a, u32 function)
928{
929 u32 starttime;
930 u32 timeout;
931 u32 intstat;
932 u32 doorbell;
933
934	/* Disable chip interrupts while we have flash access */
935 if (function == DRBL_FLASH_REQ)
936 esas2r_disable_chip_interrupts(a);
937
938 /* Issue the request to the firmware */
939 esas2r_write_register_dword(a, MU_DOORBELL_IN, function);
940
941 /* Now wait for the firmware to process it */
942 starttime = jiffies_to_msecs(jiffies);
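	/* Allow extra time when a chip reset or discovery is pending. */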
943 timeout = a->flags &
944 (AF_CHPRST_PENDING | AF_DISC_PENDING) ? 40000 : 5000;
945
946 while (true) {
947 intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
948
949 if (intstat & MU_INTSTAT_DRBL) {
950 /* Got a doorbell interrupt. Check for the function */
951 doorbell =
952 esas2r_read_register_dword(a, MU_DOORBELL_OUT);
953 esas2r_write_register_dword(a, MU_DOORBELL_OUT,
954 doorbell);
955 if (doorbell & function)
956 break;
957 }
958
959 schedule_timeout_interruptible(msecs_to_jiffies(100));
960
961 if ((jiffies_to_msecs(jiffies) - starttime) > timeout) {
962 /*
963			 * Timeout.  If we were requesting flash access,
964 * indicate we are done so the firmware knows we gave
965 * up. If this was a REQ, we also need to re-enable
966 * chip interrupts.
967 */
968 if (function == DRBL_FLASH_REQ) {
969 esas2r_hdebug("flash access timeout");
970 esas2r_write_register_dword(a, MU_DOORBELL_IN,
971 DRBL_FLASH_DONE);
972 esas2r_enable_chip_interrupts(a);
973 } else {
974 esas2r_hdebug("flash release timeout");
975 }
976
977 return false;
978 }
979 }
980
981 /* if we're done, re-enable chip interrupts */
982 if (function == DRBL_FLASH_DONE)
983 esas2r_enable_chip_interrupts(a);
984
985 return true;
986}
987
988#define WINDOW_SIZE ((signed int)MW_DATA_WINDOW_SIZE)
989
990bool esas2r_read_flash_block(struct esas2r_adapter *a,
991 void *to,
992 u32 from,
993 u32 size)
994{
995 u8 *end = (u8 *)to;
996
997 /* Try to acquire access to the flash */
998 if (!esas2r_flash_access(a, DRBL_FLASH_REQ))
999 return false;
1000
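	/* Copy the requested range out one mapped data window at a time. */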
1001 while (size) {
1002 u32 len;
1003 u32 offset;
1004 u32 iatvr;
1005
1006 if (a->flags2 & AF2_SERIAL_FLASH)
1007 iatvr = MW_DATA_ADDR_SER_FLASH + (from & -WINDOW_SIZE);
1008 else
1009 iatvr = MW_DATA_ADDR_PAR_FLASH + (from & -WINDOW_SIZE);
1010
1011 esas2r_map_data_window(a, iatvr);
1012 offset = from & (WINDOW_SIZE - 1);
1013 len = size;
1014
1015 if (len > WINDOW_SIZE - offset)
1016 len = WINDOW_SIZE - offset;
1017
1018 from += len;
1019 size -= len;
1020
1021 while (len--) {
1022 *end++ = esas2r_read_data_byte(a, offset);
1023 offset++;
1024 }
1025 }
1026
1027 /* Release flash access */
1028 esas2r_flash_access(a, DRBL_FLASH_DONE);
1029 return true;
1030}
1031
1032bool esas2r_read_flash_rev(struct esas2r_adapter *a)
1033{
1034 u8 bytes[256];
1035 u16 *pw;
1036 u16 *pwstart;
1037 u16 type;
1038 u16 size;
1039 u32 sz;
1040
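	/*
	 * Read the block that ends at the copyright image, then scan
	 * backward through its trailing (type, size) words looking for the
	 * flash_ver block.
	 */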
1041 sz = sizeof(bytes);
1042 pw = (u16 *)(bytes + sz);
1043 pwstart = (u16 *)bytes + 2;
1044
1045 if (!esas2r_read_flash_block(a, bytes, FLS_OFFSET_CPYR - sz, sz))
1046 goto invalid_rev;
1047
1048 while (pw >= pwstart) {
1049 pw--;
1050 type = le16_to_cpu(*pw);
1051 pw--;
1052 size = le16_to_cpu(*pw);
1053 pw -= size / 2;
1054
1055 if (type == FBT_CPYR
1056 || type == FBT_SETUP
1057 || pw < pwstart)
1058 continue;
1059
1060 if (type == FBT_FLASH_VER)
1061 a->flash_ver = le32_to_cpu(*(u32 *)pw);
1062
1063 break;
1064 }
1065
1066invalid_rev:
1067 return esas2r_print_flash_rev(a);
1068}
1069
1070bool esas2r_print_flash_rev(struct esas2r_adapter *a)
1071{
1072 u16 year = LOWORD(a->flash_ver);
1073 u8 day = LOBYTE(HIWORD(a->flash_ver));
1074 u8 month = HIBYTE(HIWORD(a->flash_ver));
1075
1076 if (day == 0
1077 || month == 0
1078 || day > 31
1079 || month > 12
1080 || year < 2006
1081 || year > 9999) {
1082 strcpy(a->flash_rev, "not found");
1083 a->flash_ver = 0;
1084 return false;
1085 }
1086
1087 sprintf(a->flash_rev, "%02d/%02d/%04d", month, day, year);
1088 esas2r_hdebug("flash version: %s", a->flash_rev);
1089 return true;
1090}
1091
1092/*
1093 * Find the type of boot image currently in the flash.
1094 * The chip only has a 64 KB PCIe expansion ROM,
1095 * so only one image can be flashed at a time.
1096 */
1097bool esas2r_read_image_type(struct esas2r_adapter *a)
1098{
1099 u8 bytes[256];
1100 struct esas2r_boot_image *bi;
1101 struct esas2r_boot_header *bh;
1102 u32 sz;
1103 u32 len;
1104 u32 offset;
1105
1106 /* Start at the base of the boot images and look for a valid image */
1107 sz = sizeof(bytes);
1108 len = FLS_LENGTH_BOOT;
1109 offset = 0;
1110
1111 while (true) {
1112 if (!esas2r_read_flash_block(a, bytes, FLS_OFFSET_BOOT +
1113 offset,
1114 sz))
1115 goto invalid_rev;
1116
1117 bi = (struct esas2r_boot_image *)bytes;
1118 bh = (struct esas2r_boot_header *)((u8 *)bi +
1119 le16_to_cpu(
1120 bi->header_offset));
1121 if (bi->signature != cpu_to_le16(0xAA55))
1122 goto invalid_rev;
1123
1124 if (bh->code_type == CODE_TYPE_PC) {
1125 strcpy(a->image_type, "BIOS");
1126
1127 return true;
1128 } else if (bh->code_type == CODE_TYPE_EFI) {
1129 struct esas2r_efi_image *ei;
1130
1131 /*
1132			 * So we have an EFI image.  There are several machine
1133			 * types, so check which architecture this one targets.
1134 */
1135 ei = (struct esas2r_efi_image *)bytes;
1136
1137 switch (le16_to_cpu(ei->machine_type)) {
1138 case EFI_MACHINE_IA32:
1139 strcpy(a->image_type, "EFI 32-bit");
1140 return true;
1141
1142 case EFI_MACHINE_IA64:
1143 strcpy(a->image_type, "EFI itanium");
1144 return true;
1145
1146 case EFI_MACHINE_X64:
1147 strcpy(a->image_type, "EFI 64-bit");
1148 return true;
1149
1150 case EFI_MACHINE_EBC:
1151 strcpy(a->image_type, "EFI EBC");
1152 return true;
1153
1154 default:
1155 goto invalid_rev;
1156 }
1157 } else {
1158 u32 thislen;
1159
1160 /* jump to the next image */
1161 thislen = (u32)le16_to_cpu(bh->image_length) * 512;
1162 if (thislen == 0
1163 || thislen + offset > len
1164 || bh->indicator == INDICATOR_LAST)
1165 break;
1166
1167 offset += thislen;
1168 }
1169 }
1170
1171invalid_rev:
1172 strcpy(a->image_type, "no boot images");
1173 return false;
1174}
1175
1176/*
1177 * Read and validate current NVRAM parameters by accessing
1178 * physical NVRAM directly.  If the currently stored parameters are
1179 * invalid, use the defaults.
1180 */
1181bool esas2r_nvram_read_direct(struct esas2r_adapter *a)
1182{
1183 bool result;
1184
1185 if (down_interruptible(&a->nvram_semaphore))
1186 return false;
1187
1188	if (!esas2r_read_flash_block(a, a->nvram, FLS_OFFSET_NVR,
1189				     sizeof(struct esas2r_sas_nvram))) {
1190		esas2r_hdebug("NVRAM read failed, using defaults");
1191		result = false;
1192	} else {
1193		result = esas2r_nvram_validate(a);
1194	}
1195
1196	up(&a->nvram_semaphore);
1197
1198	return result;
1199}
1200
1201/* Interrupt callback to process NVRAM completions. */
1202static void esas2r_nvram_callback(struct esas2r_adapter *a,
1203 struct esas2r_request *rq)
1204{
1205 struct atto_vda_flash_req *vrq = &rq->vrq->flash;
1206
1207 if (rq->req_stat == RS_SUCCESS) {
1208 /* last request was successful. see what to do now. */
1209
1210 switch (vrq->sub_func) {
1211 case VDA_FLASH_BEGINW:
1212 vrq->sub_func = VDA_FLASH_WRITE;
1213 rq->req_stat = RS_PENDING;
1214 break;
1215
1216 case VDA_FLASH_WRITE:
1217 vrq->sub_func = VDA_FLASH_COMMIT;
1218 rq->req_stat = RS_PENDING;
1219 break;
1220
1221 case VDA_FLASH_READ:
1222 esas2r_nvram_validate(a);
1223 break;
1224
1225 case VDA_FLASH_COMMIT:
1226 default:
1227 break;
1228 }
1229 }
1230
1231 if (rq->req_stat != RS_PENDING) {
1232 /* update the NVRAM state */
1233 if (rq->req_stat == RS_SUCCESS)
1234 esas2r_lock_set_flags(&a->flags, AF_NVR_VALID);
1235 else
1236 esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID);
1237
1238 esas2r_enable_heartbeat(a);
1239
1240 up(&a->nvram_semaphore);
1241 }
1242}
1243
1244/*
1245 * Write the contents of nvram to the adapter's physical NVRAM.
1246 * The cached copy of the NVRAM is also updated.
1247 */
1248bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
1249 struct esas2r_sas_nvram *nvram)
1250{
1251 struct esas2r_sas_nvram *n = nvram;
1252 u8 sas_address_bytes[8];
1253 u32 *sas_address_dwords = (u32 *)&sas_address_bytes[0];
1254 struct atto_vda_flash_req *vrq = &rq->vrq->flash;
1255
1256 if (a->flags & AF_DEGRADED_MODE)
1257 return false;
1258
1259 if (down_interruptible(&a->nvram_semaphore))
1260 return false;
1261
1262 if (n == NULL)
1263 n = a->nvram;
1264
1265 /* check the validity of the settings */
1266 if (n->version > SASNVR_VERSION) {
1267 up(&a->nvram_semaphore);
1268 return false;
1269 }
1270
1271 memcpy(&sas_address_bytes[0], n->sas_addr, 8);
1272
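	/*
	 * The SAS address must be an NAA 5 WWN built from ATTO's IEEE OUI
	 * (00:10:86), i.e. it must begin 50:01:08:6x, and the remaining
	 * unique bits must be nonzero.
	 */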
1273 if (sas_address_bytes[0] != 0x50
1274 || sas_address_bytes[1] != 0x01
1275 || sas_address_bytes[2] != 0x08
1276 || (sas_address_bytes[3] & 0xF0) != 0x60
1277 || ((sas_address_bytes[3] & 0x0F) | sas_address_dwords[1]) == 0) {
1278 up(&a->nvram_semaphore);
1279 return false;
1280 }
1281
1282 if (n->spin_up_delay > SASNVR_SPINUP_MAX)
1283 n->spin_up_delay = SASNVR_SPINUP_MAX;
1284
1285 n->version = SASNVR_VERSION;
1286 n->checksum = n->checksum - esas2r_nvramcalc_cksum(n);
1287 memcpy(a->nvram, n, sizeof(struct esas2r_sas_nvram));
1288
1289 /* write the NVRAM */
1290 n = a->nvram;
1291 esas2r_disable_heartbeat(a);
1292
1293 esas2r_build_flash_req(a,
1294 rq,
1295 VDA_FLASH_BEGINW,
1296 esas2r_nvramcalc_xor_cksum(n),
1297 FLS_OFFSET_NVR,
1298 sizeof(struct esas2r_sas_nvram));
1299
1300 if (a->flags & AF_LEGACY_SGE_MODE) {
1301
1302 vrq->data.sge[0].length =
1303 cpu_to_le32(SGE_LAST |
1304 sizeof(struct esas2r_sas_nvram));
1305 vrq->data.sge[0].address = cpu_to_le64(
1306 a->uncached_phys + (u64)((u8 *)n - a->uncached));
1307 } else {
1308 vrq->data.prde[0].ctl_len =
1309 cpu_to_le32(sizeof(struct esas2r_sas_nvram));
1310 vrq->data.prde[0].address = cpu_to_le64(
1311 a->uncached_phys
1312 + (u64)((u8 *)n - a->uncached));
1313 }
1314 rq->interrupt_cb = esas2r_nvram_callback;
1315 esas2r_start_request(a, rq);
1316 return true;
1317}
1318
1319/* Validate the cached NVRAM.  If the NVRAM is invalid, load the defaults. */
1320bool esas2r_nvram_validate(struct esas2r_adapter *a)
1321{
1322 struct esas2r_sas_nvram *n = a->nvram;
1323 bool rslt = false;
1324
1325 if (n->signature[0] != 'E'
1326 || n->signature[1] != 'S'
1327 || n->signature[2] != 'A'
1328 || n->signature[3] != 'S') {
1329 esas2r_hdebug("invalid NVRAM signature");
1330 } else if (esas2r_nvramcalc_cksum(n)) {
1331 esas2r_hdebug("invalid NVRAM checksum");
1332 } else if (n->version > SASNVR_VERSION) {
1333 esas2r_hdebug("invalid NVRAM version");
1334 } else {
1335 esas2r_lock_set_flags(&a->flags, AF_NVR_VALID);
1336 rslt = true;
1337 }
1338
1339	if (!rslt) {
1340 esas2r_hdebug("using defaults");
1341 esas2r_nvram_set_defaults(a);
1342 }
1343
1344 return rslt;
1345}
1346
1347/*
1348 * Set the cached NVRAM to defaults.  Note that this function sets the default
1349 * NVRAM when it has been determined that the physical NVRAM is invalid.
1350 * In this case, the SAS address is fabricated.
1351 */
1352void esas2r_nvram_set_defaults(struct esas2r_adapter *a)
1353{
1354 struct esas2r_sas_nvram *n = a->nvram;
1355 u32 time = jiffies_to_msecs(jiffies);
1356
1357 esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID);
1358 memcpy(n, &default_sas_nvram, sizeof(struct esas2r_sas_nvram));
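	/*
	 * Fabricate a reasonably unique SAS address from the current time
	 * and the adapter's PCI bus/device/function.
	 */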
1359 n->sas_addr[3] |= 0x0F;
1360 n->sas_addr[4] = HIBYTE(LOWORD(time));
1361 n->sas_addr[5] = LOBYTE(LOWORD(time));
1362 n->sas_addr[6] = a->pcid->bus->number;
1363 n->sas_addr[7] = a->pcid->devfn;
1364}
1365
1366void esas2r_nvram_get_defaults(struct esas2r_adapter *a,
1367 struct esas2r_sas_nvram *nvram)
1368{
1369 u8 sas_addr[8];
1370
1371 /*
1372 * in case we are copying the defaults into the adapter, copy the SAS
1373 * address out first.
1374 */
1375 memcpy(&sas_addr[0], a->nvram->sas_addr, 8);
1376 memcpy(nvram, &default_sas_nvram, sizeof(struct esas2r_sas_nvram));
1377 memcpy(&nvram->sas_addr[0], &sas_addr[0], 8);
1378}
1379
1380bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi,
1381 struct esas2r_request *rq, struct esas2r_sg_context *sgc)
1382{
1383 struct esas2r_flash_context *fc = &a->flash_context;
1384 u8 j;
1385 struct esas2r_component_header *ch;
1386
1387 if (esas2r_lock_set_flags(&a->flags, AF_FLASH_LOCK) & AF_FLASH_LOCK) {
1388 /* flag was already set */
1389 fi->status = FI_STAT_BUSY;
1390 return false;
1391 }
1392
1393 memcpy(&fc->sgc, sgc, sizeof(struct esas2r_sg_context));
1394 sgc = &fc->sgc;
1395 fc->fi = fi;
1396 fc->sgc_offset = sgc->cur_offset;
1397 rq->req_stat = RS_SUCCESS;
1398 rq->interrupt_cx = fc;
1399
1400 switch (fi->fi_version) {
1401 case FI_VERSION_1:
1402 fc->scratch = ((struct esas2r_flash_img *)fi)->scratch_buf;
1403 fc->num_comps = FI_NUM_COMPS_V1;
1404 fc->fi_hdr_len = sizeof(struct esas2r_flash_img);
1405 break;
1406
1407 default:
1408 return complete_fmapi_req(a, rq, FI_STAT_IMG_VER);
1409 }
1410
1411 if (a->flags & AF_DEGRADED_MODE)
1412 return complete_fmapi_req(a, rq, FI_STAT_DEGRADED);
1413
1414 switch (fi->action) {
1415 case FI_ACT_DOWN: /* Download the components */
1416 /* Verify the format of the flash image */
1417 if (!verify_fi(a, fc))
1418 return complete_fmapi_req(a, rq, fi->status);
1419
1420 /* Adjust the BIOS fields that are dependent on the HBA */
1421 ch = &fi->cmp_hdr[CH_IT_BIOS];
1422
1423 if (ch->length)
1424 fix_bios(a, fi);
1425
1426 /* Adjust the EFI fields that are dependent on the HBA */
1427 ch = &fi->cmp_hdr[CH_IT_EFI];
1428
1429 if (ch->length)
1430 fix_efi(a, fi);
1431
1432 /*
1433 * Since the image was just modified, compute the checksum on
1434 * the modified image. First update the CRC for the composite
1435 * expansion ROM image.
1436 */
1437 fi->checksum = calc_fi_checksum(fc);
1438
1439 /* Disable the heartbeat */
1440 esas2r_disable_heartbeat(a);
1441
1442 /* Now start up the download sequence */
1443 fc->task = FMTSK_ERASE_BOOT;
1444 fc->func = VDA_FLASH_BEGINW;
1445 fc->comp_typ = CH_IT_CFG;
1446 fc->flsh_addr = FLS_OFFSET_BOOT;
1447 fc->sgc.length = FLS_LENGTH_BOOT;
1448 fc->sgc.cur_offset = NULL;
1449
1450 /* Setup the callback address */
1451 fc->interrupt_cb = fw_download_proc;
1452 break;
1453
1454 case FI_ACT_UPSZ: /* Get upload sizes */
1455 fi->adap_typ = get_fi_adap_type(a);
1456 fi->flags = 0;
1457 fi->num_comps = fc->num_comps;
1458 fi->length = fc->fi_hdr_len;
1459
1460 /* Report the type of boot image in the rel_version string */
1461 memcpy(fi->rel_version, a->image_type,
1462 sizeof(fi->rel_version));
1463
1464 /* Build the component headers */
1465 for (j = 0, ch = fi->cmp_hdr;
1466 j < fi->num_comps;
1467 j++, ch++) {
1468 ch->img_type = j;
1469 ch->status = CH_STAT_PENDING;
1470 ch->length = 0;
1471 ch->version = 0xffffffff;
1472 ch->image_offset = 0;
1473 ch->pad[0] = 0;
1474 ch->pad[1] = 0;
1475 }
1476
1477 if (a->flash_ver != 0) {
1478 fi->cmp_hdr[CH_IT_BIOS].version =
1479 fi->cmp_hdr[CH_IT_MAC].version =
1480 fi->cmp_hdr[CH_IT_EFI].version =
1481 fi->cmp_hdr[CH_IT_CFG].version
1482 = a->flash_ver;
1483
1484 fi->cmp_hdr[CH_IT_BIOS].status =
1485 fi->cmp_hdr[CH_IT_MAC].status =
1486 fi->cmp_hdr[CH_IT_EFI].status =
1487 fi->cmp_hdr[CH_IT_CFG].status =
1488 CH_STAT_SUCCESS;
1489
1490 return complete_fmapi_req(a, rq, FI_STAT_SUCCESS);
1491 }
1492
1493 /* fall through */
1494
1495 case FI_ACT_UP: /* Upload the components */
1496 default:
1497 return complete_fmapi_req(a, rq, FI_STAT_INVALID);
1498 }
1499
1500 /*
1501 * If we make it here, fc has been setup to do the first task. Call
1502 * load_image to format the request, start it, and get out. The
1503 * interrupt code will call the callback when the first message is
1504 * complete.
1505 */
1506 if (!load_image(a, rq))
1507 return complete_fmapi_req(a, rq, FI_STAT_FAILED);
1508
1509 esas2r_start_request(a, rq);
1510
1511 return true;
1512}
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
new file mode 100644
index 000000000000..3a798e7d5c56
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -0,0 +1,1773 @@
1/*
2 * linux/drivers/scsi/esas2r/esas2r_init.c
3 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
6 * (mailto:linuxdrivers@attotech.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * NO WARRANTY
19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 * solely responsible for determining the appropriateness of using and
24 * distributing the Program and assumes all risks associated with its
25 * exercise of rights under this Agreement, including but not limited to
26 * the risks and costs of program errors, damage to or loss of data,
27 * programs or equipment, and unavailability or interruption of operations.
28 *
29 * DISCLAIMER OF LIABILITY
30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37 *
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
41 * USA.
42 */
43
44#include "esas2r.h"
45
46static bool esas2r_initmem_alloc(struct esas2r_adapter *a,
47 struct esas2r_mem_desc *mem_desc,
48 u32 align)
49{
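	/*
	 * Over-allocate by the requested alignment so that both the virtual
	 * and physical addresses can be aligned within the buffer.
	 */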
50 mem_desc->esas2r_param = mem_desc->size + align;
51 mem_desc->virt_addr = NULL;
52 mem_desc->phys_addr = 0;
53 mem_desc->esas2r_data = dma_alloc_coherent(&a->pcid->dev,
54 (size_t)mem_desc->
55 esas2r_param,
56 (dma_addr_t *)&mem_desc->
57 phys_addr,
58 GFP_KERNEL);
59
60 if (mem_desc->esas2r_data == NULL) {
61 esas2r_log(ESAS2R_LOG_CRIT,
62 "failed to allocate %lu bytes of consistent memory!",
63 (long
64 unsigned
65 int)mem_desc->esas2r_param);
66 return false;
67 }
68
69 mem_desc->virt_addr = PTR_ALIGN(mem_desc->esas2r_data, align);
70 mem_desc->phys_addr = ALIGN(mem_desc->phys_addr, align);
71 memset(mem_desc->virt_addr, 0, mem_desc->size);
72 return true;
73}
74
75static void esas2r_initmem_free(struct esas2r_adapter *a,
76 struct esas2r_mem_desc *mem_desc)
77{
78 if (mem_desc->virt_addr == NULL)
79 return;
80
81 /*
82 * Careful! phys_addr and virt_addr may have been adjusted from the
83 * original allocation in order to return the desired alignment. That
84 * means we have to use the original address (in esas2r_data) and size
85 * (esas2r_param) and calculate the original physical address based on
86 * the difference between the requested and actual allocation size.
87 */
88 if (mem_desc->phys_addr) {
89 int unalign = ((u8 *)mem_desc->virt_addr) -
90 ((u8 *)mem_desc->esas2r_data);
91
92 dma_free_coherent(&a->pcid->dev,
93 (size_t)mem_desc->esas2r_param,
94 mem_desc->esas2r_data,
95 (dma_addr_t)(mem_desc->phys_addr - unalign));
96 } else {
97 kfree(mem_desc->esas2r_data);
98 }
99
100 mem_desc->virt_addr = NULL;
101}
102
103static bool alloc_vda_req(struct esas2r_adapter *a,
104 struct esas2r_request *rq)
105{
106 struct esas2r_mem_desc *memdesc = kzalloc(
107 sizeof(struct esas2r_mem_desc), GFP_KERNEL);
108
109 if (memdesc == NULL) {
110 esas2r_hdebug("could not alloc mem for vda request memdesc\n");
111 return false;
112 }
113
114 memdesc->size = sizeof(union atto_vda_req) +
115 ESAS2R_DATA_BUF_LEN;
116
117 if (!esas2r_initmem_alloc(a, memdesc, 256)) {
118 esas2r_hdebug("could not alloc mem for vda request\n");
119 kfree(memdesc);
120 return false;
121 }
122
123 a->num_vrqs++;
124 list_add(&memdesc->next_desc, &a->vrq_mds_head);
125
126 rq->vrq_md = memdesc;
127 rq->vrq = (union atto_vda_req *)memdesc->virt_addr;
128 rq->vrq->scsi.handle = a->num_vrqs;
129
130 return true;
131}
132
133static void esas2r_unmap_regions(struct esas2r_adapter *a)
134{
135 if (a->regs)
136 iounmap((void __iomem *)a->regs);
137
138 a->regs = NULL;
139
140 pci_release_region(a->pcid, 2);
141
142 if (a->data_window)
143 iounmap((void __iomem *)a->data_window);
144
145 a->data_window = NULL;
146
147 pci_release_region(a->pcid, 0);
148}
149
150static int esas2r_map_regions(struct esas2r_adapter *a)
151{
152 int error;
153
154 a->regs = NULL;
155 a->data_window = NULL;
156
157 error = pci_request_region(a->pcid, 2, a->name);
158 if (error != 0) {
159 esas2r_log(ESAS2R_LOG_CRIT,
160 "pci_request_region(2) failed, error %d",
161 error);
162
163 return error;
164 }
165
166 a->regs = (void __force *)ioremap(pci_resource_start(a->pcid, 2),
167 pci_resource_len(a->pcid, 2));
168 if (a->regs == NULL) {
169 esas2r_log(ESAS2R_LOG_CRIT,
170 "ioremap failed for regs mem region\n");
171 pci_release_region(a->pcid, 2);
172 return -EFAULT;
173 }
174
175 error = pci_request_region(a->pcid, 0, a->name);
176 if (error != 0) {
177 esas2r_log(ESAS2R_LOG_CRIT,
178			   "pci_request_region(0) failed, error %d",
179 error);
180 esas2r_unmap_regions(a);
181 return error;
182 }
183
184 a->data_window = (void __force *)ioremap(pci_resource_start(a->pcid,
185 0),
186 pci_resource_len(a->pcid, 0));
187 if (a->data_window == NULL) {
188 esas2r_log(ESAS2R_LOG_CRIT,
189 "ioremap failed for data_window mem region\n");
190 esas2r_unmap_regions(a);
191 return -EFAULT;
192 }
193
194 return 0;
195}
196
197static void esas2r_setup_interrupts(struct esas2r_adapter *a, int intr_mode)
198{
199 int i;
200
201 /* Set up interrupt mode based on the requested value */
202 switch (intr_mode) {
203 case INTR_MODE_LEGACY:
204use_legacy_interrupts:
205 a->intr_mode = INTR_MODE_LEGACY;
206 break;
207
208 case INTR_MODE_MSI:
209 i = pci_enable_msi(a->pcid);
210 if (i != 0) {
211 esas2r_log(ESAS2R_LOG_WARN,
212 "failed to enable MSI for adapter %d, "
213 "falling back to legacy interrupts "
214 "(err=%d)", a->index,
215 i);
216 goto use_legacy_interrupts;
217 }
218 a->intr_mode = INTR_MODE_MSI;
219 esas2r_lock_set_flags(&a->flags2, AF2_MSI_ENABLED);
220 break;
221
222
223 default:
224 esas2r_log(ESAS2R_LOG_WARN,
225 "unknown interrupt_mode %d requested, "
226 "falling back to legacy interrupt",
227			   intr_mode);
228 goto use_legacy_interrupts;
229 }
230}
231
232static void esas2r_claim_interrupts(struct esas2r_adapter *a)
233{
234 unsigned long flags = IRQF_DISABLED;
235
236 if (a->intr_mode == INTR_MODE_LEGACY)
237 flags |= IRQF_SHARED;
238
239 esas2r_log(ESAS2R_LOG_INFO,
240 "esas2r_claim_interrupts irq=%d (%p, %s, %x)",
241 a->pcid->irq, a, a->name, flags);
242
243 if (request_irq(a->pcid->irq,
244 (a->intr_mode ==
245 INTR_MODE_LEGACY) ? esas2r_interrupt :
246 esas2r_msi_interrupt,
247 flags,
248 a->name,
249 a)) {
250 esas2r_log(ESAS2R_LOG_CRIT, "unable to request IRQ %02X",
251 a->pcid->irq);
252 return;
253 }
254
255 esas2r_lock_set_flags(&a->flags2, AF2_IRQ_CLAIMED);
256 esas2r_log(ESAS2R_LOG_INFO,
257 "claimed IRQ %d flags: 0x%lx",
258 a->pcid->irq, flags);
259}
260
261int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
262 int index)
263{
264 struct esas2r_adapter *a;
265 u64 bus_addr = 0;
266 int i;
267 void *next_uncached;
268 struct esas2r_request *first_request, *last_request;
269
270 if (index >= MAX_ADAPTERS) {
271 esas2r_log(ESAS2R_LOG_CRIT,
272 "tried to init invalid adapter index %u!",
273 index);
274 return 0;
275 }
276
277 if (esas2r_adapters[index]) {
278 esas2r_log(ESAS2R_LOG_CRIT,
279 "tried to init existing adapter index %u!",
280 index);
281 return 0;
282 }
283
284 a = (struct esas2r_adapter *)host->hostdata;
285 memset(a, 0, sizeof(struct esas2r_adapter));
286 a->pcid = pcid;
287 a->host = host;
288
289 if (sizeof(dma_addr_t) > 4) {
290 const uint64_t required_mask = dma_get_required_mask
291 (&pcid->dev);
292 if (required_mask > DMA_BIT_MASK(32)
293 && !pci_set_dma_mask(pcid, DMA_BIT_MASK(64))
294 && !pci_set_consistent_dma_mask(pcid,
295 DMA_BIT_MASK(64))) {
296 esas2r_log_dev(ESAS2R_LOG_INFO,
297 &(a->pcid->dev),
298 "64-bit PCI addressing enabled\n");
299 } else if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
300 && !pci_set_consistent_dma_mask(pcid,
301 DMA_BIT_MASK(32))) {
302 esas2r_log_dev(ESAS2R_LOG_INFO,
303 &(a->pcid->dev),
304 "32-bit PCI addressing enabled\n");
305 } else {
306 esas2r_log(ESAS2R_LOG_CRIT,
307 "failed to set DMA mask");
308 esas2r_kill_adapter(index);
309 return 0;
310 }
311 } else {
312 if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
313 && !pci_set_consistent_dma_mask(pcid,
314 DMA_BIT_MASK(32))) {
315 esas2r_log_dev(ESAS2R_LOG_INFO,
316 &(a->pcid->dev),
317 "32-bit PCI addressing enabled\n");
318 } else {
319 esas2r_log(ESAS2R_LOG_CRIT,
320 "failed to set DMA mask");
321 esas2r_kill_adapter(index);
322 return 0;
323 }
324 }
325 esas2r_adapters[index] = a;
326 sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index);
327 esas2r_debug("new adapter %p, name %s", a, a->name);
328 spin_lock_init(&a->request_lock);
329 spin_lock_init(&a->fw_event_lock);
330 sema_init(&a->fm_api_semaphore, 1);
331 sema_init(&a->fs_api_semaphore, 1);
332 sema_init(&a->nvram_semaphore, 1);
333
334 esas2r_fw_event_off(a);
335 snprintf(a->fw_event_q_name, ESAS2R_KOBJ_NAME_LEN, "esas2r/%d",
336 a->index);
337 a->fw_event_q = create_singlethread_workqueue(a->fw_event_q_name);
338
339 init_waitqueue_head(&a->buffered_ioctl_waiter);
340 init_waitqueue_head(&a->nvram_waiter);
341 init_waitqueue_head(&a->fm_api_waiter);
342 init_waitqueue_head(&a->fs_api_waiter);
343 init_waitqueue_head(&a->vda_waiter);
344
345 INIT_LIST_HEAD(&a->general_req.req_list);
346 INIT_LIST_HEAD(&a->active_list);
347 INIT_LIST_HEAD(&a->defer_list);
348 INIT_LIST_HEAD(&a->free_sg_list_head);
349 INIT_LIST_HEAD(&a->avail_request);
350 INIT_LIST_HEAD(&a->vrq_mds_head);
351 INIT_LIST_HEAD(&a->fw_event_list);
352
353 first_request = (struct esas2r_request *)((u8 *)(a + 1));
354
355 for (last_request = first_request, i = 1; i < num_requests;
356 last_request++, i++) {
357 INIT_LIST_HEAD(&last_request->req_list);
358 list_add_tail(&last_request->comp_list, &a->avail_request);
359 if (!alloc_vda_req(a, last_request)) {
360 esas2r_log(ESAS2R_LOG_CRIT,
361 "failed to allocate a VDA request!");
362 esas2r_kill_adapter(index);
363 return 0;
364 }
365 }
366
367 esas2r_debug("requests: %p to %p (%d, %d)", first_request,
368 last_request,
369 sizeof(*first_request),
370 num_requests);
371
372 if (esas2r_map_regions(a) != 0) {
373 esas2r_log(ESAS2R_LOG_CRIT, "could not map PCI regions!");
374 esas2r_kill_adapter(index);
375 return 0;
376 }
377
378 a->index = index;
379
380 /* interrupts will be disabled until we are done with init */
381 atomic_inc(&a->dis_ints_cnt);
382 atomic_inc(&a->disable_cnt);
383 a->flags |= AF_CHPRST_PENDING
384 | AF_DISC_PENDING
385 | AF_FIRST_INIT
386 | AF_LEGACY_SGE_MODE;
387
388 a->init_msg = ESAS2R_INIT_MSG_START;
389 a->max_vdareq_size = 128;
390 a->build_sgl = esas2r_build_sg_list_sge;
391
392 esas2r_setup_interrupts(a, interrupt_mode);
393
394 a->uncached_size = esas2r_get_uncached_size(a);
395 a->uncached = dma_alloc_coherent(&pcid->dev,
396 (size_t)a->uncached_size,
397 (dma_addr_t *)&bus_addr,
398 GFP_KERNEL);
399 if (a->uncached == NULL) {
400 esas2r_log(ESAS2R_LOG_CRIT,
401 "failed to allocate %d bytes of consistent memory!",
402 a->uncached_size);
403 esas2r_kill_adapter(index);
404 return 0;
405 }
406
407 a->uncached_phys = bus_addr;
408
409 esas2r_debug("%d bytes uncached memory allocated @ %p (%x:%x)",
410 a->uncached_size,
411 a->uncached,
412 upper_32_bits(bus_addr),
413 lower_32_bits(bus_addr));
414 memset(a->uncached, 0, a->uncached_size);
415 next_uncached = a->uncached;
416
417 if (!esas2r_init_adapter_struct(a,
418 &next_uncached)) {
419 esas2r_log(ESAS2R_LOG_CRIT,
420 "failed to initialize adapter structure (2)!");
421 esas2r_kill_adapter(index);
422 return 0;
423 }
424
425 tasklet_init(&a->tasklet,
426 esas2r_adapter_tasklet,
427 (unsigned long)a);
428
429 /*
430 * Disable chip interrupts to prevent spurious interrupts
431 * until we claim the IRQ.
432 */
433 esas2r_disable_chip_interrupts(a);
434 esas2r_check_adapter(a);
435
436 if (!esas2r_init_adapter_hw(a, true))
437 esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize hardware!");
438 else
439 esas2r_debug("esas2r_init_adapter ok");
440
441 esas2r_claim_interrupts(a);
442
443 if (a->flags2 & AF2_IRQ_CLAIMED)
444 esas2r_enable_chip_interrupts(a);
445
446 esas2r_lock_set_flags(&a->flags2, AF2_INIT_DONE);
447 if (!(a->flags & AF_DEGRADED_MODE))
448 esas2r_kickoff_timer(a);
449 esas2r_debug("esas2r_init_adapter done for %p (%d)",
450 a, a->disable_cnt);
451
452 return 1;
453}
454
455static void esas2r_adapter_power_down(struct esas2r_adapter *a,
456 int power_management)
457{
458 struct esas2r_mem_desc *memdesc, *next;
459
460 if ((a->flags2 & AF2_INIT_DONE)
461 && (!(a->flags & AF_DEGRADED_MODE))) {
462 if (!power_management) {
463 del_timer_sync(&a->timer);
464 tasklet_kill(&a->tasklet);
465 }
466 esas2r_power_down(a);
467
468 /*
469 * There are versions of firmware that do not handle the sync
470 * cache command correctly. Stall here to ensure that the
471 * cache is lazily flushed.
472 */
473 mdelay(500);
474 esas2r_debug("chip halted");
475 }
476
477 /* Remove sysfs binary files */
478 if (a->sysfs_fw_created) {
479 sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fw);
480 a->sysfs_fw_created = 0;
481 }
482
483 if (a->sysfs_fs_created) {
484 sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fs);
485 a->sysfs_fs_created = 0;
486 }
487
488 if (a->sysfs_vda_created) {
489 sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_vda);
490 a->sysfs_vda_created = 0;
491 }
492
493 if (a->sysfs_hw_created) {
494 sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_hw);
495 a->sysfs_hw_created = 0;
496 }
497
498 if (a->sysfs_live_nvram_created) {
499 sysfs_remove_bin_file(&a->host->shost_dev.kobj,
500 &bin_attr_live_nvram);
501 a->sysfs_live_nvram_created = 0;
502 }
503
504 if (a->sysfs_default_nvram_created) {
505 sysfs_remove_bin_file(&a->host->shost_dev.kobj,
506 &bin_attr_default_nvram);
507 a->sysfs_default_nvram_created = 0;
508 }
509
510 /* Clean up interrupts */
511 if (a->flags2 & AF2_IRQ_CLAIMED) {
512 esas2r_log_dev(ESAS2R_LOG_INFO,
513 &(a->pcid->dev),
514 "free_irq(%d) called", a->pcid->irq);
515
516 free_irq(a->pcid->irq, a);
517 esas2r_debug("IRQ released");
518 esas2r_lock_clear_flags(&a->flags2, AF2_IRQ_CLAIMED);
519 }
520
521 if (a->flags2 & AF2_MSI_ENABLED) {
522 pci_disable_msi(a->pcid);
523 esas2r_lock_clear_flags(&a->flags2, AF2_MSI_ENABLED);
524 esas2r_debug("MSI disabled");
525 }
526
527 if (a->inbound_list_md.virt_addr)
528 esas2r_initmem_free(a, &a->inbound_list_md);
529
530 if (a->outbound_list_md.virt_addr)
531 esas2r_initmem_free(a, &a->outbound_list_md);
532
533 list_for_each_entry_safe(memdesc, next, &a->free_sg_list_head,
534 next_desc) {
535 esas2r_initmem_free(a, memdesc);
536 }
537
538 /* Following frees everything allocated via alloc_vda_req */
539 list_for_each_entry_safe(memdesc, next, &a->vrq_mds_head, next_desc) {
540 esas2r_initmem_free(a, memdesc);
541 list_del(&memdesc->next_desc);
542 kfree(memdesc);
543 }
544
545 kfree(a->first_ae_req);
546 a->first_ae_req = NULL;
547
548 kfree(a->sg_list_mds);
549 a->sg_list_mds = NULL;
550
551 kfree(a->req_table);
552 a->req_table = NULL;
553
554 if (a->regs) {
555 esas2r_unmap_regions(a);
556 a->regs = NULL;
557 a->data_window = NULL;
558 esas2r_debug("regions unmapped");
559 }
560}
561
562/* Release/free allocated resources for specified adapters. */
563void esas2r_kill_adapter(int i)
564{
565 struct esas2r_adapter *a = esas2r_adapters[i];
566
567 if (a) {
568 unsigned long flags;
569 struct workqueue_struct *wq;
570 esas2r_debug("killing adapter %p [%d] ", a, i);
571 esas2r_fw_event_off(a);
572 esas2r_adapter_power_down(a, 0);
573 if (esas2r_buffered_ioctl &&
574 (a->pcid == esas2r_buffered_ioctl_pcid)) {
575 dma_free_coherent(&a->pcid->dev,
576 (size_t)esas2r_buffered_ioctl_size,
577 esas2r_buffered_ioctl,
578 esas2r_buffered_ioctl_addr);
579 esas2r_buffered_ioctl = NULL;
580 }
581
582 if (a->vda_buffer) {
583 dma_free_coherent(&a->pcid->dev,
584 (size_t)VDA_MAX_BUFFER_SIZE,
585 a->vda_buffer,
586 (dma_addr_t)a->ppvda_buffer);
587 a->vda_buffer = NULL;
588 }
589 if (a->fs_api_buffer) {
590 dma_free_coherent(&a->pcid->dev,
591 (size_t)a->fs_api_buffer_size,
592 a->fs_api_buffer,
593 (dma_addr_t)a->ppfs_api_buffer);
594 a->fs_api_buffer = NULL;
595 }
596
597 kfree(a->local_atto_ioctl);
598 a->local_atto_ioctl = NULL;
599
600 spin_lock_irqsave(&a->fw_event_lock, flags);
601 wq = a->fw_event_q;
602 a->fw_event_q = NULL;
603 spin_unlock_irqrestore(&a->fw_event_lock, flags);
604 if (wq)
605 destroy_workqueue(wq);
606
607 if (a->uncached) {
608 dma_free_coherent(&a->pcid->dev,
609 (size_t)a->uncached_size,
610 a->uncached,
611 (dma_addr_t)a->uncached_phys);
612 a->uncached = NULL;
613 esas2r_debug("uncached area freed");
614 }
615
616 esas2r_log_dev(ESAS2R_LOG_INFO,
617 &(a->pcid->dev),
618 "pci_disable_device() called. msix_enabled: %d "
619 "msi_enabled: %d irq: %d pin: %d",
620 a->pcid->msix_enabled,
621 a->pcid->msi_enabled,
622 a->pcid->irq,
623 a->pcid->pin);
624
625 esas2r_log_dev(ESAS2R_LOG_INFO,
626 &(a->pcid->dev),
627 "before pci_disable_device() enable_cnt: %d",
628 a->pcid->enable_cnt.counter);
629
630 pci_disable_device(a->pcid);
631 esas2r_log_dev(ESAS2R_LOG_INFO,
632 &(a->pcid->dev),
633 "after pci_disable_device() enable_cnt: %d",
634 a->pcid->enable_cnt.counter);
635
636 esas2r_log_dev(ESAS2R_LOG_INFO,
637 &(a->pcid->dev),
638 "pci_set_drv_data(%p, NULL) called",
639 a->pcid);
640
641 pci_set_drvdata(a->pcid, NULL);
642 esas2r_adapters[i] = NULL;
643
644 if (a->flags2 & AF2_INIT_DONE) {
645 esas2r_lock_clear_flags(&a->flags2,
646 AF2_INIT_DONE);
647
648 esas2r_lock_set_flags(&a->flags,
649 AF_DEGRADED_MODE);
650
651 esas2r_log_dev(ESAS2R_LOG_INFO,
652 &(a->host->shost_gendev),
653 "scsi_remove_host() called");
654
655 scsi_remove_host(a->host);
656
657 esas2r_log_dev(ESAS2R_LOG_INFO,
658 &(a->host->shost_gendev),
659 "scsi_host_put() called");
660
661 scsi_host_put(a->host);
662 }
663 }
664}
665
666int esas2r_cleanup(struct Scsi_Host *host)
667{
668 struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
669 int index;
670
671 if (host == NULL) {
672 int i;
673
674 esas2r_debug("esas2r_cleanup everything");
675 for (i = 0; i < MAX_ADAPTERS; i++)
676 esas2r_kill_adapter(i);
677 return -1;
678 }
679
680 esas2r_debug("esas2r_cleanup called for host %p", host);
681 index = a->index;
682 esas2r_kill_adapter(index);
683 return index;
684}
685
686int esas2r_suspend(struct pci_dev *pdev, pm_message_t state)
687{
688 struct Scsi_Host *host = pci_get_drvdata(pdev);
689 u32 device_state;
690 struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
691
692 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "suspending adapter()");
693 if (!a)
694 return -ENODEV;
695
696 esas2r_adapter_power_down(a, 1);
697 device_state = pci_choose_state(pdev, state);
698 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
699 "pci_save_state() called");
700 pci_save_state(pdev);
701 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
702 "pci_disable_device() called");
703 pci_disable_device(pdev);
704 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
705 "pci_set_power_state() called");
706 pci_set_power_state(pdev, device_state);
707 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "esas2r_suspend(): 0");
708 return 0;
709}
710
711int esas2r_resume(struct pci_dev *pdev)
712{
713 struct Scsi_Host *host = pci_get_drvdata(pdev);
714 struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
715 int rez;
716
717 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "resuming adapter()");
718 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
719 "pci_set_power_state(PCI_D0) "
720 "called");
721 pci_set_power_state(pdev, PCI_D0);
722 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
723 "pci_enable_wake(PCI_D0, 0) "
724 "called");
725 pci_enable_wake(pdev, PCI_D0, 0);
726 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
727 "pci_restore_state() called");
728 pci_restore_state(pdev);
729 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
730 "pci_enable_device() called");
731 rez = pci_enable_device(pdev);
732 pci_set_master(pdev);
733
734 if (!a) {
735 rez = -ENODEV;
736 goto error_exit;
737 }
738
739 if (esas2r_map_regions(a) != 0) {
740 esas2r_log(ESAS2R_LOG_CRIT, "could not re-map PCI regions!");
741 rez = -ENOMEM;
742 goto error_exit;
743 }
744
745	/* Set up interrupt mode */
746 esas2r_setup_interrupts(a, a->intr_mode);
747
748 /*
749 * Disable chip interrupts to prevent spurious interrupts until we
750 * claim the IRQ.
751 */
752 esas2r_disable_chip_interrupts(a);
753 if (!esas2r_power_up(a, true)) {
754 esas2r_debug("yikes, esas2r_power_up failed");
755 rez = -ENOMEM;
756 goto error_exit;
757 }
758
759 esas2r_claim_interrupts(a);
760
761 if (a->flags2 & AF2_IRQ_CLAIMED) {
762 /*
763 * Now that system interrupt(s) are claimed, we can enable
764 * chip interrupts.
765 */
766 esas2r_enable_chip_interrupts(a);
767 esas2r_kickoff_timer(a);
768 } else {
769 esas2r_debug("yikes, unable to claim IRQ");
770 esas2r_log(ESAS2R_LOG_CRIT, "could not re-claim IRQ!");
771 rez = -ENOMEM;
772 goto error_exit;
773 }
774
775error_exit:
776 esas2r_log_dev(ESAS2R_LOG_CRIT, &(pdev->dev), "esas2r_resume(): %d",
777 rez);
778 return rez;
779}
780
781bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)
782{
783 esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE);
784 esas2r_log(ESAS2R_LOG_CRIT,
785 "setting adapter to degraded mode: %s\n", error_str);
786 return false;
787}
788
789u32 esas2r_get_uncached_size(struct esas2r_adapter *a)
790{
791 return sizeof(struct esas2r_sas_nvram)
792 + ALIGN(ESAS2R_DISC_BUF_LEN, 8)
793 + ALIGN(sizeof(u32), 8) /* outbound list copy pointer */
794 + 8
795 + (num_sg_lists * (u16)sgl_page_size)
796 + ALIGN((num_requests + num_ae_requests + 1 +
797 ESAS2R_LIST_EXTRA) *
798 sizeof(struct esas2r_inbound_list_source_entry),
799 8)
800 + ALIGN((num_requests + num_ae_requests + 1 +
801 ESAS2R_LIST_EXTRA) *
802 sizeof(struct atto_vda_ob_rsp), 8)
803 + 256; /* VDA request and buffer align */
804}
805
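/* Clamp the PCIe maximum read request size to 512 bytes. */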
806static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
807{
808 int pcie_cap_reg;
809
810 pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
811	if (pcie_cap_reg) {
812 u16 devcontrol;
813
814 pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
815 &devcontrol);
816
817 if ((devcontrol & PCI_EXP_DEVCTL_READRQ) > 0x2000) {
818 esas2r_log(ESAS2R_LOG_INFO,
819 "max read request size > 512B");
820
821 devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
822 devcontrol |= 0x2000;
823 pci_write_config_word(a->pcid,
824 pcie_cap_reg + PCI_EXP_DEVCTL,
825 devcontrol);
826 }
827 }
828}
829
830/*
831 * Determine the organization of the uncached data area and
832 * finish initializing the adapter structure
833 */
834bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
835 void **uncached_area)
836{
837 u32 i;
838 u8 *high;
839 struct esas2r_inbound_list_source_entry *element;
840 struct esas2r_request *rq;
841 struct esas2r_mem_desc *sgl;
842
843 spin_lock_init(&a->sg_list_lock);
844 spin_lock_init(&a->mem_lock);
845 spin_lock_init(&a->queue_lock);
846
847 a->targetdb_end = &a->targetdb[ESAS2R_MAX_TARGETS];
848
849 if (!alloc_vda_req(a, &a->general_req)) {
850 esas2r_hdebug(
851 "failed to allocate a VDA request for the general req!");
852 return false;
853 }
854
855 /* allocate requests for asynchronous events */
856 a->first_ae_req =
857 kzalloc(num_ae_requests * sizeof(struct esas2r_request),
858 GFP_KERNEL);
859
860 if (a->first_ae_req == NULL) {
861 esas2r_log(ESAS2R_LOG_CRIT,
862 "failed to allocate memory for asynchronous events");
863 return false;
864 }
865
866 /* allocate the S/G list memory descriptors */
867 a->sg_list_mds = kzalloc(
868 num_sg_lists * sizeof(struct esas2r_mem_desc), GFP_KERNEL);
869
870 if (a->sg_list_mds == NULL) {
871 esas2r_log(ESAS2R_LOG_CRIT,
872 "failed to allocate memory for s/g list descriptors");
873 return false;
874 }
875
876 /* allocate the request table */
877 a->req_table =
878 kzalloc((num_requests + num_ae_requests +
879 1) * sizeof(struct esas2r_request *), GFP_KERNEL);
880
881 if (a->req_table == NULL) {
882 esas2r_log(ESAS2R_LOG_CRIT,
883 "failed to allocate memory for the request table");
884 return false;
885 }
886
887 /* initialize PCI configuration space */
888 esas2r_init_pci_cfg_space(a);
889
890 /*
891 * the thunder_stream boards all have a serial flash part that has a
892 * different base address on the AHB bus.
893 */
894 if ((a->pcid->subsystem_vendor == ATTO_VENDOR_ID)
895 && (a->pcid->subsystem_device & ATTO_SSDID_TBT))
896 a->flags2 |= AF2_THUNDERBOLT;
897
898 if (a->flags2 & AF2_THUNDERBOLT)
899 a->flags2 |= AF2_SERIAL_FLASH;
900
901 if (a->pcid->subsystem_device == ATTO_TLSH_1068)
902 a->flags2 |= AF2_THUNDERLINK;
903
904 /* Uncached Area */
905 high = (u8 *)*uncached_area;
906
907 /* initialize the scatter/gather table pages */
908
909 for (i = 0, sgl = a->sg_list_mds; i < num_sg_lists; i++, sgl++) {
910 sgl->size = sgl_page_size;
911
912 list_add_tail(&sgl->next_desc, &a->free_sg_list_head);
913
914 if (!esas2r_initmem_alloc(a, sgl, ESAS2R_SGL_ALIGN)) {
915 /* Allow the driver to load if the minimum count met. */
916 if (i < NUM_SGL_MIN)
917 return false;
918 break;
919 }
920 }
921
922 /* compute the size of the lists */
923 a->list_size = num_requests + ESAS2R_LIST_EXTRA;
924
925 /* allocate the inbound list */
926 a->inbound_list_md.size = a->list_size *
927 sizeof(struct
928 esas2r_inbound_list_source_entry);
929
930 if (!esas2r_initmem_alloc(a, &a->inbound_list_md, ESAS2R_LIST_ALIGN)) {
931 esas2r_hdebug("failed to allocate IB list");
932 return false;
933 }
934
935 /* allocate the outbound list */
936 a->outbound_list_md.size = a->list_size *
937 sizeof(struct atto_vda_ob_rsp);
938
939 if (!esas2r_initmem_alloc(a, &a->outbound_list_md,
940 ESAS2R_LIST_ALIGN)) {
941		esas2r_hdebug("failed to allocate OB list");
942 return false;
943 }
944
945 /* allocate the NVRAM structure */
946 a->nvram = (struct esas2r_sas_nvram *)high;
947 high += sizeof(struct esas2r_sas_nvram);
948
949 /* allocate the discovery buffer */
950 a->disc_buffer = high;
951 high += ESAS2R_DISC_BUF_LEN;
952 high = PTR_ALIGN(high, 8);
953
954 /* allocate the outbound list copy pointer */
955 a->outbound_copy = (u32 volatile *)high;
956 high += sizeof(u32);
957
958 if (!(a->flags & AF_NVR_VALID))
959 esas2r_nvram_set_defaults(a);
960
961 /* update the caller's uncached memory area pointer */
962 *uncached_area = (void *)high;
963
964 /* initialize the allocated memory */
965 if (a->flags & AF_FIRST_INIT) {
966 memset(a->req_table, 0,
967 (num_requests + num_ae_requests +
968 1) * sizeof(struct esas2r_request *));
969
970 esas2r_targ_db_initialize(a);
971
972 /* prime parts of the inbound list */
973 element =
974 (struct esas2r_inbound_list_source_entry *)a->
975 inbound_list_md.
976 virt_addr;
977
978 for (i = 0; i < a->list_size; i++) {
979 element->address = 0;
980 element->reserved = 0;
981 element->length = cpu_to_le32(HWILSE_INTERFACE_F0
982 | (sizeof(union
983 atto_vda_req)
984 /
985 sizeof(u32)));
986 element++;
987 }
988
989 /* init the AE requests */
990 for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++,
991 i++) {
992 INIT_LIST_HEAD(&rq->req_list);
993 if (!alloc_vda_req(a, rq)) {
994 esas2r_hdebug(
995 "failed to allocate a VDA request!");
996 return false;
997 }
998
999 esas2r_rq_init_request(rq, a);
1000
1001 /* override the completion function */
1002 rq->comp_cb = esas2r_ae_complete;
1003 }
1004 }
1005
1006 return true;
1007}
1008
1009/* This code will verify that the chip is operational. */
1010bool esas2r_check_adapter(struct esas2r_adapter *a)
1011{
1012 u32 starttime;
1013 u32 doorbell;
1014 u64 ppaddr;
1015 u32 dw;
1016
1017 /*
1018 * if the chip reset detected flag is set, we can bypass a bunch of
1019 * stuff.
1020 */
1021 if (a->flags & AF_CHPRST_DETECTED)
1022 goto skip_chip_reset;
1023
1024 /*
1025 * BEFORE WE DO ANYTHING, disable the chip interrupts! the boot driver
1026 * may have left them enabled or we may be recovering from a fault.
1027 */
1028 esas2r_write_register_dword(a, MU_INT_MASK_OUT, ESAS2R_INT_DIS_MASK);
1029 esas2r_flush_register_dword(a, MU_INT_MASK_OUT);
1030
1031 /*
1032 * wait for the firmware to become ready by forcing an interrupt and
1033 * waiting for a response.
1034 */
1035 starttime = jiffies_to_msecs(jiffies);
1036
1037 while (true) {
1038 esas2r_force_interrupt(a);
1039 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
1040 if (doorbell == 0xFFFFFFFF) {
1041 /*
1042 * Give the firmware up to two seconds to enable
1043 * register access after a reset.
1044 */
1045 if ((jiffies_to_msecs(jiffies) - starttime) > 2000)
1046 return esas2r_set_degraded_mode(a,
1047 "unable to access registers");
1048 } else if (doorbell & DRBL_FORCE_INT) {
1049 u32 ver = (doorbell & DRBL_FW_VER_MSK);
1050
1051 /*
1052 * This driver supports version 0 and version 1 of
1053 * the API
1054 */
1055 esas2r_write_register_dword(a, MU_DOORBELL_OUT,
1056 doorbell);
1057
1058 if (ver == DRBL_FW_VER_0) {
1059 esas2r_lock_set_flags(&a->flags,
1060 AF_LEGACY_SGE_MODE);
1061
1062 a->max_vdareq_size = 128;
1063 a->build_sgl = esas2r_build_sg_list_sge;
1064 } else if (ver == DRBL_FW_VER_1) {
1065 esas2r_lock_clear_flags(&a->flags,
1066 AF_LEGACY_SGE_MODE);
1067
1068 a->max_vdareq_size = 1024;
1069 a->build_sgl = esas2r_build_sg_list_prd;
1070 } else {
1071 return esas2r_set_degraded_mode(a,
1072 "unknown firmware version");
1073 }
1074 break;
1075 }
1076
1077 schedule_timeout_interruptible(msecs_to_jiffies(100));
1078
1079 if ((jiffies_to_msecs(jiffies) - starttime) > 180000) {
1080 esas2r_hdebug("FW ready TMO");
1081 esas2r_bugon();
1082
1083 return esas2r_set_degraded_mode(a,
1084 "firmware start has timed out");
1085 }
1086 }
1087
1088 /* purge any asynchronous events since we will repost them later */
1089 esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN);
1090 starttime = jiffies_to_msecs(jiffies);
1091
1092 while (true) {
1093 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
1094 if (doorbell & DRBL_MSG_IFC_DOWN) {
1095 esas2r_write_register_dword(a, MU_DOORBELL_OUT,
1096 doorbell);
1097 break;
1098 }
1099
1100 schedule_timeout_interruptible(msecs_to_jiffies(50));
1101
1102 if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
1103 esas2r_hdebug("timeout waiting for interface down");
1104 break;
1105 }
1106 }
1107skip_chip_reset:
1108 /*
1109 * first things first, before we go changing any of these registers
1110 * disable the communication lists.
1111 */
1112 dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
1113 dw &= ~MU_ILC_ENABLE;
1114 esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
1115 dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
1116 dw &= ~MU_OLC_ENABLE;
1117 esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);
1118
1119 /* configure the communication list addresses */
1120 ppaddr = a->inbound_list_md.phys_addr;
1121 esas2r_write_register_dword(a, MU_IN_LIST_ADDR_LO,
1122 lower_32_bits(ppaddr));
1123 esas2r_write_register_dword(a, MU_IN_LIST_ADDR_HI,
1124 upper_32_bits(ppaddr));
1125 ppaddr = a->outbound_list_md.phys_addr;
1126 esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_LO,
1127 lower_32_bits(ppaddr));
1128 esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_HI,
1129 upper_32_bits(ppaddr));
1130 ppaddr = a->uncached_phys +
1131 ((u8 *)a->outbound_copy - a->uncached);
1132 esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_LO,
1133 lower_32_bits(ppaddr));
1134 esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_HI,
1135 upper_32_bits(ppaddr));
1136
1137 /* reset the read and write pointers */
1138 *a->outbound_copy =
1139 a->last_write =
1140 a->last_read = a->list_size - 1;
1141 esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
1142 esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE |
1143 a->last_write);
1144 esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE |
1145 a->last_write);
1146 esas2r_write_register_dword(a, MU_IN_LIST_READ, MU_ILR_TOGGLE |
1147 a->last_write);
1148 esas2r_write_register_dword(a, MU_OUT_LIST_WRITE,
1149 MU_OLW_TOGGLE | a->last_write);
1150
1151 /* configure the interface select fields */
1152 dw = esas2r_read_register_dword(a, MU_IN_LIST_IFC_CONFIG);
1153 dw &= ~(MU_ILIC_LIST | MU_ILIC_DEST);
1154 esas2r_write_register_dword(a, MU_IN_LIST_IFC_CONFIG,
1155 (dw | MU_ILIC_LIST_F0 | MU_ILIC_DEST_DDR));
1156 dw = esas2r_read_register_dword(a, MU_OUT_LIST_IFC_CONFIG);
1157 dw &= ~(MU_OLIC_LIST | MU_OLIC_SOURCE);
1158 esas2r_write_register_dword(a, MU_OUT_LIST_IFC_CONFIG,
1159 (dw | MU_OLIC_LIST_F0 |
1160 MU_OLIC_SOURCE_DDR));
1161
1162 /* finish configuring the communication lists */
1163 dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
1164 dw &= ~(MU_ILC_ENTRY_MASK | MU_ILC_NUMBER_MASK);
1165 dw |= MU_ILC_ENTRY_4_DW | MU_ILC_DYNAMIC_SRC
1166 | (a->list_size << MU_ILC_NUMBER_SHIFT);
1167 esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
1168 dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
1169 dw &= ~(MU_OLC_ENTRY_MASK | MU_OLC_NUMBER_MASK);
1170 dw |= MU_OLC_ENTRY_4_DW | (a->list_size << MU_OLC_NUMBER_SHIFT);
1171 esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);
1172
1173 /*
1174 * notify the firmware that we're done setting up the communication
1175 * list registers. wait here until the firmware is done configuring
1176 * its lists. it will signal that it is done by enabling the lists.
1177 */
1178 esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_INIT);
1179 starttime = jiffies_to_msecs(jiffies);
1180
1181 while (true) {
1182 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
1183 if (doorbell & DRBL_MSG_IFC_INIT) {
1184 esas2r_write_register_dword(a, MU_DOORBELL_OUT,
1185 doorbell);
1186 break;
1187 }
1188
1189 schedule_timeout_interruptible(msecs_to_jiffies(100));
1190
1191 if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
1192 esas2r_hdebug(
1193 "timeout waiting for communication list init");
1194 esas2r_bugon();
1195 return esas2r_set_degraded_mode(a,
1196 "timeout waiting for communication list init");
1197 }
1198 }
1199
1200 /*
1201 * flag whether the firmware supports the power down doorbell. we
1202 * determine this by reading the inbound doorbell enable mask.
1203 */
1204 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB);
1205 if (doorbell & DRBL_POWER_DOWN)
1206 esas2r_lock_set_flags(&a->flags2, AF2_VDA_POWER_DOWN);
1207 else
1208 esas2r_lock_clear_flags(&a->flags2, AF2_VDA_POWER_DOWN);
1209
1210 /*
1211 * enable assertion of outbound queue and doorbell interrupts in the
1212 * main interrupt cause register.
1213 */
1214 esas2r_write_register_dword(a, MU_OUT_LIST_INT_MASK, MU_OLIS_MASK);
1215 esas2r_write_register_dword(a, MU_DOORBELL_OUT_ENB, DRBL_ENB_MASK);
1216 return true;
1217}
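/*
 * The interface-down, interface-init and power-down waits in this file
 * all follow the same ring/echo/acknowledge doorbell protocol. Below is a
 * minimal sketch of that pattern as a helper; the helper itself is
 * hypothetical and not part of the patch, but it uses only accessors
 * defined by this driver:
 */
static bool esas2r_doorbell_handshake(struct esas2r_adapter *a, u32 bit,
				      u32 timeout_ms)
{
	u32 starttime = jiffies_to_msecs(jiffies);
	u32 doorbell;

	/* ring the inbound doorbell... */
	esas2r_write_register_dword(a, MU_DOORBELL_IN, bit);

	while (true) {
		/* ...and wait for the firmware to echo the bit back */
		doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
		if (doorbell & bit) {
			/* acknowledge by writing the bits back */
			esas2r_write_register_dword(a, MU_DOORBELL_OUT,
						    doorbell);
			return true;
		}

		schedule_timeout_interruptible(msecs_to_jiffies(100));

		if ((jiffies_to_msecs(jiffies) - starttime) > timeout_ms)
			return false;	/* caller decides how to degrade */
	}
}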
1218
1219/* Process the initialization message just completed and format the next one. */
1220static bool esas2r_format_init_msg(struct esas2r_adapter *a,
1221 struct esas2r_request *rq)
1222{
1223 u32 msg = a->init_msg;
1224 struct atto_vda_cfg_init *ci;
1225
1226 a->init_msg = 0;
1227
1228 switch (msg) {
1229 case ESAS2R_INIT_MSG_START:
1230 case ESAS2R_INIT_MSG_REINIT:
1231 {
1232 struct timeval now;
1233 do_gettimeofday(&now);
1234 esas2r_hdebug("CFG init");
1235 esas2r_build_cfg_req(a,
1236 rq,
1237 VDA_CFG_INIT,
1238 0,
1239 NULL);
1240 ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
1241 ci->sgl_page_size = sgl_page_size;
1242 ci->epoch_time = now.tv_sec;
1243 rq->flags |= RF_FAILURE_OK;
1244 a->init_msg = ESAS2R_INIT_MSG_INIT;
1245 break;
1246 }
1247
1248 case ESAS2R_INIT_MSG_INIT:
1249 if (rq->req_stat == RS_SUCCESS) {
1250 u32 major;
1251 u32 minor;
1252
1253 a->fw_version = le16_to_cpu(
1254 rq->func_rsp.cfg_rsp.vda_version);
1255 a->fw_build = rq->func_rsp.cfg_rsp.fw_build;
1256 major = LOBYTE(rq->func_rsp.cfg_rsp.fw_release);
1257 minor = HIBYTE(rq->func_rsp.cfg_rsp.fw_release);
1258 a->fw_version += (major << 16) + (minor << 24);
1259 } else {
1260 esas2r_hdebug("FAILED");
1261 }
1262
1263 /*
1264 * R6xx firmware releases 2.71 and earlier did not correctly report
1265 * errors for unsupported config requests.
1266 */
1267
1268 if ((a->flags2 & AF2_THUNDERBOLT)
1269 || (be32_to_cpu(a->fw_version) >
1270 be32_to_cpu(0x47020052))) {
1271 esas2r_hdebug("CFG get init");
1272 esas2r_build_cfg_req(a,
1273 rq,
1274 VDA_CFG_GET_INIT2,
1275 sizeof(struct atto_vda_cfg_init),
1276 NULL);
1277
1278 rq->vrq->cfg.sg_list_offset = offsetof(
1279 struct atto_vda_cfg_req,
1280 data.sge);
1281 rq->vrq->cfg.data.prde.ctl_len =
1282 cpu_to_le32(sizeof(struct atto_vda_cfg_init));
1283 rq->vrq->cfg.data.prde.address = cpu_to_le64(
1284 rq->vrq_md->phys_addr +
1285 sizeof(union atto_vda_req));
1286 rq->flags |= RF_FAILURE_OK;
1287 a->init_msg = ESAS2R_INIT_MSG_GET_INIT;
1288 break;
1289 }
1290
1291 case ESAS2R_INIT_MSG_GET_INIT:
1292 if (msg == ESAS2R_INIT_MSG_GET_INIT) {
1293 ci = (struct atto_vda_cfg_init *)rq->data_buf;
1294 if (rq->req_stat == RS_SUCCESS) {
1295 a->num_targets_backend =
1296 le32_to_cpu(ci->num_targets_backend);
1297 a->ioctl_tunnel =
1298 le32_to_cpu(ci->ioctl_tunnel);
1299 } else {
1300 esas2r_hdebug("FAILED");
1301 }
1302 }
1303 /* fall through */
1304
1305 default:
1306 rq->req_stat = RS_SUCCESS;
1307 return false;
1308 }
1309 return true;
1310}
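/*
 * A summary sketch of the init-message sequence driven above. The states
 * are those used by esas2r_format_init_msg(), advanced once per completed
 * request by esas2r_init_msgs():
 *
 *   START/REINIT -> INIT -> GET_INIT -> 0 (done)
 *
 * VDA_CFG_INIT pushes the SGL page size and epoch time to the firmware;
 * on firmware that reports config errors correctly (Thunderbolt, or R6xx
 * releases newer than 2.71), VDA_CFG_GET_INIT2 then reads back values
 * such as num_targets_backend. Both requests are marked RF_FAILURE_OK,
 * so only a timeout fails the sequence.
 */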
1311
1312/*
1313 * Send the initialization messages via the request queue. Messages are
1314 * issued with interrupts disabled.
1315 */
1316bool esas2r_init_msgs(struct esas2r_adapter *a)
1317{
1318 bool success = true;
1319 struct esas2r_request *rq = &a->general_req;
1320
1321 esas2r_rq_init_request(rq, a);
1322 rq->comp_cb = esas2r_dummy_complete;
1323
1324 if (a->init_msg == 0)
1325 a->init_msg = ESAS2R_INIT_MSG_REINIT;
1326
1327 while (a->init_msg) {
1328 if (esas2r_format_init_msg(a, rq)) {
1329 unsigned long flags;
1330 while (true) {
1331 spin_lock_irqsave(&a->queue_lock, flags);
1332 esas2r_start_vda_request(a, rq);
1333 spin_unlock_irqrestore(&a->queue_lock, flags);
1334 esas2r_wait_request(a, rq);
1335 if (rq->req_stat != RS_PENDING)
1336 break;
1337 }
1338 }
1339
1340 if (rq->req_stat == RS_SUCCESS
1341 || ((rq->flags & RF_FAILURE_OK)
1342 && rq->req_stat != RS_TIMEOUT))
1343 continue;
1344
1345 esas2r_log(ESAS2R_LOG_CRIT, "init message %x failed (%x, %x)",
1346 a->init_msg, rq->req_stat, rq->flags);
1347 a->init_msg = ESAS2R_INIT_MSG_START;
1348 success = false;
1349 break;
1350 }
1351
1352 esas2r_rq_destroy_request(rq, a);
1353 return success;
1354}
1355
1356/* Initialize the adapter chip */
1357bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
1358{
1359 bool rslt = false;
1360 struct esas2r_request *rq;
1361 u32 i;
1362
1363 if (a->flags & AF_DEGRADED_MODE)
1364 goto exit;
1365
1366 if (!(a->flags & AF_NVR_VALID)) {
1367 if (!esas2r_nvram_read_direct(a))
1368 esas2r_log(ESAS2R_LOG_WARN,
1369 "invalid/missing NVRAM parameters");
1370 }
1371
1372 if (!esas2r_init_msgs(a)) {
1373 esas2r_set_degraded_mode(a, "init messages failed");
1374 goto exit;
1375 }
1376
1377 /* The firmware is ready. */
1378 esas2r_lock_clear_flags(&a->flags, AF_DEGRADED_MODE);
1379 esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
1380
1381 /* Post all the async event requests */
1382 for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++)
1383 esas2r_start_ae_request(a, rq);
1384
1385 if (!a->flash_rev[0])
1386 esas2r_read_flash_rev(a);
1387
1388 if (!a->image_type[0])
1389 esas2r_read_image_type(a);
1390
1391 if (a->fw_version == 0)
1392 a->fw_rev[0] = 0;
1393 else
1394 sprintf(a->fw_rev, "%1d.%02d",
1395 (int)LOBYTE(HIWORD(a->fw_version)),
1396 (int)HIBYTE(HIWORD(a->fw_version)));
1397
1398 esas2r_hdebug("firmware revision: %s", a->fw_rev);
1399
1400 if ((a->flags & AF_CHPRST_DETECTED)
1401 && (a->flags & AF_FIRST_INIT)) {
1402 esas2r_enable_chip_interrupts(a);
1403 return true;
1404 }
1405
1406 /* initialize discovery */
1407 esas2r_disc_initialize(a);
1408
1409 /*
1410 * wait for the device wait time to expire here if requested. this is
1411 * usually requested during initial driver load and possibly when
1412 * resuming from a low power state. deferred device waiting will use
1413 * interrupts. chip reset recovery always defers device waiting to
1414 * avoid being in a TASKLET too long.
1415 */
1416 if (init_poll) {
1417 u32 currtime = a->disc_start_time;
1418 u32 nexttick = 100;
1419 u32 deltatime;
1420
1421 /*
1422 * Block Tasklets from getting scheduled and indicate this is
1423 * polled discovery.
1424 */
1425 esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED);
1426 esas2r_lock_set_flags(&a->flags, AF_DISC_POLLED);
1427
1428 /*
1429 * Temporarily bring the disable count to zero to enable
1430 * deferred processing. Note that the count is already zero
1431 * after the first initialization.
1432 */
1433 if (a->flags & AF_FIRST_INIT)
1434 atomic_dec(&a->disable_cnt);
1435
1436 while (a->flags & AF_DISC_PENDING) {
1437 schedule_timeout_interruptible(msecs_to_jiffies(100));
1438
1439 /*
1440 * Determine the need for a timer tick based on the
1441 * delta time between this and the last iteration of
1442 * this loop. We don't use the absolute time because
1443 * then we would have to worry about when nexttick
1444 * wraps and currtime hasn't yet.
1445 */
1446 deltatime = jiffies_to_msecs(jiffies) - currtime;
1447 currtime += deltatime;
1448
1449 /*
1450 * Process any waiting discovery as long as the chip is
1451 * up. If a chip reset happens during initial polling,
1452 * we have to make sure the timer tick processes the
1453 * doorbell indicating the firmware is ready.
1454 */
1455 if (!(a->flags & AF_CHPRST_PENDING))
1456 esas2r_disc_check_for_work(a);
1457
1458 /* Simulate a timer tick. */
1459 if (nexttick <= deltatime) {
1460
1461 /* Time for a timer tick */
1462 nexttick += 100;
1463 esas2r_timer_tick(a);
1464 }
1465
1466 if (nexttick > deltatime)
1467 nexttick -= deltatime;
1468
1469 /* Do any deferred processing */
1470 if (esas2r_is_tasklet_pending(a))
1471 esas2r_do_tasklet_tasks(a);
1472
1473 }
1474
1475 if (a->flags & AF_FIRST_INIT)
1476 atomic_inc(&a->disable_cnt);
1477
1478 esas2r_lock_clear_flags(&a->flags, AF_DISC_POLLED);
1479 esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
1480 }
1481
1482
1483 esas2r_targ_db_report_changes(a);
1484
1485 /*
1486 * For cases where (a) the initialization messages processing may
1487 * handle an interrupt for a port event and a discovery is waiting, but
1488 * we are not waiting for devices, or (b) the device wait time has been
1489 * exhausted but there is still discovery pending, start any leftover
1490 * discovery in interrupt driven mode.
1491 */
1492 esas2r_disc_start_waiting(a);
1493
1494 /* Enable chip interrupts */
1495 a->int_mask = ESAS2R_INT_STS_MASK;
1496 esas2r_enable_chip_interrupts(a);
1497 esas2r_enable_heartbeat(a);
1498 rslt = true;
1499
1500exit:
1501 /*
1502 * Regardless of whether initialization was successful, certain things
1503 * need to get done before we exit.
1504 */
1505
1506 if ((a->flags & AF_CHPRST_DETECTED)
1507 && (a->flags & AF_FIRST_INIT)) {
1508 /*
1509 * Reinitialization was performed during the first
1510 * initialization. Only clear the chip reset flag so the
1511 * original device polling is not cancelled.
1512 */
1513 if (!rslt)
1514 esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
1515 } else {
1516 /* First initialization or a subsequent re-init is complete. */
1517 if (!rslt) {
1518 esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
1519 esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);
1520 }
1521
1522
1523 /* Enable deferred processing after the first initialization. */
1524 if (a->flags & AF_FIRST_INIT) {
1525 esas2r_lock_clear_flags(&a->flags, AF_FIRST_INIT);
1526
1527 if (atomic_dec_return(&a->disable_cnt) == 0)
1528 esas2r_do_deferred_processes(a);
1529 }
1530 }
1531
1532 return rslt;
1533}
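/*
 * A minimal sketch of the delta-time bookkeeping in the polled discovery
 * loop above: only deltas are tracked, so wraparound of the absolute
 * millisecond counter never matters. The helper name is hypothetical:
 */
static void esas2r_poll_tick(struct esas2r_adapter *a, u32 *currtime,
			     u32 *nexttick)
{
	u32 deltatime = jiffies_to_msecs(jiffies) - *currtime;

	*currtime += deltatime;

	if (*nexttick <= deltatime) {
		*nexttick += 100;	/* a simulated 100 ms timer tick */
		esas2r_timer_tick(a);
	}

	if (*nexttick > deltatime)
		*nexttick -= deltatime;
}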
1534
1535void esas2r_reset_adapter(struct esas2r_adapter *a)
1536{
1537 esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
1538 esas2r_local_reset_adapter(a);
1539 esas2r_schedule_tasklet(a);
1540}
1541
1542void esas2r_reset_chip(struct esas2r_adapter *a)
1543{
1544 if (!esas2r_is_adapter_present(a))
1545 return;
1546
1547 /*
1548 * Before we reset the chip, save off the VDA core dump. The VDA core
1549 * dump is located in the upper 512KB of the onchip SRAM. Make sure
1550 * to not overwrite a previous crash that was saved.
1551 */
1552 if ((a->flags2 & AF2_COREDUMP_AVAIL)
1553 && !(a->flags2 & AF2_COREDUMP_SAVED)
1554 && a->fw_coredump_buff) {
1555 esas2r_read_mem_block(a,
1556 a->fw_coredump_buff,
1557 MW_DATA_ADDR_SRAM + 0x80000,
1558 ESAS2R_FWCOREDUMP_SZ);
1559
1560 esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_SAVED);
1561 }
1562
1563 esas2r_lock_clear_flags(&a->flags2, AF2_COREDUMP_AVAIL);
1564
1565 /* Reset the chip */
1566 if (a->pcid->revision == MVR_FREY_B2)
1567 esas2r_write_register_dword(a, MU_CTL_STATUS_IN_B2,
1568 MU_CTL_IN_FULL_RST2);
1569 else
1570 esas2r_write_register_dword(a, MU_CTL_STATUS_IN,
1571 MU_CTL_IN_FULL_RST);
1572
1573
1574 /* Stall a little while to let the reset condition clear */
1575 mdelay(10);
1576}
1577
1578static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a)
1579{
1580 u32 starttime;
1581 u32 doorbell;
1582
1583 esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_POWER_DOWN);
1584 starttime = jiffies_to_msecs(jiffies);
1585
1586 while (true) {
1587 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
1588 if (doorbell & DRBL_POWER_DOWN) {
1589 esas2r_write_register_dword(a, MU_DOORBELL_OUT,
1590 doorbell);
1591 break;
1592 }
1593
1594 schedule_timeout_interruptible(msecs_to_jiffies(100));
1595
1596 if ((jiffies_to_msecs(jiffies) - starttime) > 30000) {
1597 esas2r_hdebug("Timeout waiting for power down");
1598 break;
1599 }
1600 }
1601}
1602
1603/*
1604 * Power down the adapter: quiesce device states, adapter state,
1605 * interrupts, and I/O.
1606 */
1607void esas2r_power_down(struct esas2r_adapter *a)
1608{
1609 esas2r_lock_set_flags(&a->flags, AF_POWER_MGT);
1610 esas2r_lock_set_flags(&a->flags, AF_POWER_DOWN);
1611
1612 if (!(a->flags & AF_DEGRADED_MODE)) {
1613 u32 starttime;
1614 u32 doorbell;
1615
1616 /*
1617 * We are currently running OK and will be reinitializing later.
1618 * Increment the disable count to coordinate with
1619 * esas2r_init_adapter. We don't have to do this in degraded
1620 * mode since we never enabled interrupts in the first place.
1621 */
1622 esas2r_disable_chip_interrupts(a);
1623 esas2r_disable_heartbeat(a);
1624
1625 /* wait for any VDA activity to clear before continuing */
1626 esas2r_write_register_dword(a, MU_DOORBELL_IN,
1627 DRBL_MSG_IFC_DOWN);
1628 starttime = jiffies_to_msecs(jiffies);
1629
1630 while (true) {
1631 doorbell =
1632 esas2r_read_register_dword(a, MU_DOORBELL_OUT);
1633 if (doorbell & DRBL_MSG_IFC_DOWN) {
1634 esas2r_write_register_dword(a, MU_DOORBELL_OUT,
1635 doorbell);
1636 break;
1637 }
1638
1639 schedule_timeout_interruptible(msecs_to_jiffies(100));
1640
1641 if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
1642 esas2r_hdebug(
1643 "timeout waiting for interface down");
1644 break;
1645 }
1646 }
1647
1648 /*
1649 * For firmware versions that support it, tell the firmware that
1650 * the driver is powering down.
1651 */
1652 if (a->flags2 & AF2_VDA_POWER_DOWN)
1653 esas2r_power_down_notify_firmware(a);
1654 }
1655
1656 /* Suspend I/O processing. */
1657 esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
1658 esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING);
1659 esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING);
1660
1661 esas2r_process_adapter_reset(a);
1662
1663 /* Remove devices now that I/O is cleaned up. */
1664 a->prev_dev_cnt = esas2r_targ_db_get_tgt_cnt(a);
1665 esas2r_targ_db_remove_all(a, false);
1666}
1667
1668/*
1669 * Power the adapter back up: reinitialize the hardware and restore
1670 * device states, interrupts, and I/O.
1671 */
1672bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll)
1673{
1674 bool ret;
1675
1676 esas2r_lock_clear_flags(&a->flags, AF_POWER_DOWN);
1677 esas2r_init_pci_cfg_space(a);
1678 esas2r_lock_set_flags(&a->flags, AF_FIRST_INIT);
1679 atomic_inc(&a->disable_cnt);
1680
1681 /* reinitialize the adapter */
1682 ret = esas2r_check_adapter(a);
1683 if (!esas2r_init_adapter_hw(a, init_poll))
1684 ret = false;
1685
1686 /* send the reset asynchronous event */
1687 esas2r_send_reset_ae(a, true);
1688
1689 /* clear this flag after initialization. */
1690 esas2r_lock_clear_flags(&a->flags, AF_POWER_MGT);
1691 return ret;
1692}
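/*
 * How the two power management entry points above are expected to pair
 * up (inferred from the code above; the actual suspend/resume callbacks
 * live elsewhere in the driver):
 *
 *   suspend: esas2r_power_down(a);       quiesce I/O, notify firmware
 *   resume:  esas2r_power_up(a, true);   restore PCI config, re-init hw
 */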
1693
1694bool esas2r_is_adapter_present(struct esas2r_adapter *a)
1695{
1696 if (a->flags & AF_NOT_PRESENT)
1697 return false;
1698
1699 if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) {
1700 esas2r_lock_set_flags(&a->flags, AF_NOT_PRESENT);
1701
1702 return false;
1703 }
1704 return true;
1705}
1706
1707const char *esas2r_get_model_name(struct esas2r_adapter *a)
1708{
1709 switch (a->pcid->subsystem_device) {
1710 case ATTO_ESAS_R680:
1711 return "ATTO ExpressSAS R680";
1712
1713 case ATTO_ESAS_R608:
1714 return "ATTO ExpressSAS R608";
1715
1716 case ATTO_ESAS_R60F:
1717 return "ATTO ExpressSAS R60F";
1718
1719 case ATTO_ESAS_R6F0:
1720 return "ATTO ExpressSAS R6F0";
1721
1722 case ATTO_ESAS_R644:
1723 return "ATTO ExpressSAS R644";
1724
1725 case ATTO_ESAS_R648:
1726 return "ATTO ExpressSAS R648";
1727
1728 case ATTO_TSSC_3808:
1729 return "ATTO ThunderStream SC 3808D";
1730
1731 case ATTO_TSSC_3808E:
1732 return "ATTO ThunderStream SC 3808E";
1733
1734 case ATTO_TLSH_1068:
1735 return "ATTO ThunderLink SH 1068";
1736 }
1737
1738 return "ATTO SAS Controller";
1739}
1740
1741const char *esas2r_get_model_name_short(struct esas2r_adapter *a)
1742{
1743 switch (a->pcid->subsystem_device) {
1744 case ATTO_ESAS_R680:
1745 return "R680";
1746
1747 case ATTO_ESAS_R608:
1748 return "R608";
1749
1750 case ATTO_ESAS_R60F:
1751 return "R60F";
1752
1753 case ATTO_ESAS_R6F0:
1754 return "R6F0";
1755
1756 case ATTO_ESAS_R644:
1757 return "R644";
1758
1759 case ATTO_ESAS_R648:
1760 return "R648";
1761
1762 case ATTO_TSSC_3808:
1763 return "SC 3808D";
1764
1765 case ATTO_TSSC_3808E:
1766 return "SC 3808E";
1767
1768 case ATTO_TLSH_1068:
1769 return "SH 1068";
1770 }
1771
1772 return "unknown";
1773}
diff --git a/drivers/scsi/esas2r/esas2r_int.c b/drivers/scsi/esas2r/esas2r_int.c
new file mode 100644
index 000000000000..c2d4ff57c5c3
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_int.c
@@ -0,0 +1,941 @@
1/*
2 * linux/drivers/scsi/esas2r/esas2r_int.c
3 * esas2r interrupt handling
4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
6 * (mailto:linuxdrivers@attotech.com)
7 */
8/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
9/*
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * NO WARRANTY
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
29 *
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38 *
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
42 */
43/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
44
45#include "esas2r.h"
46
47/* Local function prototypes */
48static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell);
49static void esas2r_get_outbound_responses(struct esas2r_adapter *a);
50static void esas2r_process_bus_reset(struct esas2r_adapter *a);
51
52/*
53 * Poll the adapter for interrupts and service them.
54 * This function handles both legacy interrupts and MSI.
55 */
56void esas2r_polled_interrupt(struct esas2r_adapter *a)
57{
58 u32 intstat;
59 u32 doorbell;
60
61 esas2r_disable_chip_interrupts(a);
62
63 intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
64
65 if (intstat & MU_INTSTAT_POST_OUT) {
66 /* clear the interrupt */
67
68 esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
69 MU_OLIS_INT);
70 esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
71
72 esas2r_get_outbound_responses(a);
73 }
74
75 if (intstat & MU_INTSTAT_DRBL) {
76 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
77 if (doorbell != 0)
78 esas2r_doorbell_interrupt(a, doorbell);
79 }
80
81 esas2r_enable_chip_interrupts(a);
82
83 if (atomic_read(&a->disable_cnt) == 0)
84 esas2r_do_deferred_processes(a);
85}
86
87/*
88 * Legacy and MSI interrupt handlers. Note that the legacy interrupt handler
89 * schedules a TASKLET to process events, whereas the MSI handler just
90 * processes interrupt events directly.
91 */
92irqreturn_t esas2r_interrupt(int irq, void *dev_id)
93{
94 struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
95
96 if (!esas2r_adapter_interrupt_pending(a))
97 return IRQ_NONE;
98
99 esas2r_lock_set_flags(&a->flags2, AF2_INT_PENDING);
100 esas2r_schedule_tasklet(a);
101
102 return IRQ_HANDLED;
103}
104
105void esas2r_adapter_interrupt(struct esas2r_adapter *a)
106{
107 u32 doorbell;
108
109 if (likely(a->int_stat & MU_INTSTAT_POST_OUT)) {
110 /* clear the interrupt */
111 esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
112 MU_OLIS_INT);
113 esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
114 esas2r_get_outbound_responses(a);
115 }
116
117 if (unlikely(a->int_stat & MU_INTSTAT_DRBL)) {
118 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
119 if (doorbell != 0)
120 esas2r_doorbell_interrupt(a, doorbell);
121 }
122
123 a->int_mask = ESAS2R_INT_STS_MASK;
124
125 esas2r_enable_chip_interrupts(a);
126
127 if (likely(atomic_read(&a->disable_cnt) == 0))
128 esas2r_do_deferred_processes(a);
129}
130
131irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id)
132{
133 struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
134 u32 intstat;
135 u32 doorbell;
136
137 intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
138
139 if (likely(intstat & MU_INTSTAT_POST_OUT)) {
140 /* clear the interrupt */
141
142 esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
143 MU_OLIS_INT);
144 esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
145
146 esas2r_get_outbound_responses(a);
147 }
148
149 if (unlikely(intstat & MU_INTSTAT_DRBL)) {
150 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
151 if (doorbell != 0)
152 esas2r_doorbell_interrupt(a, doorbell);
153 }
154
155 /*
156 * Work around a chip bug and force a new MSI to be sent if one is
157 * still pending.
158 */
159 esas2r_disable_chip_interrupts(a);
160 esas2r_enable_chip_interrupts(a);
161
162 if (likely(atomic_read(&a->disable_cnt) == 0))
163 esas2r_do_deferred_processes(a);
164
165 esas2r_do_tasklet_tasks(a);
166
167	return IRQ_HANDLED;
168}
169
170
171
172static void esas2r_handle_outbound_rsp_err(struct esas2r_adapter *a,
173 struct esas2r_request *rq,
174 struct atto_vda_ob_rsp *rsp)
175{
176
177 /*
178 * For I/O requests, only copy the response if an error
179 * occurred and setup a callback to do error processing.
180 */
181 if (unlikely(rq->req_stat != RS_SUCCESS)) {
182 memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp));
183
184 if (rq->req_stat == RS_ABORTED) {
185 if (rq->timeout > RQ_MAX_TIMEOUT)
186 rq->req_stat = RS_TIMEOUT;
187 } else if (rq->req_stat == RS_SCSI_ERROR) {
188 u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat;
189
190 esas2r_trace("scsistatus: %x", scsistatus);
191
192 /* Any of these are a good result. */
193 if (scsistatus == SAM_STAT_GOOD || scsistatus ==
194 SAM_STAT_CONDITION_MET || scsistatus ==
195 SAM_STAT_INTERMEDIATE || scsistatus ==
196 SAM_STAT_INTERMEDIATE_CONDITION_MET) {
197 rq->req_stat = RS_SUCCESS;
198 rq->func_rsp.scsi_rsp.scsi_stat =
199 SAM_STAT_GOOD;
200 }
201 }
202 }
203}
204
205static void esas2r_get_outbound_responses(struct esas2r_adapter *a)
206{
207 struct atto_vda_ob_rsp *rsp;
208 u32 rspput_ptr;
209 u32 rspget_ptr;
210 struct esas2r_request *rq;
211 u32 handle;
212 unsigned long flags;
213
214 LIST_HEAD(comp_list);
215
216 esas2r_trace_enter();
217
218 spin_lock_irqsave(&a->queue_lock, flags);
219
220 /* Get the outbound limit and pointers */
221 rspput_ptr = le32_to_cpu(*a->outbound_copy) & MU_OLC_WRT_PTR;
222 rspget_ptr = a->last_read;
223
224 esas2r_trace("rspput_ptr: %x, rspget_ptr: %x", rspput_ptr, rspget_ptr);
225
226 /* If we don't have anything to process, get out */
227 if (unlikely(rspget_ptr == rspput_ptr)) {
228 spin_unlock_irqrestore(&a->queue_lock, flags);
229 esas2r_trace_exit();
230 return;
231 }
232
233 /* Make sure the firmware is healthy */
234 if (unlikely(rspput_ptr >= a->list_size)) {
235 spin_unlock_irqrestore(&a->queue_lock, flags);
236 esas2r_bugon();
237 esas2r_local_reset_adapter(a);
238 esas2r_trace_exit();
239 return;
240 }
241
242 do {
243 rspget_ptr++;
244
245 if (rspget_ptr >= a->list_size)
246 rspget_ptr = 0;
247
248 rsp = (struct atto_vda_ob_rsp *)a->outbound_list_md.virt_addr
249 + rspget_ptr;
250
251 handle = rsp->handle;
252
253 /* Verify the handle range */
254 if (unlikely(LOWORD(handle) == 0
255 || LOWORD(handle) > num_requests +
256 num_ae_requests + 1)) {
257 esas2r_bugon();
258 continue;
259 }
260
261 /* Get the request for this handle */
262 rq = a->req_table[LOWORD(handle)];
263
264 if (unlikely(rq == NULL || rq->vrq->scsi.handle != handle)) {
265 esas2r_bugon();
266 continue;
267 }
268
269 list_del(&rq->req_list);
270
271 /* Get the completion status */
272 rq->req_stat = rsp->req_stat;
273
274 esas2r_trace("handle: %x", handle);
275 esas2r_trace("rq: %p", rq);
276 esas2r_trace("req_status: %x", rq->req_stat);
277
278 if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
279 esas2r_handle_outbound_rsp_err(a, rq, rsp);
280 } else {
281 /*
282 * Copy the outbound completion struct for non-I/O
283 * requests.
284 */
285 memcpy(&rq->func_rsp, &rsp->func_rsp,
286 sizeof(rsp->func_rsp));
287 }
288
289 /* Queue the request for completion. */
290 list_add_tail(&rq->comp_list, &comp_list);
291
292 } while (rspget_ptr != rspput_ptr);
293
294 a->last_read = rspget_ptr;
295 spin_unlock_irqrestore(&a->queue_lock, flags);
296
297 esas2r_comp_list_drain(a, &comp_list);
298 esas2r_trace_exit();
299}
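/*
 * A sketch of the outbound ring consumed above, under the assumption
 * (consistent with the code) that the firmware DMAs its write pointer
 * into *a->outbound_copy while the driver keeps the read pointer:
 *
 *	get = a->last_read;
 *	put = le32_to_cpu(*a->outbound_copy) & MU_OLC_WRT_PTR;
 *	while (get != put) {
 *		get = (get + 1) % a->list_size;
 *		consume(outbound_list[get]);	// consume() is hypothetical
 *	}
 *	a->last_read = get;
 */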
300
301/*
302 * Perform all deferred processes for the adapter. Deferred
303 * processes can only be done while the current interrupt
304 * disable_cnt for the adapter is zero.
305 */
306void esas2r_do_deferred_processes(struct esas2r_adapter *a)
307{
308 int startreqs = 2;
309 struct esas2r_request *rq;
310 unsigned long flags;
311
312 /*
313 * startreqs is used to control starting requests
314 * that are on the deferred queue
315 * = 0 - do not start any requests
316 * = 1 - can start discovery requests
317 * = 2 - can start any request
318 */
319
320 if (a->flags & (AF_CHPRST_PENDING | AF_FLASHING))
321 startreqs = 0;
322 else if (a->flags & AF_DISC_PENDING)
323 startreqs = 1;
324
325 atomic_inc(&a->disable_cnt);
326
327 /* Clear off the completed list to be processed later. */
328
329 if (esas2r_is_tasklet_pending(a)) {
330 esas2r_schedule_tasklet(a);
331
332 startreqs = 0;
333 }
334
335 /*
336 * If we can start requests then traverse the defer queue
337 * looking for requests to start or complete
338 */
339 if (startreqs && !list_empty(&a->defer_list)) {
340 LIST_HEAD(comp_list);
341 struct list_head *element, *next;
342
343 spin_lock_irqsave(&a->queue_lock, flags);
344
345 list_for_each_safe(element, next, &a->defer_list) {
346 rq = list_entry(element, struct esas2r_request,
347 req_list);
348
349 if (rq->req_stat != RS_PENDING) {
350 list_del(element);
351 list_add_tail(&rq->comp_list, &comp_list);
352 }
353 /*
354 * Process discovery and OS requests separately. We
355 * can't hold up discovery requests when discovery is
356 * pending. In general, there may be different sets of
357 * conditions for starting different types of requests.
358 */
359 else if (rq->req_type == RT_DISC_REQ) {
360 list_del(element);
361 esas2r_disc_local_start_request(a, rq);
362 } else if (startreqs == 2) {
363 list_del(element);
364 esas2r_local_start_request(a, rq);
365
366 /*
367 * Flashing could have been set by last local
368 * start
369 */
370 if (a->flags & AF_FLASHING)
371 break;
372 }
373 }
374
375 spin_unlock_irqrestore(&a->queue_lock, flags);
376 esas2r_comp_list_drain(a, &comp_list);
377 }
378
379 atomic_dec(&a->disable_cnt);
380}
381
382/*
383 * Process an adapter reset (or one that is about to happen)
384 * by making sure all outstanding requests are completed that
385 * haven't been already.
386 */
387void esas2r_process_adapter_reset(struct esas2r_adapter *a)
388{
389 struct esas2r_request *rq = &a->general_req;
390 unsigned long flags;
391 struct esas2r_disc_context *dc;
392
393 LIST_HEAD(comp_list);
394 struct list_head *element;
395
396 esas2r_trace_enter();
397
398 spin_lock_irqsave(&a->queue_lock, flags);
399
400 /* abort the active discovery, if any. */
401
402 if (rq->interrupt_cx) {
403 dc = (struct esas2r_disc_context *)rq->interrupt_cx;
404
405 dc->disc_evt = 0;
406
407 esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
408 }
409
410 /*
411 * just clear the interrupt callback for now. it will be dequeued if
412 * and when we find it on the active queue and we don't want the
413 * callback called. also set the dummy completion callback in case we
414 * were doing an I/O request.
415 */
416
417 rq->interrupt_cx = NULL;
418 rq->interrupt_cb = NULL;
419
420 rq->comp_cb = esas2r_dummy_complete;
421
422 /* Reset the read and write pointers */
423
424 *a->outbound_copy =
425 a->last_write =
426 a->last_read = a->list_size - 1;
427
428 esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
429
430 /* Kill all the requests on the active list */
431 list_for_each(element, &a->defer_list) {
432 rq = list_entry(element, struct esas2r_request, req_list);
433
434 if (rq->req_stat == RS_STARTED)
435 if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
436 list_add_tail(&rq->comp_list, &comp_list);
437 }
438
439 spin_unlock_irqrestore(&a->queue_lock, flags);
440 esas2r_comp_list_drain(a, &comp_list);
441 esas2r_process_bus_reset(a);
442 esas2r_trace_exit();
443}
444
445static void esas2r_process_bus_reset(struct esas2r_adapter *a)
446{
447 struct esas2r_request *rq;
448 struct list_head *element;
449 unsigned long flags;
450
451 LIST_HEAD(comp_list);
452
453 esas2r_trace_enter();
454
455 esas2r_hdebug("reset detected");
456
457 spin_lock_irqsave(&a->queue_lock, flags);
458
459 /* kill all the requests on the deferred queue */
460 list_for_each(element, &a->defer_list) {
461 rq = list_entry(element, struct esas2r_request, req_list);
462 if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
463 list_add_tail(&rq->comp_list, &comp_list);
464 }
465
466 spin_unlock_irqrestore(&a->queue_lock, flags);
467
468 esas2r_comp_list_drain(a, &comp_list);
469
470 if (atomic_read(&a->disable_cnt) == 0)
471 esas2r_do_deferred_processes(a);
472
473 esas2r_lock_clear_flags(&a->flags, AF_OS_RESET);
474
475 esas2r_trace_exit();
476}
477
478static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
479{
480
481 esas2r_lock_clear_flags(&a->flags, AF_CHPRST_NEEDED);
482 esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED);
483 esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED);
484 esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING);
485 /*
486	 * Make sure we don't attempt more than 3 resets
487 * when the uptime between resets does not exceed one
488 * minute. This will stop any situation where there is
489 * really something wrong with the hardware. The way
490 * this works is that we start with uptime ticks at 0.
491 * Each time we do a reset, we add 20 seconds worth to
492 * the count. Each time a timer tick occurs, as long
493 * as a chip reset is not pending, we decrement the
494 * tick count. If the uptime ticks ever gets to 60
495 * seconds worth, we disable the adapter from that
496 * point forward. Three strikes, you're out.
497 */
498 if (!esas2r_is_adapter_present(a) || (a->chip_uptime >=
499 ESAS2R_CHP_UPTIME_MAX)) {
500 esas2r_hdebug("*** adapter disabled ***");
501
502 /*
503 * Ok, some kind of hard failure. Make sure we
504 * exit this loop with chip interrupts
505 * permanently disabled so we don't lock up the
506 * entire system. Also flag degraded mode to
507 * prevent the heartbeat from trying to recover.
508 */
509
510 esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE);
511 esas2r_lock_set_flags(&a->flags, AF_DISABLED);
512 esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
513 esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);
514
515 esas2r_disable_chip_interrupts(a);
516 a->int_mask = 0;
517 esas2r_process_adapter_reset(a);
518
519 esas2r_log(ESAS2R_LOG_CRIT,
520 "Adapter disabled because of hardware failure");
521 } else {
522 u32 flags =
523 esas2r_lock_set_flags(&a->flags, AF_CHPRST_STARTED);
524
525 if (!(flags & AF_CHPRST_STARTED))
526 /*
527 * Only disable interrupts if this is
528 * the first reset attempt.
529 */
530 esas2r_disable_chip_interrupts(a);
531
532 if ((a->flags & AF_POWER_MGT) && !(a->flags & AF_FIRST_INIT) &&
533 !(flags & AF_CHPRST_STARTED)) {
534 /*
535 * Don't reset the chip on the first
536 * deferred power up attempt.
537 */
538 } else {
539 esas2r_hdebug("*** resetting chip ***");
540 esas2r_reset_chip(a);
541 }
542
543 /* Kick off the reinitialization */
544 a->chip_uptime += ESAS2R_CHP_UPTIME_CNT;
545 a->chip_init_time = jiffies_to_msecs(jiffies);
546 if (!(a->flags & AF_POWER_MGT)) {
547 esas2r_process_adapter_reset(a);
548
549 if (!(flags & AF_CHPRST_STARTED)) {
550 /* Remove devices now that I/O is cleaned up. */
551 a->prev_dev_cnt =
552 esas2r_targ_db_get_tgt_cnt(a);
553 esas2r_targ_db_remove_all(a, false);
554 }
555 }
556
557 a->int_mask = 0;
558 }
559}
560
561static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)
562{
563 while (a->flags & AF_CHPRST_DETECTED) {
564 /*
565		 * Balance the enable in esas2r_init_adapter_hw.
566		 * esas2r_power_down already took care of it for power
567 * management.
568 */
569 if (!(a->flags & AF_DEGRADED_MODE) && !(a->flags &
570 AF_POWER_MGT))
571 esas2r_disable_chip_interrupts(a);
572
573 /* Reinitialize the chip. */
574 esas2r_check_adapter(a);
575 esas2r_init_adapter_hw(a, 0);
576
577 if (a->flags & AF_CHPRST_NEEDED)
578 break;
579
580 if (a->flags & AF_POWER_MGT) {
581 /* Recovery from power management. */
582 if (a->flags & AF_FIRST_INIT) {
583 /* Chip reset during normal power up */
584 esas2r_log(ESAS2R_LOG_CRIT,
585 "The firmware was reset during a normal power-up sequence");
586 } else {
587 /* Deferred power up complete. */
588 esas2r_lock_clear_flags(&a->flags,
589 AF_POWER_MGT);
590 esas2r_send_reset_ae(a, true);
591 }
592 } else {
593 /* Recovery from online chip reset. */
594 if (a->flags & AF_FIRST_INIT) {
595 /* Chip reset during driver load */
596 } else {
597 /* Chip reset after driver load */
598 esas2r_send_reset_ae(a, false);
599 }
600
601 esas2r_log(ESAS2R_LOG_CRIT,
602 "Recovering from a chip reset while the chip was online");
603 }
604
605 esas2r_lock_clear_flags(&a->flags, AF_CHPRST_STARTED);
606 esas2r_enable_chip_interrupts(a);
607
608 /*
609		 * Clear this flag last! This indicates that the chip has been
610 * reset already during initialization.
611 */
612 esas2r_lock_clear_flags(&a->flags, AF_CHPRST_DETECTED);
613 }
614}
615
616
617/* Perform deferred tasks when chip interrupts are disabled */
618void esas2r_do_tasklet_tasks(struct esas2r_adapter *a)
619{
620 if (a->flags & (AF_CHPRST_NEEDED | AF_CHPRST_DETECTED)) {
621 if (a->flags & AF_CHPRST_NEEDED)
622 esas2r_chip_rst_needed_during_tasklet(a);
623
624 esas2r_handle_chip_rst_during_tasklet(a);
625 }
626
627 if (a->flags & AF_BUSRST_NEEDED) {
628 esas2r_hdebug("hard resetting bus");
629
630 esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED);
631
632 if (a->flags & AF_FLASHING)
633 esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED);
634 else
635 esas2r_write_register_dword(a, MU_DOORBELL_IN,
636 DRBL_RESET_BUS);
637 }
638
639 if (a->flags & AF_BUSRST_DETECTED) {
640 esas2r_process_bus_reset(a);
641
642 esas2r_log_dev(ESAS2R_LOG_WARN,
643 &(a->host->shost_gendev),
644 "scsi_report_bus_reset() called");
645
646 scsi_report_bus_reset(a->host, 0);
647
648 esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED);
649 esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING);
650
651 esas2r_log(ESAS2R_LOG_WARN, "Bus reset complete");
652 }
653
654 if (a->flags & AF_PORT_CHANGE) {
655 esas2r_lock_clear_flags(&a->flags, AF_PORT_CHANGE);
656
657 esas2r_targ_db_report_changes(a);
658 }
659
660 if (atomic_read(&a->disable_cnt) == 0)
661 esas2r_do_deferred_processes(a);
662}
663
664static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell)
665{
666 if (!(doorbell & DRBL_FORCE_INT)) {
667 esas2r_trace_enter();
668 esas2r_trace("doorbell: %x", doorbell);
669 }
670
671 /* First clear the doorbell bits */
672 esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell);
673
674 if (doorbell & DRBL_RESET_BUS)
675 esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED);
676
677 if (doorbell & DRBL_FORCE_INT)
678 esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT);
679
680 if (doorbell & DRBL_PANIC_REASON_MASK) {
681 esas2r_hdebug("*** Firmware Panic ***");
682 esas2r_log(ESAS2R_LOG_CRIT, "The firmware has panicked");
683 }
684
685 if (doorbell & DRBL_FW_RESET) {
686 esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_AVAIL);
687 esas2r_local_reset_adapter(a);
688 }
689
690 if (!(doorbell & DRBL_FORCE_INT))
691 esas2r_trace_exit();
692}
693
694void esas2r_force_interrupt(struct esas2r_adapter *a)
695{
696 esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_FORCE_INT |
697 DRBL_DRV_VER);
698}
699
700
701static void esas2r_lun_event(struct esas2r_adapter *a, union atto_vda_ae *ae,
702 u16 target, u32 length)
703{
704 struct esas2r_target *t = a->targetdb + target;
705 u32 cplen = length;
706 unsigned long flags;
707
708 if (cplen > sizeof(t->lu_event))
709 cplen = sizeof(t->lu_event);
710
711 esas2r_trace("ae->lu.dwevent: %x", ae->lu.dwevent);
712 esas2r_trace("ae->lu.bystate: %x", ae->lu.bystate);
713
714 spin_lock_irqsave(&a->mem_lock, flags);
715
716 t->new_target_state = TS_INVALID;
717
718 if (ae->lu.dwevent & VDAAE_LU_LOST) {
719 t->new_target_state = TS_NOT_PRESENT;
720 } else {
721 switch (ae->lu.bystate) {
722 case VDAAE_LU_NOT_PRESENT:
723 case VDAAE_LU_OFFLINE:
724 case VDAAE_LU_DELETED:
725 case VDAAE_LU_FACTORY_DISABLED:
726 t->new_target_state = TS_NOT_PRESENT;
727 break;
728
729 case VDAAE_LU_ONLINE:
730 case VDAAE_LU_DEGRADED:
731 t->new_target_state = TS_PRESENT;
732 break;
733 }
734 }
735
736 if (t->new_target_state != TS_INVALID) {
737 memcpy(&t->lu_event, &ae->lu, cplen);
738
739 esas2r_disc_queue_event(a, DCDE_DEV_CHANGE);
740 }
741
742 spin_unlock_irqrestore(&a->mem_lock, flags);
743}
744
745
746
747void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
748{
749 union atto_vda_ae *ae =
750 (union atto_vda_ae *)rq->vda_rsp_data->ae_data.event_data;
751 u32 length = le32_to_cpu(rq->func_rsp.ae_rsp.length);
752 union atto_vda_ae *last =
753 (union atto_vda_ae *)(rq->vda_rsp_data->ae_data.event_data
754 + length);
755
756 esas2r_trace_enter();
757 esas2r_trace("length: %d", length);
758
759 if (length > sizeof(struct atto_vda_ae_data)
760 || (length & 3) != 0
761 || length == 0) {
762 esas2r_log(ESAS2R_LOG_WARN,
763 "The AE request response length (%p) is too long: %d",
764 rq, length);
765
766 esas2r_hdebug("aereq->length (0x%x) too long", length);
767 esas2r_bugon();
768
769 last = ae;
770 }
771
772 while (ae < last) {
773 u16 target;
774
775 esas2r_trace("ae: %p", ae);
776 esas2r_trace("ae->hdr: %p", &(ae->hdr));
777
778 length = ae->hdr.bylength;
779
780 if (length > (u32)((u8 *)last - (u8 *)ae)
781 || (length & 3) != 0
782 || length == 0) {
783 esas2r_log(ESAS2R_LOG_CRIT,
784 "the async event length is invalid (%p): %d",
785 ae, length);
786
787 esas2r_hdebug("ae->hdr.length (0x%x) invalid", length);
788 esas2r_bugon();
789
790 break;
791 }
792
793 esas2r_nuxi_ae_data(ae);
794
795 esas2r_queue_fw_event(a, fw_event_vda_ae, ae,
796 sizeof(union atto_vda_ae));
797
798 switch (ae->hdr.bytype) {
799 case VDAAE_HDR_TYPE_RAID:
800
801 if (ae->raid.dwflags & (VDAAE_GROUP_STATE
802 | VDAAE_RBLD_STATE
803 | VDAAE_MEMBER_CHG
804 | VDAAE_PART_CHG)) {
805 esas2r_log(ESAS2R_LOG_INFO,
806 "RAID event received - name:%s rebuild_state:%d group_state:%d",
807 ae->raid.acname,
808 ae->raid.byrebuild_state,
809 ae->raid.bygroup_state);
810 }
811
812 break;
813
814 case VDAAE_HDR_TYPE_LU:
815 esas2r_log(ESAS2R_LOG_INFO,
816 "LUN event received: event:%d target_id:%d LUN:%d state:%d",
817 ae->lu.dwevent,
818 ae->lu.id.tgtlun.wtarget_id,
819 ae->lu.id.tgtlun.bylun,
820 ae->lu.bystate);
821
822 target = ae->lu.id.tgtlun.wtarget_id;
823
824 if (target < ESAS2R_MAX_TARGETS)
825 esas2r_lun_event(a, ae, target, length);
826
827 break;
828
829 case VDAAE_HDR_TYPE_DISK:
830 esas2r_log(ESAS2R_LOG_INFO, "Disk event received");
831 break;
832
833 default:
834
835 /* Silently ignore the rest and let the apps deal with
836 * them.
837 */
838
839 break;
840 }
841
842 ae = (union atto_vda_ae *)((u8 *)ae + length);
843 }
844
845 /* Now requeue it. */
846 esas2r_start_ae_request(a, rq);
847 esas2r_trace_exit();
848}
849
850/* Send an asynchronous event for a chip reset or power management. */
851void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt)
852{
853 struct atto_vda_ae_hdr ae;
854
855 if (pwr_mgt)
856 ae.bytype = VDAAE_HDR_TYPE_PWRMGT;
857 else
858 ae.bytype = VDAAE_HDR_TYPE_RESET;
859
860 ae.byversion = VDAAE_HDR_VER_0;
861 ae.byflags = 0;
862 ae.bylength = (u8)sizeof(struct atto_vda_ae_hdr);
863
864 if (pwr_mgt)
865 esas2r_hdebug("*** sending power management AE ***");
866 else
867 esas2r_hdebug("*** sending reset AE ***");
868
869 esas2r_queue_fw_event(a, fw_event_vda_ae, &ae,
870 sizeof(union atto_vda_ae));
871}
872
873void esas2r_dummy_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
874{}
875
876static void esas2r_check_req_rsp_sense(struct esas2r_adapter *a,
877 struct esas2r_request *rq)
878{
879 u8 snslen, snslen2;
880
881 snslen = snslen2 = rq->func_rsp.scsi_rsp.sense_len;
882
883 if (snslen > rq->sense_len)
884 snslen = rq->sense_len;
885
886 if (snslen) {
887 if (rq->sense_buf)
888 memcpy(rq->sense_buf, rq->data_buf, snslen);
889 else
890 rq->sense_buf = (u8 *)rq->data_buf;
891
892 /* See about possible sense data */
893 if (snslen2 > 0x0c) {
894 u8 *s = (u8 *)rq->data_buf;
895
896 esas2r_trace_enter();
897
898 /* Report LUNS data has changed */
899 if (s[0x0c] == 0x3f && s[0x0d] == 0x0E) {
900 esas2r_trace("rq->target_id: %d",
901 rq->target_id);
902 esas2r_target_state_changed(a, rq->target_id,
903 TS_LUN_CHANGE);
904 }
905
906 esas2r_trace("add_sense_key=%x", s[0x0c]);
907 esas2r_trace("add_sense_qual=%x", s[0x0d]);
908 esas2r_trace_exit();
909 }
910 }
911
912 rq->sense_len = snslen;
913}
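/*
 * Fixed-format SCSI sense data offsets relied on above (per SPC):
 *   byte 0x02 - sense key
 *   byte 0x0c - additional sense code (ASC)
 *   byte 0x0d - additional sense code qualifier (ASCQ)
 * ASC/ASCQ 0x3f/0x0e is REPORTED LUNS DATA HAS CHANGED, which is why
 * that pair triggers a TS_LUN_CHANGE notification above.
 */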
914
915
916void esas2r_complete_request(struct esas2r_adapter *a,
917 struct esas2r_request *rq)
918{
919 if (rq->vrq->scsi.function == VDA_FUNC_FLASH
920 && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
921 esas2r_lock_clear_flags(&a->flags, AF_FLASHING);
922
923 /* See if we setup a callback to do special processing */
924
925 if (rq->interrupt_cb) {
926 (*rq->interrupt_cb)(a, rq);
927
928 if (rq->req_stat == RS_PENDING) {
929 esas2r_start_request(a, rq);
930 return;
931 }
932 }
933
934 if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)
935 && unlikely(rq->req_stat != RS_SUCCESS)) {
936 esas2r_check_req_rsp_sense(a, rq);
937 esas2r_log_request_failure(a, rq);
938 }
939
940 (*rq->comp_cb)(a, rq);
941}
diff --git a/drivers/scsi/esas2r/esas2r_io.c b/drivers/scsi/esas2r/esas2r_io.c
new file mode 100644
index 000000000000..324e2626a08b
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_io.c
@@ -0,0 +1,880 @@
1/*
2 * linux/drivers/scsi/esas2r/esas2r_io.c
3 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
6 * (mailto:linuxdrivers@attotech.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * NO WARRANTY
19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 * solely responsible for determining the appropriateness of using and
24 * distributing the Program and assumes all risks associated with its
25 * exercise of rights under this Agreement, including but not limited to
26 * the risks and costs of program errors, damage to or loss of data,
27 * programs or equipment, and unavailability or interruption of operations.
28 *
29 * DISCLAIMER OF LIABILITY
30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37 *
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
41 * USA.
42 */
43
44#include "esas2r.h"
45
46void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
47{
48 struct esas2r_target *t = NULL;
49 struct esas2r_request *startrq = rq;
50 unsigned long flags;
51
52 if (unlikely(a->flags & (AF_DEGRADED_MODE | AF_POWER_DOWN))) {
53 if (rq->vrq->scsi.function == VDA_FUNC_SCSI)
54 rq->req_stat = RS_SEL2;
55 else
56 rq->req_stat = RS_DEGRADED;
57 } else if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
58 t = a->targetdb + rq->target_id;
59
60 if (unlikely(t >= a->targetdb_end
61 || !(t->flags & TF_USED))) {
62 rq->req_stat = RS_SEL;
63 } else {
64 /* copy in the target ID. */
65 rq->vrq->scsi.target_id = cpu_to_le16(t->virt_targ_id);
66
67 /*
68 * Test if we want to report RS_SEL for missing target.
69			 * Note that if AF_DISC_PENDING is set then this will
70 * go on the defer queue.
71 */
72 if (unlikely(t->target_state != TS_PRESENT
73 && !(a->flags & AF_DISC_PENDING)))
74 rq->req_stat = RS_SEL;
75 }
76 }
77
78 if (unlikely(rq->req_stat != RS_PENDING)) {
79 esas2r_complete_request(a, rq);
80 return;
81 }
82
83 esas2r_trace("rq=%p", rq);
84 esas2r_trace("rq->vrq->scsi.handle=%x", rq->vrq->scsi.handle);
85
86 if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
87 esas2r_trace("rq->target_id=%d", rq->target_id);
88 esas2r_trace("rq->vrq->scsi.flags=%x", rq->vrq->scsi.flags);
89 }
90
91 spin_lock_irqsave(&a->queue_lock, flags);
92
93 if (likely(list_empty(&a->defer_list) &&
94 !(a->flags &
95 (AF_CHPRST_PENDING | AF_FLASHING | AF_DISC_PENDING))))
96 esas2r_local_start_request(a, startrq);
97 else
98 list_add_tail(&startrq->req_list, &a->defer_list);
99
100 spin_unlock_irqrestore(&a->queue_lock, flags);
101}
102
103/*
104 * Starts the specified request. All requests have RS_PENDING set when this
105 * routine is called. The caller is usually esas2r_start_request, but
106 * esas2r_do_deferred_processes will start requests that are deferred.
107 *
108 * The caller must ensure that requests can be started.
109 *
110 * esas2r_start_request will defer a request if there are already requests
111 * waiting or there is a chip reset pending. Once the reset condition clears,
112 * esas2r_do_deferred_processes will call this function to start the request.
113 *
114 * When a request is started, it is placed on the active list and queued to
115 * the controller.
116 */
117void esas2r_local_start_request(struct esas2r_adapter *a,
118 struct esas2r_request *rq)
119{
120 esas2r_trace_enter();
121 esas2r_trace("rq=%p", rq);
122 esas2r_trace("rq->vrq:%p", rq->vrq);
123 esas2r_trace("rq->vrq_md->phys_addr:%x", rq->vrq_md->phys_addr);
124
125 if (unlikely(rq->vrq->scsi.function == VDA_FUNC_FLASH
126 && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT))
127 esas2r_lock_set_flags(&a->flags, AF_FLASHING);
128
129 list_add_tail(&rq->req_list, &a->active_list);
130 esas2r_start_vda_request(a, rq);
131 esas2r_trace_exit();
132 return;
133}
134
135void esas2r_start_vda_request(struct esas2r_adapter *a,
136 struct esas2r_request *rq)
137{
138 struct esas2r_inbound_list_source_entry *element;
139 u32 dw;
140
141 rq->req_stat = RS_STARTED;
142 /*
143 * Calculate the inbound list entry location and the current state of
144 * toggle bit.
145 */
146 a->last_write++;
147 if (a->last_write >= a->list_size) {
148 a->last_write = 0;
149 /* update the toggle bit */
150 if (a->flags & AF_COMM_LIST_TOGGLE)
151 esas2r_lock_clear_flags(&a->flags,
152 AF_COMM_LIST_TOGGLE);
153 else
154 esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
155 }
156
157 element =
158 (struct esas2r_inbound_list_source_entry *)a->inbound_list_md.
159 virt_addr
160 + a->last_write;
161
162 /* Set the VDA request size if it was never modified */
163 if (rq->vda_req_sz == RQ_SIZE_DEFAULT)
164 rq->vda_req_sz = (u16)(a->max_vdareq_size / sizeof(u32));
165
166 element->address = cpu_to_le64(rq->vrq_md->phys_addr);
167 element->length = cpu_to_le32(rq->vda_req_sz);
168
169 /* Update the write pointer */
170 dw = a->last_write;
171
172 if (a->flags & AF_COMM_LIST_TOGGLE)
173 dw |= MU_ILW_TOGGLE;
174
175 esas2r_trace("rq->vrq->scsi.handle:%x", rq->vrq->scsi.handle);
176 esas2r_trace("dw:%x", dw);
177 esas2r_trace("rq->vda_req_sz:%x", rq->vda_req_sz);
178 esas2r_write_register_dword(a, MU_IN_LIST_WRITE, dw);
179}
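/*
 * A minimal sketch of the write-pointer update above. The toggle bit
 * flips on every wrap so the firmware can distinguish a full ring from
 * an empty one when the pointers are equal. The helper is hypothetical
 * and uses a plain XOR where the driver uses its locked flag accessors:
 */
static u32 esas2r_next_inbound_write(struct esas2r_adapter *a)
{
	if (++a->last_write >= a->list_size) {
		a->last_write = 0;
		a->flags ^= AF_COMM_LIST_TOGGLE;	/* flip on wrap */
	}

	return a->last_write |
	       ((a->flags & AF_COMM_LIST_TOGGLE) ? MU_ILW_TOGGLE : 0);
}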
180
181/*
182 * Build the scatter/gather list for an I/O request according to the
183 * specifications placed in the s/g context. The caller must initialize
184 * context prior to the initial call by calling esas2r_sgc_init().
185 */
186bool esas2r_build_sg_list_sge(struct esas2r_adapter *a,
187 struct esas2r_sg_context *sgc)
188{
189 struct esas2r_request *rq = sgc->first_req;
190 union atto_vda_req *vrq = rq->vrq;
191
192 while (sgc->length) {
193 u32 rem = 0;
194 u64 addr;
195 u32 len;
196
197 len = (*sgc->get_phys_addr)(sgc, &addr);
198
199 if (unlikely(len == 0))
200 return false;
201
202 /* if current length is more than what's left, stop there */
203 if (unlikely(len > sgc->length))
204 len = sgc->length;
205
206another_entry:
207 /* limit to a round number less than the maximum length */
208 if (len > SGE_LEN_MAX) {
209 /*
210 * Save the remainder of the split. Whenever we limit
211 * an entry we come back around to build entries out
212 * of the leftover. We do this to prevent multiple
213 * calls to the get_phys_addr() function for an SGE
214 * that is too large.
215 */
216 rem = len - SGE_LEN_MAX;
217 len = SGE_LEN_MAX;
218 }
219
220 /* See if we need to allocate a new SGL */
221 if (unlikely(sgc->sge.a64.curr > sgc->sge.a64.limit)) {
222 u8 sgelen;
223 struct esas2r_mem_desc *sgl;
224
225 /*
226			 * If no SGLs are available, return failure. The
227 * caller can call us later with the current context
228 * to pick up here.
229 */
230 sgl = esas2r_alloc_sgl(a);
231
232 if (unlikely(sgl == NULL))
233 return false;
234
235 /* Calculate the length of the last SGE filled in */
236 sgelen = (u8)((u8 *)sgc->sge.a64.curr
237 - (u8 *)sgc->sge.a64.last);
238
239 /*
240 * Copy the last SGE filled in to the first entry of
241 * the new SGL to make room for the chain entry.
242 */
243 memcpy(sgl->virt_addr, sgc->sge.a64.last, sgelen);
244
245 /* Figure out the new curr pointer in the new segment */
246 sgc->sge.a64.curr =
247 (struct atto_vda_sge *)((u8 *)sgl->virt_addr +
248 sgelen);
249
250 /* Set the limit pointer and build the chain entry */
251 sgc->sge.a64.limit =
252 (struct atto_vda_sge *)((u8 *)sgl->virt_addr
253 + sgl_page_size
254 - sizeof(struct
255 atto_vda_sge));
256 sgc->sge.a64.last->length = cpu_to_le32(
257 SGE_CHAIN | SGE_ADDR_64);
258 sgc->sge.a64.last->address =
259 cpu_to_le64(sgl->phys_addr);
260
261 /*
262 * Now, if there was a previous chain entry, then
263 * update it to contain the length of this segment
264 * and size of this chain. otherwise this is the
265 * first SGL, so set the chain_offset in the request.
266 */
267 if (sgc->sge.a64.chain) {
268 sgc->sge.a64.chain->length |=
269 cpu_to_le32(
270 ((u8 *)(sgc->sge.a64.
271 last + 1)
272 - (u8 *)rq->sg_table->
273 virt_addr)
274 + sizeof(struct atto_vda_sge) *
275 LOBIT(SGE_CHAIN_SZ));
276 } else {
277 vrq->scsi.chain_offset = (u8)
278 ((u8 *)sgc->
279 sge.a64.last -
280 (u8 *)vrq);
281
282 /*
283 * This is the first SGL, so set the
284 * chain_offset and the VDA request size in
285 * the request.
286 */
287 rq->vda_req_sz =
288 (vrq->scsi.chain_offset +
289 sizeof(struct atto_vda_sge) +
290 3)
291 / sizeof(u32);
292 }
293
294 /*
295 * Remember this so when we get a new SGL filled in we
296 * can update the length of this chain entry.
297 */
298 sgc->sge.a64.chain = sgc->sge.a64.last;
299
300 /* Now link the new SGL onto the primary request. */
301 list_add(&sgl->next_desc, &rq->sg_table_head);
302 }
303
304 /* Update last one filled in */
305 sgc->sge.a64.last = sgc->sge.a64.curr;
306
307 /* Build the new SGE and update the S/G context */
308 sgc->sge.a64.curr->length = cpu_to_le32(SGE_ADDR_64 | len);
309		sgc->sge.a64.curr->address = cpu_to_le64(addr);
310 sgc->sge.a64.curr++;
311 sgc->cur_offset += len;
312 sgc->length -= len;
313
314 /*
315 * Check if we previously split an entry. If so we have to
316 * pick up where we left off.
317 */
318 if (rem) {
319 addr += len;
320 len = rem;
321 rem = 0;
322 goto another_entry;
323 }
324 }
325
326 /* Mark the end of the SGL */
327 sgc->sge.a64.last->length |= cpu_to_le32(SGE_LAST);
328
329 /*
330 * If there was a previous chain entry, update the length to indicate
331 * the length of this last segment.
332 */
333 if (sgc->sge.a64.chain) {
334 sgc->sge.a64.chain->length |= cpu_to_le32(
335 ((u8 *)(sgc->sge.a64.curr) -
336 (u8 *)rq->sg_table->virt_addr));
337 } else {
338 u16 reqsize;
339
340 /*
341		 * The entire VDA request was not used, so let's
342		 * set the size of the VDA request to be DMA'd.
343 */
344 reqsize =
345 ((u16)((u8 *)sgc->sge.a64.last - (u8 *)vrq)
346 + sizeof(struct atto_vda_sge) + 3) / sizeof(u32);
347
348 /*
349 * Only update the request size if it is bigger than what is
350 * already there. We can come in here twice for some management
351 * commands.
352 */
353 if (reqsize > rq->vda_req_sz)
354 rq->vda_req_sz = reqsize;
355 }
356 return true;
357}
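/*
 * The rem/goto pattern above, restated as a plain loop for clarity; the
 * driver keeps the goto so that (*sgc->get_phys_addr)() is called only
 * once per physical segment. emit_sge() here is hypothetical:
 *
 *	while (len) {
 *		u32 chunk = min_t(u32, len, SGE_LEN_MAX);
 *
 *		emit_sge(addr, chunk);
 *		addr += chunk;
 *		len -= chunk;
 *	}
 */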
358
359
360/*
361 * Create PRD list for each I-block consumed by the command. This routine
362 * determines how much data is required from each I-block being consumed
363 * by the command. The first and last I-blocks can be partial, and all of
364 * the I-blocks in between cover a full I-block of data.
365 *
366 * The interleave size is used to determine the number of bytes in the 1st
367 * I-block; the remaining I-blocks hold what remains.
368 */
369static bool esas2r_build_prd_iblk(struct esas2r_adapter *a,
370 struct esas2r_sg_context *sgc)
371{
372 struct esas2r_request *rq = sgc->first_req;
373 u64 addr;
374 u32 len;
375 struct esas2r_mem_desc *sgl;
376 u32 numchain = 1;
377 u32 rem = 0;
378
379 while (sgc->length) {
380 /* Get the next address/length pair */
381
382 len = (*sgc->get_phys_addr)(sgc, &addr);
383
384 if (unlikely(len == 0))
385 return false;
386
387 /* If current length is more than what's left, stop there */
388
389 if (unlikely(len > sgc->length))
390 len = sgc->length;
391
392another_entry:
393 /* Limit to a round number less than the maximum length */
394
395 if (len > PRD_LEN_MAX) {
396 /*
397			 * Save the remainder of the split. Whenever we limit
398 * an entry we come back around to build entries out
399 * of the leftover. We do this to prevent multiple
400 * calls to the get_phys_addr() function for an SGE
401 * that is too large.
402 */
403 rem = len - PRD_LEN_MAX;
404 len = PRD_LEN_MAX;
405 }
406
407 /* See if we need to allocate a new SGL */
408 if (sgc->sge.prd.sge_cnt == 0) {
409 if (len == sgc->length) {
410 /*
411 * We only have 1 PRD entry left.
412 * It can be placed where the chain
413 * entry would have gone
414 */
415
416 /* Build the simple SGE */
417 sgc->sge.prd.curr->ctl_len = cpu_to_le32(
418 PRD_DATA | len);
419 sgc->sge.prd.curr->address = cpu_to_le64(addr);
420
421 /* Adjust length related fields */
422 sgc->cur_offset += len;
423 sgc->length -= len;
424
425 /* We use the reserved chain entry for data */
426 numchain = 0;
427
428 break;
429 }
430
431 if (sgc->sge.prd.chain) {
432 /*
433				 * Fill in the entry count of the current SGL
434				 * in the previous chain entry; the current SGL
435				 * may not be completely full.
436 */
437
438 sgc->sge.prd.chain->ctl_len |= cpu_to_le32(
439 sgc->sge.prd.sgl_max_cnt);
440 }
441
442 /*
443			 * If no SGLs are available, return failure. The
444 * caller can call us later with the current context
445 * to pick up here.
446 */
447
448 sgl = esas2r_alloc_sgl(a);
449
450 if (unlikely(sgl == NULL))
451 return false;
452
453 /*
454 * Link the new SGL onto the chain
455 * They are in reverse order
456 */
457 list_add(&sgl->next_desc, &rq->sg_table_head);
458
459 /*
460 * An SGL was just filled in and we are starting
461 * a new SGL. Prime the chain of the ending SGL with
462 * info that points to the new SGL. The length gets
463 * filled in when the new SGL is filled or ended
464 */
465
466 sgc->sge.prd.chain = sgc->sge.prd.curr;
467
468 sgc->sge.prd.chain->ctl_len = cpu_to_le32(PRD_CHAIN);
469 sgc->sge.prd.chain->address =
470 cpu_to_le64(sgl->phys_addr);
471
472 /*
473 * Start a new segment.
474 * Take one away and save for chain SGE
475 */
476
477			sgc->sge.prd.curr =
478				(struct atto_physical_region_description *)
479				sgl->virt_addr;
481 sgc->sge.prd.sge_cnt = sgc->sge.prd.sgl_max_cnt - 1;
482 }
483
484 sgc->sge.prd.sge_cnt--;
485 /* Build the simple SGE */
486 sgc->sge.prd.curr->ctl_len = cpu_to_le32(PRD_DATA | len);
487 sgc->sge.prd.curr->address = cpu_to_le64(addr);
488
489 /* Used another element. Point to the next one */
490
491 sgc->sge.prd.curr++;
492
493 /* Adjust length related fields */
494
495 sgc->cur_offset += len;
496 sgc->length -= len;
497
498 /*
499 * Check if we previously split an entry. If so we have to
500 * pick up where we left off.
501 */
502
503 if (rem) {
504 addr += len;
505 len = rem;
506 rem = 0;
507 goto another_entry;
508 }
509 }
510
511 if (!list_empty(&rq->sg_table_head)) {
512 if (sgc->sge.prd.chain) {
513 sgc->sge.prd.chain->ctl_len |=
514 cpu_to_le32(sgc->sge.prd.sgl_max_cnt
515 - sgc->sge.prd.sge_cnt
516 - numchain);
517 }
518 }
519
520 return true;
521}
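/*
 * Worked example of the I-block split performed by the caller below
 * (values assumed for illustration): with inter_block = 256 LBAs and a
 * starting LBA of 0x1F0, the first PRD list covers
 * 256 - (0x1F0 & 0xFF) = 16 LBAs; each subsequent list covers a full
 * 256-LBA I-block until the transfer length is exhausted.
 */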
522
523bool esas2r_build_sg_list_prd(struct esas2r_adapter *a,
524 struct esas2r_sg_context *sgc)
525{
526 struct esas2r_request *rq = sgc->first_req;
527 u32 len = sgc->length;
528 struct esas2r_target *t = a->targetdb + rq->target_id;
529 u8 is_i_o = 0;
530 u16 reqsize;
531 struct atto_physical_region_description *curr_iblk_chn;
532 u8 *cdb = (u8 *)&rq->vrq->scsi.cdb[0];
533
534 /*
535 * extract LBA from command so we can determine
536 * the I-Block boundary
537 */
538
539 if (rq->vrq->scsi.function == VDA_FUNC_SCSI
540 && t->target_state == TS_PRESENT
541 && !(t->flags & TF_PASS_THRU)) {
542 u32 lbalo = 0;
543
544 switch (rq->vrq->scsi.cdb[0]) {
545 case READ_16:
546 case WRITE_16:
547 {
548 lbalo =
549 MAKEDWORD(MAKEWORD(cdb[9],
550 cdb[8]),
551 MAKEWORD(cdb[7],
552 cdb[6]));
553 is_i_o = 1;
554 break;
555 }
556
557 case READ_12:
558 case WRITE_12:
559 case READ_10:
560 case WRITE_10:
561 {
562 lbalo =
563 MAKEDWORD(MAKEWORD(cdb[5],
564 cdb[4]),
565 MAKEWORD(cdb[3],
566 cdb[2]));
567 is_i_o = 1;
568 break;
569 }
570
571 case READ_6:
572 case WRITE_6:
573 {
574 lbalo =
575 MAKEDWORD(MAKEWORD(cdb[3],
576 cdb[2]),
577 MAKEWORD(cdb[1] & 0x1F,
578 0));
579 is_i_o = 1;
580 break;
581 }
582
583 default:
584 break;
585 }
586
587 if (is_i_o) {
588 u32 startlba;
589
590 rq->vrq->scsi.iblk_cnt_prd = 0;
591
592 /* Determine size of 1st I-block PRD list */
593 startlba = t->inter_block - (lbalo & (t->inter_block -
594 1));
595 sgc->length = startlba * t->block_size;
596
597			/* Check if the 1st I-block chain starts at the base of an I-block */
598 if ((lbalo & (t->inter_block - 1)) == 0)
599 rq->flags |= RF_1ST_IBLK_BASE;
600
601 if (sgc->length > len)
602 sgc->length = len;
603 } else {
604 sgc->length = len;
605 }
606 } else {
607 sgc->length = len;
608 }
609
610 /* get our starting chain address */
611
612 curr_iblk_chn =
613 (struct atto_physical_region_description *)sgc->sge.a64.curr;
614
615	sgc->sge.prd.sgl_max_cnt = sgl_page_size /
616		sizeof(struct atto_physical_region_description);
618
619 /* create all of the I-block PRD lists */
620
621 while (len) {
622 sgc->sge.prd.sge_cnt = 0;
623 sgc->sge.prd.chain = NULL;
624 sgc->sge.prd.curr = curr_iblk_chn;
625
626 /* increment to next I-Block */
627
628 len -= sgc->length;
629
630 /* go build the next I-Block PRD list */
631
632 if (unlikely(!esas2r_build_prd_iblk(a, sgc)))
633 return false;
634
635 curr_iblk_chn++;
636
637 if (is_i_o) {
638 rq->vrq->scsi.iblk_cnt_prd++;
639
640 if (len > t->inter_byte)
641 sgc->length = t->inter_byte;
642 else
643 sgc->length = len;
644 }
645 }
646
647 /* figure out the size used of the VDA request */
648
649 reqsize = ((u16)((u8 *)curr_iblk_chn - (u8 *)rq->vrq))
650 / sizeof(u32);
651
652 /*
653 * only update the request size if it is bigger than what is
654 * already there. we can come in here twice for some management
655 * commands.
656 */
657
658 if (reqsize > rq->vda_req_sz)
659 rq->vda_req_sz = reqsize;
660
661 return true;
662}
663
664static void esas2r_handle_pending_reset(struct esas2r_adapter *a, u32 currtime)
665{
666 u32 delta = currtime - a->chip_init_time;
667
668 if (delta <= ESAS2R_CHPRST_WAIT_TIME) {
669 /* Wait before accessing registers */
670 } else if (delta >= ESAS2R_CHPRST_TIME) {
671 /*
672 * The last reset failed so try again. Reset
673 * processing will give up after three tries.
674 */
675 esas2r_local_reset_adapter(a);
676 } else {
677 /* We can now see if the firmware is ready */
678 u32 doorbell;
679
680 doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
681 if (doorbell == 0xFFFFFFFF || !(doorbell & DRBL_FORCE_INT)) {
682 esas2r_force_interrupt(a);
683 } else {
684 u32 ver = (doorbell & DRBL_FW_VER_MSK);
685
686 /* Driver supports API version 0 and 1 */
687 esas2r_write_register_dword(a, MU_DOORBELL_OUT,
688 doorbell);
689 if (ver == DRBL_FW_VER_0) {
690 esas2r_lock_set_flags(&a->flags,
691 AF_CHPRST_DETECTED);
692 esas2r_lock_set_flags(&a->flags,
693 AF_LEGACY_SGE_MODE);
694
695 a->max_vdareq_size = 128;
696 a->build_sgl = esas2r_build_sg_list_sge;
697 } else if (ver == DRBL_FW_VER_1) {
698 esas2r_lock_set_flags(&a->flags,
699 AF_CHPRST_DETECTED);
700 esas2r_lock_clear_flags(&a->flags,
701 AF_LEGACY_SGE_MODE);
702
703 a->max_vdareq_size = 1024;
704 a->build_sgl = esas2r_build_sg_list_prd;
705 } else {
706 esas2r_local_reset_adapter(a);
707 }
708 }
709 }
710}
711
712
713/* This function must be called once per timer tick */
714void esas2r_timer_tick(struct esas2r_adapter *a)
715{
716 u32 currtime = jiffies_to_msecs(jiffies);
717 u32 deltatime = currtime - a->last_tick_time;
718
719 a->last_tick_time = currtime;
720
721 /* count down the uptime */
722 if (a->chip_uptime
723 && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) {
724 if (deltatime >= a->chip_uptime)
725 a->chip_uptime = 0;
726 else
727 a->chip_uptime -= deltatime;
728 }
729
730 if (a->flags & AF_CHPRST_PENDING) {
731 if (!(a->flags & AF_CHPRST_NEEDED)
732 && !(a->flags & AF_CHPRST_DETECTED))
733 esas2r_handle_pending_reset(a, currtime);
734 } else {
735 if (a->flags & AF_DISC_PENDING)
736 esas2r_disc_check_complete(a);
737
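		/*
		 * Heartbeat: AF_HEARTBEAT is set here and an interrupt is
		 * forced; the interrupt path is expected to clear the flag.
		 * If it is still set ESAS2R_HEARTBEAT_TIME ms later, the
		 * firmware is deemed unresponsive and the adapter is reset.
		 */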
738 if (a->flags & AF_HEARTBEAT_ENB) {
739 if (a->flags & AF_HEARTBEAT) {
740 if ((currtime - a->heartbeat_time) >=
741 ESAS2R_HEARTBEAT_TIME) {
742 esas2r_lock_clear_flags(&a->flags,
743 AF_HEARTBEAT);
744 esas2r_hdebug("heartbeat failed");
745 esas2r_log(ESAS2R_LOG_CRIT,
746 "heartbeat failed");
747 esas2r_bugon();
748 esas2r_local_reset_adapter(a);
749 }
750 } else {
751 esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT);
752 a->heartbeat_time = currtime;
753 esas2r_force_interrupt(a);
754 }
755 }
756 }
757
758 if (atomic_read(&a->disable_cnt) == 0)
759 esas2r_do_deferred_processes(a);
760}
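/*
 * Illustrative sketch (not part of this patch): one way the tick could be
 * driven is from a standard kernel timer that re-arms itself.  The timer
 * field name and the 100 ms interval are assumptions made purely for
 * illustration.
 *
 *	static void esas2r_timer_callback(unsigned long context)
 *	{
 *		struct esas2r_adapter *a = (struct esas2r_adapter *)context;
 *
 *		esas2r_timer_tick(a);
 *		mod_timer(&a->timer, jiffies + msecs_to_jiffies(100));
 *	}
 *
 *	setup_timer(&a->timer, esas2r_timer_callback, (unsigned long)a);
 *	mod_timer(&a->timer, jiffies + msecs_to_jiffies(100));
 */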
761
762/*
763 * Send the specified task management function to the target and LUN
764 * specified in rqaux. In addition, immediately abort any commands that
765 * are queued but not sent to the device according to the rules specified
766 * by the task management function.
767 */
768bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
769 struct esas2r_request *rqaux, u8 task_mgt_func)
770{
771 u16 targetid = rqaux->target_id;
772 u8 lun = (u8)le32_to_cpu(rqaux->vrq->scsi.flags);
773 bool ret = false;
774 struct esas2r_request *rq;
775 struct list_head *next, *element;
776 unsigned long flags;
777
778 LIST_HEAD(comp_list);
779
780 esas2r_trace_enter();
781 esas2r_trace("rqaux:%p", rqaux);
782 esas2r_trace("task_mgt_func:%x", task_mgt_func);
783 spin_lock_irqsave(&a->queue_lock, flags);
784
785 /* search the defer queue looking for requests for the device */
786 list_for_each_safe(element, next, &a->defer_list) {
787 rq = list_entry(element, struct esas2r_request, req_list);
788
789 if (rq->vrq->scsi.function == VDA_FUNC_SCSI
790 && rq->target_id == targetid
791 && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
792 || task_mgt_func == 0x20)) { /* target reset */
793 /* Found a request affected by the task management */
794 if (rq->req_stat == RS_PENDING) {
795 /*
796 * The request is pending or waiting. We can
797				 * safely complete the request now.
798 */
799 if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
800 list_add_tail(&rq->comp_list,
801 &comp_list);
802 }
803 }
804 }
805
806 /* Send the task management request to the firmware */
807 rqaux->sense_len = 0;
808 rqaux->vrq->scsi.length = 0;
809 rqaux->target_id = targetid;
810 rqaux->vrq->scsi.flags |= cpu_to_le32(lun);
811 memset(rqaux->vrq->scsi.cdb, 0, sizeof(rqaux->vrq->scsi.cdb));
812 rqaux->vrq->scsi.flags |=
813 cpu_to_le16(task_mgt_func * LOBIT(FCP_CMND_TM_MASK));
814
815 if (a->flags & AF_FLASHING) {
816		/* Assume success. If there are active requests, return busy. */
817 rqaux->req_stat = RS_SUCCESS;
818
819 list_for_each_safe(element, next, &a->active_list) {
820 rq = list_entry(element, struct esas2r_request,
821 req_list);
822 if (rq->vrq->scsi.function == VDA_FUNC_SCSI
823 && rq->target_id == targetid
824 && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
825 || task_mgt_func == 0x20)) /* target reset */
826 rqaux->req_stat = RS_BUSY;
827 }
828
829 ret = true;
830 }
831
832 spin_unlock_irqrestore(&a->queue_lock, flags);
833
834 if (!(a->flags & AF_FLASHING))
835 esas2r_start_request(a, rqaux);
836
837 esas2r_comp_list_drain(a, &comp_list);
838
839 if (atomic_read(&a->disable_cnt) == 0)
840 esas2r_do_deferred_processes(a);
841
842 esas2r_trace_exit();
843
844 return ret;
845}
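/*
 * Usage sketch (illustrative only): a target reset could be issued by
 * allocating a request and passing 0x20, the target-reset task management
 * function tested for above, which also aborts queued requests for every
 * LUN on that target.  "tid" is a placeholder for a real target ID.
 *
 *	struct esas2r_request *rqaux = esas2r_alloc_request(a);
 *
 *	if (rqaux) {
 *		rqaux->target_id = tid;
 *		esas2r_send_task_mgmt(a, rqaux, 0x20);
 *	}
 */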
846
847void esas2r_reset_bus(struct esas2r_adapter *a)
848{
849 esas2r_log(ESAS2R_LOG_INFO, "performing a bus reset");
850
851 if (!(a->flags & AF_DEGRADED_MODE)
852 && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) {
853 esas2r_lock_set_flags(&a->flags, AF_BUSRST_NEEDED);
854 esas2r_lock_set_flags(&a->flags, AF_BUSRST_PENDING);
855 esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
856
857 esas2r_schedule_tasklet(a);
858 }
859}
860
861bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq,
862 u8 status)
863{
864 esas2r_trace_enter();
865 esas2r_trace("rq:%p", rq);
866 list_del_init(&rq->req_list);
867 if (rq->timeout > RQ_MAX_TIMEOUT) {
868 /*
869 * The request timed out, but we could not abort it because a
870 * chip reset occurred. Return busy status.
871 */
872 rq->req_stat = RS_BUSY;
873 esas2r_trace_exit();
874 return true;
875 }
876
877 rq->req_stat = status;
878 esas2r_trace_exit();
879 return true;
880}
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
new file mode 100644
index 000000000000..f3d0cb885972
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -0,0 +1,2110 @@
1/*
2 * linux/drivers/scsi/esas2r/esas2r_ioctl.c
3 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
6 * (mailto:linuxdrivers@attotech.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * NO WARRANTY
19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 * solely responsible for determining the appropriateness of using and
24 * distributing the Program and assumes all risks associated with its
25 * exercise of rights under this Agreement, including but not limited to
26 * the risks and costs of program errors, damage to or loss of data,
27 * programs or equipment, and unavailability or interruption of operations.
28 *
29 * DISCLAIMER OF LIABILITY
30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37 *
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
41 * USA.
42 */
43
44#include "esas2r.h"
45
46/*
47 * Buffered ioctl handlers. A buffered ioctl is one which requires that we
48 * allocate a DMA-able memory area to communicate with the firmware. In
49 * order to prevent continually allocating and freeing consistent memory,
50 * we will allocate a global buffer the first time we need it and re-use
51 * it for subsequent ioctl calls that require it.
52 */
53
54u8 *esas2r_buffered_ioctl;
55dma_addr_t esas2r_buffered_ioctl_addr;
56u32 esas2r_buffered_ioctl_size;
57struct pci_dev *esas2r_buffered_ioctl_pcid;
58
59static DEFINE_SEMAPHORE(buffered_ioctl_semaphore);
60typedef int (*BUFFERED_IOCTL_CALLBACK)(struct esas2r_adapter *,
61 struct esas2r_request *,
62 struct esas2r_sg_context *,
63 void *);
64typedef void (*BUFFERED_IOCTL_DONE_CALLBACK)(struct esas2r_adapter *,
65 struct esas2r_request *, void *);
66
67struct esas2r_buffered_ioctl {
68 struct esas2r_adapter *a;
69 void *ioctl;
70 u32 length;
71 u32 control_code;
72 u32 offset;
73 BUFFERED_IOCTL_CALLBACK
74 callback;
75 void *context;
76 BUFFERED_IOCTL_DONE_CALLBACK
77 done_callback;
78 void *done_context;
79
80};
81
82static void complete_fm_api_req(struct esas2r_adapter *a,
83 struct esas2r_request *rq)
84{
85 a->fm_api_command_done = 1;
86 wake_up_interruptible(&a->fm_api_waiter);
87}
88
89/* Callbacks for building scatter/gather lists for FM API requests */
90static u32 get_physaddr_fm_api(struct esas2r_sg_context *sgc, u64 *addr)
91{
92 struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
93 int offset = sgc->cur_offset - a->save_offset;
94
95 (*addr) = a->firmware.phys + offset;
96 return a->firmware.orig_len - offset;
97}
98
99static u32 get_physaddr_fm_api_header(struct esas2r_sg_context *sgc, u64 *addr)
100{
101 struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
102 int offset = sgc->cur_offset - a->save_offset;
103
104 (*addr) = a->firmware.header_buff_phys + offset;
105 return sizeof(struct esas2r_flash_img) - offset;
106}
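/*
 * Both callbacks above follow the get_phys_addr contract used by the
 * S/G list builders: given the current offset into the transfer, store
 * the matching DMA address in *addr and return the number of physically
 * contiguous bytes available from that point.
 */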
107
108/* Handle EXPRESS_IOCTL_RW_FIRMWARE ioctl with img_type = FW_IMG_FM_API. */
109static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
110{
111 struct esas2r_request *rq;
112
113 if (down_interruptible(&a->fm_api_semaphore)) {
114 fi->status = FI_STAT_BUSY;
115 return;
116 }
117
118 rq = esas2r_alloc_request(a);
119 if (rq == NULL) {
120 up(&a->fm_api_semaphore);
121 fi->status = FI_STAT_BUSY;
122 return;
123 }
124
125 if (fi == &a->firmware.header) {
126		a->firmware.header_buff =
127			dma_alloc_coherent(&a->pcid->dev,
128					   sizeof(struct esas2r_flash_img),
129					   (dma_addr_t *)&a->firmware.header_buff_phys,
130					   GFP_KERNEL);
134
135		if (a->firmware.header_buff == NULL) {
136			esas2r_debug("failed to allocate header buffer!");
137			fi->status = FI_STAT_BUSY;
138			up(&a->fm_api_semaphore);
139			esas2r_free_request(a, rq);
140			return;
141		}
140
141 memcpy(a->firmware.header_buff, fi,
142 sizeof(struct esas2r_flash_img));
143 a->save_offset = a->firmware.header_buff;
144 a->fm_api_sgc.get_phys_addr =
145 (PGETPHYSADDR)get_physaddr_fm_api_header;
146 } else {
147 a->save_offset = (u8 *)fi;
148 a->fm_api_sgc.get_phys_addr =
149 (PGETPHYSADDR)get_physaddr_fm_api;
150 }
151
152 rq->comp_cb = complete_fm_api_req;
153 a->fm_api_command_done = 0;
154 a->fm_api_sgc.cur_offset = a->save_offset;
155
156 if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq,
157 &a->fm_api_sgc))
158 goto all_done;
159
160 /* Now wait around for it to complete. */
161 while (!a->fm_api_command_done)
162 wait_event_interruptible(a->fm_api_waiter,
163 a->fm_api_command_done);
164all_done:
165 if (fi == &a->firmware.header) {
166 memcpy(fi, a->firmware.header_buff,
167 sizeof(struct esas2r_flash_img));
168
169 dma_free_coherent(&a->pcid->dev,
170 (size_t)sizeof(struct esas2r_flash_img),
171 a->firmware.header_buff,
172 (dma_addr_t)a->firmware.header_buff_phys);
173 }
174
175 up(&a->fm_api_semaphore);
176	esas2r_free_request(a, rq);
177 return;
178
179}
180
181static void complete_nvr_req(struct esas2r_adapter *a,
182 struct esas2r_request *rq)
183{
184 a->nvram_command_done = 1;
185 wake_up_interruptible(&a->nvram_waiter);
186}
187
188/* Callback for building scatter/gather lists for buffered ioctls */
189static u32 get_physaddr_buffered_ioctl(struct esas2r_sg_context *sgc,
190 u64 *addr)
191{
192 int offset = (u8 *)sgc->cur_offset - esas2r_buffered_ioctl;
193
194 (*addr) = esas2r_buffered_ioctl_addr + offset;
195 return esas2r_buffered_ioctl_size - offset;
196}
197
198static void complete_buffered_ioctl_req(struct esas2r_adapter *a,
199 struct esas2r_request *rq)
200{
201 a->buffered_ioctl_done = 1;
202 wake_up_interruptible(&a->buffered_ioctl_waiter);
203}
204
205static u8 handle_buffered_ioctl(struct esas2r_buffered_ioctl *bi)
206{
207 struct esas2r_adapter *a = bi->a;
208 struct esas2r_request *rq;
209 struct esas2r_sg_context sgc;
210 u8 result = IOCTL_SUCCESS;
211
212 if (down_interruptible(&buffered_ioctl_semaphore))
213 return IOCTL_OUT_OF_RESOURCES;
214
215 /* allocate a buffer or use the existing buffer. */
216 if (esas2r_buffered_ioctl) {
217 if (esas2r_buffered_ioctl_size < bi->length) {
218 /* free the too-small buffer and get a new one */
219 dma_free_coherent(&a->pcid->dev,
220 (size_t)esas2r_buffered_ioctl_size,
221 esas2r_buffered_ioctl,
222 esas2r_buffered_ioctl_addr);
223
224 goto allocate_buffer;
225 }
226 } else {
227allocate_buffer:
228 esas2r_buffered_ioctl_size = bi->length;
229 esas2r_buffered_ioctl_pcid = a->pcid;
230		esas2r_buffered_ioctl =
231			dma_alloc_coherent(&a->pcid->dev,
232					   (size_t)esas2r_buffered_ioctl_size,
233					   &esas2r_buffered_ioctl_addr,
234					   GFP_KERNEL);
236 }
237
238 if (!esas2r_buffered_ioctl) {
239 esas2r_log(ESAS2R_LOG_CRIT,
240 "could not allocate %d bytes of consistent memory "
241 "for a buffered ioctl!",
242 bi->length);
243
244 esas2r_debug("buffered ioctl alloc failure");
245 result = IOCTL_OUT_OF_RESOURCES;
246 goto exit_cleanly;
247 }
248
249 memcpy(esas2r_buffered_ioctl, bi->ioctl, bi->length);
250
251 rq = esas2r_alloc_request(a);
252 if (rq == NULL) {
253 esas2r_log(ESAS2R_LOG_CRIT,
254 "could not allocate an internal request");
255
256 result = IOCTL_OUT_OF_RESOURCES;
257 esas2r_debug("buffered ioctl - no requests");
258 goto exit_cleanly;
259 }
260
261 a->buffered_ioctl_done = 0;
262 rq->comp_cb = complete_buffered_ioctl_req;
263 sgc.cur_offset = esas2r_buffered_ioctl + bi->offset;
264 sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_buffered_ioctl;
265 sgc.length = esas2r_buffered_ioctl_size;
266
267 if (!(*bi->callback)(a, rq, &sgc, bi->context)) {
268 /* completed immediately, no need to wait */
269 a->buffered_ioctl_done = 0;
270 goto free_andexit_cleanly;
271 }
272
273 /* now wait around for it to complete. */
274 while (!a->buffered_ioctl_done)
275 wait_event_interruptible(a->buffered_ioctl_waiter,
276 a->buffered_ioctl_done);
277
278free_andexit_cleanly:
279 if (result == IOCTL_SUCCESS && bi->done_callback)
280 (*bi->done_callback)(a, rq, bi->done_context);
281
282 esas2r_free_request(a, rq);
283
284exit_cleanly:
285 if (result == IOCTL_SUCCESS)
286 memcpy(bi->ioctl, esas2r_buffered_ioctl, bi->length);
287
288 up(&buffered_ioctl_semaphore);
289 return result;
290}
291
292/* SMP ioctl support */
293static int smp_ioctl_callback(struct esas2r_adapter *a,
294 struct esas2r_request *rq,
295 struct esas2r_sg_context *sgc, void *context)
296{
297 struct atto_ioctl_smp *si =
298 (struct atto_ioctl_smp *)esas2r_buffered_ioctl;
299
300 esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
301 esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_SMP);
302
303 if (!esas2r_build_sg_list(a, rq, sgc)) {
304 si->status = ATTO_STS_OUT_OF_RSRC;
305 return false;
306 }
307
308 esas2r_start_request(a, rq);
309 return true;
310}
311
312static u8 handle_smp_ioctl(struct esas2r_adapter *a, struct atto_ioctl_smp *si)
313{
314 struct esas2r_buffered_ioctl bi;
315
316 memset(&bi, 0, sizeof(bi));
317
318 bi.a = a;
319 bi.ioctl = si;
320 bi.length = sizeof(struct atto_ioctl_smp)
321 + le32_to_cpu(si->req_length)
322 + le32_to_cpu(si->rsp_length);
323 bi.offset = 0;
324 bi.callback = smp_ioctl_callback;
325 return handle_buffered_ioctl(&bi);
326}
327
328
329/* CSMI ioctl support */
330static void esas2r_csmi_ioctl_tunnel_comp_cb(struct esas2r_adapter *a,
331 struct esas2r_request *rq)
332{
333 rq->target_id = le16_to_cpu(rq->func_rsp.ioctl_rsp.csmi.target_id);
334 rq->vrq->scsi.flags |= cpu_to_le32(rq->func_rsp.ioctl_rsp.csmi.lun);
335
336 /* Now call the original completion callback. */
337 (*rq->aux_req_cb)(a, rq);
338}
339
340/* Tunnel a CSMI IOCTL to the back end driver for processing. */
341static bool csmi_ioctl_tunnel(struct esas2r_adapter *a,
342 union atto_ioctl_csmi *ci,
343 struct esas2r_request *rq,
344 struct esas2r_sg_context *sgc,
345 u32 ctrl_code,
346 u16 target_id)
347{
348 struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl;
349
350 if (a->flags & AF_DEGRADED_MODE)
351 return false;
352
353 esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
354 esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_CSMI);
355 ioctl->csmi.ctrl_code = cpu_to_le32(ctrl_code);
356 ioctl->csmi.target_id = cpu_to_le16(target_id);
357 ioctl->csmi.lun = (u8)le32_to_cpu(rq->vrq->scsi.flags);
358
359 /*
360 * Always usurp the completion callback since the interrupt callback
361 * mechanism may be used.
362 */
363 rq->aux_req_cx = ci;
364 rq->aux_req_cb = rq->comp_cb;
365 rq->comp_cb = esas2r_csmi_ioctl_tunnel_comp_cb;
366
367 if (!esas2r_build_sg_list(a, rq, sgc))
368 return false;
369
370 esas2r_start_request(a, rq);
371 return true;
372}
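/*
 * The callback swap above is the usual pattern in this driver: the
 * original completion routine is parked in aux_req_cb, the tunnel's own
 * callback is installed as comp_cb, and that callback chains back to
 * aux_req_cb once the response fields have been copied out.
 */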
373
374static bool check_lun(struct scsi_lun lun)
375{
376 bool result;
377
378 result = ((lun.scsi_lun[7] == 0) &&
379 (lun.scsi_lun[6] == 0) &&
380 (lun.scsi_lun[5] == 0) &&
381 (lun.scsi_lun[4] == 0) &&
382 (lun.scsi_lun[3] == 0) &&
383 (lun.scsi_lun[2] == 0) &&
384/* Byte 1 is intentionally skipped */
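/* (byte 1 holds the single-level LUN value) */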
385 (lun.scsi_lun[0] == 0));
386
387 return result;
388}
389
390static int csmi_ioctl_callback(struct esas2r_adapter *a,
391 struct esas2r_request *rq,
392 struct esas2r_sg_context *sgc, void *context)
393{
394 struct atto_csmi *ci = (struct atto_csmi *)context;
395 union atto_ioctl_csmi *ioctl_csmi =
396 (union atto_ioctl_csmi *)esas2r_buffered_ioctl;
397 u8 path = 0;
398 u8 tid = 0;
399 u8 lun = 0;
400 u32 sts = CSMI_STS_SUCCESS;
401 struct esas2r_target *t;
402 unsigned long flags;
403
404 if (ci->control_code == CSMI_CC_GET_DEV_ADDR) {
405 struct atto_csmi_get_dev_addr *gda = &ci->data.dev_addr;
406
407 path = gda->path_id;
408 tid = gda->target_id;
409 lun = gda->lun;
410 } else if (ci->control_code == CSMI_CC_TASK_MGT) {
411 struct atto_csmi_task_mgmt *tm = &ci->data.tsk_mgt;
412
413 path = tm->path_id;
414 tid = tm->target_id;
415 lun = tm->lun;
416 }
417
418 if (path > 0 || tid > ESAS2R_MAX_ID) {
419 rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(
420 CSMI_STS_INV_PARAM);
421 return false;
422 }
423
424 rq->target_id = tid;
425 rq->vrq->scsi.flags |= cpu_to_le32(lun);
426
427 switch (ci->control_code) {
428 case CSMI_CC_GET_DRVR_INFO:
429 {
430 struct atto_csmi_get_driver_info *gdi = &ioctl_csmi->drvr_info;
431
432 strcpy(gdi->description, esas2r_get_model_name(a));
433 gdi->csmi_major_rev = CSMI_MAJOR_REV;
434 gdi->csmi_minor_rev = CSMI_MINOR_REV;
435 break;
436 }
437
438 case CSMI_CC_GET_CNTLR_CFG:
439 {
440 struct atto_csmi_get_cntlr_cfg *gcc = &ioctl_csmi->cntlr_cfg;
441
442 gcc->base_io_addr = 0;
443 pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_2,
444 &gcc->base_memaddr_lo);
445 pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_3,
446 &gcc->base_memaddr_hi);
447 gcc->board_id = MAKEDWORD(a->pcid->subsystem_device,
448 a->pcid->subsystem_vendor);
449 gcc->slot_num = CSMI_SLOT_NUM_UNKNOWN;
450 gcc->cntlr_class = CSMI_CNTLR_CLASS_HBA;
451 gcc->io_bus_type = CSMI_BUS_TYPE_PCI;
452 gcc->pci_addr.bus_num = a->pcid->bus->number;
453 gcc->pci_addr.device_num = PCI_SLOT(a->pcid->devfn);
454 gcc->pci_addr.function_num = PCI_FUNC(a->pcid->devfn);
455
456 memset(gcc->serial_num, 0, sizeof(gcc->serial_num));
457
458 gcc->major_rev = LOBYTE(LOWORD(a->fw_version));
459 gcc->minor_rev = HIBYTE(LOWORD(a->fw_version));
460 gcc->build_rev = LOBYTE(HIWORD(a->fw_version));
461 gcc->release_rev = HIBYTE(HIWORD(a->fw_version));
462 gcc->bios_major_rev = HIBYTE(HIWORD(a->flash_ver));
463 gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver));
464 gcc->bios_build_rev = LOWORD(a->flash_ver);
465
466 if (a->flags2 & AF2_THUNDERLINK)
467 gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA
468 | CSMI_CNTLRF_SATA_HBA;
469 else
470 gcc->cntlr_flags = CSMI_CNTLRF_SAS_RAID
471 | CSMI_CNTLRF_SATA_RAID;
472
473 gcc->rrom_major_rev = 0;
474 gcc->rrom_minor_rev = 0;
475 gcc->rrom_build_rev = 0;
476 gcc->rrom_release_rev = 0;
477 gcc->rrom_biosmajor_rev = 0;
478 gcc->rrom_biosminor_rev = 0;
479 gcc->rrom_biosbuild_rev = 0;
480 gcc->rrom_biosrelease_rev = 0;
481 break;
482 }
483
484 case CSMI_CC_GET_CNTLR_STS:
485 {
486 struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts;
487
488 if (a->flags & AF_DEGRADED_MODE)
489 gcs->status = CSMI_CNTLR_STS_FAILED;
490 else
491 gcs->status = CSMI_CNTLR_STS_GOOD;
492
493 gcs->offline_reason = CSMI_OFFLINE_NO_REASON;
494 break;
495 }
496
497 case CSMI_CC_FW_DOWNLOAD:
498 case CSMI_CC_GET_RAID_INFO:
499 case CSMI_CC_GET_RAID_CFG:
500
501 sts = CSMI_STS_BAD_CTRL_CODE;
502 break;
503
504 case CSMI_CC_SMP_PASSTHRU:
505 case CSMI_CC_SSP_PASSTHRU:
506 case CSMI_CC_STP_PASSTHRU:
507 case CSMI_CC_GET_PHY_INFO:
508 case CSMI_CC_SET_PHY_INFO:
509 case CSMI_CC_GET_LINK_ERRORS:
510 case CSMI_CC_GET_SATA_SIG:
511 case CSMI_CC_GET_CONN_INFO:
512 case CSMI_CC_PHY_CTRL:
513
514 if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
515 ci->control_code,
516 ESAS2R_TARG_ID_INV)) {
517 sts = CSMI_STS_FAILED;
518 break;
519 }
520
521 return true;
522
523 case CSMI_CC_GET_SCSI_ADDR:
524 {
525 struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;
526
527 struct scsi_lun lun;
528
529 memcpy(&lun, gsa->sas_lun, sizeof(struct scsi_lun));
530
531 if (!check_lun(lun)) {
532 sts = CSMI_STS_NO_SCSI_ADDR;
533 break;
534 }
535
536 /* make sure the device is present */
537 spin_lock_irqsave(&a->mem_lock, flags);
538 t = esas2r_targ_db_find_by_sas_addr(a, (u64 *)gsa->sas_addr);
539 spin_unlock_irqrestore(&a->mem_lock, flags);
540
541 if (t == NULL) {
542 sts = CSMI_STS_NO_SCSI_ADDR;
543 break;
544 }
545
546 gsa->host_index = 0xFF;
547 gsa->lun = gsa->sas_lun[1];
548 rq->target_id = esas2r_targ_get_id(t, a);
549 break;
550 }
551
552 case CSMI_CC_GET_DEV_ADDR:
553 {
554 struct atto_csmi_get_dev_addr *gda = &ioctl_csmi->dev_addr;
555
556 /* make sure the target is present */
557 t = a->targetdb + rq->target_id;
558
559 if (t >= a->targetdb_end
560 || t->target_state != TS_PRESENT
561 || t->sas_addr == 0) {
562 sts = CSMI_STS_NO_DEV_ADDR;
563 break;
564 }
565
566 /* fill in the result */
567 *(u64 *)gda->sas_addr = t->sas_addr;
568 memset(gda->sas_lun, 0, sizeof(gda->sas_lun));
569 gda->sas_lun[1] = (u8)le32_to_cpu(rq->vrq->scsi.flags);
570 break;
571 }
572
573 case CSMI_CC_TASK_MGT:
574
575 /* make sure the target is present */
576 t = a->targetdb + rq->target_id;
577
578 if (t >= a->targetdb_end
579 || t->target_state != TS_PRESENT
580 || !(t->flags & TF_PASS_THRU)) {
581 sts = CSMI_STS_NO_DEV_ADDR;
582 break;
583 }
584
585 if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
586 ci->control_code,
587 t->phys_targ_id)) {
588 sts = CSMI_STS_FAILED;
589 break;
590 }
591
592 return true;
593
594 default:
595
596 sts = CSMI_STS_BAD_CTRL_CODE;
597 break;
598 }
599
600 rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(sts);
601
602 return false;
603}
604
605
606static void csmi_ioctl_done_callback(struct esas2r_adapter *a,
607 struct esas2r_request *rq, void *context)
608{
609 struct atto_csmi *ci = (struct atto_csmi *)context;
610 union atto_ioctl_csmi *ioctl_csmi =
611 (union atto_ioctl_csmi *)esas2r_buffered_ioctl;
612
613 switch (ci->control_code) {
614 case CSMI_CC_GET_DRVR_INFO:
615 {
616 struct atto_csmi_get_driver_info *gdi =
617 &ioctl_csmi->drvr_info;
618
619 strcpy(gdi->name, ESAS2R_VERSION_STR);
620
621 gdi->major_rev = ESAS2R_MAJOR_REV;
622 gdi->minor_rev = ESAS2R_MINOR_REV;
623 gdi->build_rev = 0;
624 gdi->release_rev = 0;
625 break;
626 }
627
628 case CSMI_CC_GET_SCSI_ADDR:
629 {
630 struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;
631
632 if (le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status) ==
633 CSMI_STS_SUCCESS) {
634 gsa->target_id = rq->target_id;
635 gsa->path_id = 0;
636 }
637
638 break;
639 }
640 }
641
642 ci->status = le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status);
643}
644
645
646static u8 handle_csmi_ioctl(struct esas2r_adapter *a, struct atto_csmi *ci)
647{
648 struct esas2r_buffered_ioctl bi;
649
650 memset(&bi, 0, sizeof(bi));
651
652 bi.a = a;
653 bi.ioctl = &ci->data;
654 bi.length = sizeof(union atto_ioctl_csmi);
655 bi.offset = 0;
656 bi.callback = csmi_ioctl_callback;
657 bi.context = ci;
658 bi.done_callback = csmi_ioctl_done_callback;
659 bi.done_context = ci;
660
661 return handle_buffered_ioctl(&bi);
662}
663
664/* ATTO HBA ioctl support */
665
666/* Tunnel an ATTO HBA IOCTL to the back end driver for processing. */
667static bool hba_ioctl_tunnel(struct esas2r_adapter *a,
668 struct atto_ioctl *hi,
669 struct esas2r_request *rq,
670 struct esas2r_sg_context *sgc)
671{
672 esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
673
674 esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_HBA);
675
676 if (!esas2r_build_sg_list(a, rq, sgc)) {
677 hi->status = ATTO_STS_OUT_OF_RSRC;
678
679 return false;
680 }
681
682 esas2r_start_request(a, rq);
683
684 return true;
685}
686
687static void scsi_passthru_comp_cb(struct esas2r_adapter *a,
688 struct esas2r_request *rq)
689{
690 struct atto_ioctl *hi = (struct atto_ioctl *)rq->aux_req_cx;
691 struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
692 u8 sts = ATTO_SPT_RS_FAILED;
693
694 spt->scsi_status = rq->func_rsp.scsi_rsp.scsi_stat;
695 spt->sense_length = rq->sense_len;
696 spt->residual_length =
697 le32_to_cpu(rq->func_rsp.scsi_rsp.residual_length);
698
699 switch (rq->req_stat) {
700 case RS_SUCCESS:
701 case RS_SCSI_ERROR:
702 sts = ATTO_SPT_RS_SUCCESS;
703 break;
704 case RS_UNDERRUN:
705 sts = ATTO_SPT_RS_UNDERRUN;
706 break;
707 case RS_OVERRUN:
708 sts = ATTO_SPT_RS_OVERRUN;
709 break;
710 case RS_SEL:
711 case RS_SEL2:
712 sts = ATTO_SPT_RS_NO_DEVICE;
713 break;
714 case RS_NO_LUN:
715 sts = ATTO_SPT_RS_NO_LUN;
716 break;
717 case RS_TIMEOUT:
718 sts = ATTO_SPT_RS_TIMEOUT;
719 break;
720 case RS_DEGRADED:
721 sts = ATTO_SPT_RS_DEGRADED;
722 break;
723 case RS_BUSY:
724 sts = ATTO_SPT_RS_BUSY;
725 break;
726 case RS_ABORTED:
727 sts = ATTO_SPT_RS_ABORTED;
728 break;
729 case RS_RESET:
730 sts = ATTO_SPT_RS_BUS_RESET;
731 break;
732 }
733
734 spt->req_status = sts;
735
736 /* Update the target ID to the next one present. */
737 spt->target_id =
738 esas2r_targ_db_find_next_present(a, (u16)spt->target_id);
739
740 /* Done, call the completion callback. */
741 (*rq->aux_req_cb)(a, rq);
742}
743
744static int hba_ioctl_callback(struct esas2r_adapter *a,
745 struct esas2r_request *rq,
746 struct esas2r_sg_context *sgc,
747 void *context)
748{
749 struct atto_ioctl *hi = (struct atto_ioctl *)esas2r_buffered_ioctl;
750
751 hi->status = ATTO_STS_SUCCESS;
752
753 switch (hi->function) {
754 case ATTO_FUNC_GET_ADAP_INFO:
755 {
756 u8 *class_code = (u8 *)&a->pcid->class;
757
758 struct atto_hba_get_adapter_info *gai =
759 &hi->data.get_adap_info;
760 int pcie_cap_reg;
761
762 if (hi->flags & HBAF_TUNNEL) {
763 hi->status = ATTO_STS_UNSUPPORTED;
764 break;
765 }
766
767 if (hi->version > ATTO_VER_GET_ADAP_INFO0) {
768 hi->status = ATTO_STS_INV_VERSION;
769 hi->version = ATTO_VER_GET_ADAP_INFO0;
770 break;
771 }
772
773 memset(gai, 0, sizeof(*gai));
774
775 gai->pci.vendor_id = a->pcid->vendor;
776 gai->pci.device_id = a->pcid->device;
777 gai->pci.ss_vendor_id = a->pcid->subsystem_vendor;
778 gai->pci.ss_device_id = a->pcid->subsystem_device;
779 gai->pci.class_code[0] = class_code[0];
780 gai->pci.class_code[1] = class_code[1];
781 gai->pci.class_code[2] = class_code[2];
782 gai->pci.rev_id = a->pcid->revision;
783 gai->pci.bus_num = a->pcid->bus->number;
784 gai->pci.dev_num = PCI_SLOT(a->pcid->devfn);
785 gai->pci.func_num = PCI_FUNC(a->pcid->devfn);
786
787 pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
788 if (pcie_cap_reg) {
789 u16 stat;
790 u32 caps;
791
792 pci_read_config_word(a->pcid,
793 pcie_cap_reg + PCI_EXP_LNKSTA,
794 &stat);
795 pci_read_config_dword(a->pcid,
796 pcie_cap_reg + PCI_EXP_LNKCAP,
797 &caps);
798
799 gai->pci.link_speed_curr =
800 (u8)(stat & PCI_EXP_LNKSTA_CLS);
801 gai->pci.link_speed_max =
802 (u8)(caps & PCI_EXP_LNKCAP_SLS);
803 gai->pci.link_width_curr =
804 (u8)((stat & PCI_EXP_LNKSTA_NLW)
805 >> PCI_EXP_LNKSTA_NLW_SHIFT);
806 gai->pci.link_width_max =
807 (u8)((caps & PCI_EXP_LNKCAP_MLW)
808 >> 4);
809 }
810
811 gai->pci.msi_vector_cnt = 1;
812
813 if (a->pcid->msix_enabled)
814 gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSIX;
815 else if (a->pcid->msi_enabled)
816 gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSI;
817 else
818 gai->pci.interrupt_mode = ATTO_GAI_PCIIM_LEGACY;
819
820 gai->adap_type = ATTO_GAI_AT_ESASRAID2;
821
822 if (a->flags2 & AF2_THUNDERLINK)
823 gai->adap_type = ATTO_GAI_AT_TLSASHBA;
824
825 if (a->flags & AF_DEGRADED_MODE)
826 gai->adap_flags |= ATTO_GAI_AF_DEGRADED;
827
828 gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP |
829 ATTO_GAI_AF_DEVADDR_SUPP;
830
831 if (a->pcid->subsystem_device == ATTO_ESAS_R60F
832 || a->pcid->subsystem_device == ATTO_ESAS_R608
833 || a->pcid->subsystem_device == ATTO_ESAS_R644
834 || a->pcid->subsystem_device == ATTO_TSSC_3808E)
835 gai->adap_flags |= ATTO_GAI_AF_VIRT_SES;
836
837 gai->num_ports = ESAS2R_NUM_PHYS;
838 gai->num_phys = ESAS2R_NUM_PHYS;
839
840 strcpy(gai->firmware_rev, a->fw_rev);
841 strcpy(gai->flash_rev, a->flash_rev);
842 strcpy(gai->model_name_short, esas2r_get_model_name_short(a));
843 strcpy(gai->model_name, esas2r_get_model_name(a));
844
845 gai->num_targets = ESAS2R_MAX_TARGETS;
846
847 gai->num_busses = 1;
848 gai->num_targsper_bus = gai->num_targets;
849 gai->num_lunsper_targ = 256;
850
851 if (a->pcid->subsystem_device == ATTO_ESAS_R6F0
852 || a->pcid->subsystem_device == ATTO_ESAS_R60F)
853 gai->num_connectors = 4;
854 else
855 gai->num_connectors = 2;
856
857 gai->adap_flags2 |= ATTO_GAI_AF2_ADAP_CTRL_SUPP;
858
859 gai->num_targets_backend = a->num_targets_backend;
860
861 gai->tunnel_flags = a->ioctl_tunnel
862 & (ATTO_GAI_TF_MEM_RW
863 | ATTO_GAI_TF_TRACE
864 | ATTO_GAI_TF_SCSI_PASS_THRU
865 | ATTO_GAI_TF_GET_DEV_ADDR
866 | ATTO_GAI_TF_PHY_CTRL
867 | ATTO_GAI_TF_CONN_CTRL
868 | ATTO_GAI_TF_GET_DEV_INFO);
869 break;
870 }
871
872 case ATTO_FUNC_GET_ADAP_ADDR:
873 {
874 struct atto_hba_get_adapter_address *gaa =
875 &hi->data.get_adap_addr;
876
877 if (hi->flags & HBAF_TUNNEL) {
878 hi->status = ATTO_STS_UNSUPPORTED;
879 break;
880 }
881
882 if (hi->version > ATTO_VER_GET_ADAP_ADDR0) {
883 hi->status = ATTO_STS_INV_VERSION;
884 hi->version = ATTO_VER_GET_ADAP_ADDR0;
885 } else if (gaa->addr_type == ATTO_GAA_AT_PORT
886 || gaa->addr_type == ATTO_GAA_AT_NODE) {
887 if (gaa->addr_type == ATTO_GAA_AT_PORT
888 && gaa->port_id >= ESAS2R_NUM_PHYS) {
889 hi->status = ATTO_STS_NOT_APPL;
890 } else {
891 memcpy((u64 *)gaa->address,
892 &a->nvram->sas_addr[0], sizeof(u64));
893 gaa->addr_len = sizeof(u64);
894 }
895 } else {
896 hi->status = ATTO_STS_INV_PARAM;
897 }
898
899 break;
900 }
901
902 case ATTO_FUNC_MEM_RW:
903 {
904 if (hi->flags & HBAF_TUNNEL) {
905 if (hba_ioctl_tunnel(a, hi, rq, sgc))
906 return true;
907
908 break;
909 }
910
911 hi->status = ATTO_STS_UNSUPPORTED;
912
913 break;
914 }
915
916 case ATTO_FUNC_TRACE:
917 {
918 struct atto_hba_trace *trc = &hi->data.trace;
919
920 if (hi->flags & HBAF_TUNNEL) {
921 if (hba_ioctl_tunnel(a, hi, rq, sgc))
922 return true;
923
924 break;
925 }
926
927 if (hi->version > ATTO_VER_TRACE1) {
928 hi->status = ATTO_STS_INV_VERSION;
929 hi->version = ATTO_VER_TRACE1;
930 break;
931 }
932
933 if (trc->trace_type == ATTO_TRC_TT_FWCOREDUMP
934 && hi->version >= ATTO_VER_TRACE1) {
935 if (trc->trace_func == ATTO_TRC_TF_UPLOAD) {
936 u32 len = hi->data_length;
937 u32 offset = trc->current_offset;
938 u32 total_len = ESAS2R_FWCOREDUMP_SZ;
939
940 /* Size is zero if a core dump isn't present */
941 if (!(a->flags2 & AF2_COREDUMP_SAVED))
942 total_len = 0;
943
944 if (len > total_len)
945 len = total_len;
946
947 if (offset >= total_len
948 || offset + len > total_len
949 || len == 0) {
950 hi->status = ATTO_STS_INV_PARAM;
951 break;
952 }
953
954 memcpy(trc + 1,
955 a->fw_coredump_buff + offset,
956 len);
957
958 hi->data_length = len;
959 } else if (trc->trace_func == ATTO_TRC_TF_RESET) {
960 memset(a->fw_coredump_buff, 0,
961 ESAS2R_FWCOREDUMP_SZ);
962
963 esas2r_lock_clear_flags(&a->flags2,
964 AF2_COREDUMP_SAVED);
965 } else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) {
966 hi->status = ATTO_STS_UNSUPPORTED;
967 break;
968 }
969
970 /* Always return all the info we can. */
971 trc->trace_mask = 0;
972 trc->current_offset = 0;
973 trc->total_length = ESAS2R_FWCOREDUMP_SZ;
974
975 /* Return zero length buffer if core dump not present */
976 if (!(a->flags2 & AF2_COREDUMP_SAVED))
977 trc->total_length = 0;
978 } else {
979 hi->status = ATTO_STS_UNSUPPORTED;
980 }
981
982 break;
983 }
984
985 case ATTO_FUNC_SCSI_PASS_THRU:
986 {
987 struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
988 struct scsi_lun lun;
989
990 memcpy(&lun, spt->lun, sizeof(struct scsi_lun));
991
992 if (hi->flags & HBAF_TUNNEL) {
993 if (hba_ioctl_tunnel(a, hi, rq, sgc))
994 return true;
995
996 break;
997 }
998
999 if (hi->version > ATTO_VER_SCSI_PASS_THRU0) {
1000 hi->status = ATTO_STS_INV_VERSION;
1001 hi->version = ATTO_VER_SCSI_PASS_THRU0;
1002 break;
1003 }
1004
1005 if (spt->target_id >= ESAS2R_MAX_TARGETS || !check_lun(lun)) {
1006 hi->status = ATTO_STS_INV_PARAM;
1007 break;
1008 }
1009
1010 esas2r_sgc_init(sgc, a, rq, NULL);
1011
1012 sgc->length = hi->data_length;
1013 sgc->cur_offset += offsetof(struct atto_ioctl, data.byte)
1014 + sizeof(struct atto_hba_scsi_pass_thru);
1015
1016 /* Finish request initialization */
1017 rq->target_id = (u16)spt->target_id;
1018 rq->vrq->scsi.flags |= cpu_to_le32(spt->lun[1]);
1019 memcpy(rq->vrq->scsi.cdb, spt->cdb, 16);
1020 rq->vrq->scsi.length = cpu_to_le32(hi->data_length);
1021 rq->sense_len = spt->sense_length;
1022 rq->sense_buf = (u8 *)spt->sense_data;
1023 /* NOTE: we ignore spt->timeout */
1024
1025 /*
1026 * always usurp the completion callback since the interrupt
1027 * callback mechanism may be used.
1028 */
1029
1030 rq->aux_req_cx = hi;
1031 rq->aux_req_cb = rq->comp_cb;
1032 rq->comp_cb = scsi_passthru_comp_cb;
1033
1034 if (spt->flags & ATTO_SPTF_DATA_IN) {
1035 rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
1036 } else if (spt->flags & ATTO_SPTF_DATA_OUT) {
1037 rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
1038 } else {
1039 if (sgc->length) {
1040 hi->status = ATTO_STS_INV_PARAM;
1041 break;
1042 }
1043 }
1044
1045 if (spt->flags & ATTO_SPTF_ORDERED_Q)
1046 rq->vrq->scsi.flags |=
1047 cpu_to_le32(FCP_CMND_TA_ORDRD_Q);
1048 else if (spt->flags & ATTO_SPTF_HEAD_OF_Q)
1049 rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q);
1050
1051 if (!esas2r_build_sg_list(a, rq, sgc)) {
1052 hi->status = ATTO_STS_OUT_OF_RSRC;
1053 break;
1054 }
1055
1056 esas2r_start_request(a, rq);
1057
1058 return true;
1059 }
1060
1061 case ATTO_FUNC_GET_DEV_ADDR:
1062 {
1063 struct atto_hba_get_device_address *gda =
1064 &hi->data.get_dev_addr;
1065 struct esas2r_target *t;
1066
1067 if (hi->flags & HBAF_TUNNEL) {
1068 if (hba_ioctl_tunnel(a, hi, rq, sgc))
1069 return true;
1070
1071 break;
1072 }
1073
1074 if (hi->version > ATTO_VER_GET_DEV_ADDR0) {
1075 hi->status = ATTO_STS_INV_VERSION;
1076 hi->version = ATTO_VER_GET_DEV_ADDR0;
1077 break;
1078 }
1079
1080 if (gda->target_id >= ESAS2R_MAX_TARGETS) {
1081 hi->status = ATTO_STS_INV_PARAM;
1082 break;
1083 }
1084
1085 t = a->targetdb + (u16)gda->target_id;
1086
1087 if (t->target_state != TS_PRESENT) {
1088 hi->status = ATTO_STS_FAILED;
1089 } else if (gda->addr_type == ATTO_GDA_AT_PORT) {
1090 if (t->sas_addr == 0) {
1091 hi->status = ATTO_STS_UNSUPPORTED;
1092 } else {
1093 *(u64 *)gda->address = t->sas_addr;
1094
1095 gda->addr_len = sizeof(u64);
1096 }
1097 } else if (gda->addr_type == ATTO_GDA_AT_NODE) {
1098 hi->status = ATTO_STS_NOT_APPL;
1099 } else {
1100 hi->status = ATTO_STS_INV_PARAM;
1101 }
1102
1103 /* update the target ID to the next one present. */
1104
1105 gda->target_id =
1106 esas2r_targ_db_find_next_present(a,
1107 (u16)gda->target_id);
1108 break;
1109 }
1110
1111 case ATTO_FUNC_PHY_CTRL:
1112 case ATTO_FUNC_CONN_CTRL:
1113 {
1114 if (hba_ioctl_tunnel(a, hi, rq, sgc))
1115 return true;
1116
1117 break;
1118 }
1119
1120 case ATTO_FUNC_ADAP_CTRL:
1121 {
1122 struct atto_hba_adap_ctrl *ac = &hi->data.adap_ctrl;
1123
1124 if (hi->flags & HBAF_TUNNEL) {
1125 hi->status = ATTO_STS_UNSUPPORTED;
1126 break;
1127 }
1128
1129 if (hi->version > ATTO_VER_ADAP_CTRL0) {
1130 hi->status = ATTO_STS_INV_VERSION;
1131 hi->version = ATTO_VER_ADAP_CTRL0;
1132 break;
1133 }
1134
1135 if (ac->adap_func == ATTO_AC_AF_HARD_RST) {
1136 esas2r_reset_adapter(a);
1137 } else if (ac->adap_func != ATTO_AC_AF_GET_STATE) {
1138 hi->status = ATTO_STS_UNSUPPORTED;
1139 break;
1140 }
1141
1142 if (a->flags & AF_CHPRST_NEEDED)
1143 ac->adap_state = ATTO_AC_AS_RST_SCHED;
1144 else if (a->flags & AF_CHPRST_PENDING)
1145 ac->adap_state = ATTO_AC_AS_RST_IN_PROG;
1146 else if (a->flags & AF_DISC_PENDING)
1147 ac->adap_state = ATTO_AC_AS_RST_DISC;
1148 else if (a->flags & AF_DISABLED)
1149 ac->adap_state = ATTO_AC_AS_DISABLED;
1150 else if (a->flags & AF_DEGRADED_MODE)
1151 ac->adap_state = ATTO_AC_AS_DEGRADED;
1152 else
1153 ac->adap_state = ATTO_AC_AS_OK;
1154
1155 break;
1156 }
1157
1158 case ATTO_FUNC_GET_DEV_INFO:
1159 {
1160 struct atto_hba_get_device_info *gdi = &hi->data.get_dev_info;
1161 struct esas2r_target *t;
1162
1163 if (hi->flags & HBAF_TUNNEL) {
1164 if (hba_ioctl_tunnel(a, hi, rq, sgc))
1165 return true;
1166
1167 break;
1168 }
1169
1170 if (hi->version > ATTO_VER_GET_DEV_INFO0) {
1171 hi->status = ATTO_STS_INV_VERSION;
1172 hi->version = ATTO_VER_GET_DEV_INFO0;
1173 break;
1174 }
1175
1176 if (gdi->target_id >= ESAS2R_MAX_TARGETS) {
1177 hi->status = ATTO_STS_INV_PARAM;
1178 break;
1179 }
1180
1181 t = a->targetdb + (u16)gdi->target_id;
1182
1183 /* update the target ID to the next one present. */
1184
1185 gdi->target_id =
1186 esas2r_targ_db_find_next_present(a,
1187 (u16)gdi->target_id);
1188
1189 if (t->target_state != TS_PRESENT) {
1190 hi->status = ATTO_STS_FAILED;
1191 break;
1192 }
1193
1194 hi->status = ATTO_STS_UNSUPPORTED;
1195 break;
1196 }
1197
1198 default:
1199
1200 hi->status = ATTO_STS_INV_FUNC;
1201 break;
1202 }
1203
1204 return false;
1205}
1206
1207static void hba_ioctl_done_callback(struct esas2r_adapter *a,
1208 struct esas2r_request *rq, void *context)
1209{
1210 struct atto_ioctl *ioctl_hba =
1211 (struct atto_ioctl *)esas2r_buffered_ioctl;
1212
1213 esas2r_debug("hba_ioctl_done_callback %d", a->index);
1214
1215 if (ioctl_hba->function == ATTO_FUNC_GET_ADAP_INFO) {
1216 struct atto_hba_get_adapter_info *gai =
1217 &ioctl_hba->data.get_adap_info;
1218
1219 esas2r_debug("ATTO_FUNC_GET_ADAP_INFO");
1220
1221 gai->drvr_rev_major = ESAS2R_MAJOR_REV;
1222 gai->drvr_rev_minor = ESAS2R_MINOR_REV;
1223
1224 strcpy(gai->drvr_rev_ascii, ESAS2R_VERSION_STR);
1225 strcpy(gai->drvr_name, ESAS2R_DRVR_NAME);
1226
1227 gai->num_busses = 1;
1228 gai->num_targsper_bus = ESAS2R_MAX_ID + 1;
1229 gai->num_lunsper_targ = 1;
1230 }
1231}
1232
1233u8 handle_hba_ioctl(struct esas2r_adapter *a,
1234 struct atto_ioctl *ioctl_hba)
1235{
1236 struct esas2r_buffered_ioctl bi;
1237
1238 memset(&bi, 0, sizeof(bi));
1239
1240 bi.a = a;
1241 bi.ioctl = ioctl_hba;
1242 bi.length = sizeof(struct atto_ioctl) + ioctl_hba->data_length;
1243 bi.callback = hba_ioctl_callback;
1244 bi.context = NULL;
1245 bi.done_callback = hba_ioctl_done_callback;
1246 bi.done_context = NULL;
1247 bi.offset = 0;
1248
1249 return handle_buffered_ioctl(&bi);
1250}
1251
1252
1253int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
1254 struct esas2r_sas_nvram *data)
1255{
1256 int result = 0;
1257
1258 a->nvram_command_done = 0;
1259 rq->comp_cb = complete_nvr_req;
1260
1261 if (esas2r_nvram_write(a, rq, data)) {
1262 /* now wait around for it to complete. */
1263		while (!a->nvram_command_done)
1264			wait_event_interruptible(a->nvram_waiter,
1265						 a->nvram_command_done);
1267
1268 /* done, check the status. */
1269 if (rq->req_stat == RS_SUCCESS)
1270 result = 1;
1271 }
1272 return result;
1273}
1274
1275
1276/* This function only cares about ATTO-specific ioctls (atto_express_ioctl) */
1277int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
1278{
1279 struct atto_express_ioctl *ioctl = NULL;
1280 struct esas2r_adapter *a;
1281 struct esas2r_request *rq;
1282 u16 code;
1283 int err;
1284
1285 esas2r_log(ESAS2R_LOG_DEBG, "ioctl (%p, %x, %p)", hostdata, cmd, arg);
1286
1287 if ((arg == NULL)
1288 || (cmd < EXPRESS_IOCTL_MIN)
1289 || (cmd > EXPRESS_IOCTL_MAX))
1290 return -ENOTSUPP;
1291
1292 if (!access_ok(VERIFY_WRITE, arg, sizeof(struct atto_express_ioctl))) {
1293 esas2r_log(ESAS2R_LOG_WARN,
1294 "ioctl_handler access_ok failed for cmd %d, "
1295 "address %p", cmd,
1296 arg);
1297 return -EFAULT;
1298 }
1299
1300 /* allocate a kernel memory buffer for the IOCTL data */
1301 ioctl = kzalloc(sizeof(struct atto_express_ioctl), GFP_KERNEL);
1302 if (ioctl == NULL) {
1303 esas2r_log(ESAS2R_LOG_WARN,
1304 "ioctl_handler kzalloc failed for %d bytes",
1305 sizeof(struct atto_express_ioctl));
1306 return -ENOMEM;
1307 }
1308
1309 err = __copy_from_user(ioctl, arg, sizeof(struct atto_express_ioctl));
1310 if (err != 0) {
1311 esas2r_log(ESAS2R_LOG_WARN,
1312 "copy_from_user didn't copy everything (err %d, cmd %d)",
1313 err,
1314 cmd);
1315 kfree(ioctl);
1316
1317 return -EFAULT;
1318 }
1319
1320 /* verify the signature */
1321
1322 if (memcmp(ioctl->header.signature,
1323 EXPRESS_IOCTL_SIGNATURE,
1324 EXPRESS_IOCTL_SIGNATURE_SIZE) != 0) {
1325 esas2r_log(ESAS2R_LOG_WARN, "invalid signature");
1326 kfree(ioctl);
1327
1328 return -ENOTSUPP;
1329 }
1330
1331 /* assume success */
1332
1333 ioctl->header.return_code = IOCTL_SUCCESS;
1334 err = 0;
1335
1336 /*
1337 * handle EXPRESS_IOCTL_GET_CHANNELS
1338 * without paying attention to channel
1339 */
1340
1341 if (cmd == EXPRESS_IOCTL_GET_CHANNELS) {
1342 int i = 0, k = 0;
1343
1344 ioctl->data.chanlist.num_channels = 0;
1345
1346 while (i < MAX_ADAPTERS) {
1347 if (esas2r_adapters[i]) {
1348 ioctl->data.chanlist.num_channels++;
1349 ioctl->data.chanlist.channel[k] = i;
1350 k++;
1351 }
1352 i++;
1353 }
1354
1355 goto ioctl_done;
1356 }
1357
1358 /* get the channel */
1359
1360 if (ioctl->header.channel == 0xFF) {
1361 a = (struct esas2r_adapter *)hostdata;
1362 } else {
1363 a = esas2r_adapters[ioctl->header.channel];
1364 if (ioctl->header.channel >= MAX_ADAPTERS || (a == NULL)) {
1365 ioctl->header.return_code = IOCTL_BAD_CHANNEL;
1366 esas2r_log(ESAS2R_LOG_WARN, "bad channel value");
1367 kfree(ioctl);
1368
1369 return -ENOTSUPP;
1370 }
1371 }
1372
1373 switch (cmd) {
1374 case EXPRESS_IOCTL_RW_FIRMWARE:
1375
1376 if (ioctl->data.fwrw.img_type == FW_IMG_FM_API) {
1377			err = esas2r_write_fw(a,
1378					      (char *)ioctl->data.fwrw.image,
1379					      0,
1380					      sizeof(struct atto_express_ioctl));
1381
1382			if (err >= 0) {
1383				err = esas2r_read_fw(a,
1384						     (char *)ioctl->data.fwrw.image,
1385						     0,
1386						     sizeof(struct atto_express_ioctl));
1387			}
1388		} else if (ioctl->data.fwrw.img_type == FW_IMG_FS_API) {
1389			err = esas2r_write_fs(a,
1390					      (char *)ioctl->data.fwrw.image,
1391					      0,
1392					      sizeof(struct atto_express_ioctl));
1393
1394			if (err >= 0) {
1395				err = esas2r_read_fs(a,
1396						     (char *)ioctl->data.fwrw.image,
1397						     0,
1398						     sizeof(struct atto_express_ioctl));
1399			}
1406 } else {
1407 ioctl->header.return_code = IOCTL_BAD_FLASH_IMGTYPE;
1408 }
1409
1410 break;
1411
1412 case EXPRESS_IOCTL_READ_PARAMS:
1413
1414 memcpy(ioctl->data.prw.data_buffer, a->nvram,
1415 sizeof(struct esas2r_sas_nvram));
1416 ioctl->data.prw.code = 1;
1417 break;
1418
1419 case EXPRESS_IOCTL_WRITE_PARAMS:
1420
1421 rq = esas2r_alloc_request(a);
1422 if (rq == NULL) {
1423 up(&a->nvram_semaphore);
1424 ioctl->data.prw.code = 0;
1425 break;
1426 }
1427
1428 code = esas2r_write_params(a, rq,
1429 (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
1430 ioctl->data.prw.code = code;
1431
1432 esas2r_free_request(a, rq);
1433
1434 break;
1435
1436 case EXPRESS_IOCTL_DEFAULT_PARAMS:
1437
1438 esas2r_nvram_get_defaults(a,
1439 (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
1440 ioctl->data.prw.code = 1;
1441 break;
1442
1443 case EXPRESS_IOCTL_CHAN_INFO:
1444
1445 ioctl->data.chaninfo.major_rev = ESAS2R_MAJOR_REV;
1446 ioctl->data.chaninfo.minor_rev = ESAS2R_MINOR_REV;
1447 ioctl->data.chaninfo.IRQ = a->pcid->irq;
1448 ioctl->data.chaninfo.device_id = a->pcid->device;
1449 ioctl->data.chaninfo.vendor_id = a->pcid->vendor;
1450 ioctl->data.chaninfo.ven_dev_id = a->pcid->subsystem_device;
1451 ioctl->data.chaninfo.revision_id = a->pcid->revision;
1452 ioctl->data.chaninfo.pci_bus = a->pcid->bus->number;
1453 ioctl->data.chaninfo.pci_dev_func = a->pcid->devfn;
1454 ioctl->data.chaninfo.core_rev = 0;
1455 ioctl->data.chaninfo.host_no = a->host->host_no;
1456 ioctl->data.chaninfo.hbaapi_rev = 0;
1457 break;
1458
1459 case EXPRESS_IOCTL_SMP:
1460		ioctl->header.return_code =
1461			handle_smp_ioctl(a, &ioctl->data.ioctl_smp);
1463 break;
1464
1465 case EXPRESS_CSMI:
1466 ioctl->header.return_code =
1467 handle_csmi_ioctl(a, &ioctl->data.csmi);
1468 break;
1469
1470 case EXPRESS_IOCTL_HBA:
1471		ioctl->header.return_code =
1472			handle_hba_ioctl(a, &ioctl->data.ioctl_hba);
1474 break;
1475
1476 case EXPRESS_IOCTL_VDA:
1477 err = esas2r_write_vda(a,
1478 (char *)&ioctl->data.ioctl_vda,
1479 0,
1480 sizeof(struct atto_ioctl_vda) +
1481 ioctl->data.ioctl_vda.data_length);
1482
1483 if (err >= 0) {
1484 err = esas2r_read_vda(a,
1485 (char *)&ioctl->data.ioctl_vda,
1486 0,
1487 sizeof(struct atto_ioctl_vda) +
1488 ioctl->data.ioctl_vda.data_length);
1489 }
1490
1494 break;
1495
1496 case EXPRESS_IOCTL_GET_MOD_INFO:
1497
1498 ioctl->data.modinfo.adapter = a;
1499 ioctl->data.modinfo.pci_dev = a->pcid;
1500 ioctl->data.modinfo.scsi_host = a->host;
1501 ioctl->data.modinfo.host_no = a->host->host_no;
1502
1503 break;
1504
1505 default:
1506		esas2r_debug("esas2r_ioctl invalid cmd %d!", cmd);
1507 ioctl->header.return_code = IOCTL_ERR_INVCMD;
1508 }
1509
1510ioctl_done:
1511
1512 if (err < 0) {
1513 esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %d", err,
1514 cmd);
1515
1516 switch (err) {
1517 case -ENOMEM:
1518 case -EBUSY:
1519 ioctl->header.return_code = IOCTL_OUT_OF_RESOURCES;
1520 break;
1521
1522 case -ENOSYS:
1523 case -EINVAL:
1524 ioctl->header.return_code = IOCTL_INVALID_PARAM;
1525 break;
1526		default:
1527			ioctl->header.return_code = IOCTL_GENERAL_ERROR;
1528		}
1529	}
1530
1531 /* Always copy the buffer back, if only to pick up the status */
1532 err = __copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl));
1533 if (err != 0) {
1534 esas2r_log(ESAS2R_LOG_WARN,
1535 "ioctl_handler copy_to_user didn't copy "
1536 "everything (err %d, cmd %d)", err,
1537 cmd);
1538 kfree(ioctl);
1539
1540 return -EFAULT;
1541 }
1542
1543 kfree(ioctl);
1544
1545 return 0;
1546}
1547
1548int esas2r_ioctl(struct scsi_device *sd, int cmd, void __user *arg)
1549{
1550 return esas2r_ioctl_handler(sd->host->hostdata, cmd, arg);
1551}
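/*
 * User-space sketch (illustrative only; the device path is an assumption):
 * the handler above is reached by issuing one of the EXPRESS_IOCTL_* codes
 * against a SCSI device node with the signature filled in, as verified in
 * esas2r_ioctl_handler().
 *
 *	struct atto_express_ioctl ei;
 *	int fd = open("/dev/sg0", O_RDWR);
 *
 *	memset(&ei, 0, sizeof(ei));
 *	memcpy(ei.header.signature, EXPRESS_IOCTL_SIGNATURE,
 *	       EXPRESS_IOCTL_SIGNATURE_SIZE);
 *	ei.header.channel = 0xFF;	(use this device's own adapter)
 *	if (fd >= 0 && ioctl(fd, EXPRESS_IOCTL_CHAN_INFO, &ei) == 0)
 *		printf("IRQ %d\n", ei.data.chaninfo.IRQ);
 */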
1552
1553static void free_fw_buffers(struct esas2r_adapter *a)
1554{
1555 if (a->firmware.data) {
1556 dma_free_coherent(&a->pcid->dev,
1557 (size_t)a->firmware.orig_len,
1558 a->firmware.data,
1559 (dma_addr_t)a->firmware.phys);
1560
1561 a->firmware.data = NULL;
1562 }
1563}
1564
1565static int allocate_fw_buffers(struct esas2r_adapter *a, u32 length)
1566{
1567 free_fw_buffers(a);
1568
1569 a->firmware.orig_len = length;
1570
1571	a->firmware.data = (u8 *)dma_alloc_coherent(&a->pcid->dev,
1572						    (size_t)length,
1573						    (dma_addr_t *)&a->firmware.phys,
1574						    GFP_KERNEL);
1576
1577 if (!a->firmware.data) {
1578 esas2r_debug("buffer alloc failed!");
1579 return 0;
1580 }
1581
1582 return 1;
1583}
1584
1585/* Handle a call to read firmware. */
1586int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count)
1587{
1588 esas2r_trace_enter();
1589 /* if the cached header is a status, simply copy it over and return. */
1590 if (a->firmware.state == FW_STATUS_ST) {
1591 int size = min_t(int, count, sizeof(a->firmware.header));
1592 esas2r_trace_exit();
1593 memcpy(buf, &a->firmware.header, size);
1594 esas2r_debug("esas2r_read_fw: STATUS size %d", size);
1595 return size;
1596 }
1597
1598 /*
1599	 * If the cached header is a command, execute it when the read
1600	 * starts at offset 0; otherwise copy out the cached pieces.
1601 */
1602
1603 if (a->firmware.state == FW_COMMAND_ST) {
1604 u32 length = a->firmware.header.length;
1605 esas2r_trace_exit();
1606
1607 esas2r_debug("esas2r_read_fw: COMMAND length %d off %d",
1608 length,
1609 off);
1610
1611 if (off == 0) {
1612 if (a->firmware.header.action == FI_ACT_UP) {
1613 if (!allocate_fw_buffers(a, length))
1614 return -ENOMEM;
1615
1616
1617 /* copy header over */
1618
1619 memcpy(a->firmware.data,
1620 &a->firmware.header,
1621 sizeof(a->firmware.header));
1622
1623 do_fm_api(a,
1624 (struct esas2r_flash_img *)a->firmware.data);
1625 } else if (a->firmware.header.action == FI_ACT_UPSZ) {
1626 int size =
1627 min((int)count,
1628 (int)sizeof(a->firmware.header));
1629 do_fm_api(a, &a->firmware.header);
1630 memcpy(buf, &a->firmware.header, size);
1631 esas2r_debug("FI_ACT_UPSZ size %d", size);
1632 return size;
1633 } else {
1634 esas2r_debug("invalid action %d",
1635 a->firmware.header.action);
1636 return -ENOSYS;
1637 }
1638 }
1639
1640 if (count + off > length)
1641 count = length - off;
1642
1643 if (count < 0)
1644 return 0;
1645
1646 if (!a->firmware.data) {
1647 esas2r_debug(
1648 "read: nonzero offset but no buffer available!");
1649 return -ENOMEM;
1650 }
1651
1652 esas2r_debug("esas2r_read_fw: off %d count %d length %d ", off,
1653 count,
1654 length);
1655
1656 memcpy(buf, &a->firmware.data[off], count);
1657
1658 /* when done, release the buffer */
1659
1660 if (length <= off + count) {
1661 esas2r_debug("esas2r_read_fw: freeing buffer!");
1662
1663 free_fw_buffers(a);
1664 }
1665
1666 return count;
1667 }
1668
1669 esas2r_trace_exit();
1670 esas2r_debug("esas2r_read_fw: invalid firmware state %d",
1671 a->firmware.state);
1672
1673 return -EINVAL;
1674}
1675
1676/* Handle a call to write firmware. */
1677int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off,
1678 int count)
1679{
1680 u32 length;
1681
1682 if (off == 0) {
1683 struct esas2r_flash_img *header =
1684 (struct esas2r_flash_img *)buf;
1685
1686 /* assume version 0 flash image */
1687
1688 int min_size = sizeof(struct esas2r_flash_img_v0);
1689
1690 a->firmware.state = FW_INVALID_ST;
1691
1692 /* validate the version field first */
1693
1694 if (count < 4
1695 || header->fi_version > FI_VERSION_1) {
1696 esas2r_debug(
1697 "esas2r_write_fw: short header or invalid version");
1698 return -EINVAL;
1699 }
1700
1701		/* See if it's a version 1 flash image */
1702
1703 if (header->fi_version == FI_VERSION_1)
1704 min_size = sizeof(struct esas2r_flash_img);
1705
1706 /* If this is the start, the header must be full and valid. */
1707 if (count < min_size) {
1708 esas2r_debug("esas2r_write_fw: short header, aborting");
1709 return -EINVAL;
1710 }
1711
1712 /* Make sure the size is reasonable. */
1713 length = header->length;
1714
1715 if (length > 1024 * 1024) {
1716 esas2r_debug(
1717 "esas2r_write_fw: hosed, length %d fi_version %d",
1718 length, header->fi_version);
1719 return -EINVAL;
1720 }
1721
1722 /*
1723 * If this is a write command, allocate memory because
1724 * we have to cache everything. otherwise, just cache
1725 * the header, because the read op will do the command.
1726 */
1727
1728 if (header->action == FI_ACT_DOWN) {
1729 if (!allocate_fw_buffers(a, length))
1730 return -ENOMEM;
1731
1732 /*
1733 * Store the command, so there is context on subsequent
1734 * calls.
1735 */
1736 memcpy(&a->firmware.header,
1737 buf,
1738 sizeof(*header));
1739 } else if (header->action == FI_ACT_UP
1740 || header->action == FI_ACT_UPSZ) {
1741 /* Save the command, result will be picked up on read */
1742 memcpy(&a->firmware.header,
1743 buf,
1744 sizeof(*header));
1745
1746 a->firmware.state = FW_COMMAND_ST;
1747
1748 esas2r_debug(
1749 "esas2r_write_fw: COMMAND, count %d, action %d ",
1750 count, header->action);
1751
1752 /*
1753 * Pretend we took the whole buffer,
1754 * so we don't get bothered again.
1755 */
1756
1757 return count;
1758 } else {
1759 esas2r_debug("esas2r_write_fw: invalid action %d ",
1760 a->firmware.header.action);
1761 return -ENOSYS;
1762 }
1763 } else {
1764 length = a->firmware.header.length;
1765 }
1766
1767 /*
1768 * We only get here on a download command, regardless of offset.
1769 * the chunks written by the system need to be cached, and when
1770 * the final one arrives, issue the fmapi command.
1771 */
1772
1773 if (off + count > length)
1774 count = length - off;
1775
1776 if (count > 0) {
1777 esas2r_debug("esas2r_write_fw: off %d count %d length %d", off,
1778 count,
1779 length);
1780
1781 /*
1782 * On a full upload, the system tries sending the whole buffer.
1783 * there's nothing to do with it, so just drop it here, before
1784 * trying to copy over into unallocated memory!
1785 */
1786 if (a->firmware.header.action == FI_ACT_UP)
1787 return count;
1788
1789 if (!a->firmware.data) {
1790 esas2r_debug(
1791 "write: nonzero offset but no buffer available!");
1792 return -ENOMEM;
1793 }
1794
1795 memcpy(&a->firmware.data[off], buf, count);
1796
1797 if (length == off + count) {
1798 do_fm_api(a,
1799 (struct esas2r_flash_img *)a->firmware.data);
1800
1801 /*
1802 * Now copy the header result to be picked up by the
1803 * next read
1804 */
1805 memcpy(&a->firmware.header,
1806 a->firmware.data,
1807 sizeof(a->firmware.header));
1808
1809 a->firmware.state = FW_STATUS_ST;
1810
1811 esas2r_debug("write completed");
1812
1813 /*
1814 * Since the system has the data buffered, the only way
1815 * this can leak is if a root user writes a program
1816 * that writes a shorter buffer than it claims, and the
1817 * copyin fails.
1818 */
1819 free_fw_buffers(a);
1820 }
1821 }
1822
1823 return count;
1824}
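
/*
 * Illustrative only (not part of the driver): a minimal userspace
 * sketch of the FI_ACT_DOWN flow implemented above, driven through the
 * "fw" sysfs node registered in esas2r_main.c.  The sysfs path, the
 * host number and the pre-built image buffer are assumptions for the
 * example; the header fields are those validated by esas2r_write_fw().
 *
 *	int fd = open("/sys/class/scsi_host/host0/fw", O_RDWR);
 *
 *	// Write the whole image, header first.  esas2r_write_fw()
 *	// caches the chunks and calls do_fm_api() when the final
 *	// byte arrives, leaving the driver in FW_STATUS_ST.
 *	pwrite(fd, image, image_len, 0);
 *
 *	// Read the header back: in FW_STATUS_ST, esas2r_read_fw()
 *	// simply returns the cached header, now carrying the result.
 *	struct esas2r_flash_img status;
 *	pread(fd, &status, sizeof(status), 0);
 *	close(fd);
 */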
1825
1826/* Callback for the completion of a VDA request. */
1827static void vda_complete_req(struct esas2r_adapter *a,
1828 struct esas2r_request *rq)
1829{
1830 a->vda_command_done = 1;
1831 wake_up_interruptible(&a->vda_waiter);
1832}
1833
1834/* Scatter/gather callback for VDA requests */
1835static u32 get_physaddr_vda(struct esas2r_sg_context *sgc, u64 *addr)
1836{
1837 struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
1838 int offset = (u8 *)sgc->cur_offset - (u8 *)a->vda_buffer;
1839
1840 (*addr) = a->ppvda_buffer + offset;
1841 return VDA_MAX_BUFFER_SIZE - offset;
1842}
1843
1844/* Handle a call to read a VDA command. */
1845int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count)
1846{
1847 if (!a->vda_buffer)
1848 return -ENOMEM;
1849
1850 if (off == 0) {
1851 struct esas2r_request *rq;
1852 struct atto_ioctl_vda *vi =
1853 (struct atto_ioctl_vda *)a->vda_buffer;
1854 struct esas2r_sg_context sgc;
1855 bool wait_for_completion;
1856
1857		/*
1858		 * Presumably, someone has already written to the vda_buffer,
1859		 * and now they are reading the node for the response, so we
1860		 * will actually issue the request to the chip and reply.
1861		 */
1862
1863 /* allocate a request */
1864 rq = esas2r_alloc_request(a);
1865 if (rq == NULL) {
1866			esas2r_debug("esas2r_read_vda: out of requests");
1867 return -EBUSY;
1868 }
1869
1870 rq->comp_cb = vda_complete_req;
1871
1872 sgc.first_req = rq;
1873 sgc.adapter = a;
1874 sgc.cur_offset = a->vda_buffer + VDA_BUFFER_HEADER_SZ;
1875 sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_vda;
1876
1877 a->vda_command_done = 0;
1878
1879 wait_for_completion =
1880 esas2r_process_vda_ioctl(a, vi, rq, &sgc);
1881
1882 if (wait_for_completion) {
1883 /* now wait around for it to complete. */
1884
1885 while (!a->vda_command_done)
1886 wait_event_interruptible(a->vda_waiter,
1887 a->vda_command_done);
1888 }
1889
1890 esas2r_free_request(a, (struct esas2r_request *)rq);
1891 }
1892
1893 if (off > VDA_MAX_BUFFER_SIZE)
1894 return 0;
1895
1896 if (count + off > VDA_MAX_BUFFER_SIZE)
1897 count = VDA_MAX_BUFFER_SIZE - off;
1898
1899 if (count < 0)
1900 return 0;
1901
1902 memcpy(buf, a->vda_buffer + off, count);
1903
1904 return count;
1905}
1906
1907/* Handle a call to write a VDA command. */
1908int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
1909 int count)
1910{
1911 /*
1912 * allocate memory for it, if not already done. once allocated,
1913 * we will keep it around until the driver is unloaded.
1914 */
1915
1916 if (!a->vda_buffer) {
1917 dma_addr_t dma_addr;
1918 a->vda_buffer = (u8 *)dma_alloc_coherent(&a->pcid->dev,
1919 (size_t)
1920 VDA_MAX_BUFFER_SIZE,
1921 &dma_addr,
1922 GFP_KERNEL);
1923
1924 a->ppvda_buffer = dma_addr;
1925 }
1926
1927 if (!a->vda_buffer)
1928 return -ENOMEM;
1929
1930 if (off > VDA_MAX_BUFFER_SIZE)
1931 return 0;
1932
1933 if (count + off > VDA_MAX_BUFFER_SIZE)
1934 count = VDA_MAX_BUFFER_SIZE - off;
1935
1936 if (count < 1)
1937 return 0;
1938
1939 memcpy(a->vda_buffer + off, buf, count);
1940
1941 return count;
1942}
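
/*
 * Illustrative only (not part of the driver): the VDA node uses a
 * write-then-read protocol.  A sketch, assuming the "vda" sysfs node
 * from esas2r_main.c and a caller-prepared struct atto_ioctl_vda vi:
 *
 *	int fd = open("/sys/class/scsi_host/host0/vda", O_RDWR);
 *
 *	// Stage the command; esas2r_write_vda() only copies it into
 *	// the DMA-coherent vda_buffer.
 *	pwrite(fd, &vi, sizeof(vi), 0);
 *
 *	// The read at offset 0 is what actually issues the request
 *	// (esas2r_process_vda_ioctl()) and returns the response.
 *	pread(fd, &vi, sizeof(vi), 0);
 *	close(fd);
 */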
1943
1944/* Callback for the completion of an FS_API request. */
1945static void fs_api_complete_req(struct esas2r_adapter *a,
1946 struct esas2r_request *rq)
1947{
1948 a->fs_api_command_done = 1;
1949
1950 wake_up_interruptible(&a->fs_api_waiter);
1951}
1952
1953/* Scatter/gather callback for FS_API requests */
1954static u32 get_physaddr_fs_api(struct esas2r_sg_context *sgc, u64 *addr)
1955{
1956 struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
1957 struct esas2r_ioctl_fs *fs =
1958 (struct esas2r_ioctl_fs *)a->fs_api_buffer;
1959 u32 offset = (u8 *)sgc->cur_offset - (u8 *)fs;
1960
1961 (*addr) = a->ppfs_api_buffer + offset;
1962
1963 return a->fs_api_buffer_size - offset;
1964}
1965
1966/* Handle a call to read firmware via FS_API. */
1967int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count)
1968{
1969 if (!a->fs_api_buffer)
1970 return -ENOMEM;
1971
1972 if (off == 0) {
1973 struct esas2r_request *rq;
1974 struct esas2r_sg_context sgc;
1975 struct esas2r_ioctl_fs *fs =
1976 (struct esas2r_ioctl_fs *)a->fs_api_buffer;
1977
1978 /* If another flash request is already in progress, return. */
1979 if (down_interruptible(&a->fs_api_semaphore)) {
1980busy:
1981 fs->status = ATTO_STS_OUT_OF_RSRC;
1982 return -EBUSY;
1983 }
1984
1985		/*
1986		 * Presumably, someone has already written to the
1987		 * fs_api_buffer, and now they are reading the node for the
1988		 * response, so we will actually issue the request to the
1989		 * chip and reply.  Allocate a request.
1990		 */
1991
1992 rq = esas2r_alloc_request(a);
1993 if (rq == NULL) {
1994 esas2r_debug("esas2r_read_fs: out of requests");
1995 up(&a->fs_api_semaphore);
1996 goto busy;
1997 }
1998
1999 rq->comp_cb = fs_api_complete_req;
2000
2001		/* Set up the SGCONTEXT to build the s/g table */
2002
2003 sgc.cur_offset = fs->data;
2004 sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_fs_api;
2005
2006 a->fs_api_command_done = 0;
2007
2008 if (!esas2r_process_fs_ioctl(a, fs, rq, &sgc)) {
2009 if (fs->status == ATTO_STS_OUT_OF_RSRC)
2010 count = -EBUSY;
2011
2012 goto dont_wait;
2013 }
2014
2015 /* Now wait around for it to complete. */
2016
2017 while (!a->fs_api_command_done)
2018 wait_event_interruptible(a->fs_api_waiter,
2019 a->fs_api_command_done);
2020
2021dont_wait:
2022 /* Free the request and keep going */
2023 up(&a->fs_api_semaphore);
2024 esas2r_free_request(a, (struct esas2r_request *)rq);
2025
2026 /* Pick up possible error code from above */
2027 if (count < 0)
2028 return count;
2029 }
2030
2031 if (off > a->fs_api_buffer_size)
2032 return 0;
2033
2034 if (count + off > a->fs_api_buffer_size)
2035 count = a->fs_api_buffer_size - off;
2036
2037 if (count < 0)
2038 return 0;
2039
2040 memcpy(buf, a->fs_api_buffer + off, count);
2041
2042 return count;
2043}
2044
2045/* Handle a call to write firmware via FS_API. */
2046int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
2047 int count)
2048{
2049 if (off == 0) {
2050 struct esas2r_ioctl_fs *fs = (struct esas2r_ioctl_fs *)buf;
2051 u32 length = fs->command.length + offsetof(
2052 struct esas2r_ioctl_fs,
2053 data);
2054
2055 /*
2056 * Special case, for BEGIN commands, the length field
2057 * is lying to us, so just get enough for the header.
2058 */
2059
2060 if (fs->command.command == ESAS2R_FS_CMD_BEGINW)
2061 length = offsetof(struct esas2r_ioctl_fs, data);
2062
2063 /*
2064 * Beginning a command. We assume we'll get at least
2065 * enough in the first write so we can look at the
2066 * header and see how much we need to alloc.
2067 */
2068
2069 if (count < offsetof(struct esas2r_ioctl_fs, data))
2070 return -EINVAL;
2071
2072 /* Allocate a buffer or use the existing buffer. */
2073 if (a->fs_api_buffer) {
2074 if (a->fs_api_buffer_size < length) {
2075 /* Free too-small buffer and get a new one */
2076 dma_free_coherent(&a->pcid->dev,
2077 (size_t)a->fs_api_buffer_size,
2078 a->fs_api_buffer,
2079 (dma_addr_t)a->ppfs_api_buffer);
2080
2081 goto re_allocate_buffer;
2082 }
2083 } else {
2084re_allocate_buffer:
2085 a->fs_api_buffer_size = length;
2086
2087 a->fs_api_buffer = (u8 *)dma_alloc_coherent(
2088 &a->pcid->dev,
2089 (size_t)a->fs_api_buffer_size,
2090 (dma_addr_t *)&a->ppfs_api_buffer,
2091 GFP_KERNEL);
2092 }
2093 }
2094
2095 if (!a->fs_api_buffer)
2096 return -ENOMEM;
2097
2098 if (off > a->fs_api_buffer_size)
2099 return 0;
2100
2101 if (count + off > a->fs_api_buffer_size)
2102 count = a->fs_api_buffer_size - off;
2103
2104 if (count < 1)
2105 return 0;
2106
2107 memcpy(a->fs_api_buffer + off, buf, count);
2108
2109 return count;
2110}
diff --git a/drivers/scsi/esas2r/esas2r_log.c b/drivers/scsi/esas2r/esas2r_log.c
new file mode 100644
index 000000000000..9bf285df58dd
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_log.c
@@ -0,0 +1,254 @@
1/*
2 * linux/drivers/scsi/esas2r/esas2r_log.c
3 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
6 * (mailto:linuxdrivers@attotech.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * NO WARRANTY
19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 * solely responsible for determining the appropriateness of using and
24 * distributing the Program and assumes all risks associated with its
25 * exercise of rights under this Agreement, including but not limited to
26 * the risks and costs of program errors, damage to or loss of data,
27 * programs or equipment, and unavailability or interruption of operations.
28 *
29 * DISCLAIMER OF LIABILITY
30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37 *
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
41 * USA.
42 */
43
44#include "esas2r.h"
45
46/*
47 * this module within the driver is tasked with providing logging functionality.
48 * the event_log_level module parameter controls the level of messages that are
49 * written to the system log. the default level of messages that are written
50 * are critical and warning messages. if other types of messages are desired,
51 * one simply needs to load the module with the correct value for the
52 * event_log_level module parameter. for example:
53 *
54 * insmod <module> event_log_level=1
55 *
56 * will load the module and only critical events will be written by this module
57 * to the system log. if critical, warning, and information-level messages are
58 * desired, the correct value for the event_log_level module parameter
59 * would be as follows:
60 *
61 * insmod <module> event_log_level=3
62 */
63
64#define EVENT_LOG_BUFF_SIZE 1024
65
66static long event_log_level = ESAS2R_LOG_DFLT;
67
68module_param(event_log_level, long, S_IRUGO | S_IRUSR);
69MODULE_PARM_DESC(event_log_level,
70 "Specifies the level of events to report to the system log. Critical and warning level events are logged by default.");
71
72/* A shared buffer to use for formatting messages. */
73static char event_buffer[EVENT_LOG_BUFF_SIZE];
74
75/* A lock to protect the shared buffer used for formatting messages. */
76static DEFINE_SPINLOCK(event_buffer_lock);
77
78/**
79 * translates an esas2r-defined logging event level to a kernel logging level.
80 *
81 * @param [in] level the esas2r-defined logging event level to translate
82 *
83 * @return the corresponding kernel logging level.
84 */
85static const char *translate_esas2r_event_level_to_kernel(const long level)
86{
87 switch (level) {
88 case ESAS2R_LOG_CRIT:
89 return KERN_CRIT;
90
91 case ESAS2R_LOG_WARN:
92 return KERN_WARNING;
93
94 case ESAS2R_LOG_INFO:
95 return KERN_INFO;
96
97 case ESAS2R_LOG_DEBG:
98 case ESAS2R_LOG_TRCE:
99 default:
100 return KERN_DEBUG;
101 }
102}
103
104/**
105 * the master logging function. this function will format the message as
106 * outlined by the formatting string, the input device information and the
107 * substitution arguments and output the resulting string to the system log.
108 *
109 * @param [in] level the event log level of the message
110 * @param [in] dev the device information
111 * @param [in] format the formatting string for the message
112 * @param [in] args the substitution arguments to the formatting string
113 *
114 * @return 0 on success, or -1 if an error occurred.
115 */
116static int esas2r_log_master(const long level,
117 const struct device *dev,
118 const char *format,
119 va_list args)
120{
121 if (level <= event_log_level) {
122 unsigned long flags = 0;
123 int retval = 0;
124 char *buffer = event_buffer;
125 size_t buflen = EVENT_LOG_BUFF_SIZE;
126 const char *fmt_nodev = "%s%s: ";
127 const char *fmt_dev = "%s%s [%s, %s, %s]";
128 const char *slevel =
129 translate_esas2r_event_level_to_kernel(level);
130
131 spin_lock_irqsave(&event_buffer_lock, flags);
132
133 if (buffer == NULL) {
134 spin_unlock_irqrestore(&event_buffer_lock, flags);
135 return -1;
136 }
137
138 memset(buffer, 0, buflen);
139
140 /*
141 * format the level onto the beginning of the string and do
142 * some pointer arithmetic to move the pointer to the point
143 * where the actual message can be inserted.
144 */
145
146 if (dev == NULL) {
147 snprintf(buffer, buflen, fmt_nodev, slevel,
148 ESAS2R_DRVR_NAME);
149 } else {
150 snprintf(buffer, buflen, fmt_dev, slevel,
151 ESAS2R_DRVR_NAME,
152 (dev->driver ? dev->driver->name : "unknown"),
153 (dev->bus ? dev->bus->name : "unknown"),
154 dev_name(dev));
155 }
156
157 buffer += strlen(event_buffer);
158 buflen -= strlen(event_buffer);
159
160 retval = vsnprintf(buffer, buflen, format, args);
161 if (retval < 0) {
162 spin_unlock_irqrestore(&event_buffer_lock, flags);
163 return -1;
164 }
165
166 /*
167 * Put a line break at the end of the formatted string so that
168 * we don't wind up with run-on messages. only append if there
169 * is enough space in the buffer.
170 */
171 if (strlen(event_buffer) < buflen)
172 strcat(buffer, "\n");
173
174		printk("%s", event_buffer);
175
176 spin_unlock_irqrestore(&event_buffer_lock, flags);
177 }
178
179 return 0;
180}
181
182/**
183 * formats and logs a message to the system log.
184 *
185 * @param [in] level the event level of the message
186 * @param [in] format the formatting string for the message
187 * @param [in] ... the substitution arguments to the formatting string
188 *
189 * @return 0 on success, or -1 if an error occurred.
190 */
191int esas2r_log(const long level, const char *format, ...)
192{
193 int retval = 0;
194 va_list args;
195
196 va_start(args, format);
197
198 retval = esas2r_log_master(level, NULL, format, args);
199
200 va_end(args);
201
202 return retval;
203}
204
205/**
206 * formats and logs a message to the system log. this message will include
207 * device information.
208 *
209 * @param [in] level the event level of the message
210 * @param [in] dev the device information
211 * @param [in] format the formatting string for the message
212 * @param [in] ... the substitution arguments to the formatting string
213 *
214 * @return 0 on success, or -1 if an error occurred.
215 */
216int esas2r_log_dev(const long level,
217 const struct device *dev,
218 const char *format,
219 ...)
220{
221 int retval = 0;
222 va_list args;
223
224 va_start(args, format);
225
226 retval = esas2r_log_master(level, dev, format, args);
227
228 va_end(args);
229
230 return retval;
231}
232
233/**
234 * formats and logs a hex dump to the system log. unlike the other
235 * logging functions, this one does not include device information.
236 *
237 * @param [in] level the event level of the message
238 * @param [in] buf the buffer to dump
239 * @param [in] len the length of the buffer, in bytes
240 *
241 * @return always 1
242 */
243int esas2r_log_hexdump(const long level,
244 const void *buf,
245 size_t len)
246{
247 if (level <= event_log_level) {
248 print_hex_dump(translate_esas2r_event_level_to_kernel(level),
249 "", DUMP_PREFIX_OFFSET, 16, 1, buf,
250 len, true);
251 }
252
253 return 1;
254}
diff --git a/drivers/scsi/esas2r/esas2r_log.h b/drivers/scsi/esas2r/esas2r_log.h
new file mode 100644
index 000000000000..7b6397bb5b94
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_log.h
@@ -0,0 +1,118 @@
1/*
2 * linux/drivers/scsi/esas2r/esas2r_log.h
3 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
6 * (mailto:linuxdrivers@attotech.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * NO WARRANTY
19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 * solely responsible for determining the appropriateness of using and
24 * distributing the Program and assumes all risks associated with its
25 * exercise of rights under this Agreement, including but not limited to
26 * the risks and costs of program errors, damage to or loss of data,
27 * programs or equipment, and unavailability or interruption of operations.
28 *
29 * DISCLAIMER OF LIABILITY
30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37 *
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
41 * USA.
42 */
43
44#ifndef __esas2r_log_h__
45#define __esas2r_log_h__
46
47struct device;
48
49enum {
50 ESAS2R_LOG_NONE = 0, /* no events logged */
51 ESAS2R_LOG_CRIT = 1, /* critical events */
52 ESAS2R_LOG_WARN = 2, /* warning events */
53 ESAS2R_LOG_INFO = 3, /* info events */
54 ESAS2R_LOG_DEBG = 4, /* debugging events */
55 ESAS2R_LOG_TRCE = 5, /* tracing events */
56
57#ifdef ESAS2R_TRACE
58 ESAS2R_LOG_DFLT = ESAS2R_LOG_TRCE
59#else
60 ESAS2R_LOG_DFLT = ESAS2R_LOG_WARN
61#endif
62};
63
64int esas2r_log(const long level, const char *format, ...);
65int esas2r_log_dev(const long level,
66 const struct device *dev,
67 const char *format,
68 ...);
69int esas2r_log_hexdump(const long level,
70 const void *buf,
71 size_t len);
72
73/*
74 * the following macros are provided specifically for debugging and tracing
75 * messages. esas2r_debug() is provided for generic non-hardware layer
76 * debugging and tracing events. esas2r_hdebug is provided specifically for
77 * hardware layer debugging and tracing events.
78 */
79
80#ifdef ESAS2R_DEBUG
81#define esas2r_debug(f, args ...) esas2r_log(ESAS2R_LOG_DEBG, f, ## args)
82#define esas2r_hdebug(f, args ...) esas2r_log(ESAS2R_LOG_DEBG, f, ## args)
83#else
84#define esas2r_debug(f, args ...)
85#define esas2r_hdebug(f, args ...)
86#endif /* ESAS2R_DEBUG */
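
/*
 * For example (illustrative), with ESAS2R_DEBUG defined a call such as
 *
 *	esas2r_debug("firmware state %d", a->firmware.state);
 *
 * expands to esas2r_log(ESAS2R_LOG_DEBG, ...); without ESAS2R_DEBUG it
 * expands to nothing, so neither the format string nor its arguments
 * are evaluated in production builds.
 */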
87
88/*
89 * the following macros are provided in order to trace the driver and catch
90 * some more serious bugs. be warned, enabling these macros may *severely*
91 * impact performance.
92 */
93
94#ifdef ESAS2R_TRACE
95#define esas2r_bugon() \
96 do { \
97 esas2r_log(ESAS2R_LOG_TRCE, "esas2r_bugon() called in %s:%d" \
98 " - dumping stack and stopping kernel", __func__, \
99 __LINE__); \
100 dump_stack(); \
101 BUG(); \
102 } while (0)
103
104#define esas2r_trace_enter() esas2r_log(ESAS2R_LOG_TRCE, "entered %s (%s:%d)", \
105 __func__, __FILE__, __LINE__)
106#define esas2r_trace_exit() esas2r_log(ESAS2R_LOG_TRCE, "exited %s (%s:%d)", \
107 __func__, __FILE__, __LINE__)
108#define esas2r_trace(f, args ...) esas2r_log(ESAS2R_LOG_TRCE, "(%s:%s:%d): " \
109 f, __func__, __FILE__, __LINE__, \
110 ## args)
111#else
112#define esas2r_bugon()
113#define esas2r_trace_enter()
114#define esas2r_trace_exit()
115#define esas2r_trace(f, args ...)
116#endif /* ESAS2R_TRACE */
117
118#endif /* __esas2r_log_h__ */
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
new file mode 100644
index 000000000000..4abf1272e1eb
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -0,0 +1,2032 @@
1/*
2 * linux/drivers/scsi/esas2r/esas2r_main.c
3 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
6 * (mailto:linuxdrivers@attotech.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * NO WARRANTY
19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 * solely responsible for determining the appropriateness of using and
24 * distributing the Program and assumes all risks associated with its
25 * exercise of rights under this Agreement, including but not limited to
26 * the risks and costs of program errors, damage to or loss of data,
27 * programs or equipment, and unavailability or interruption of operations.
28 *
29 * DISCLAIMER OF LIABILITY
30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37 *
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
41 * USA.
42 */
43
44#include "esas2r.h"
45
46MODULE_DESCRIPTION(ESAS2R_DRVR_NAME ": " ESAS2R_LONGNAME " driver");
47MODULE_AUTHOR("ATTO Technology, Inc.");
48MODULE_LICENSE("GPL");
49MODULE_VERSION(ESAS2R_VERSION_STR);
50
51/* global definitions */
52
53static int found_adapters;
54struct esas2r_adapter *esas2r_adapters[MAX_ADAPTERS];
55
56#define ESAS2R_VDA_EVENT_PORT1 54414
57#define ESAS2R_VDA_EVENT_PORT2 54415
58#define ESAS2R_VDA_EVENT_SOCK_COUNT 2
59
60static struct esas2r_adapter *esas2r_adapter_from_kobj(struct kobject *kobj)
61{
62 struct device *dev = container_of(kobj, struct device, kobj);
63 struct Scsi_Host *host = class_to_shost(dev);
64
65 return (struct esas2r_adapter *)host->hostdata;
66}
67
68static ssize_t read_fw(struct file *file, struct kobject *kobj,
69 struct bin_attribute *attr,
70 char *buf, loff_t off, size_t count)
71{
72 struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
73
74 return esas2r_read_fw(a, buf, off, count);
75}
76
77static ssize_t write_fw(struct file *file, struct kobject *kobj,
78 struct bin_attribute *attr,
79 char *buf, loff_t off, size_t count)
80{
81 struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
82
83 return esas2r_write_fw(a, buf, off, count);
84}
85
86static ssize_t read_fs(struct file *file, struct kobject *kobj,
87 struct bin_attribute *attr,
88 char *buf, loff_t off, size_t count)
89{
90 struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
91
92 return esas2r_read_fs(a, buf, off, count);
93}
94
95static ssize_t write_fs(struct file *file, struct kobject *kobj,
96 struct bin_attribute *attr,
97 char *buf, loff_t off, size_t count)
98{
99 struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
100 int length = min(sizeof(struct esas2r_ioctl_fs), count);
101 int result = 0;
102
103 result = esas2r_write_fs(a, buf, off, count);
104
105 if (result < 0)
106 result = 0;
107
108 return length;
109}
110
111static ssize_t read_vda(struct file *file, struct kobject *kobj,
112 struct bin_attribute *attr,
113 char *buf, loff_t off, size_t count)
114{
115 struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
116
117 return esas2r_read_vda(a, buf, off, count);
118}
119
120static ssize_t write_vda(struct file *file, struct kobject *kobj,
121 struct bin_attribute *attr,
122 char *buf, loff_t off, size_t count)
123{
124 struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
125
126 return esas2r_write_vda(a, buf, off, count);
127}
128
129static ssize_t read_live_nvram(struct file *file, struct kobject *kobj,
130 struct bin_attribute *attr,
131 char *buf, loff_t off, size_t count)
132{
133 struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
134 int length = min_t(size_t, sizeof(struct esas2r_sas_nvram), PAGE_SIZE);
135
136 memcpy(buf, a->nvram, length);
137 return length;
138}
139
140static ssize_t write_live_nvram(struct file *file, struct kobject *kobj,
141 struct bin_attribute *attr,
142 char *buf, loff_t off, size_t count)
143{
144 struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
145 struct esas2r_request *rq;
146 int result = -EFAULT;
147
148 rq = esas2r_alloc_request(a);
149 if (rq == NULL)
150 return -ENOMEM;
151
152 if (esas2r_write_params(a, rq, (struct esas2r_sas_nvram *)buf))
153 result = count;
154
155 esas2r_free_request(a, rq);
156
157 return result;
158}
159
160static ssize_t read_default_nvram(struct file *file, struct kobject *kobj,
161 struct bin_attribute *attr,
162 char *buf, loff_t off, size_t count)
163{
164 struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
165
166 esas2r_nvram_get_defaults(a, (struct esas2r_sas_nvram *)buf);
167
168 return sizeof(struct esas2r_sas_nvram);
169}
170
171static ssize_t read_hw(struct file *file, struct kobject *kobj,
172 struct bin_attribute *attr,
173 char *buf, loff_t off, size_t count)
174{
175 struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
176 int length = min_t(size_t, sizeof(struct atto_ioctl), PAGE_SIZE);
177
178 if (!a->local_atto_ioctl)
179 return -ENOMEM;
180
181 if (handle_hba_ioctl(a, a->local_atto_ioctl) != IOCTL_SUCCESS)
182 return -ENOMEM;
183
184 memcpy(buf, a->local_atto_ioctl, length);
185
186 return length;
187}
188
189static ssize_t write_hw(struct file *file, struct kobject *kobj,
190 struct bin_attribute *attr,
191 char *buf, loff_t off, size_t count)
192{
193 struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
194 int length = min(sizeof(struct atto_ioctl), count);
195
196 if (!a->local_atto_ioctl) {
197 a->local_atto_ioctl = kzalloc(sizeof(struct atto_ioctl),
198 GFP_KERNEL);
199 if (a->local_atto_ioctl == NULL) {
200 esas2r_log(ESAS2R_LOG_WARN,
201				   "write_hw kzalloc failed for %zu bytes",
202 sizeof(struct atto_ioctl));
203 return -ENOMEM;
204 }
205 }
206
207 memset(a->local_atto_ioctl, 0, sizeof(struct atto_ioctl));
208 memcpy(a->local_atto_ioctl, buf, length);
209
210 return length;
211}
212
213#define ESAS2R_RW_BIN_ATTR(_name) \
214 struct bin_attribute bin_attr_ ## _name = { \
215 .attr = \
216 { .name = __stringify(_name), .mode = S_IRUSR | S_IWUSR }, \
217 .size = 0, \
218 .read = read_ ## _name, \
219 .write = write_ ## _name }
220
221ESAS2R_RW_BIN_ATTR(fw);
222ESAS2R_RW_BIN_ATTR(fs);
223ESAS2R_RW_BIN_ATTR(vda);
224ESAS2R_RW_BIN_ATTR(hw);
225ESAS2R_RW_BIN_ATTR(live_nvram);
226
227struct bin_attribute bin_attr_default_nvram = {
228 .attr = { .name = "default_nvram", .mode = S_IRUGO },
229 .size = 0,
230 .read = read_default_nvram,
231 .write = NULL
232};
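
/*
 * The attributes above are registered against host->shost_dev.kobj in
 * esas2r_probe(), so (path assumed from the kobject used, with N the
 * SCSI host number) they surface as:
 *
 *	/sys/class/scsi_host/hostN/fw
 *	/sys/class/scsi_host/hostN/fs
 *	/sys/class/scsi_host/hostN/vda
 *	/sys/class/scsi_host/hostN/hw
 *	/sys/class/scsi_host/hostN/live_nvram
 *	/sys/class/scsi_host/hostN/default_nvram	(read-only)
 */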
233
234static struct scsi_host_template driver_template = {
235 .module = THIS_MODULE,
236 .show_info = esas2r_show_info,
237 .name = ESAS2R_LONGNAME,
238 .release = esas2r_release,
239 .info = esas2r_info,
240 .ioctl = esas2r_ioctl,
241 .queuecommand = esas2r_queuecommand,
242 .eh_abort_handler = esas2r_eh_abort,
243 .eh_device_reset_handler = esas2r_device_reset,
244 .eh_bus_reset_handler = esas2r_bus_reset,
245 .eh_host_reset_handler = esas2r_host_reset,
246 .eh_target_reset_handler = esas2r_target_reset,
247 .can_queue = 128,
248 .this_id = -1,
249 .sg_tablesize = SCSI_MAX_SG_SEGMENTS,
250 .cmd_per_lun =
251 ESAS2R_DEFAULT_CMD_PER_LUN,
252 .present = 0,
253 .unchecked_isa_dma = 0,
254 .use_clustering = ENABLE_CLUSTERING,
255 .emulated = 0,
256 .proc_name = ESAS2R_DRVR_NAME,
257 .slave_configure = esas2r_slave_configure,
258 .slave_alloc = esas2r_slave_alloc,
259 .slave_destroy = esas2r_slave_destroy,
260 .change_queue_depth = esas2r_change_queue_depth,
261 .change_queue_type = esas2r_change_queue_type,
262 .max_sectors = 0xFFFF,
263};
264
265int sgl_page_size = 512;
266module_param(sgl_page_size, int, 0);
267MODULE_PARM_DESC(sgl_page_size,
268 "Scatter/gather list (SGL) page size in number of S/G "
269 "entries. If your application is doing a lot of very large "
270 "transfers, you may want to increase the SGL page size. "
271 "Default 512.");
272
273int num_sg_lists = 1024;
274module_param(num_sg_lists, int, 0);
275MODULE_PARM_DESC(num_sg_lists,
276 "Number of scatter/gather lists. Default 1024.");
277
278int sg_tablesize = SCSI_MAX_SG_SEGMENTS;
279module_param(sg_tablesize, int, 0);
280MODULE_PARM_DESC(sg_tablesize,
281 "Maximum number of entries in a scatter/gather table.");
282
283int num_requests = 256;
284module_param(num_requests, int, 0);
285MODULE_PARM_DESC(num_requests,
286 "Number of requests. Default 256.");
287
288int num_ae_requests = 4;
289module_param(num_ae_requests, int, 0);
290MODULE_PARM_DESC(num_ae_requests,
291		 "Number of VDA asynchronous event requests. Default 4.");
292
293int cmd_per_lun = ESAS2R_DEFAULT_CMD_PER_LUN;
294module_param(cmd_per_lun, int, 0);
295MODULE_PARM_DESC(cmd_per_lun,
296 "Maximum number of commands per LUN. Default "
297 DEFINED_NUM_TO_STR(ESAS2R_DEFAULT_CMD_PER_LUN) ".");
298
299int can_queue = 128;
300module_param(can_queue, int, 0);
301MODULE_PARM_DESC(can_queue,
302 "Maximum number of commands per adapter. Default 128.");
303
304int esas2r_max_sectors = 0xFFFF;
305module_param(esas2r_max_sectors, int, 0);
306MODULE_PARM_DESC(esas2r_max_sectors,
307 "Maximum number of disk sectors in a single data transfer. "
308 "Default 65535 (largest possible setting).");
309
310int interrupt_mode = 1;
311module_param(interrupt_mode, int, 0);
312MODULE_PARM_DESC(interrupt_mode,
313 "Defines the interrupt mode to use. 0 for legacy"
314 ", 1 for MSI. Default is MSI (1).");
315
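/*
 * For example (illustrative), the parameters above are given at module
 * load time in the usual way:
 *
 *	modprobe esas2r num_requests=512 interrupt_mode=0
 *
 * Out-of-range values are not rejected; esas2r_init() below clamps
 * them to their supported ranges.
 */
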
316static struct pci_device_id
317 esas2r_pci_table[] = {
318 { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x0049,
319 0,
320 0, 0 },
321 { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004A,
322 0,
323 0, 0 },
324 { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004B,
325 0,
326 0, 0 },
327 { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004C,
328 0,
329 0, 0 },
330 { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004D,
331 0,
332 0, 0 },
333 { ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004E,
334 0,
335 0, 0 },
336 { 0, 0, 0, 0,
337 0,
338 0, 0 }
339};
340
341MODULE_DEVICE_TABLE(pci, esas2r_pci_table);
342
343static int
344esas2r_probe(struct pci_dev *pcid, const struct pci_device_id *id);
345
346static void
347esas2r_remove(struct pci_dev *pcid);
348
349static struct pci_driver
350 esas2r_pci_driver = {
351 .name = ESAS2R_DRVR_NAME,
352 .id_table = esas2r_pci_table,
353 .probe = esas2r_probe,
354 .remove = esas2r_remove,
355 .suspend = esas2r_suspend,
356 .resume = esas2r_resume,
357};
358
359static int esas2r_probe(struct pci_dev *pcid,
360 const struct pci_device_id *id)
361{
362 struct Scsi_Host *host = NULL;
363 struct esas2r_adapter *a;
364 int err;
365
366 size_t host_alloc_size = sizeof(struct esas2r_adapter)
367 + ((num_requests) +
368 1) * sizeof(struct esas2r_request);
369
370 esas2r_log_dev(ESAS2R_LOG_DEBG, &(pcid->dev),
371 "esas2r_probe() 0x%02x 0x%02x 0x%02x 0x%02x",
372 pcid->vendor,
373 pcid->device,
374 pcid->subsystem_vendor,
375 pcid->subsystem_device);
376
377 esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
378 "before pci_enable_device() "
379 "enable_cnt: %d",
380 pcid->enable_cnt.counter);
381
382 err = pci_enable_device(pcid);
383 if (err != 0) {
384 esas2r_log_dev(ESAS2R_LOG_CRIT, &(pcid->dev),
385 "pci_enable_device() FAIL (%d)",
386 err);
387 return -ENODEV;
388 }
389
390 esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
391 "pci_enable_device() OK");
392 esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
393 "after pci_device_enable() enable_cnt: %d",
394 pcid->enable_cnt.counter);
395
396 host = scsi_host_alloc(&driver_template, host_alloc_size);
397 if (host == NULL) {
398 esas2r_log(ESAS2R_LOG_CRIT, "scsi_host_alloc() FAIL");
399 return -ENODEV;
400 }
401
402 memset(host->hostdata, 0, host_alloc_size);
403
404 a = (struct esas2r_adapter *)host->hostdata;
405
406 esas2r_log(ESAS2R_LOG_INFO, "scsi_host_alloc() OK host: %p", host);
407
408 /* override max LUN and max target id */
409
410 host->max_id = ESAS2R_MAX_ID + 1;
411 host->max_lun = 255;
412
413 /* we can handle 16-byte CDbs */
414
415 host->max_cmd_len = 16;
416
417 host->can_queue = can_queue;
418 host->cmd_per_lun = cmd_per_lun;
419 host->this_id = host->max_id + 1;
420 host->max_channel = 0;
421 host->unique_id = found_adapters;
422 host->sg_tablesize = sg_tablesize;
423 host->max_sectors = esas2r_max_sectors;
424
425 /* set to bus master for BIOses that don't do it for us */
426
427 esas2r_log(ESAS2R_LOG_INFO, "pci_set_master() called");
428
429 pci_set_master(pcid);
430
431 if (!esas2r_init_adapter(host, pcid, found_adapters)) {
432 esas2r_log(ESAS2R_LOG_CRIT,
433 "unable to initialize device at PCI bus %x:%x",
434 pcid->bus->number,
435 pcid->devfn);
436
437 esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
438 "scsi_host_put() called");
439
440 scsi_host_put(host);
441
442		return -ENODEV;
443
444	}
445
446 esas2r_log(ESAS2R_LOG_INFO, "pci_set_drvdata(%p, %p) called", pcid,
447 host->hostdata);
448
449 pci_set_drvdata(pcid, host);
450
451 esas2r_log(ESAS2R_LOG_INFO, "scsi_add_host() called");
452
453 err = scsi_add_host(host, &pcid->dev);
454
455 if (err) {
456 esas2r_log(ESAS2R_LOG_CRIT, "scsi_add_host returned %d", err);
457 esas2r_log_dev(ESAS2R_LOG_CRIT, &(host->shost_gendev),
458 "scsi_add_host() FAIL");
459
460 esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
461 "scsi_host_put() called");
462
463 scsi_host_put(host);
464
465 esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
466 "pci_set_drvdata(%p, NULL) called",
467 pcid);
468
469 pci_set_drvdata(pcid, NULL);
470
471 return -ENODEV;
472 }
473
474
475 esas2r_fw_event_on(a);
476
477 esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
478 "scsi_scan_host() called");
479
480 scsi_scan_host(host);
481
482 /* Add sysfs binary files */
483 if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fw))
484 esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
485 "Failed to create sysfs binary file: fw");
486 else
487 a->sysfs_fw_created = 1;
488
489 if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fs))
490 esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
491 "Failed to create sysfs binary file: fs");
492 else
493 a->sysfs_fs_created = 1;
494
495 if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_vda))
496 esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
497 "Failed to create sysfs binary file: vda");
498 else
499 a->sysfs_vda_created = 1;
500
501 if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_hw))
502 esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
503 "Failed to create sysfs binary file: hw");
504 else
505 a->sysfs_hw_created = 1;
506
507 if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_live_nvram))
508 esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
509 "Failed to create sysfs binary file: live_nvram");
510 else
511 a->sysfs_live_nvram_created = 1;
512
513 if (sysfs_create_bin_file(&host->shost_dev.kobj,
514 &bin_attr_default_nvram))
515 esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
516 "Failed to create sysfs binary file: default_nvram");
517 else
518 a->sysfs_default_nvram_created = 1;
519
520 found_adapters++;
521
522 return 0;
523}
524
525static void esas2r_remove(struct pci_dev *pdev)
526{
527 struct Scsi_Host *host;
528 int index;
529
530 if (pdev == NULL) {
531 esas2r_log(ESAS2R_LOG_WARN, "esas2r_remove pdev==NULL");
532 return;
533 }
534
535 host = pci_get_drvdata(pdev);
536
537 if (host == NULL) {
538 /*
539 * this can happen if pci_set_drvdata was already called
540 * to clear the host pointer. if this is the case, we
541 * are okay; this channel has already been cleaned up.
542 */
543
544 return;
545 }
546
547 esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
548 "esas2r_remove(%p) called; "
549 "host:%p", pdev,
550 host);
551
552 index = esas2r_cleanup(host);
553
554 if (index < 0)
555 esas2r_log_dev(ESAS2R_LOG_WARN, &(pdev->dev),
556 "unknown host in %s",
557 __func__);
558
559 found_adapters--;
560
561 /* if this was the last adapter, clean up the rest of the driver */
562
563 if (found_adapters == 0)
564 esas2r_cleanup(NULL);
565}
566
567static int __init esas2r_init(void)
568{
569 int i;
570
571 esas2r_log(ESAS2R_LOG_INFO, "%s called", __func__);
572
573 /* verify valid parameters */
574
575 if (can_queue < 1) {
576 esas2r_log(ESAS2R_LOG_WARN,
577 "warning: can_queue must be at least 1, value "
578 "forced.");
579 can_queue = 1;
580 } else if (can_queue > 2048) {
581 esas2r_log(ESAS2R_LOG_WARN,
582 "warning: can_queue must be no larger than 2048, "
583 "value forced.");
584 can_queue = 2048;
585 }
586
587 if (cmd_per_lun < 1) {
588 esas2r_log(ESAS2R_LOG_WARN,
589 "warning: cmd_per_lun must be at least 1, value "
590 "forced.");
591 cmd_per_lun = 1;
592 } else if (cmd_per_lun > 2048) {
593 esas2r_log(ESAS2R_LOG_WARN,
594 "warning: cmd_per_lun must be no larger than "
595 "2048, value forced.");
596 cmd_per_lun = 2048;
597 }
598
599 if (sg_tablesize < 32) {
600 esas2r_log(ESAS2R_LOG_WARN,
601 "warning: sg_tablesize must be at least 32, "
602 "value forced.");
603 sg_tablesize = 32;
604 }
605
606 if (esas2r_max_sectors < 1) {
607 esas2r_log(ESAS2R_LOG_WARN,
608 "warning: esas2r_max_sectors must be at least "
609 "1, value forced.");
610 esas2r_max_sectors = 1;
611 } else if (esas2r_max_sectors > 0xffff) {
612 esas2r_log(ESAS2R_LOG_WARN,
613 "warning: esas2r_max_sectors must be no larger "
614 "than 0xffff, value forced.");
615 esas2r_max_sectors = 0xffff;
616 }
617
618 sgl_page_size &= ~(ESAS2R_SGL_ALIGN - 1);
619
620 if (sgl_page_size < SGL_PG_SZ_MIN)
621 sgl_page_size = SGL_PG_SZ_MIN;
622 else if (sgl_page_size > SGL_PG_SZ_MAX)
623 sgl_page_size = SGL_PG_SZ_MAX;
624
625 if (num_sg_lists < NUM_SGL_MIN)
626 num_sg_lists = NUM_SGL_MIN;
627 else if (num_sg_lists > NUM_SGL_MAX)
628 num_sg_lists = NUM_SGL_MAX;
629
630 if (num_requests < NUM_REQ_MIN)
631 num_requests = NUM_REQ_MIN;
632 else if (num_requests > NUM_REQ_MAX)
633 num_requests = NUM_REQ_MAX;
634
635 if (num_ae_requests < NUM_AE_MIN)
636 num_ae_requests = NUM_AE_MIN;
637 else if (num_ae_requests > NUM_AE_MAX)
638 num_ae_requests = NUM_AE_MAX;
639
640 /* set up other globals */
641
642 for (i = 0; i < MAX_ADAPTERS; i++)
643 esas2r_adapters[i] = NULL;
644
645 /* initialize */
646
647 driver_template.module = THIS_MODULE;
648
649 if (pci_register_driver(&esas2r_pci_driver) != 0)
650 esas2r_log(ESAS2R_LOG_CRIT, "pci_register_driver FAILED");
651 else
652 esas2r_log(ESAS2R_LOG_INFO, "pci_register_driver() OK");
653
654 if (!found_adapters) {
655 pci_unregister_driver(&esas2r_pci_driver);
656 esas2r_cleanup(NULL);
657
658 esas2r_log(ESAS2R_LOG_CRIT,
659 "driver will not be loaded because no ATTO "
660 "%s devices were found",
661 ESAS2R_DRVR_NAME);
662 return -1;
663 } else {
664 esas2r_log(ESAS2R_LOG_INFO, "found %d adapters",
665 found_adapters);
666 }
667
668 return 0;
669}
670
671/* Handle ioctl calls to "/proc/scsi/esas2r/ATTOnode" */
672static const struct file_operations esas2r_proc_fops = {
673 .compat_ioctl = esas2r_proc_ioctl,
674 .unlocked_ioctl = esas2r_proc_ioctl,
675};
676
677static struct Scsi_Host *esas2r_proc_host;
678static int esas2r_proc_major;
679
680long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
681{
682 return esas2r_ioctl_handler(esas2r_proc_host->hostdata,
683 (int)cmd, (void __user *)arg);
684}
685
686static void __exit esas2r_exit(void)
687{
688 esas2r_log(ESAS2R_LOG_INFO, "%s called", __func__);
689
690 if (esas2r_proc_major > 0) {
691 esas2r_log(ESAS2R_LOG_INFO, "unregister proc");
692
693 remove_proc_entry(ATTONODE_NAME,
694 esas2r_proc_host->hostt->proc_dir);
695 unregister_chrdev(esas2r_proc_major, ESAS2R_DRVR_NAME);
696
697 esas2r_proc_major = 0;
698 }
699
700 esas2r_log(ESAS2R_LOG_INFO, "pci_unregister_driver() called");
701
702 pci_unregister_driver(&esas2r_pci_driver);
703}
704
705int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh)
706{
707 struct esas2r_adapter *a = (struct esas2r_adapter *)sh->hostdata;
708
709 struct esas2r_target *t;
710 int dev_count = 0;
711
712 esas2r_log(ESAS2R_LOG_DEBG, "esas2r_show_info (%p,%d)", m, sh->host_no);
713
714 seq_printf(m, ESAS2R_LONGNAME "\n"
715 "Driver version: "ESAS2R_VERSION_STR "\n"
716 "Flash version: %s\n"
717 "Firmware version: %s\n"
718 "Copyright "ESAS2R_COPYRIGHT_YEARS "\n"
719 "http://www.attotech.com\n"
720 "\n",
721 a->flash_rev,
722 a->fw_rev[0] ? a->fw_rev : "(none)");
723
724
725 seq_printf(m, "Adapter information:\n"
726 "--------------------\n"
727 "Model: %s\n"
728 "SAS address: %02X%02X%02X%02X:%02X%02X%02X%02X\n",
729 esas2r_get_model_name(a),
730 a->nvram->sas_addr[0],
731 a->nvram->sas_addr[1],
732 a->nvram->sas_addr[2],
733 a->nvram->sas_addr[3],
734 a->nvram->sas_addr[4],
735 a->nvram->sas_addr[5],
736 a->nvram->sas_addr[6],
737 a->nvram->sas_addr[7]);
738
739 seq_puts(m, "\n"
740 "Discovered devices:\n"
741 "\n"
742 " # Target ID\n"
743 "---------------\n");
744
745 for (t = a->targetdb; t < a->targetdb_end; t++)
746 if (t->buffered_target_state == TS_PRESENT) {
747 seq_printf(m, " %3d %3d\n",
748 ++dev_count,
749 (u16)(uintptr_t)(t - a->targetdb));
750 }
751
752 if (dev_count == 0)
753 seq_puts(m, "none\n");
754
755 seq_puts(m, "\n");
756 return 0;
757
758}
759
760int esas2r_release(struct Scsi_Host *sh)
761{
762 esas2r_log_dev(ESAS2R_LOG_INFO, &(sh->shost_gendev),
763 "esas2r_release() called");
764
765 esas2r_cleanup(sh);
766 if (sh->irq)
767 free_irq(sh->irq, NULL);
768 scsi_unregister(sh);
769 return 0;
770}
771
772const char *esas2r_info(struct Scsi_Host *sh)
773{
774 struct esas2r_adapter *a = (struct esas2r_adapter *)sh->hostdata;
775 static char esas2r_info_str[512];
776
777 esas2r_log_dev(ESAS2R_LOG_INFO, &(sh->shost_gendev),
778 "esas2r_info() called");
779
780 /*
781 * if we haven't done so already, register as a char driver
782 * and stick a node under "/proc/scsi/esas2r/ATTOnode"
783 */
784
785 if (esas2r_proc_major <= 0) {
786 esas2r_proc_host = sh;
787
788 esas2r_proc_major = register_chrdev(0, ESAS2R_DRVR_NAME,
789 &esas2r_proc_fops);
790
791 esas2r_log_dev(ESAS2R_LOG_DEBG, &(sh->shost_gendev),
792 "register_chrdev (major %d)",
793 esas2r_proc_major);
794
795 if (esas2r_proc_major > 0) {
796 struct proc_dir_entry *pde;
797
798 pde = proc_create(ATTONODE_NAME, 0,
799 sh->hostt->proc_dir,
800 &esas2r_proc_fops);
801
802 if (!pde) {
803 esas2r_log_dev(ESAS2R_LOG_WARN,
804 &(sh->shost_gendev),
805 "failed to create_proc_entry");
806 esas2r_proc_major = -1;
807 }
808 }
809 }
810
811 sprintf(esas2r_info_str,
812 ESAS2R_LONGNAME " (bus 0x%02X, device 0x%02X, IRQ 0x%02X)"
813 " driver version: "ESAS2R_VERSION_STR " firmware version: "
814 "%s\n",
815 a->pcid->bus->number, a->pcid->devfn, a->pcid->irq,
816 a->fw_rev[0] ? a->fw_rev : "(none)");
817
818 return esas2r_info_str;
819}
820
821/* Callback for building a request scatter/gather list */
822static u32 get_physaddr_from_sgc(struct esas2r_sg_context *sgc, u64 *addr)
823{
824 u32 len;
825
826 if (likely(sgc->cur_offset == sgc->exp_offset)) {
827 /*
828 * the normal case: caller used all bytes from previous call, so
829 * expected offset is the same as the current offset.
830 */
831
832 if (sgc->sgel_count < sgc->num_sgel) {
833 /* retrieve next segment, except for first time */
834 if (sgc->exp_offset > (u8 *)0) {
835 /* advance current segment */
836 sgc->cur_sgel = sg_next(sgc->cur_sgel);
837 ++(sgc->sgel_count);
838 }
839
840
841 len = sg_dma_len(sgc->cur_sgel);
842 (*addr) = sg_dma_address(sgc->cur_sgel);
843
844 /* save the total # bytes returned to caller so far */
845 sgc->exp_offset += len;
846
847 } else {
848 len = 0;
849 }
850 } else if (sgc->cur_offset < sgc->exp_offset) {
851 /*
852 * caller did not use all bytes from previous call. need to
853 * compute the address based on current segment.
854 */
855
856 len = sg_dma_len(sgc->cur_sgel);
857 (*addr) = sg_dma_address(sgc->cur_sgel);
858
859 sgc->exp_offset -= len;
860
861 /* calculate PA based on prev segment address and offsets */
862 *addr = *addr +
863 (sgc->cur_offset - sgc->exp_offset);
864
865 sgc->exp_offset += len;
866
867 /* re-calculate length based on offset */
868 len = lower_32_bits(
869 sgc->exp_offset - sgc->cur_offset);
870 } else { /* if ( sgc->cur_offset > sgc->exp_offset ) */
871 /*
872 * we don't expect the caller to skip ahead.
873 * cur_offset will never exceed the len we return
874 */
875 len = 0;
876 }
877
878 return len;
879}
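
/*
 * Contract illustration (example numbers, assuming the use by
 * esas2r_build_sg_list()): the caller passes the total number of bytes
 * it has consumed so far in sgc->cur_offset.  For DMA segments of
 * 0x1000 and 0x800 bytes:
 *
 *	call 1: cur_offset == exp_offset (both start at NULL), so the
 *		first segment is returned: len 0x1000, exp_offset 0x1000
 *	call 2: the caller consumed only 0xC00, so cur_offset (0xC00) <
 *		exp_offset (0x1000): the remaining 0x400 bytes of
 *		segment 1 are returned at sg_dma_address() + 0xC00
 *	call 3: cur_offset == exp_offset (0x1000): advance to segment 2,
 *		returning len 0x800 at its base address
 */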
880
881int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
882{
883 struct esas2r_adapter *a =
884 (struct esas2r_adapter *)cmd->device->host->hostdata;
885 struct esas2r_request *rq;
886 struct esas2r_sg_context sgc;
887 unsigned bufflen;
888
889 /* Assume success, if it fails we will fix the result later. */
890 cmd->result = DID_OK << 16;
891
892 if (unlikely(a->flags & AF_DEGRADED_MODE)) {
893 cmd->result = DID_NO_CONNECT << 16;
894 cmd->scsi_done(cmd);
895 return 0;
896 }
897
898 rq = esas2r_alloc_request(a);
899 if (unlikely(rq == NULL)) {
900 esas2r_debug("esas2r_alloc_request failed");
901 return SCSI_MLQUEUE_HOST_BUSY;
902 }
903
904 rq->cmd = cmd;
905 bufflen = scsi_bufflen(cmd);
906
907 if (likely(bufflen != 0)) {
908 if (cmd->sc_data_direction == DMA_TO_DEVICE)
909 rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
910 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
911 rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
912 }
913
914 memcpy(rq->vrq->scsi.cdb, cmd->cmnd, cmd->cmd_len);
915 rq->vrq->scsi.length = cpu_to_le32(bufflen);
916 rq->target_id = cmd->device->id;
917 rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun);
918 rq->sense_buf = cmd->sense_buffer;
919 rq->sense_len = SCSI_SENSE_BUFFERSIZE;
920
921 esas2r_sgc_init(&sgc, a, rq, NULL);
922
923 sgc.length = bufflen;
924 sgc.cur_offset = NULL;
925
926 sgc.cur_sgel = scsi_sglist(cmd);
927 sgc.exp_offset = NULL;
928 sgc.num_sgel = scsi_dma_map(cmd);
929 sgc.sgel_count = 0;
930
931 if (unlikely(sgc.num_sgel < 0)) {
932 esas2r_free_request(a, rq);
933 return SCSI_MLQUEUE_HOST_BUSY;
934 }
935
936 sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_from_sgc;
937
938 if (unlikely(!esas2r_build_sg_list(a, rq, &sgc))) {
939 scsi_dma_unmap(cmd);
940 esas2r_free_request(a, rq);
941 return SCSI_MLQUEUE_HOST_BUSY;
942 }
943
944 esas2r_debug("start request %p to %d:%d\n", rq, (int)cmd->device->id,
945 (int)cmd->device->lun);
946
947 esas2r_start_request(a, rq);
948
949 return 0;
950}
951
952static void complete_task_management_request(struct esas2r_adapter *a,
953 struct esas2r_request *rq)
954{
955 (*rq->task_management_status_ptr) = rq->req_stat;
956 esas2r_free_request(a, rq);
957}
958
959/**
960 * Searches the specified queue for the command to abort.
961 *
962 * @param [in] a the adapter containing the queues
963 * @param [in,out] abort_request set to a new abort request, if one is needed
964 * @param [in] cmd the SCSI command to abort
965 * @param [in] queue the queue to search
966 *
967 * @return 0 on failure, 1 if command was not found, 2 if command was found
968 */
969static int esas2r_check_active_queue(struct esas2r_adapter *a,
970 struct esas2r_request **abort_request,
971 struct scsi_cmnd *cmd,
972 struct list_head *queue)
973{
974 bool found = false;
975 struct esas2r_request *ar = *abort_request;
976 struct esas2r_request *rq;
977 struct list_head *element, *next;
978
979 list_for_each_safe(element, next, queue) {
980
981 rq = list_entry(element, struct esas2r_request, req_list);
982
983 if (rq->cmd == cmd) {
984
985 /* Found the request. See what to do with it. */
986 if (queue == &a->active_list) {
987 /*
988 * We are searching the active queue, which
989 * means that we need to send an abort request
990 * to the firmware.
991 */
992 ar = esas2r_alloc_request(a);
993 if (ar == NULL) {
994 esas2r_log_dev(ESAS2R_LOG_WARN,
995 &(a->host->shost_gendev),
996 "unable to allocate an abort request for cmd %p",
997 cmd);
998 return 0; /* Failure */
999 }
1000
1001 /*
1002 * Task management request must be formatted
1003 * with a lock held.
1004 */
1005 ar->sense_len = 0;
1006 ar->vrq->scsi.length = 0;
1007 ar->target_id = rq->target_id;
1008 ar->vrq->scsi.flags |= cpu_to_le32(
1009 (u8)le32_to_cpu(rq->vrq->scsi.flags));
1010
1011 memset(ar->vrq->scsi.cdb, 0,
1012 sizeof(ar->vrq->scsi.cdb));
1013
1014 ar->vrq->scsi.flags |= cpu_to_le32(
1015 FCP_CMND_TRM);
1016 ar->vrq->scsi.u.abort_handle =
1017 rq->vrq->scsi.handle;
1018 } else {
1019 /*
1020 * The request is pending but not active on
1021 * the firmware. Just free it now and we'll
1022 * report the successful abort below.
1023 */
1024 list_del_init(&rq->req_list);
1025 esas2r_free_request(a, rq);
1026 }
1027
1028 found = true;
1029 break;
1030 }
1031
1032 }
1033
1034	if (!found)
1035		return 1; /* Not found */
1036
1037	*abort_request = ar;
1038
1039	return 2; /* found */
1040}
1041
1042int esas2r_eh_abort(struct scsi_cmnd *cmd)
1043{
1044 struct esas2r_adapter *a =
1045 (struct esas2r_adapter *)cmd->device->host->hostdata;
1046 struct esas2r_request *abort_request = NULL;
1047 unsigned long flags;
1048 struct list_head *queue;
1049 int result;
1050
1051 esas2r_log(ESAS2R_LOG_INFO, "eh_abort (%p)", cmd);
1052
1053 if (a->flags & AF_DEGRADED_MODE) {
1054 cmd->result = DID_ABORT << 16;
1055
1056 scsi_set_resid(cmd, 0);
1057
1058 cmd->scsi_done(cmd);
1059
1060 return 0;
1061 }
1062
1063 spin_lock_irqsave(&a->queue_lock, flags);
1064
1065 /*
1066 * Run through the defer and active queues looking for the request
1067 * to abort.
1068 */
1069
1070 queue = &a->defer_list;
1071
1072check_active_queue:
1073
1074 result = esas2r_check_active_queue(a, &abort_request, cmd, queue);
1075
1076 if (!result) {
1077 spin_unlock_irqrestore(&a->queue_lock, flags);
1078 return FAILED;
1079	} else if (result == 1 && (queue == &a->defer_list)) {
1080 queue = &a->active_list;
1081 goto check_active_queue;
1082 }
1083
1084 spin_unlock_irqrestore(&a->queue_lock, flags);
1085
1086 if (abort_request) {
1087 u8 task_management_status = RS_PENDING;
1088
1089 /*
1090 * the request is already active, so we need to tell
1091 * the firmware to abort it and wait for the response.
1092 */
1093
1094 abort_request->comp_cb = complete_task_management_request;
1095 abort_request->task_management_status_ptr =
1096 &task_management_status;
1097
1098 esas2r_start_request(a, abort_request);
1099
1100 if (atomic_read(&a->disable_cnt) == 0)
1101 esas2r_do_deferred_processes(a);
1102
1103 while (task_management_status == RS_PENDING)
1104 msleep(10);
1105
1106 /*
1107 * Once we get here, the original request will have been
1108 * completed by the firmware and the abort request will have
1109 * been cleaned up. We're done!
1110 */
1111
1112 return SUCCESS;
1113 }
1114
1115 /*
1116 * If we get here, either we found the inactive request and
1117 * freed it, or we didn't find it at all. Either way, success!
1118 */
1119
1120 cmd->result = DID_ABORT << 16;
1121
1122 scsi_set_resid(cmd, 0);
1123
1124 cmd->scsi_done(cmd);
1125
1126 return SUCCESS;
1127}
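/*
 * Illustrative only: the polling loops in esas2r_eh_abort() above and
 * esas2r_dev_targ_reset() below spin on task_management_status until the
 * completion callback rewrites it through task_management_status_ptr.
 * A minimal sketch of that handshake, assuming the callback only needs
 * to publish the final status and release the request (the real
 * complete_task_management_request() is defined earlier in this file,
 * outside this hunk):
 */
static void sketch_complete_task_mgmt(struct esas2r_adapter *a,
				      struct esas2r_request *rq)
{
	/* Wake the waiter: it watches for any value other than RS_PENDING. */
	*rq->task_management_status_ptr = rq->req_stat;

	/* The waiter never touches the request again; recycle it. */
	esas2r_free_request(a, rq);
}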
1128
1129static int esas2r_host_bus_reset(struct scsi_cmnd *cmd, bool host_reset)
1130{
1131 struct esas2r_adapter *a =
1132 (struct esas2r_adapter *)cmd->device->host->hostdata;
1133
1134 if (a->flags & AF_DEGRADED_MODE)
1135 return FAILED;
1136
1137 if (host_reset)
1138 esas2r_reset_adapter(a);
1139 else
1140 esas2r_reset_bus(a);
1141
1142 /* The above call sets the AF_OS_RESET flag; wait for it to clear. */
1143
1144 while (a->flags & AF_OS_RESET) {
1145 msleep(10);
1146
1147 if (a->flags & AF_DEGRADED_MODE)
1148 return FAILED;
1149 }
1150
1151 if (a->flags & AF_DEGRADED_MODE)
1152 return FAILED;
1153
1154 return SUCCESS;
1155}
1156
1157int esas2r_host_reset(struct scsi_cmnd *cmd)
1158{
1159 esas2r_log(ESAS2R_LOG_INFO, "host_reset (%p)", cmd);
1160
1161 return esas2r_host_bus_reset(cmd, true);
1162}
1163
1164int esas2r_bus_reset(struct scsi_cmnd *cmd)
1165{
1166 esas2r_log(ESAS2R_LOG_INFO, "bus_reset (%p)", cmd);
1167
1168 return esas2r_host_bus_reset(cmd, false);
1169}
1170
1171static int esas2r_dev_targ_reset(struct scsi_cmnd *cmd, bool target_reset)
1172{
1173 struct esas2r_adapter *a =
1174 (struct esas2r_adapter *)cmd->device->host->hostdata;
1175 struct esas2r_request *rq;
1176 u8 task_management_status = RS_PENDING;
1177 bool completed;
1178
1179 if (a->flags & AF_DEGRADED_MODE)
1180 return FAILED;
1181
1182retry:
1183 rq = esas2r_alloc_request(a);
1184 if (rq == NULL) {
1185 if (target_reset) {
1186 esas2r_log(ESAS2R_LOG_CRIT,
1187 "unable to allocate a request for a "
1188 "target reset (%d)!",
1189 cmd->device->id);
1190 } else {
1191 esas2r_log(ESAS2R_LOG_CRIT,
1192 "unable to allocate a request for a "
1193 "device reset (%d:%d)!",
1194 cmd->device->id,
1195 cmd->device->lun);
1196 }
1197
1198
1199 return FAILED;
1200 }
1201
1202 rq->target_id = cmd->device->id;
1203 rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun);
1204 rq->req_stat = RS_PENDING;
1205
1206 rq->comp_cb = complete_task_management_request;
1207 rq->task_management_status_ptr = &task_management_status;
1208
1209 if (target_reset) {
1210 esas2r_debug("issuing target reset (%p) to id %d", rq,
1211 cmd->device->id);
1212 completed = esas2r_send_task_mgmt(a, rq, 0x20);
1213 } else {
1214 esas2r_debug("issuing device reset (%p) to id %d lun %d", rq,
1215 cmd->device->id, cmd->device->lun);
1216 completed = esas2r_send_task_mgmt(a, rq, 0x10);
1217 }
1218
1219 if (completed) {
1220 /* The task management command completed right away; free it now. */
1221
1222 esas2r_free_request(a, rq);
1223 } else {
1224 /*
1225 * Wait for firmware to complete the request. Completion
1226 * callback will free it.
1227 */
1228 while (task_management_status == RS_PENDING)
1229 msleep(10);
1230 }
1231
1232 if (a->flags & AF_DEGRADED_MODE)
1233 return FAILED;
1234
1235 if (task_management_status == RS_BUSY) {
1236 /*
1237 * Busy, probably because we are flashing. Wait a bit and
1238 * try again.
1239 */
1240 msleep(100);
1241 goto retry;
1242 }
1243
1244 return SUCCESS;
1245}
1246
1247int esas2r_device_reset(struct scsi_cmnd *cmd)
1248{
1249 esas2r_log(ESAS2R_LOG_INFO, "device_reset (%p)", cmd);
1250
1251 return esas2r_dev_targ_reset(cmd, false);
1252
1253}
1254
1255int esas2r_target_reset(struct scsi_cmnd *cmd)
1256{
1257 esas2r_log(ESAS2R_LOG_INFO, "target_reset (%p)", cmd);
1258
1259 return esas2r_dev_targ_reset(cmd, true);
1260}
1261
1262int esas2r_change_queue_depth(struct scsi_device *dev, int depth, int reason)
1263{
1264 esas2r_log(ESAS2R_LOG_INFO, "change_queue_depth %p, %d", dev, depth);
1265
1266 scsi_adjust_queue_depth(dev, scsi_get_tag_type(dev), depth);
1267
1268 return dev->queue_depth;
1269}
1270
1271int esas2r_change_queue_type(struct scsi_device *dev, int type)
1272{
1273 esas2r_log(ESAS2R_LOG_INFO, "change_queue_type %p, %d", dev, type);
1274
1275 if (dev->tagged_supported) {
1276 scsi_set_tag_type(dev, type);
1277
1278 if (type)
1279 scsi_activate_tcq(dev, dev->queue_depth);
1280 else
1281 scsi_deactivate_tcq(dev, dev->queue_depth);
1282 } else {
1283 type = 0;
1284 }
1285
1286 return type;
1287}
1288
1289int esas2r_slave_alloc(struct scsi_device *dev)
1290{
1291 return 0;
1292}
1293
1294int esas2r_slave_configure(struct scsi_device *dev)
1295{
1296 esas2r_log_dev(ESAS2R_LOG_INFO, &(dev->sdev_gendev),
1297 "esas2r_slave_configure()");
1298
1299 if (dev->tagged_supported) {
1300 scsi_set_tag_type(dev, MSG_SIMPLE_TAG);
1301 scsi_activate_tcq(dev, cmd_per_lun);
1302 } else {
1303 scsi_set_tag_type(dev, 0);
1304 scsi_deactivate_tcq(dev, cmd_per_lun);
1305 }
1306
1307 return 0;
1308}
1309
1310void esas2r_slave_destroy(struct scsi_device *dev)
1311{
1312 esas2r_log_dev(ESAS2R_LOG_INFO, &(dev->sdev_gendev),
1313 "esas2r_slave_destroy()");
1314}
1315
1316void esas2r_log_request_failure(struct esas2r_adapter *a,
1317 struct esas2r_request *rq)
1318{
1319 u8 reqstatus = rq->req_stat;
1320
1321 if (reqstatus == RS_SUCCESS)
1322 return;
1323
1324 if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
1325 if (reqstatus == RS_SCSI_ERROR) {
1326 if (rq->func_rsp.scsi_rsp.sense_len >= 13) {
1327 esas2r_log(ESAS2R_LOG_WARN,
1328 "request failure - SCSI error %x ASC:%x ASCQ:%x CDB:%x",
1329 rq->sense_buf[2], rq->sense_buf[12],
1330 rq->sense_buf[13],
1331 rq->vrq->scsi.cdb[0]);
1332 } else {
1333 esas2r_log(ESAS2R_LOG_WARN,
1334 "request failure - SCSI error CDB:%x\n",
1335 rq->vrq->scsi.cdb[0]);
1336 }
1337 } else if ((rq->vrq->scsi.cdb[0] != INQUIRY
1338 && rq->vrq->scsi.cdb[0] != REPORT_LUNS)
1339 || (reqstatus != RS_SEL
1340 && reqstatus != RS_SEL2)) {
1341 if ((reqstatus == RS_UNDERRUN) &&
1342 (rq->vrq->scsi.cdb[0] == INQUIRY)) {
1343 /* Don't log inquiry underruns */
1344 } else {
1345 esas2r_log(ESAS2R_LOG_WARN,
1346 "request failure - cdb:%x reqstatus:%d target:%d",
1347 rq->vrq->scsi.cdb[0], reqstatus,
1348 rq->target_id);
1349 }
1350 }
1351 }
1352}
1353
1354void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq)
1355{
1356 u32 starttime;
1357 u32 timeout;
1358
1359 starttime = jiffies_to_msecs(jiffies);
1360 timeout = rq->timeout ? rq->timeout : 5000;
1361
1362 while (true) {
1363 esas2r_polled_interrupt(a);
1364
1365 if (rq->req_stat != RS_STARTED)
1366 break;
1367
1368 schedule_timeout_interruptible(msecs_to_jiffies(100));
1369
1370 if ((jiffies_to_msecs(jiffies) - starttime) > timeout) {
1371 esas2r_hdebug("request TMO");
1372 esas2r_bugon();
1373
1374 rq->req_stat = RS_TIMEOUT;
1375
1376 esas2r_local_reset_adapter(a);
1377 return;
1378 }
1379 }
1380}
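/*
 * Illustrative only (not part of the driver): a hypothetical caller of
 * the polled path above. The request body setup is elided and error
 * handling is simplified; an rq->timeout of 0 falls back to the 5000 ms
 * default.
 */
static void sketch_polled_request(struct esas2r_adapter *a)
{
	struct esas2r_request *rq = esas2r_alloc_request(a);

	if (rq == NULL)
		return;

	/* ...build a VDA request body into rq here... */
	rq->timeout = 3000;		/* milliseconds */

	esas2r_start_request(a, rq);
	esas2r_wait_request(a, rq);	/* polls until completion or timeout */

	if (rq->req_stat == RS_TIMEOUT) {
		/* esas2r_wait_request() has already reset the adapter. */
	}

	esas2r_free_request(a, rq);
}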
1381
1382u32 esas2r_map_data_window(struct esas2r_adapter *a, u32 addr_lo)
1383{
1384 u32 offset = addr_lo & (MW_DATA_WINDOW_SIZE - 1);
1385 u32 base = addr_lo & -(signed int)MW_DATA_WINDOW_SIZE;
1386
1387 if (a->window_base != base) {
1388 esas2r_write_register_dword(a, MVR_PCI_WIN1_REMAP,
1389 base | MVRPW1R_ENABLE);
1390 esas2r_flush_register_dword(a, MVR_PCI_WIN1_REMAP);
1391 a->window_base = base;
1392 }
1393
1394 return offset;
1395}
1396
1397/* Read a block of data from chip memory */
1398bool esas2r_read_mem_block(struct esas2r_adapter *a,
1399 void *to,
1400 u32 from,
1401 u32 size)
1402{
1403 u8 *end = (u8 *)to;
1404
1405 while (size) {
1406 u32 len;
1407 u32 offset;
1408 u32 iatvr;
1409
1410 iatvr = (from & -(signed int)MW_DATA_WINDOW_SIZE);
1411
1412 esas2r_map_data_window(a, iatvr);
1413
1414 offset = from & (MW_DATA_WINDOW_SIZE - 1);
1415 len = size;
1416
1417 if (len > MW_DATA_WINDOW_SIZE - offset)
1418 len = MW_DATA_WINDOW_SIZE - offset;
1419
1420 from += len;
1421 size -= len;
1422
1423 while (len--) {
1424 *end++ = esas2r_read_data_byte(a, offset);
1425 offset++;
1426 }
1427 }
1428
1429 return true;
1430}
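/*
 * Illustrative only: a hypothetical single-dword read built on the
 * window helper above, assuming a little-endian layout and that the
 * four bytes do not straddle a window boundary
 * (esas2r_read_mem_block() handles the general case).
 */
static u32 sketch_read_dword(struct esas2r_adapter *a, u32 from)
{
	u32 offset = esas2r_map_data_window(a, from);
	u32 val = 0;
	int i;

	for (i = 0; i < 4; i++)
		val |= (u32)esas2r_read_data_byte(a, offset + i) << (8 * i);

	return val;
}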
1431
1432void esas2r_nuxi_mgt_data(u8 function, void *data)
1433{
1434 struct atto_vda_grp_info *g;
1435 struct atto_vda_devinfo *d;
1436 struct atto_vdapart_info *p;
1437 struct atto_vda_dh_info *h;
1438 struct atto_vda_metrics_info *m;
1439 struct atto_vda_schedule_info *s;
1440 struct atto_vda_buzzer_info *b;
1441 u8 i;
1442
1443 switch (function) {
1444 case VDAMGT_BUZZER_INFO:
1445 case VDAMGT_BUZZER_SET:
1446
1447 b = (struct atto_vda_buzzer_info *)data;
1448
1449 b->duration = le32_to_cpu(b->duration);
1450 break;
1451
1452 case VDAMGT_SCHEDULE_INFO:
1453 case VDAMGT_SCHEDULE_EVENT:
1454
1455 s = (struct atto_vda_schedule_info *)data;
1456
1457 s->id = le32_to_cpu(s->id);
1458
1459 break;
1460
1461 case VDAMGT_DEV_INFO:
1462 case VDAMGT_DEV_CLEAN:
1463 case VDAMGT_DEV_PT_INFO:
1464 case VDAMGT_DEV_FEATURES:
1465 case VDAMGT_DEV_PT_FEATURES:
1466 case VDAMGT_DEV_OPERATION:
1467
1468 d = (struct atto_vda_devinfo *)data;
1469
1470 d->capacity = le64_to_cpu(d->capacity);
1471 d->block_size = le32_to_cpu(d->block_size);
1472 d->ses_dev_index = le16_to_cpu(d->ses_dev_index);
1473 d->target_id = le16_to_cpu(d->target_id);
1474 d->lun = le16_to_cpu(d->lun);
1475 d->features = le16_to_cpu(d->features);
1476 break;
1477
1478 case VDAMGT_GRP_INFO:
1479 case VDAMGT_GRP_CREATE:
1480 case VDAMGT_GRP_DELETE:
1481 case VDAMGT_ADD_STORAGE:
1482 case VDAMGT_MEMBER_ADD:
1483 case VDAMGT_GRP_COMMIT:
1484 case VDAMGT_GRP_REBUILD:
1485 case VDAMGT_GRP_COMMIT_INIT:
1486 case VDAMGT_QUICK_RAID:
1487 case VDAMGT_GRP_FEATURES:
1488 case VDAMGT_GRP_COMMIT_INIT_AUTOMAP:
1489 case VDAMGT_QUICK_RAID_INIT_AUTOMAP:
1490 case VDAMGT_SPARE_LIST:
1491 case VDAMGT_SPARE_ADD:
1492 case VDAMGT_SPARE_REMOVE:
1493 case VDAMGT_LOCAL_SPARE_ADD:
1494 case VDAMGT_GRP_OPERATION:
1495
1496 g = (struct atto_vda_grp_info *)data;
1497
1498 g->capacity = le64_to_cpu(g->capacity);
1499 g->block_size = le32_to_cpu(g->block_size);
1500 g->interleave = le32_to_cpu(g->interleave);
1501 g->features = le16_to_cpu(g->features);
1502
1503 for (i = 0; i < 32; i++)
1504 g->members[i] = le16_to_cpu(g->members[i]);
1505
1506 break;
1507
1508 case VDAMGT_PART_INFO:
1509 case VDAMGT_PART_MAP:
1510 case VDAMGT_PART_UNMAP:
1511 case VDAMGT_PART_AUTOMAP:
1512 case VDAMGT_PART_SPLIT:
1513 case VDAMGT_PART_MERGE:
1514
1515 p = (struct atto_vdapart_info *)data;
1516
1517 p->part_size = le64_to_cpu(p->part_size);
1518 p->start_lba = le32_to_cpu(p->start_lba);
1519 p->block_size = le32_to_cpu(p->block_size);
1520 p->target_id = le16_to_cpu(p->target_id);
1521 break;
1522
1523 case VDAMGT_DEV_HEALTH_REQ:
1524
1525 h = (struct atto_vda_dh_info *)data;
1526
1527 h->med_defect_cnt = le32_to_cpu(h->med_defect_cnt);
1528 h->info_exc_cnt = le32_to_cpu(h->info_exc_cnt);
1529 break;
1530
1531 case VDAMGT_DEV_METRICS:
1532
1533 m = (struct atto_vda_metrics_info *)data;
1534
1535 for (i = 0; i < 32; i++)
1536 m->dev_indexes[i] = le16_to_cpu(m->dev_indexes[i]);
1537
1538 break;
1539
1540 default:
1541 break;
1542 }
1543}
1544
1545void esas2r_nuxi_cfg_data(u8 function, void *data)
1546{
1547 struct atto_vda_cfg_init *ci;
1548
1549 switch (function) {
1550 case VDA_CFG_INIT:
1551 case VDA_CFG_GET_INIT:
1552 case VDA_CFG_GET_INIT2:
1553
1554 ci = (struct atto_vda_cfg_init *)data;
1555
1556 ci->date_time.year = le16_to_cpu(ci->date_time.year);
1557 ci->sgl_page_size = le32_to_cpu(ci->sgl_page_size);
1558 ci->vda_version = le32_to_cpu(ci->vda_version);
1559 ci->epoch_time = le32_to_cpu(ci->epoch_time);
1560 ci->ioctl_tunnel = le32_to_cpu(ci->ioctl_tunnel);
1561 ci->num_targets_backend = le32_to_cpu(ci->num_targets_backend);
1562 break;
1563
1564 default:
1565 break;
1566 }
1567}
1568
1569void esas2r_nuxi_ae_data(union atto_vda_ae *ae)
1570{
1571 struct atto_vda_ae_raid *r = &ae->raid;
1572 struct atto_vda_ae_lu *l = &ae->lu;
1573
1574 switch (ae->hdr.bytype) {
1575 case VDAAE_HDR_TYPE_RAID:
1576
1577 r->dwflags = le32_to_cpu(r->dwflags);
1578 break;
1579
1580 case VDAAE_HDR_TYPE_LU:
1581
1582 l->dwevent = le32_to_cpu(l->dwevent);
1583 l->wphys_target_id = le16_to_cpu(l->wphys_target_id);
1584 l->id.tgtlun.wtarget_id = le16_to_cpu(l->id.tgtlun.wtarget_id);
1585
1586 if (l->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id)
1587 + sizeof(struct atto_vda_ae_lu_tgt_lun_raid)) {
1588 l->id.tgtlun_raid.dwinterleave
1589 = le32_to_cpu(l->id.tgtlun_raid.dwinterleave);
1590 l->id.tgtlun_raid.dwblock_size
1591 = le32_to_cpu(l->id.tgtlun_raid.dwblock_size);
1592 }
1593
1594 break;
1595
1596 case VDAAE_HDR_TYPE_DISK:
1597 default:
1598 break;
1599 }
1600}
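/*
 * Illustrative only: the esas2r_nuxi_*() helpers above swap fields in
 * place, so each buffer must be converted exactly once. On a
 * little-endian host the conversions compile away; on big-endian a
 * second pass would swap the bytes back to wire order. Values here are
 * hypothetical:
 */
static void sketch_nuxi_once(void)
{
	__le32 wire = cpu_to_le32(0x12345678);	/* firmware byte order */
	u32 host = le32_to_cpu(wire);		/* 0x12345678 on any host */
	u32 bad = le32_to_cpu((__force __le32)host); /* big-endian: garbage */

	(void)host;
	(void)bad;
}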
1601
1602void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq)
1603{
1604 unsigned long flags;
1605
1606 esas2r_rq_destroy_request(rq, a);
1607 spin_lock_irqsave(&a->request_lock, flags);
1608 list_add(&rq->comp_list, &a->avail_request);
1609 spin_unlock_irqrestore(&a->request_lock, flags);
1610}
1611
1612struct esas2r_request *esas2r_alloc_request(struct esas2r_adapter *a)
1613{
1614 struct esas2r_request *rq;
1615 unsigned long flags;
1616
1617 spin_lock_irqsave(&a->request_lock, flags);
1618
1619 if (unlikely(list_empty(&a->avail_request))) {
1620 spin_unlock_irqrestore(&a->request_lock, flags);
1621 return NULL;
1622 }
1623
1624 rq = list_first_entry(&a->avail_request, struct esas2r_request,
1625 comp_list);
1626 list_del(&rq->comp_list);
1627 spin_unlock_irqrestore(&a->request_lock, flags);
1628 esas2r_rq_init_request(rq, a);
1629
1630 return rq;
1631
1632}
1633
1634void esas2r_complete_request_cb(struct esas2r_adapter *a,
1635 struct esas2r_request *rq)
1636{
1637 esas2r_debug("completing request %p\n", rq);
1638
1639 scsi_dma_unmap(rq->cmd);
1640
1641 if (unlikely(rq->req_stat != RS_SUCCESS)) {
1642 esas2r_debug("[%x STATUS %x:%x (%p)]", rq->target_id,
1643 rq->req_stat,
1644 rq->func_rsp.scsi_rsp.scsi_stat,
1645 rq->cmd);
1646
1647 rq->cmd->result =
1648 ((esas2r_req_status_to_error(rq->req_stat) << 16)
1649 | (rq->func_rsp.scsi_rsp.scsi_stat & STATUS_MASK));
1650
1651 if (rq->req_stat == RS_UNDERRUN)
1652 scsi_set_resid(rq->cmd,
1653 le32_to_cpu(rq->func_rsp.scsi_rsp.
1654 residual_length));
1655 else
1656 scsi_set_resid(rq->cmd, 0);
1657 }
1658
1659 rq->cmd->scsi_done(rq->cmd);
1660
1661 esas2r_free_request(a, rq);
1662}
1663
1664/* Run the tasklet to handle work outside of interrupt context. */
1665void esas2r_adapter_tasklet(unsigned long context)
1666{
1667 struct esas2r_adapter *a = (struct esas2r_adapter *)context;
1668
1669 if (unlikely(a->flags2 & AF2_TIMER_TICK)) {
1670 esas2r_lock_clear_flags(&a->flags2, AF2_TIMER_TICK);
1671 esas2r_timer_tick(a);
1672 }
1673
1674 if (likely(a->flags2 & AF2_INT_PENDING)) {
1675 esas2r_lock_clear_flags(&a->flags2, AF2_INT_PENDING);
1676 esas2r_adapter_interrupt(a);
1677 }
1678
1679 if (esas2r_is_tasklet_pending(a))
1680 esas2r_do_tasklet_tasks(a);
1681
1682 if (esas2r_is_tasklet_pending(a)
1683 || (a->flags2 & AF2_INT_PENDING)
1684 || (a->flags2 & AF2_TIMER_TICK)) {
1685 esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
1686 esas2r_schedule_tasklet(a);
1687 } else {
1688 esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
1689 }
1690}
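/*
 * Illustrative only: the callback above is registered during adapter
 * setup (esas2r_init.c, outside this hunk). A plausible wiring with the
 * classic tasklet API, assuming the adapter embeds a
 * struct tasklet_struct named "tasklet":
 */
static void sketch_wire_tasklet(struct esas2r_adapter *a)
{
	tasklet_init(&a->tasklet, esas2r_adapter_tasklet, (unsigned long)a);
}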
1691
1692static void esas2r_timer_callback(unsigned long context);
1693
1694void esas2r_kickoff_timer(struct esas2r_adapter *a)
1695{
1696 init_timer(&a->timer);
1697
1698 a->timer.function = esas2r_timer_callback;
1699 a->timer.data = (unsigned long)a;
1700 a->timer.expires = jiffies +
1701 msecs_to_jiffies(100);
1702
1703 add_timer(&a->timer);
1704}
1705
1706static void esas2r_timer_callback(unsigned long context)
1707{
1708 struct esas2r_adapter *a = (struct esas2r_adapter *)context;
1709
1710 esas2r_lock_set_flags(&a->flags2, AF2_TIMER_TICK);
1711
1712 esas2r_schedule_tasklet(a);
1713
1714 esas2r_kickoff_timer(a);
1715}
1716
1717/*
1718 * Firmware events need to be handled outside of interrupt context
1719 * so we schedule a delayed_work to handle them.
1720 */
1721
1722static void
1723esas2r_free_fw_event(struct esas2r_fw_event_work *fw_event)
1724{
1725 unsigned long flags;
1726 struct esas2r_adapter *a = fw_event->a;
1727
1728 spin_lock_irqsave(&a->fw_event_lock, flags);
1729 list_del(&fw_event->list);
1730 kfree(fw_event);
1731 spin_unlock_irqrestore(&a->fw_event_lock, flags);
1732}
1733
1734void
1735esas2r_fw_event_off(struct esas2r_adapter *a)
1736{
1737 unsigned long flags;
1738
1739 spin_lock_irqsave(&a->fw_event_lock, flags);
1740 a->fw_events_off = 1;
1741 spin_unlock_irqrestore(&a->fw_event_lock, flags);
1742}
1743
1744void
1745esas2r_fw_event_on(struct esas2r_adapter *a)
1746{
1747 unsigned long flags;
1748
1749 spin_lock_irqsave(&a->fw_event_lock, flags);
1750 a->fw_events_off = 0;
1751 spin_unlock_irqrestore(&a->fw_event_lock, flags);
1752}
1753
1754static void esas2r_add_device(struct esas2r_adapter *a, u16 target_id)
1755{
1756 int ret;
1757 struct scsi_device *scsi_dev;
1758
1759 scsi_dev = scsi_device_lookup(a->host, 0, target_id, 0);
1760
1761 if (scsi_dev) {
1762 esas2r_log_dev(ESAS2R_LOG_WARN, &(scsi_dev->sdev_gendev),
1763 "scsi device already exists at id %d",
1764 target_id);
1767
1768 scsi_device_put(scsi_dev);
1769 } else {
1770 esas2r_log_dev(ESAS2R_LOG_INFO, &(a->host->shost_gendev),
1771 "scsi_add_device() called for 0:%d:0",
1772 target_id);
1776
1777 ret = scsi_add_device(a->host, 0, target_id, 0);
1778 if (ret) {
1779 esas2r_log_dev(ESAS2R_LOG_CRIT, &(a->host->shost_gendev),
1780 "scsi_add_device failed with %d for id %d",
1781 ret, target_id);
1785 }
1786 }
1787}
1788
1789static void esas2r_remove_device(struct esas2r_adapter *a, u16 target_id)
1790{
1791 struct scsi_device *scsi_dev;
1792
1793 scsi_dev = scsi_device_lookup(a->host, 0, target_id, 0);
1794
1795 if (scsi_dev) {
1796 scsi_device_set_state(scsi_dev, SDEV_OFFLINE);
1797
1798 esas2r_log_dev(ESAS2R_LOG_INFO, &(scsi_dev->sdev_gendev),
1799 "scsi_remove_device() called for 0:%d:0",
1800 target_id);
1804
1805 scsi_remove_device(scsi_dev);
1806
1807 esas2r_log_dev(ESAS2R_LOG_INFO, &(scsi_dev->sdev_gendev),
1808 "scsi_device_put() called");
1812
1813 scsi_device_put(scsi_dev);
1814 } else {
1815 esas2r_log_dev(ESAS2R_LOG_WARN, &(a->host->shost_gendev),
1816 "no target found at id %d", target_id);
1820 }
1821}
1822
1823/*
1824 * Sends a firmware asynchronous event to anyone who happens to be
1825 * listening on the defined ATTO VDA event ports.
1826 */
1827static void esas2r_send_ae_event(struct esas2r_fw_event_work *fw_event)
1828{
1829 struct esas2r_vda_ae *ae = (struct esas2r_vda_ae *)fw_event->data;
1830 char *type;
1831
1832 switch (ae->vda_ae.hdr.bytype) {
1833 case VDAAE_HDR_TYPE_RAID:
1834 type = "RAID group state change";
1835 break;
1836
1837 case VDAAE_HDR_TYPE_LU:
1838 type = "Mapped destination LU change";
1839 break;
1840
1841 case VDAAE_HDR_TYPE_DISK:
1842 type = "Physical disk inventory change";
1843 break;
1844
1845 case VDAAE_HDR_TYPE_RESET:
1846 type = "Firmware reset";
1847 break;
1848
1849 case VDAAE_HDR_TYPE_LOG_INFO:
1850 type = "Event Log message (INFO level)";
1851 break;
1852
1853 case VDAAE_HDR_TYPE_LOG_WARN:
1854 type = "Event Log message (WARN level)";
1855 break;
1856
1857 case VDAAE_HDR_TYPE_LOG_CRIT:
1858 type = "Event Log message (CRIT level)";
1859 break;
1860
1861 case VDAAE_HDR_TYPE_LOG_FAIL:
1862 type = "Event Log message (FAIL level)";
1863 break;
1864
1865 case VDAAE_HDR_TYPE_NVC:
1866 type = "NVCache change";
1867 break;
1868
1869 case VDAAE_HDR_TYPE_TLG_INFO:
1870 type = "Time stamped log message (INFO level)";
1871 break;
1872
1873 case VDAAE_HDR_TYPE_TLG_WARN:
1874 type = "Time stamped log message (WARN level)";
1875 break;
1876
1877 case VDAAE_HDR_TYPE_TLG_CRIT:
1878 type = "Time stamped log message (CRIT level)";
1879 break;
1880
1881 case VDAAE_HDR_TYPE_PWRMGT:
1882 type = "Power management";
1883 break;
1884
1885 case VDAAE_HDR_TYPE_MUTE:
1886 type = "Mute button pressed";
1887 break;
1888
1889 case VDAAE_HDR_TYPE_DEV:
1890 type = "Device attribute change";
1891 break;
1892
1893 default:
1894 type = "Unknown";
1895 break;
1896 }
1897
1898 esas2r_log(ESAS2R_LOG_WARN,
1899 "An async event of type \"%s\" was received from the firmware. The event contents are:",
1900 type);
1901 esas2r_log_hexdump(ESAS2R_LOG_WARN, &ae->vda_ae,
1902 ae->vda_ae.hdr.bylength);
1903
1904}
1905
1906static void
1907esas2r_firmware_event_work(struct work_struct *work)
1908{
1909 struct esas2r_fw_event_work *fw_event =
1910 container_of(work, struct esas2r_fw_event_work, work.work);
1911
1912 struct esas2r_adapter *a = fw_event->a;
1913
1914 u16 target_id = *(u16 *)&fw_event->data[0];
1915
1916 if (a->fw_events_off)
1917 goto done;
1918
1919 switch (fw_event->type) {
1920 case fw_event_null:
1921 break; /* do nothing */
1922
1923 case fw_event_lun_change:
1924 esas2r_remove_device(a, target_id);
1925 esas2r_add_device(a, target_id);
1926 break;
1927
1928 case fw_event_present:
1929 esas2r_add_device(a, target_id);
1930 break;
1931
1932 case fw_event_not_present:
1933 esas2r_remove_device(a, target_id);
1934 break;
1935
1936 case fw_event_vda_ae:
1937 esas2r_send_ae_event(fw_event);
1938 break;
1939 }
1940
1941done:
1942 esas2r_free_fw_event(fw_event);
1943}
1944
1945void esas2r_queue_fw_event(struct esas2r_adapter *a,
1946 enum fw_event_type type,
1947 void *data,
1948 int data_sz)
1949{
1950 struct esas2r_fw_event_work *fw_event;
1951 unsigned long flags;
1952
1953 fw_event = kzalloc(sizeof(struct esas2r_fw_event_work), GFP_ATOMIC);
1954 if (!fw_event) {
1955 esas2r_log(ESAS2R_LOG_WARN,
1956 "esas2r_queue_fw_event failed to alloc");
1957 return;
1958 }
1959
1960 if (type == fw_event_vda_ae) {
1961 struct esas2r_vda_ae *ae =
1962 (struct esas2r_vda_ae *)fw_event->data;
1963
1964 ae->signature = ESAS2R_VDA_EVENT_SIG;
1965 ae->bus_number = a->pcid->bus->number;
1966 ae->devfn = a->pcid->devfn;
1967 memcpy(&ae->vda_ae, data, sizeof(ae->vda_ae));
1968 } else {
1969 memcpy(fw_event->data, data, data_sz);
1970 }
1971
1972 fw_event->type = type;
1973 fw_event->a = a;
1974
1975 spin_lock_irqsave(&a->fw_event_lock, flags);
1976 list_add_tail(&fw_event->list, &a->fw_event_list);
1977 INIT_DELAYED_WORK(&fw_event->work, esas2r_firmware_event_work);
1978 queue_delayed_work_on(
1979 smp_processor_id(), a->fw_event_q, &fw_event->work,
1980 msecs_to_jiffies(1));
1981 spin_unlock_irqrestore(&a->fw_event_lock, flags);
1982}
1983
1984void esas2r_target_state_changed(struct esas2r_adapter *a, u16 targ_id,
1985 u8 state)
1986{
1987 if (state == TS_LUN_CHANGE)
1988 esas2r_queue_fw_event(a, fw_event_lun_change, &targ_id,
1989 sizeof(targ_id));
1990 else if (state == TS_PRESENT)
1991 esas2r_queue_fw_event(a, fw_event_present, &targ_id,
1992 sizeof(targ_id));
1993 else if (state == TS_NOT_PRESENT)
1994 esas2r_queue_fw_event(a, fw_event_not_present, &targ_id,
1995 sizeof(targ_id));
1996}
1997
1998/* Translate status to a Linux SCSI mid-layer error code */
1999int esas2r_req_status_to_error(u8 req_stat)
2000{
2001 switch (req_stat) {
2002 case RS_OVERRUN:
2003 case RS_UNDERRUN:
2004 case RS_SUCCESS:
2005 /*
2006 * NOTE: SCSI mid-layer wants a good status for a SCSI error, because
2007 * it will check the scsi_stat value in the completion anyway.
2008 */
2009 case RS_SCSI_ERROR:
2010 return DID_OK;
2011
2012 case RS_SEL:
2013 case RS_SEL2:
2014 return DID_NO_CONNECT;
2015
2016 case RS_RESET:
2017 return DID_RESET;
2018
2019 case RS_ABORTED:
2020 return DID_ABORT;
2021
2022 case RS_BUSY:
2023 return DID_BUS_BUSY;
2024 }
2025
2026 /* Everything else is just an error. */
2027
2028 return DID_ERROR;
2029}
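/*
 * Illustrative only: esas2r_complete_request_cb() above packs this
 * mapping into bits 16-23 of cmd->result; code inspecting the result
 * recovers it with the mid-layer's host_byte() accessor from
 * <scsi/scsi.h>:
 */
static bool sketch_cmd_lost_device(struct scsi_cmnd *cmd)
{
	/* DID_NO_CONNECT here corresponds to RS_SEL/RS_SEL2 above. */
	return host_byte(cmd->result) == DID_NO_CONNECT;
}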
2030
2031module_init(esas2r_init);
2032module_exit(esas2r_exit);
diff --git a/drivers/scsi/esas2r/esas2r_targdb.c b/drivers/scsi/esas2r/esas2r_targdb.c
new file mode 100644
index 000000000000..e540a2fa3d15
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_targdb.c
@@ -0,0 +1,306 @@
1/*
2 * linux/drivers/scsi/esas2r/esas2r_targdb.c
3 * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
6 * (mailto:linuxdrivers@attotech.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 2
11 * of the License, or (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * NO WARRANTY
19 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
20 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
21 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
22 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
23 * solely responsible for determining the appropriateness of using and
24 * distributing the Program and assumes all risks associated with its
25 * exercise of rights under this Agreement, including but not limited to
26 * the risks and costs of program errors, damage to or loss of data,
27 * programs or equipment, and unavailability or interruption of operations.
28 *
29 * DISCLAIMER OF LIABILITY
30 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
31 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
33 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
34 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
35 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
36 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
37 *
38 * You should have received a copy of the GNU General Public License
39 * along with this program; if not, write to the Free Software
40 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
41 * USA.
42 */
43
44#include "esas2r.h"
45
46void esas2r_targ_db_initialize(struct esas2r_adapter *a)
47{
48 struct esas2r_target *t;
49
50 for (t = a->targetdb; t < a->targetdb_end; t++) {
51 memset(t, 0, sizeof(struct esas2r_target));
52
53 t->target_state = TS_NOT_PRESENT;
54 t->buffered_target_state = TS_NOT_PRESENT;
55 t->new_target_state = TS_INVALID;
56 }
57}
58
59void esas2r_targ_db_remove_all(struct esas2r_adapter *a, bool notify)
60{
61 struct esas2r_target *t;
62 unsigned long flags;
63
64 for (t = a->targetdb; t < a->targetdb_end; t++) {
65 if (t->target_state != TS_PRESENT)
66 continue;
67
68 spin_lock_irqsave(&a->mem_lock, flags);
69 esas2r_targ_db_remove(a, t);
70 spin_unlock_irqrestore(&a->mem_lock, flags);
71
72 if (notify) {
73 esas2r_trace("remove id:%d",
74 esas2r_targ_get_id(t, a));
75 esas2r_target_state_changed(a,
76 esas2r_targ_get_id(t, a),
77 TS_NOT_PRESENT);
78 }
79 }
80}
81
82void esas2r_targ_db_report_changes(struct esas2r_adapter *a)
83{
84 struct esas2r_target *t;
85 unsigned long flags;
86
87 esas2r_trace_enter();
88
89 if (a->flags & AF_DISC_PENDING) {
90 esas2r_trace_exit();
91 return;
92 }
93
94 for (t = a->targetdb; t < a->targetdb_end; t++) {
95 u8 state = TS_INVALID;
96
97 spin_lock_irqsave(&a->mem_lock, flags);
98 if (t->buffered_target_state != t->target_state)
99 state = t->buffered_target_state = t->target_state;
100
101 spin_unlock_irqrestore(&a->mem_lock, flags);
102 if (state != TS_INVALID) {
103 esas2r_trace("targ_db_report_changes:%d",
104 esas2r_targ_get_id(t, a));
107 esas2r_trace("state:%d", state);
108
109 esas2r_target_state_changed(a,
110 esas2r_targ_get_id(t, a),
111 state);
113 }
114 }
115
116 esas2r_trace_exit();
117}
118
119struct esas2r_target *esas2r_targ_db_add_raid(struct esas2r_adapter *a,
120 struct esas2r_disc_context *dc)
122{
123 struct esas2r_target *t;
124
125 esas2r_trace_enter();
126
127 if (dc->curr_virt_id >= ESAS2R_MAX_TARGETS) {
128 esas2r_bugon();
129 esas2r_trace_exit();
130 return NULL;
131 }
132
133 t = a->targetdb + dc->curr_virt_id;
134
135 if (t->target_state == TS_PRESENT) {
136 esas2r_trace_exit();
137 return NULL;
138 }
139
140 esas2r_hdebug("add RAID %s, T:%d", dc->raid_grp_name,
141 esas2r_targ_get_id(t, a));
144
145 if (dc->interleave == 0
146 || dc->block_size == 0) {
147 /* These are invalid values; don't create the target entry. */
148
149 esas2r_hdebug("invalid RAID group dimensions");
150
151 esas2r_trace_exit();
152
153 return NULL;
154 }
155
156 t->block_size = dc->block_size;
157 t->inter_byte = dc->interleave;
158 t->inter_block = dc->interleave / dc->block_size;
159 t->virt_targ_id = dc->curr_virt_id;
160 t->phys_targ_id = ESAS2R_TARG_ID_INV;
161
162 t->flags &= ~TF_PASS_THRU;
163 t->flags |= TF_USED;
164
165 t->identifier_len = 0;
166
167 t->target_state = TS_PRESENT;
168
169 return t;
170}
171
172struct esas2r_target *esas2r_targ_db_add_pthru(struct esas2r_adapter *a,
173 struct esas2r_disc_context *dc,
174 u8 *ident,
175 u8 ident_len)
176{
177 struct esas2r_target *t;
178
179 esas2r_trace_enter();
180
181 if (dc->curr_virt_id >= ESAS2R_MAX_TARGETS) {
182 esas2r_bugon();
183 esas2r_trace_exit();
184 return NULL;
185 }
186
187 /* See if we found this device before. */
188
189 t = esas2r_targ_db_find_by_ident(a, ident, ident_len);
190
191 if (t == NULL) {
192 t = a->targetdb + dc->curr_virt_id;
193
194 if (ident_len > sizeof(t->identifier)
195 || t->target_state == TS_PRESENT) {
196 esas2r_trace_exit();
197 return NULL;
198 }
199 }
200
201 esas2r_hdebug("add PT; T:%d, V:%d, P:%d", esas2r_targ_get_id(t, a),
202 dc->curr_virt_id,
203 dc->curr_phys_id);
204
205 t->block_size = 0;
206 t->inter_byte = 0;
207 t->inter_block = 0;
208 t->virt_targ_id = dc->curr_virt_id;
209 t->phys_targ_id = dc->curr_phys_id;
210 t->identifier_len = ident_len;
211
212 memcpy(t->identifier, ident, ident_len);
213
214 t->flags |= TF_PASS_THRU | TF_USED;
215
216 t->target_state = TS_PRESENT;
217
218 return t;
219}
220
221void esas2r_targ_db_remove(struct esas2r_adapter *a, struct esas2r_target *t)
222{
223 esas2r_trace_enter();
224
225 t->target_state = TS_NOT_PRESENT;
226
227 esas2r_trace("remove id:%d", esas2r_targ_get_id(t, a));
228
229 esas2r_trace_exit();
230}
231
232struct esas2r_target *esas2r_targ_db_find_by_sas_addr(struct esas2r_adapter *a,
233 u64 *sas_addr)
234{
235 struct esas2r_target *t;
236
237 for (t = a->targetdb; t < a->targetdb_end; t++)
238 if (t->sas_addr == *sas_addr)
239 return t;
240
241 return NULL;
242}
243
244struct esas2r_target *esas2r_targ_db_find_by_ident(struct esas2r_adapter *a,
245 void *identifier,
246 u8 ident_len)
247{
248 struct esas2r_target *t;
249
250 for (t = a->targetdb; t < a->targetdb_end; t++) {
251 if (ident_len == t->identifier_len
252 && memcmp(&t->identifier[0], identifier,
253 ident_len) == 0)
254 return t;
255 }
256
257 return NULL;
258}
259
260u16 esas2r_targ_db_find_next_present(struct esas2r_adapter *a, u16 target_id)
261{
262 u16 id = target_id + 1;
263
264 while (id < ESAS2R_MAX_TARGETS) {
265 struct esas2r_target *t = a->targetdb + id;
266
267 if (t->target_state == TS_PRESENT)
268 break;
269
270 id++;
271 }
272
273 return id;
274}
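/*
 * Illustrative only: the helper above returns the first present id
 * strictly greater than target_id, or ESAS2R_MAX_TARGETS when none
 * remain, so it composes into a scan loop. Passing (u16)-1 wraps to
 * id 0 on the first call:
 */
static void sketch_walk_present_targets(struct esas2r_adapter *a)
{
	u16 id;

	for (id = esas2r_targ_db_find_next_present(a, (u16)-1);
	     id < ESAS2R_MAX_TARGETS;
	     id = esas2r_targ_db_find_next_present(a, id))
		esas2r_trace("target %d present", id);
}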
275
276struct esas2r_target *esas2r_targ_db_find_by_virt_id(struct esas2r_adapter *a,
277 u16 virt_id)
278{
279 struct esas2r_target *t;
280
281 for (t = a->targetdb; t < a->targetdb_end; t++) {
282 if (t->target_state != TS_PRESENT)
283 continue;
284
285 if (t->virt_targ_id == virt_id)
286 return t;
287 }
288
289 return NULL;
290}
291
292u16 esas2r_targ_db_get_tgt_cnt(struct esas2r_adapter *a)
293{
294 u16 devcnt = 0;
295 struct esas2r_target *t;
296 unsigned long flags;
297
298 spin_lock_irqsave(&a->mem_lock, flags);
299 for (t = a->targetdb; t < a->targetdb_end; t++)
300 if (t->target_state == TS_PRESENT)
301 devcnt++;
302
303 spin_unlock_irqrestore(&a->mem_lock, flags);
304
305 return devcnt;
306}
diff --git a/drivers/scsi/esas2r/esas2r_vda.c b/drivers/scsi/esas2r/esas2r_vda.c
new file mode 100644
index 000000000000..f8ec6d636846
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_vda.c
@@ -0,0 +1,521 @@
1/*
2 * linux/drivers/scsi/esas2r/esas2r_vda.c
3 * esas2r driver VDA firmware interface functions
4 *
5 * Copyright (c) 2001-2013 ATTO Technology, Inc.
6 * (mailto:linuxdrivers@attotech.com)
7 */
8/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
9/*
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * NO WARRANTY
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
29 *
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38 *
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
42 */
43/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
44
45#include "esas2r.h"
46
47static u8 esas2r_vdaioctl_versions[] = {
48 ATTO_VDA_VER_UNSUPPORTED,
49 ATTO_VDA_FLASH_VER,
50 ATTO_VDA_VER_UNSUPPORTED,
51 ATTO_VDA_VER_UNSUPPORTED,
52 ATTO_VDA_CLI_VER,
53 ATTO_VDA_VER_UNSUPPORTED,
54 ATTO_VDA_CFG_VER,
55 ATTO_VDA_MGT_VER,
56 ATTO_VDA_GSV_VER
57};
58
59static void clear_vda_request(struct esas2r_request *rq);
60
61static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
62 struct esas2r_request *rq);
63
64/* Prepare a VDA IOCTL request to be sent to the firmware. */
65bool esas2r_process_vda_ioctl(struct esas2r_adapter *a,
66 struct atto_ioctl_vda *vi,
67 struct esas2r_request *rq,
68 struct esas2r_sg_context *sgc)
69{
70 u32 datalen = 0;
71 struct atto_vda_sge *firstsg = NULL;
72 u8 vercnt = (u8)ARRAY_SIZE(esas2r_vdaioctl_versions);
73
74 vi->status = ATTO_STS_SUCCESS;
75 vi->vda_status = RS_PENDING;
76
77 if (vi->function >= vercnt) {
78 vi->status = ATTO_STS_INV_FUNC;
79 return false;
80 }
81
82 if (vi->version > esas2r_vdaioctl_versions[vi->function]) {
83 vi->status = ATTO_STS_INV_VERSION;
84 return false;
85 }
86
87 if (a->flags & AF_DEGRADED_MODE) {
88 vi->status = ATTO_STS_DEGRADED;
89 return false;
90 }
91
92 if (vi->function != VDA_FUNC_SCSI)
93 clear_vda_request(rq);
94
95 rq->vrq->scsi.function = vi->function;
96 rq->interrupt_cb = esas2r_complete_vda_ioctl;
97 rq->interrupt_cx = vi;
98
99 switch (vi->function) {
100 case VDA_FUNC_FLASH:
101
102 if (vi->cmd.flash.sub_func != VDA_FLASH_FREAD
103 && vi->cmd.flash.sub_func != VDA_FLASH_FWRITE
104 && vi->cmd.flash.sub_func != VDA_FLASH_FINFO) {
105 vi->status = ATTO_STS_INV_FUNC;
106 return false;
107 }
108
109 if (vi->cmd.flash.sub_func != VDA_FLASH_FINFO)
110 datalen = vi->data_length;
111
112 rq->vrq->flash.length = cpu_to_le32(datalen);
113 rq->vrq->flash.sub_func = vi->cmd.flash.sub_func;
114
115 memcpy(rq->vrq->flash.data.file.file_name,
116 vi->cmd.flash.data.file.file_name,
117 sizeof(vi->cmd.flash.data.file.file_name));
118
119 firstsg = rq->vrq->flash.data.file.sge;
120 break;
121
122 case VDA_FUNC_CLI:
123
124 datalen = vi->data_length;
125
126 rq->vrq->cli.cmd_rsp_len =
127 cpu_to_le32(vi->cmd.cli.cmd_rsp_len);
128 rq->vrq->cli.length = cpu_to_le32(datalen);
129
130 firstsg = rq->vrq->cli.sge;
131 break;
132
133 case VDA_FUNC_MGT:
134 {
135 u8 *cmdcurr_offset = sgc->cur_offset
136 - offsetof(struct atto_ioctl_vda, data)
137 + offsetof(struct atto_ioctl_vda, cmd)
138 + offsetof(struct atto_ioctl_vda_mgt_cmd,
139 data);
140 /*
141 * Build the data payload SGL here first since
142 * esas2r_sgc_init() will modify the S/G list offset for the
143 * management SGL (which is built below where the data SGL is
144 * usually built).
145 */
146
147 if (vi->data_length) {
148 u32 payldlen = 0;
149
150 if (vi->cmd.mgt.mgt_func == VDAMGT_DEV_HEALTH_REQ
151 || vi->cmd.mgt.mgt_func == VDAMGT_DEV_METRICS) {
152 rq->vrq->mgt.payld_sglst_offset =
153 (u8)offsetof(struct atto_vda_mgmt_req,
154 payld_sge);
155
156 payldlen = vi->data_length;
157 datalen = vi->cmd.mgt.data_length;
158 } else if (vi->cmd.mgt.mgt_func == VDAMGT_DEV_INFO2
159 || vi->cmd.mgt.mgt_func ==
160 VDAMGT_DEV_INFO2_BYADDR) {
161 datalen = vi->data_length;
162 cmdcurr_offset = sgc->cur_offset;
163 } else {
164 vi->status = ATTO_STS_INV_PARAM;
165 return false;
166 }
167
168 /* Setup the length so building the payload SGL works */
169 rq->vrq->mgt.length = cpu_to_le32(datalen);
170
171 if (payldlen) {
172 rq->vrq->mgt.payld_length =
173 cpu_to_le32(payldlen);
174
175 esas2r_sgc_init(sgc, a, rq,
176 rq->vrq->mgt.payld_sge);
177 sgc->length = payldlen;
178
179 if (!esas2r_build_sg_list(a, rq, sgc)) {
180 vi->status = ATTO_STS_OUT_OF_RSRC;
181 return false;
182 }
183 }
184 } else {
185 datalen = vi->cmd.mgt.data_length;
186
187 rq->vrq->mgt.length = cpu_to_le32(datalen);
188 }
189
190 /*
191 * Now that the payload SGL is built, if any, setup to build
192 * the management SGL.
193 */
194 firstsg = rq->vrq->mgt.sge;
195 sgc->cur_offset = cmdcurr_offset;
196
197 /* Finish initializing the management request. */
198 rq->vrq->mgt.mgt_func = vi->cmd.mgt.mgt_func;
199 rq->vrq->mgt.scan_generation = vi->cmd.mgt.scan_generation;
200 rq->vrq->mgt.dev_index =
201 cpu_to_le32(vi->cmd.mgt.dev_index);
202
203 esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data);
204 break;
205 }
206
207 case VDA_FUNC_CFG:
208
209 if (vi->data_length
210 || vi->cmd.cfg.data_length == 0) {
211 vi->status = ATTO_STS_INV_PARAM;
212 return false;
213 }
214
215 if (vi->cmd.cfg.cfg_func == VDA_CFG_INIT) {
216 vi->status = ATTO_STS_INV_FUNC;
217 return false;
218 }
219
220 rq->vrq->cfg.sub_func = vi->cmd.cfg.cfg_func;
221 rq->vrq->cfg.length = cpu_to_le32(vi->cmd.cfg.data_length);
222
223 if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) {
224 memcpy(&rq->vrq->cfg.data,
225 &vi->cmd.cfg.data,
226 vi->cmd.cfg.data_length);
227
228 esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func,
229 &rq->vrq->cfg.data);
230 } else {
231 vi->status = ATTO_STS_INV_FUNC;
232
233 return false;
234 }
235
236 break;
237
238 case VDA_FUNC_GSV:
239
240 vi->cmd.gsv.rsp_len = vercnt;
241
242 memcpy(vi->cmd.gsv.version_info, esas2r_vdaioctl_versions,
243 vercnt);
244
245 vi->vda_status = RS_SUCCESS;
246 break;
247
248 default:
249
250 vi->status = ATTO_STS_INV_FUNC;
251 return false;
252 }
253
254 if (datalen) {
255 esas2r_sgc_init(sgc, a, rq, firstsg);
256 sgc->length = datalen;
257
258 if (!esas2r_build_sg_list(a, rq, sgc)) {
259 vi->status = ATTO_STS_OUT_OF_RSRC;
260 return false;
261 }
262 }
263
264 esas2r_start_request(a, rq);
265
266 return true;
267}
268
269static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
270 struct esas2r_request *rq)
271{
272 struct atto_ioctl_vda *vi = (struct atto_ioctl_vda *)rq->interrupt_cx;
273
274 vi->vda_status = rq->req_stat;
275
276 switch (vi->function) {
277 case VDA_FUNC_FLASH:
278
279 if (vi->cmd.flash.sub_func == VDA_FLASH_FINFO
280 || vi->cmd.flash.sub_func == VDA_FLASH_FREAD)
281 vi->cmd.flash.data.file.file_size =
282 le32_to_cpu(rq->func_rsp.flash_rsp.file_size);
283
284 break;
285
286 case VDA_FUNC_MGT:
287
288 vi->cmd.mgt.scan_generation =
289 rq->func_rsp.mgt_rsp.scan_generation;
290 vi->cmd.mgt.dev_index = le16_to_cpu(
291 rq->func_rsp.mgt_rsp.dev_index);
292
293 if (vi->data_length == 0)
294 vi->cmd.mgt.data_length =
295 le32_to_cpu(rq->func_rsp.mgt_rsp.length);
296
297 esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data);
298 break;
299
300 case VDA_FUNC_CFG:
301
302 if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) {
303 struct atto_ioctl_vda_cfg_cmd *cfg = &vi->cmd.cfg;
304 struct atto_vda_cfg_rsp *rsp = &rq->func_rsp.cfg_rsp;
305
306 cfg->data_length =
307 cpu_to_le32(sizeof(struct atto_vda_cfg_init));
308 cfg->data.init.vda_version =
309 le32_to_cpu(rsp->vda_version);
310 cfg->data.init.fw_build = rsp->fw_build;
311
312 sprintf((char *)&cfg->data.init.fw_release,
313 "%1d.%02d",
314 (int)LOBYTE(le16_to_cpu(rsp->fw_release)),
315 (int)HIBYTE(le16_to_cpu(rsp->fw_release)));
316
317 if (LOWORD(LOBYTE(cfg->data.init.fw_build)) == 'A')
318 cfg->data.init.fw_version =
319 cfg->data.init.fw_build;
320 else
321 cfg->data.init.fw_version =
322 cfg->data.init.fw_release;
323 } else {
324 esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func,
325 &vi->cmd.cfg.data);
326 }
327
328 break;
329
330 case VDA_FUNC_CLI:
331
332 vi->cmd.cli.cmd_rsp_len =
333 le32_to_cpu(rq->func_rsp.cli_rsp.cmd_rsp_len);
334 break;
335
336 default:
337
338 break;
339 }
340}
341
342/* Build a flash VDA request. */
343void esas2r_build_flash_req(struct esas2r_adapter *a,
344 struct esas2r_request *rq,
345 u8 sub_func,
346 u8 cksum,
347 u32 addr,
348 u32 length)
349{
350 struct atto_vda_flash_req *vrq = &rq->vrq->flash;
351
352 clear_vda_request(rq);
353
354 rq->vrq->scsi.function = VDA_FUNC_FLASH;
355
356 if (sub_func == VDA_FLASH_BEGINW
357 || sub_func == VDA_FLASH_WRITE
358 || sub_func == VDA_FLASH_READ)
359 vrq->sg_list_offset = (u8)offsetof(struct atto_vda_flash_req,
360 data.sge);
361
362 vrq->length = cpu_to_le32(length);
363 vrq->flash_addr = cpu_to_le32(addr);
364 vrq->checksum = cksum;
365 vrq->sub_func = sub_func;
366}
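/*
 * Illustrative only: reading 4 KiB from the start of flash with the
 * builder above. The checksum argument is assumed to be ignored for
 * reads; the caller still owns attaching the S/G list and submitting
 * the request.
 */
static void sketch_flash_read(struct esas2r_adapter *a,
			      struct esas2r_request *rq)
{
	esas2r_build_flash_req(a, rq, VDA_FLASH_READ, 0 /* cksum */, 0, 0x1000);

	/* ...then attach an S/G list (esas2r_sgc_init() +
	 * esas2r_build_sg_list()) and submit with esas2r_start_request(). */
}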
367
368/* Build a VDA management request. */
369void esas2r_build_mgt_req(struct esas2r_adapter *a,
370 struct esas2r_request *rq,
371 u8 sub_func,
372 u8 scan_gen,
373 u16 dev_index,
374 u32 length,
375 void *data)
376{
377 struct atto_vda_mgmt_req *vrq = &rq->vrq->mgt;
378
379 clear_vda_request(rq);
380
381 rq->vrq->scsi.function = VDA_FUNC_MGT;
382
383 vrq->mgt_func = sub_func;
384 vrq->scan_generation = scan_gen;
385 vrq->dev_index = cpu_to_le16(dev_index);
386 vrq->length = cpu_to_le32(length);
387
388 if (vrq->length) {
389 if (a->flags & AF_LEGACY_SGE_MODE) {
390 vrq->sg_list_offset = (u8)offsetof(
391 struct atto_vda_mgmt_req, sge);
392
393 vrq->sge[0].length = cpu_to_le32(SGE_LAST | length);
394 vrq->sge[0].address = cpu_to_le64(
395 rq->vrq_md->phys_addr +
396 sizeof(union atto_vda_req));
397 } else {
398 vrq->sg_list_offset = (u8)offsetof(
399 struct atto_vda_mgmt_req, prde);
400
401 vrq->prde[0].ctl_len = cpu_to_le32(length);
402 vrq->prde[0].address = cpu_to_le64(
403 rq->vrq_md->phys_addr +
404 sizeof(union atto_vda_req));
405 }
406 }
407
408 if (data) {
409 esas2r_nuxi_mgt_data(sub_func, data);
410
411 memcpy(&rq->vda_rsp_data->mgt_data.data.bytes[0], data,
412 length);
413 }
414}
415
416/* Build a VDA asynchronous event (AE) request. */
417void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq)
418{
419 struct atto_vda_ae_req *vrq = &rq->vrq->ae;
420
421 clear_vda_request(rq);
422
423 rq->vrq->scsi.function = VDA_FUNC_AE;
424
425 vrq->length = cpu_to_le32(sizeof(struct atto_vda_ae_data));
426
427 if (a->flags & AF_LEGACY_SGE_MODE) {
428 vrq->sg_list_offset =
429 (u8)offsetof(struct atto_vda_ae_req, sge);
430 vrq->sge[0].length = cpu_to_le32(SGE_LAST | vrq->length);
431 vrq->sge[0].address = cpu_to_le64(
432 rq->vrq_md->phys_addr +
433 sizeof(union atto_vda_req));
434 } else {
435 vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ae_req,
436 prde);
437 vrq->prde[0].ctl_len = cpu_to_le32(vrq->length);
438 vrq->prde[0].address = cpu_to_le64(
439 rq->vrq_md->phys_addr +
440 sizeof(union atto_vda_req));
441 }
442}
443
444/* Build a VDA CLI request. */
445void esas2r_build_cli_req(struct esas2r_adapter *a,
446 struct esas2r_request *rq,
447 u32 length,
448 u32 cmd_rsp_len)
449{
450 struct atto_vda_cli_req *vrq = &rq->vrq->cli;
451
452 clear_vda_request(rq);
453
454 rq->vrq->scsi.function = VDA_FUNC_CLI;
455
456 vrq->length = cpu_to_le32(length);
457 vrq->cmd_rsp_len = cpu_to_le32(cmd_rsp_len);
458 vrq->sg_list_offset = (u8)offsetof(struct atto_vda_cli_req, sge);
459}
460
461/* Build a VDA IOCTL request. */
462void esas2r_build_ioctl_req(struct esas2r_adapter *a,
463 struct esas2r_request *rq,
464 u32 length,
465 u8 sub_func)
466{
467 struct atto_vda_ioctl_req *vrq = &rq->vrq->ioctl;
468
469 clear_vda_request(rq);
470
471 rq->vrq->scsi.function = VDA_FUNC_IOCTL;
472
473 vrq->length = cpu_to_le32(length);
474 vrq->sub_func = sub_func;
475 vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ioctl_req, sge);
476}
477
478/* Build a VDA configuration request. */
479void esas2r_build_cfg_req(struct esas2r_adapter *a,
480 struct esas2r_request *rq,
481 u8 sub_func,
482 u32 length,
483 void *data)
484{
485 struct atto_vda_cfg_req *vrq = &rq->vrq->cfg;
486
487 clear_vda_request(rq);
488
489 rq->vrq->scsi.function = VDA_FUNC_CFG;
490
491 vrq->sub_func = sub_func;
492 vrq->length = cpu_to_le32(length);
493
494 if (data) {
495 esas2r_nuxi_cfg_data(sub_func, data);
496
497 memcpy(&vrq->data, data, length);
498 }
499}
500
501static void clear_vda_request(struct esas2r_request *rq)
502{
503 u32 handle = rq->vrq->scsi.handle;
504
505 memset(rq->vrq, 0, sizeof(*rq->vrq));
506
507 rq->vrq->scsi.handle = handle;
508
509 rq->req_stat = RS_PENDING;
510
511 /* Since the data buffer is separate, clear it too. */
512
513 memset(rq->data_buf, 0, ESAS2R_DATA_BUF_LEN);
514
515 /*
516 * Setup next and prev pointer in case the request is not going through
517 * esas2r_start_request().
518 */
519
520 INIT_LIST_HEAD(&rq->req_list);
521}
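/*
 * Illustrative only: because clear_vda_request() leaves req_list
 * self-linked, a request that never went through esas2r_start_request()
 * can still be torn down uniformly, as the pending-queue branch of
 * esas2r_check_active_queue() does:
 *
 *	list_del_init(&rq->req_list);	// harmless on a self-linked head
 *	esas2r_free_request(a, rq);
 */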