path: root/drivers/scsi/megaraid
author:    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer: Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit:    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree:      0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/scsi/megaraid
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/scsi/megaraid')
-rw-r--r--  drivers/scsi/megaraid/Kconfig.megaraid  |   78
-rw-r--r--  drivers/scsi/megaraid/Makefile          |    2
-rw-r--r--  drivers/scsi/megaraid/mbox_defs.h       |  790
-rw-r--r--  drivers/scsi/megaraid/mega_common.h     |  286
-rw-r--r--  drivers/scsi/megaraid/megaraid_ioctl.h  |  296
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c   | 4276
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.h   |  288
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.c     | 1255
-rw-r--r--  drivers/scsi/megaraid/megaraid_mm.h     |  102
9 files changed, 7373 insertions, 0 deletions
diff --git a/drivers/scsi/megaraid/Kconfig.megaraid b/drivers/scsi/megaraid/Kconfig.megaraid
new file mode 100644
index 000000000000..917d591d90b2
--- /dev/null
+++ b/drivers/scsi/megaraid/Kconfig.megaraid
@@ -0,0 +1,78 @@
1config MEGARAID_NEWGEN
2 bool "LSI Logic New Generation RAID Device Drivers"
3 depends on PCI && SCSI
4 help
5 LSI Logic RAID Device Drivers
6
7config MEGARAID_MM
8 tristate "LSI Logic Management Module (New Driver)"
9 depends on PCI && SCSI && MEGARAID_NEWGEN
10 help
11 Management Module provides ioctl, sysfs support for LSI Logic
12 RAID controllers.
13 To compile this driver as a module, choose M here: the
14 module will be called megaraid_mm
15
16
17config MEGARAID_MAILBOX
18 tristate "LSI Logic MegaRAID Driver (New Driver)"
19 depends on PCI && SCSI && MEGARAID_MM
20 help
21 List of supported controllers
22
23 OEM Product Name VID :DID :SVID:SSID
24 --- ------------ ---- ---- ---- ----
25 Dell PERC3/QC 101E:1960:1028:0471
26 Dell PERC3/DC 101E:1960:1028:0493
27 Dell PERC3/SC 101E:1960:1028:0475
28 Dell PERC3/Di 1028:000E:1028:0123
29 Dell PERC4/SC 1000:1960:1028:0520
30 Dell PERC4/DC 1000:1960:1028:0518
31 Dell PERC4/QC 1000:0407:1028:0531
32 Dell PERC4/Di 1028:000F:1028:014A
33 Dell PERC 4e/Si 1028:0013:1028:016c
34 Dell PERC 4e/Di 1028:0013:1028:016d
35 Dell PERC 4e/Di 1028:0013:1028:016e
36 Dell PERC 4e/Di 1028:0013:1028:016f
37 Dell PERC 4e/Di 1028:0013:1028:0170
38 Dell PERC 4e/DC 1000:0408:1028:0002
39 Dell PERC 4e/SC 1000:0408:1028:0001
40 LSI MegaRAID SCSI 320-0 1000:1960:1000:A520
41 LSI MegaRAID SCSI 320-1 1000:1960:1000:0520
42 LSI MegaRAID SCSI 320-2 1000:1960:1000:0518
43 LSI MegaRAID SCSI 320-0X 1000:0407:1000:0530
44 LSI MegaRAID SCSI 320-2X 1000:0407:1000:0532
45 LSI MegaRAID SCSI 320-4X 1000:0407:1000:0531
46 LSI MegaRAID SCSI 320-1E 1000:0408:1000:0001
47 LSI MegaRAID SCSI 320-2E 1000:0408:1000:0002
48 LSI MegaRAID SATA 150-4 1000:1960:1000:4523
49 LSI MegaRAID SATA 150-6 1000:1960:1000:0523
50 LSI MegaRAID SATA 300-4X 1000:0409:1000:3004
51 LSI MegaRAID SATA 300-8X 1000:0409:1000:3008
52 INTEL RAID Controller SRCU42X 1000:0407:8086:0532
53 INTEL RAID Controller SRCS16 1000:1960:8086:0523
54 INTEL RAID Controller SRCU42E 1000:0408:8086:0002
55 INTEL RAID Controller SRCZCRX 1000:0407:8086:0530
56 INTEL RAID Controller SRCS28X 1000:0409:8086:3008
57 INTEL RAID Controller SROMBU42E 1000:0408:8086:3431
58 INTEL RAID Controller SROMBU42E 1000:0408:8086:3499
59 INTEL RAID Controller SRCU51L 1000:1960:8086:0520
60 FSC MegaRAID PCI Express ROMB 1000:0408:1734:1065
61 ACER MegaRAID ROMB-2E 1000:0408:1025:004D
62 NEC MegaRAID PCI Express ROMB 1000:0408:1033:8287
63
64 To compile this driver as a module, choose M here: the
65 module will be called megaraid_mbox
66
67if MEGARAID_NEWGEN=n
68config MEGARAID_LEGACY
69 tristate "LSI Logic Legacy MegaRAID Driver"
70 depends on PCI && SCSI
71 help
72 This driver supports the LSI MegaRAID 418, 428, 438, 466, 762, 490
73 and 467 SCSI host adapters. This driver also supports all of the U320
74 RAID controllers.
75
76 To compile this driver as a module, choose M here: the
77 module will be called megaraid
78endif
diff --git a/drivers/scsi/megaraid/Makefile b/drivers/scsi/megaraid/Makefile
new file mode 100644
index 000000000000..6dd99f275722
--- /dev/null
+++ b/drivers/scsi/megaraid/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_MEGARAID_MM) += megaraid_mm.o
2obj-$(CONFIG_MEGARAID_MAILBOX) += megaraid_mbox.o
diff --git a/drivers/scsi/megaraid/mbox_defs.h b/drivers/scsi/megaraid/mbox_defs.h
new file mode 100644
index 000000000000..3052869f51f4
--- /dev/null
+++ b/drivers/scsi/megaraid/mbox_defs.h
@@ -0,0 +1,790 @@
1/*
2 *
3 * Linux MegaRAID Unified device driver
4 *
5 * Copyright (c) 2003-2004 LSI Logic Corporation.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * FILE : mbox_defs.h
13 *
14 */
15#ifndef _MRAID_MBOX_DEFS_H_
16#define _MRAID_MBOX_DEFS_H_
17
18#include <linux/types.h>
19
20/*
21 * Commands and states for mailbox based controllers
22 */
23
24#define MBOXCMD_LREAD 0x01
25#define MBOXCMD_LWRITE 0x02
26#define MBOXCMD_PASSTHRU 0x03
27#define MBOXCMD_ADPEXTINQ 0x04
28#define MBOXCMD_ADAPTERINQ 0x05
29#define MBOXCMD_LREAD64 0xA7
30#define MBOXCMD_LWRITE64 0xA8
31#define MBOXCMD_PASSTHRU64 0xC3
32#define MBOXCMD_EXTPTHRU 0xE3
33
34#define MAIN_MISC_OPCODE 0xA4
35#define GET_MAX_SG_SUPPORT 0x01
36#define SUPPORT_EXT_CDB 0x16
37
38#define FC_NEW_CONFIG 0xA1
39#define NC_SUBOP_PRODUCT_INFO 0x0E
40#define NC_SUBOP_ENQUIRY3 0x0F
41#define ENQ3_GET_SOLICITED_FULL 0x02
42#define OP_DCMD_READ_CONFIG 0x04
43#define NEW_READ_CONFIG_8LD 0x67
44#define READ_CONFIG_8LD 0x07
45#define FLUSH_ADAPTER 0x0A
46#define FLUSH_SYSTEM 0xFE
47
48/*
49 * Command for random deletion of logical drives
50 */
51#define FC_DEL_LOGDRV 0xA4
52#define OP_SUP_DEL_LOGDRV 0x2A
53#define OP_GET_LDID_MAP 0x18
54#define OP_DEL_LOGDRV 0x1C
55
56/*
57 * BIOS commands
58 */
59#define IS_BIOS_ENABLED 0x62
60#define GET_BIOS 0x01
61#define CHNL_CLASS 0xA9
62#define GET_CHNL_CLASS 0x00
63#define SET_CHNL_CLASS 0x01
64#define CH_RAID 0x01
65#define CH_SCSI 0x00
66#define BIOS_PVT_DATA 0x40
67#define GET_BIOS_PVT_DATA 0x00
68
69
70/*
71 * Commands to support clustering
72 */
73#define GET_TARGET_ID 0x7D
74#define CLUSTER_OP 0x70
75#define GET_CLUSTER_MODE 0x02
76#define CLUSTER_CMD 0x6E
77#define RESERVE_LD 0x01
78#define RELEASE_LD 0x02
79#define RESET_RESERVATIONS 0x03
80#define RESERVATION_STATUS 0x04
81#define RESERVE_PD 0x05
82#define RELEASE_PD 0x06
83
84
85/*
86 * Module battery status
87 */
88#define BATTERY_MODULE_MISSING 0x01
89#define BATTERY_LOW_VOLTAGE 0x02
90#define BATTERY_TEMP_HIGH 0x04
91#define BATTERY_PACK_MISSING 0x08
92#define BATTERY_CHARGE_MASK 0x30
93#define BATTERY_CHARGE_DONE 0x00
94#define BATTERY_CHARGE_INPROG 0x10
95#define BATTERY_CHARGE_FAIL 0x20
96#define BATTERY_CYCLES_EXCEEDED 0x40
97
98/*
99 * Physical drive states.
100 */
101#define PDRV_UNCNF 0
102#define PDRV_ONLINE 3
103#define PDRV_FAILED 4
104#define PDRV_RBLD 5
105#define PDRV_HOTSPARE 6
106
107
108/*
109 * Raid logical drive states.
110 */
111#define RDRV_OFFLINE 0
112#define RDRV_DEGRADED 1
113#define RDRV_OPTIMAL 2
114#define RDRV_DELETED 3
115
116/*
117 * Read, write and cache policies
118 */
119#define NO_READ_AHEAD 0
120#define READ_AHEAD 1
121#define ADAP_READ_AHEAD 2
122#define WRMODE_WRITE_THRU 0
123#define WRMODE_WRITE_BACK 1
124#define CACHED_IO 0
125#define DIRECT_IO 1
126
127#define MAX_LOGICAL_DRIVES_8LD 8
128#define MAX_LOGICAL_DRIVES_40LD 40
129#define FC_MAX_PHYSICAL_DEVICES 256
130#define MAX_MBOX_CHANNELS 5
131#define MAX_MBOX_TARGET 15
132#define MBOX_MAX_PHYSICAL_DRIVES (MAX_MBOX_CHANNELS * MAX_MBOX_TARGET)
133#define MAX_ROW_SIZE_40LD 32
134#define MAX_ROW_SIZE_8LD 8
135#define SPAN_DEPTH_8_SPANS 8
136#define SPAN_DEPTH_4_SPANS 4
137#define MAX_REQ_SENSE_LEN 0x20
138
139
140
141/**
142 * struct mbox_t - Driver and f/w handshake structure.
143 * @cmd : firmware command
144 * @cmdid : command id
145 * @numsectors : number of sectors to be transferred
146 * @lba : Logical Block Address on LD
147 * @xferaddr : DMA address for data transfer
148 * @logdrv : logical drive number
149 * @numsge : number of scatter gather elements in sg list
150 * @resvd : reserved
151 * @busy : f/w busy, must wait to issue more commands.
152 * @numstatus : number of commands completed.
153 * @status : status of the commands completed
154 * @completed : array of completed command ids.
155 * @poll : poll and ack sequence
156 * @ack : poll and ack sequence
157 *
158 * The central handshake structure between the driver and the firmware. This
159 * structure must be allocated by the driver and aligned at 8-byte boundary.
160 */
161#define MBOX_MAX_FIRMWARE_STATUS 46
162typedef struct {
163 uint8_t cmd;
164 uint8_t cmdid;
165 uint16_t numsectors;
166 uint32_t lba;
167 uint32_t xferaddr;
168 uint8_t logdrv;
169 uint8_t numsge;
170 uint8_t resvd;
171 uint8_t busy;
172 uint8_t numstatus;
173 uint8_t status;
174 uint8_t completed[MBOX_MAX_FIRMWARE_STATUS];
175 uint8_t poll;
176 uint8_t ack;
177} __attribute__ ((packed)) mbox_t;
178
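The kernel-doc above requires the mailbox to be allocated by the driver on an 8-byte boundary. A minimal, hypothetical sketch of how an LLD might satisfy that, assuming a DMA-coherent buffer obtained with at least 8 bytes of slack (the helper name and variables are illustrative and not part of this patch):

/* Hypothetical helper: carve an 8-byte aligned mbox_t out of a raw
 * DMA-coherent buffer and report its bus address. */
static inline mbox_t *
mraid_align_mbox(caddr_t raw, dma_addr_t raw_dma, dma_addr_t *mbox_dma)
{
	unsigned long aligned = ((unsigned long)raw + 7) & ~7UL;
	unsigned long offset  = aligned - (unsigned long)raw;

	*mbox_dma = raw_dma + offset;

	return (mbox_t *)(raw + offset);
}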
179
180/**
181 * mbox64_t - 64-bit extension for the mailbox
182 * @xferaddr_lo : the low 32-bits of the address of the scatter-gather list
183 * @xferaddr_hi : the upper 32-bits of the address of the scatter-gather list
184 * @mbox32 : 32-bit mailbox, whose xferaddr field must be set to
185 * 0xFFFFFFFF
186 *
187 * This is the extension of the 32-bit mailbox to be able to perform DMA
188 * beyond 4GB address range.
189 */
190typedef struct {
191 uint32_t xferaddr_lo;
192 uint32_t xferaddr_hi;
193 mbox_t mbox32;
194} __attribute__ ((packed)) mbox64_t;
195
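Per the description above, the embedded 32-bit mailbox signals a 64-bit transfer address by carrying 0xFFFFFFFF in its xferaddr field while the real address goes into the two extension words. A hedged sketch of that convention (the helper name is illustrative):

/* Hypothetical helper: route a data/SG address through the 64-bit
 * mailbox extension so DMA beyond the 4GB boundary is possible. */
static void
mraid_prepare_mbox64(mbox64_t *mbox64, uint64_t dma_addr)
{
	mbox64->xferaddr_lo	= (uint32_t)(dma_addr & 0xFFFFFFFF);
	mbox64->xferaddr_hi	= (uint32_t)(dma_addr >> 32);

	/* tell the firmware to use the 64-bit address above */
	mbox64->mbox32.xferaddr	= 0xFFFFFFFF;
}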
196/*
197 * mailbox structure used for internal commands
198 */
199typedef struct {
200 u8 cmd;
201 u8 cmdid;
202 u8 opcode;
203 u8 subopcode;
204 u32 lba;
205 u32 xferaddr;
206 u8 logdrv;
207 u8 rsvd[3];
208 u8 numstatus;
209 u8 status;
210} __attribute__ ((packed)) int_mbox_t;
211
212/**
213 * mraid_passthru_t - passthru structure to issue commands to physical devices
214 * @timeout : command timeout, 0=6sec, 1=60sec, 2=10min, 3=3hr
215 * @ars : set if ARS required after check condition
216 * @islogical : set if command meant for logical devices
217 * @logdrv : logical drive number if command for LD
218 * @channel : Channel on which physical device is located
219 * @target : SCSI target of the device
220 * @queuetag : unused
221 * @queueaction : unused
222 * @cdb : SCSI CDB
223 * @cdblen : length of the CDB
224 * @reqsenselen : amount of request sense data to be returned
225 * @reqsensearea : Sense information buffer
226 * @numsge : number of scatter-gather elements in the sg list
227 * @scsistatus : SCSI status of the command completed.
228 * @dataxferaddr : DMA data transfer address
229 * @dataxferlen : amount of the data to be transferred.
230 */
231typedef struct {
232 uint8_t timeout :3;
233 uint8_t ars :1;
234 uint8_t reserved :3;
235 uint8_t islogical :1;
236 uint8_t logdrv;
237 uint8_t channel;
238 uint8_t target;
239 uint8_t queuetag;
240 uint8_t queueaction;
241 uint8_t cdb[10];
242 uint8_t cdblen;
243 uint8_t reqsenselen;
244 uint8_t reqsensearea[MAX_REQ_SENSE_LEN];
245 uint8_t numsge;
246 uint8_t scsistatus;
247 uint32_t dataxferaddr;
248 uint32_t dataxferlen;
249} __attribute__ ((packed)) mraid_passthru_t;
250
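To make the field descriptions above concrete, here is a hedged, hypothetical sketch that fills a passthru for a 6-byte INQUIRY to a physical device; the helper name, the 36-byte buffer, and the assumption of a 32-bit DMA address are illustrative only:

/* Hypothetical example: build an INQUIRY passthru for a physical device.
 * buf_dma is assumed to be a 32-bit DMA address of a 36-byte buffer. */
static void
mraid_sketch_inquiry_pthru(mraid_passthru_t *pthru, uint8_t channel,
		uint8_t target, uint32_t buf_dma)
{
	memset(pthru, 0, sizeof(*pthru));

	pthru->timeout		= 1;	/* 60 second timeout */
	pthru->ars		= 1;	/* auto request sense */
	pthru->islogical	= 0;	/* physical device */
	pthru->channel		= channel;
	pthru->target		= target;

	pthru->cdb[0]		= 0x12;	/* INQUIRY */
	pthru->cdb[4]		= 36;	/* allocation length */
	pthru->cdblen		= 6;
	pthru->reqsenselen	= MAX_REQ_SENSE_LEN;

	pthru->numsge		= 0;	/* single contiguous buffer, no SG list */
	pthru->dataxferaddr	= buf_dma;
	pthru->dataxferlen	= 36;
}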
251typedef struct {
252
253 uint32_t dataxferaddr_lo;
254 uint32_t dataxferaddr_hi;
255 mraid_passthru_t pthru32;
256
257} __attribute__ ((packed)) mega_passthru64_t;
258
259/**
260 * mraid_epassthru_t - passthru structure to issue commands to physical devices
261 * @timeout : command timeout, 0=6sec, 1=60sec, 2=10min, 3=3hr
262 * @ars : set if ARS required after check condition
263 * @rsvd1 : reserved field
264 * @cd_rom : (?)
265 * @rsvd2 : reserved field
266 * @islogical : set if command meant for logical devices
267 * @logdrv : logical drive number if command for LD
268 * @channel : Channel on which physical device is located
269 * @target : SCSI target of the device
270 * @queuetag : unused
271 * @queueaction : unused
272 * @cdblen : length of the CDB
273 * @rsvd3 : reserved field
274 * @cdb : SCSI CDB
275 * @numsge : number of scatter-gather elements in the sg list
276 * @status : SCSI status of the command completed.
277 * @reqsenselen : amount of request sense data to be returned
278 * @reqsensearea : Sense information buffer
279 * @rsvd4 : reserved field
280 * @dataxferaddr : DMA data transfer address
281 * @dataxferlen : amount of the data to be transferred.
282 */
283typedef struct {
284 uint8_t timeout :3;
285 uint8_t ars :1;
286 uint8_t rsvd1 :1;
287 uint8_t cd_rom :1;
288 uint8_t rsvd2 :1;
289 uint8_t islogical :1;
290 uint8_t logdrv;
291 uint8_t channel;
292 uint8_t target;
293 uint8_t queuetag;
294 uint8_t queueaction;
295 uint8_t cdblen;
296 uint8_t rsvd3;
297 uint8_t cdb[16];
298 uint8_t numsge;
299 uint8_t status;
300 uint8_t reqsenselen;
301 uint8_t reqsensearea[MAX_REQ_SENSE_LEN];
302 uint8_t rsvd4;
303 uint32_t dataxferaddr;
304 uint32_t dataxferlen;
305} __attribute__ ((packed)) mraid_epassthru_t;
306
307
308/**
309 * mraid_pinfo_t - product info, static information about the controller
310 * @data_size : current size in bytes (not including resvd)
311 * @config_signature : Current value is 0x00282008
312 * @fw_version : Firmware version
313 * @bios_version : version of the BIOS
314 * @product_name : Name given to the controller
315 * @max_commands : Maximum concurrent commands supported
316 * @nchannels : Number of SCSI Channels detected
317 * @fc_loop_present : Number of Fibre Loops detected
318 * @mem_type : EDO, FPM, SDRAM etc
319 * @signature :
320 * @dram_size : In terms of MB
321 * @subsysid : device PCI subsystem ID
322 * @subsysvid : device PCI subsystem vendor ID
323 * @notify_counters :
324 * @pad1k : 135 + 889 resvd = 1024 total size
325 *
326 * This structure holds the information about the controller which is not
327 * expected to change dynamically.
328 *
329 * The current value of config signature is 0x00282008:
330 * 0x28 = MAX_LOGICAL_DRIVES,
331 * 0x20 = Number of stripes and
332 * 0x08 = Number of spans
333 */
334typedef struct {
335 uint32_t data_size;
336 uint32_t config_signature;
337 uint8_t fw_version[16];
338 uint8_t bios_version[16];
339 uint8_t product_name[80];
340 uint8_t max_commands;
341 uint8_t nchannels;
342 uint8_t fc_loop_present;
343 uint8_t mem_type;
344 uint32_t signature;
345 uint16_t dram_size;
346 uint16_t subsysid;
347 uint16_t subsysvid;
348 uint8_t notify_counters;
349 uint8_t pad1k[889];
350} __attribute__ ((packed)) mraid_pinfo_t;
351
352
353/**
354 * mraid_notify_t - the notification structure
355 * @global_counter : Any change increments this counter
356 * @param_counter : Indicates any params changed
357 * @param_id : Param modified - defined below
358 * @param_val : New val of last param modified
359 * @write_config_counter : write config occurred
360 * @write_config_rsvd :
361 * @ldrv_op_counter : Indicates ldrv op started/completed
362 * @ldrv_opid : ldrv num
363 * @ldrv_opcmd : ldrv operation - defined below
364 * @ldrv_opstatus : status of the operation
365 * @ldrv_state_counter : Indicates change of ldrv state
366 * @ldrv_state_id : ldrv num
367 * @ldrv_state_new : New state
368 * @ldrv_state_old : old state
369 * @pdrv_state_counter : Indicates change of ldrv state
370 * @pdrv_state_id : pdrv id
371 * @pdrv_state_new : New state
372 * @pdrv_state_old : old state
373 * @pdrv_fmt_counter : Indicates pdrv format started/over
374 * @pdrv_fmt_id : pdrv id
375 * @pdrv_fmt_val : format started/over
376 * @pdrv_fmt_rsvd :
377 * @targ_xfer_counter : Indicates SCSI-2 Xfer rate change
378 * @targ_xfer_id : pdrv Id
379 * @targ_xfer_val : new Xfer params of last pdrv
380 * @targ_xfer_rsvd :
381 * @fcloop_id_chg_counter : Indicates loopid changed
382 * @fcloopid_pdrvid : pdrv id
383 * @fcloop_id0 : loopid on fc loop 0
384 * @fcloop_id1 : loopid on fc loop 1
385 * @fcloop_state_counter : Indicates loop state changed
386 * @fcloop_state0 : state of fc loop 0
387 * @fcloop_state1 : state of fc loop 1
388 * @fcloop_state_rsvd :
389 */
390typedef struct {
391 uint32_t global_counter;
392 uint8_t param_counter;
393 uint8_t param_id;
394 uint16_t param_val;
395 uint8_t write_config_counter;
396 uint8_t write_config_rsvd[3];
397 uint8_t ldrv_op_counter;
398 uint8_t ldrv_opid;
399 uint8_t ldrv_opcmd;
400 uint8_t ldrv_opstatus;
401 uint8_t ldrv_state_counter;
402 uint8_t ldrv_state_id;
403 uint8_t ldrv_state_new;
404 uint8_t ldrv_state_old;
405 uint8_t pdrv_state_counter;
406 uint8_t pdrv_state_id;
407 uint8_t pdrv_state_new;
408 uint8_t pdrv_state_old;
409 uint8_t pdrv_fmt_counter;
410 uint8_t pdrv_fmt_id;
411 uint8_t pdrv_fmt_val;
412 uint8_t pdrv_fmt_rsvd;
413 uint8_t targ_xfer_counter;
414 uint8_t targ_xfer_id;
415 uint8_t targ_xfer_val;
416 uint8_t targ_xfer_rsvd;
417 uint8_t fcloop_id_chg_counter;
418 uint8_t fcloopid_pdrvid;
419 uint8_t fcloop_id0;
420 uint8_t fcloop_id1;
421 uint8_t fcloop_state_counter;
422 uint8_t fcloop_state0;
423 uint8_t fcloop_state1;
424 uint8_t fcloop_state_rsvd;
425} __attribute__ ((packed)) mraid_notify_t;
426
427
428/**
429 * mraid_inquiry3_t - enquiry for device information
430 *
431 * @data_size : current size in bytes (not including resvd)
432 * @notify :
433 * @notify_rsvd :
434 * @rebuild_rate : rebuild rate (0% - 100%)
435 * @cache_flush_int : cache flush interval in seconds
436 * @sense_alert :
437 * @drive_insert_count : drive insertion count
438 * @battery_status :
439 * @num_ldrv : no. of Log Drives configured
440 * @recon_state : state of reconstruct
441 * @ldrv_op_status : logdrv Status
442 * @ldrv_size : size of each log drv
443 * @ldrv_prop :
444 * @ldrv_state : state of log drives
445 * @pdrv_state : state of phys drvs.
446 * @pdrv_format :
447 * @targ_xfer : phys device transfer rate
448 * @pad1k : 761 + 263 reserved = 1024 bytes total size
449 */
450#define MAX_NOTIFY_SIZE 0x80
451#define CUR_NOTIFY_SIZE sizeof(mraid_notify_t)
452
453typedef struct {
454 uint32_t data_size;
455
456 mraid_notify_t notify;
457
458 uint8_t notify_rsvd[MAX_NOTIFY_SIZE - CUR_NOTIFY_SIZE];
459
460 uint8_t rebuild_rate;
461 uint8_t cache_flush_int;
462 uint8_t sense_alert;
463 uint8_t drive_insert_count;
464
465 uint8_t battery_status;
466 uint8_t num_ldrv;
467 uint8_t recon_state[MAX_LOGICAL_DRIVES_40LD / 8];
468 uint16_t ldrv_op_status[MAX_LOGICAL_DRIVES_40LD / 8];
469
470 uint32_t ldrv_size[MAX_LOGICAL_DRIVES_40LD];
471 uint8_t ldrv_prop[MAX_LOGICAL_DRIVES_40LD];
472 uint8_t ldrv_state[MAX_LOGICAL_DRIVES_40LD];
473 uint8_t pdrv_state[FC_MAX_PHYSICAL_DEVICES];
474 uint16_t pdrv_format[FC_MAX_PHYSICAL_DEVICES / 16];
475
476 uint8_t targ_xfer[80];
477 uint8_t pad1k[263];
478} __attribute__ ((packed)) mraid_inquiry3_t;
479
480
481/**
482 * mraid_adapinfo_t - information about the adapter
483 * @max_commands : max concurrent commands supported
484 * @rebuild_rate : rebuild rate - 0% thru 100%
485 * @max_targ_per_chan : max targ per channel
486 * @nchannels : number of channels on HBA
487 * @fw_version : firmware version
488 * @age_of_flash : number of times FW has been flashed
489 * @chip_set_value : contents of 0xC0000832
490 * @dram_size : in MB
491 * @cache_flush_interval : in seconds
492 * @bios_version :
493 * @board_type :
494 * @sense_alert :
495 * @write_config_count : increase with every configuration change
496 * @drive_inserted_count : increase with every drive inserted
497 * @inserted_drive : channel:Id of inserted drive
498 * @battery_status : bit 0: battery module missing
499 * bit 1: VBAD
500 * bit 2: temperature high
501 * bit 3: battery pack missing
502 * bit 4,5:
503 * 00 - charge complete
504 * 01 - fast charge in progress
505 * 10 - fast charge fail
506 * 11 - undefined
507 * bit 6: counter > 1000
508 * bit 7: Undefined
509 * @dec_fault_bus_info :
510 */
511typedef struct {
512 uint8_t max_commands;
513 uint8_t rebuild_rate;
514 uint8_t max_targ_per_chan;
515 uint8_t nchannels;
516 uint8_t fw_version[4];
517 uint16_t age_of_flash;
518 uint8_t chip_set_value;
519 uint8_t dram_size;
520 uint8_t cache_flush_interval;
521 uint8_t bios_version[4];
522 uint8_t board_type;
523 uint8_t sense_alert;
524 uint8_t write_config_count;
525 uint8_t battery_status;
526 uint8_t dec_fault_bus_info;
527} __attribute__ ((packed)) mraid_adapinfo_t;
528
529
530/**
531 * mraid_ldrv_info_t - information about the logical drives
532 * @nldrv : Number of logical drives configured
533 * @rsvd :
534 * @size : size of each logical drive
535 * @prop :
536 * @state : state of each logical drive
537 */
538typedef struct {
539 uint8_t nldrv;
540 uint8_t rsvd[3];
541 uint32_t size[MAX_LOGICAL_DRIVES_8LD];
542 uint8_t prop[MAX_LOGICAL_DRIVES_8LD];
543 uint8_t state[MAX_LOGICAL_DRIVES_8LD];
544} __attribute__ ((packed)) mraid_ldrv_info_t;
545
546
547/**
548 * mraid_pdrv_info_t - information about the physical drives
549 * @pdrv_state : state of each physical drive
550 */
551typedef struct {
552 uint8_t pdrv_state[MBOX_MAX_PHYSICAL_DRIVES];
553 uint8_t rsvd;
554} __attribute__ ((packed)) mraid_pdrv_info_t;
555
556
557/**
558 * mraid_inquiry_t - RAID inquiry, mailbox command 0x05
559 * @mraid_adapinfo_t : adapter information
560 * @mraid_ldrv_info_t : logical drives information
561 * @mraid_pdrv_info_t : physical drives information
562 */
563typedef struct {
564 mraid_adapinfo_t adapter_info;
565 mraid_ldrv_info_t logdrv_info;
566 mraid_pdrv_info_t pdrv_info;
567} __attribute__ ((packed)) mraid_inquiry_t;
568
569
570/**
571 * mraid_extinq_t - RAID extended inquiry, mailbox command 0x04
572 *
573 * @raid_inq : raid inquiry
574 * @phys_drv_format :
575 * @stack_attn :
576 * @modem_status :
577 * @rsvd :
578 */
579typedef struct {
580 mraid_inquiry_t raid_inq;
581 uint16_t phys_drv_format[MAX_MBOX_CHANNELS];
582 uint8_t stack_attn;
583 uint8_t modem_status;
584 uint8_t rsvd[2];
585} __attribute__ ((packed)) mraid_extinq_t;
586
587
588/**
589 * adap_device_t - device information
590 * @channel : channel for the device
591 * @target : target ID of the device
592 */
593typedef struct {
594 uint8_t channel;
595 uint8_t target;
596}__attribute__ ((packed)) adap_device_t;
597
598
599/**
600 * adap_span_40ld_t - 40LD span
601 * @start_blk : starting block
602 * @num_blks : number of blocks
603 */
604typedef struct {
605 uint32_t start_blk;
606 uint32_t num_blks;
607 adap_device_t device[MAX_ROW_SIZE_40LD];
608}__attribute__ ((packed)) adap_span_40ld_t;
609
610
611/**
612 * adap_span_8ld_t - 8LD span
613 * @start_blk : starting block
614 * @num_blks : number of blocks
615 */
616typedef struct {
617 uint32_t start_blk;
618 uint32_t num_blks;
619 adap_device_t device[MAX_ROW_SIZE_8LD];
620}__attribute__ ((packed)) adap_span_8ld_t;
621
622
623/**
624 * logdrv_param_t - logical drives parameters
625 *
626 * @span_depth : total number of spans
627 * @level : RAID level
628 * @read_ahead : read ahead, no read ahead, adaptive read ahead
629 * @stripe_sz : encoded stripe size
630 * @status : status of the logical drive
631 * @write_mode : write mode, write_through/write_back
632 * @direct_io : direct io or through cache
633 * @row_size : number of stripes in a row
634 */
635typedef struct {
636 uint8_t span_depth;
637 uint8_t level;
638 uint8_t read_ahead;
639 uint8_t stripe_sz;
640 uint8_t status;
641 uint8_t write_mode;
642 uint8_t direct_io;
643 uint8_t row_size;
644} __attribute__ ((packed)) logdrv_param_t;
645
646
647/**
648 * logdrv_40ld_t - logical drive definition for 40LD controllers
649 * @lparam : logical drives parameters
650 * @span : span
651 */
652typedef struct {
653 logdrv_param_t lparam;
654 adap_span_40ld_t span[SPAN_DEPTH_8_SPANS];
655}__attribute__ ((packed)) logdrv_40ld_t;
656
657
658/**
659 * logdrv_8ld_span8_t - logical drive definition for 8LD controllers
660 * @lparam : logical drives parameters
661 * @span : span
662 *
663 * 8-LD logical drive with up to 8 spans
664 */
665typedef struct {
666 logdrv_param_t lparam;
667 adap_span_8ld_t span[SPAN_DEPTH_8_SPANS];
668}__attribute__ ((packed)) logdrv_8ld_span8_t;
669
670
671/**
672 * logdrv_8ld_span4_t - logical drive definition for 8LD controllers
673 * @lparam : logical drives parameters
674 * @span : span
675 *
676 * 8-LD logical drive with up to 4 spans
677 */
678typedef struct {
679 logdrv_param_t lparam;
680 adap_span_8ld_t span[SPAN_DEPTH_4_SPANS];
681}__attribute__ ((packed)) logdrv_8ld_span4_t;
682
683
684/**
685 * phys_drive_t - physical device information
686 * @type : Type of the device
687 * @cur_status : current status of the device
688 * @tag_depth : Level of tagging
689 * @sync_neg : sync negotiation - ENABLE or DISABLE
690 * @size : configurable size in terms of 512-byte blocks
691 */
692typedef struct {
693 uint8_t type;
694 uint8_t cur_status;
695 uint8_t tag_depth;
696 uint8_t sync_neg;
697 uint32_t size;
698}__attribute__ ((packed)) phys_drive_t;
699
700
701/**
702 * disk_array_40ld_t - disk array for 40LD controllers
703 * @numldrv : number of logical drives
704 * @resvd :
705 * @ldrv : logical drives information
706 * @pdrv : physical drives information
707 */
708typedef struct {
709 uint8_t numldrv;
710 uint8_t resvd[3];
711 logdrv_40ld_t ldrv[MAX_LOGICAL_DRIVES_40LD];
712 phys_drive_t pdrv[MBOX_MAX_PHYSICAL_DRIVES];
713}__attribute__ ((packed)) disk_array_40ld_t;
714
715
716/**
717 * disk_array_8ld_span8_t - disk array for 8LD controllers
718 * @numldrv : number of logical drives
719 * @resvd :
720 * @ldrv : logical drives information
721 * @pdrv : physical drives information
722 *
723 * Disk array for 8LD logical drives with up to 8 spans
724 */
725typedef struct {
726 uint8_t numldrv;
727 uint8_t resvd[3];
728 logdrv_8ld_span8_t ldrv[MAX_LOGICAL_DRIVES_8LD];
729 phys_drive_t pdrv[MBOX_MAX_PHYSICAL_DRIVES];
730}__attribute__ ((packed)) disk_array_8ld_span8_t;
731
732
733/**
734 * disk_array_8ld_span4_t - disk array for 8LD controllers
735 * @numldrv : number of logical drives
736 * @resvd :
737 * @ldrv : logical drives information
738 * @pdrv : physical drives information
739 *
740 * Disk array for 8LD logical drives with up to 4 spans
741 */
742typedef struct {
743 uint8_t numldrv;
744 uint8_t resvd[3];
745 logdrv_8ld_span4_t ldrv[MAX_LOGICAL_DRIVES_8LD];
746 phys_drive_t pdrv[MBOX_MAX_PHYSICAL_DRIVES];
747}__attribute__ ((packed)) disk_array_8ld_span4_t;
748
749
750/**
751 * private_bios_data - bios private data for boot devices
752 * @geometry : bits 0-3 - BIOS geometry, 0x0001 - 1GB, 0x0010 - 2GB,
753 * 0x1000 - 8GB, other values are invalid
754 * @unused : bits 4-7 are unused
755 * @boot_drv : logical drive set as boot drive, 0..7 - for 8LD cards,
756 * 0..39 - for 40LD cards
757 * @cksum : 0-(sum of first 13 bytes of this structure)
758 */
759struct private_bios_data {
760 uint8_t geometry :4;
761 uint8_t unused :4;
762 uint8_t boot_drv;
763 uint8_t rsvd[12];
764 uint16_t cksum;
765} __attribute__ ((packed));
766
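The @cksum rule above (zero minus the sum of the first 13 bytes of the structure) can be illustrated with a small hedged sketch; the helper name is hypothetical:

/* Hypothetical helper: compute the BIOS private data checksum as
 * documented above: 0 - (sum of the first 13 bytes of the structure). */
static uint16_t
mraid_sketch_pbd_cksum(const struct private_bios_data *pbd)
{
	const uint8_t	*p = (const uint8_t *)pbd;
	uint16_t	sum = 0;
	int		i;

	for (i = 0; i < 13; i++)
		sum += p[i];

	return (uint16_t)(0 - sum);
}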
767
768/**
769 * mbox_sgl64 - 64-bit scatter list for mailbox based controllers
770 * @address : address of the buffer
771 * @length : data transfer length
772 */
773typedef struct {
774 uint64_t address;
775 uint32_t length;
776} __attribute__ ((packed)) mbox_sgl64;
777
778/**
779 * mbox_sgl32 - 32-bit scatter list for mailbox based controllers
780 * @address : address of the buffer
781 * @length : data transfer length
782 */
783typedef struct {
784 uint32_t address;
785 uint32_t length;
786} __attribute__ ((packed)) mbox_sgl32;
787
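A hedged sketch of how an LLD might fill the 32-bit scatter list defined above from an already DMA-mapped struct scatterlist; the helper name is illustrative, the element count is assumed to come from pci_map_sg(), and 32-bit DMA addressing is assumed:

/* Hypothetical helper: copy a mapped scatterlist into the firmware's
 * 32-bit SG format. */
static void
mraid_sketch_fill_sgl32(mbox_sgl32 *sgl, struct scatterlist *sg, int numsge)
{
	int i;

	for (i = 0; i < numsge; i++) {
		sgl[i].address	= sg_dma_address(&sg[i]);	/* 32-bit DMA assumed */
		sgl[i].length	= sg_dma_len(&sg[i]);
	}
}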
788#endif // _MRAID_MBOX_DEFS_H_
789
790/* vim: set ts=8 sw=8 tw=78: */
diff --git a/drivers/scsi/megaraid/mega_common.h b/drivers/scsi/megaraid/mega_common.h
new file mode 100644
index 000000000000..18969a4946b7
--- /dev/null
+++ b/drivers/scsi/megaraid/mega_common.h
@@ -0,0 +1,286 @@
1/*
2 *
3 * Linux MegaRAID device driver
4 *
5 * Copyright (c) 2003-2004 LSI Logic Corporation.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * FILE : mega_common.h
13 *
14 * Library of common routines used by all low-level megaraid drivers
15 */
16
17#ifndef _MEGA_COMMON_H_
18#define _MEGA_COMMON_H_
19
20#include <linux/kernel.h>
21#include <linux/types.h>
22#include <linux/pci.h>
23#include <linux/spinlock.h>
24#include <linux/interrupt.h>
25#include <linux/delay.h>
26#include <linux/blkdev.h>
27#include <linux/list.h>
28#include <linux/version.h>
29#include <linux/moduleparam.h>
30#include <asm/semaphore.h>
31#include <scsi/scsi.h>
32#include <scsi/scsi_cmnd.h>
33#include <scsi/scsi_device.h>
34#include <scsi/scsi_host.h>
35
36
37#define LSI_MAX_CHANNELS 16
38#define LSI_MAX_LOGICAL_DRIVES_64LD (64+1)
39
40
41/**
42 * scb_t - scsi command control block
43 * @param ccb : command control block for individual driver
44 * @param list : list of control blocks
45 * @param gp : general purpose field for LLDs
46 * @param sno : all SCBs have a serial number
47 * @param scp : associated scsi command
48 * @param state : current state of scb
49 * @param dma_direction : direction of data transfer
50 * @param dma_type : transfer with sg list, buffer, or no data transfer
51 * @param dev_channel : actual channel on the device
52 * @param dev_target : actual target on the device
53 * @param status : completion status
54 *
55 * This is our central data structure to issue commands to each driver.
56 * Driver specific data structures are maintained in the ccb field.
57 * scb provides a field 'gp', which can be used by the LLD for its own purposes
58 *
59 * dev_channel and dev_target must be initialized with the actual channel and
60 * target on the controller.
61 */
62typedef struct {
63 caddr_t ccb;
64 struct list_head list;
65 unsigned long gp;
66 unsigned int sno;
67 struct scsi_cmnd *scp;
68 uint32_t state;
69 uint32_t dma_direction;
70 uint32_t dma_type;
71 uint16_t dev_channel;
72 uint16_t dev_target;
73 uint32_t status;
74} scb_t;
75
76/*
77 * SCB states as it transitions from one state to another
78 */
79#define SCB_FREE 0x0000 /* on the free list */
80#define SCB_ACTIVE 0x0001 /* off the free list */
81#define SCB_PENDQ 0x0002 /* on the pending queue */
82#define SCB_ISSUED 0x0004 /* issued - owner f/w */
83#define SCB_ABORT 0x0008 /* Got an abort for this one */
84#define SCB_RESET 0x0010 /* Got a reset for this one */
85
86/*
87 * DMA types for scb
88 */
89#define MRAID_DMA_NONE 0x0000 /* no data transfer for this command */
90#define MRAID_DMA_WSG 0x0001 /* data transfer using a sg list */
91#define MRAID_DMA_WBUF 0x0002 /* data transfer using a contiguous buffer */
92
93
94/**
95 * struct adapter_t - driver's initialization structure
96 * @param dpc_h : tasklet handle
97 * @param pdev : pci configuration pointer for kernel
98 * @param host : pointer to host structure of mid-layer
99 * @param host_lock : pointer to appropriate lock
100 * @param lock : synchronization lock for mid-layer and driver
101 * @param quiescent : driver is quiescent for now.
102 * @param outstanding_cmds : number of commands pending in the driver
103 * @param kscb_list : pointer to the bulk of SCBs pointers for IO
104 * @param kscb_pool : pool of free scbs for IO
105 * @param kscb_pool_lock : lock for pool of free scbs
106 * @param pend_list : pending commands list
107 * @param pend_list_lock : exclusion lock for pending commands list
108 * @param completed_list : list of completed commands
109 * @param completed_list_lock : exclusion lock for list of completed commands
110 * @param sglen : max sg elements supported
111 * @param device_ids : to convert kernel device addr to our devices.
112 * @param raid_device : raid adapter specific pointer
113 * @param max_channel : maximum channel number supported - inclusive
114 * @param max_target : max target supported - inclusive
115 * @param max_lun : max lun supported - inclusive
116 * @param unique_id : unique identifier for each adapter
117 * @param irq : IRQ for this adapter
118 * @param ito : internal timeout value, (-1) means no timeout
119 * @param ibuf : buffer to issue internal commands
120 * @param ibuf_dma_h : dma handle for the above buffer
121 * @param uscb_list : SCB pointers for user cmds, common mgmt module
122 * @param uscb_pool : pool of SCBs for user commands
123 * @param uscb_pool_lock : exclusion lock for these SCBs
124 * @param max_cmds : max outstanding commands
125 * @param fw_version : firmware version
126 * @param bios_version : bios version
127 * @param max_cdb_sz : biggest CDB size supported.
128 * @param ha : is high availability present - clustering
129 * @param init_id : initiator ID, the default value should be 7
130 * @param max_sectors : max sectors per request
131 * @param cmd_per_lun : max outstanding commands per LUN
132 * @param being_detached : set when unloading, no more mgmt calls
133 *
134 *
135 * mraid_setup_device_map() can be called anytime after the device map is
136 * available and MRAID_GET_DEVICE_MAP() can be called whenever the mapping is
137 * required, usually from the LLD's queue entry point. The former API sets up
138 * the device map; MRAID_IS_LOGICAL(adapter_t *, struct scsi_cmnd *) can then be
139 * used to find out if the device in question is a logical drive.
140 *
141 * quiescent flag should be set by the driver if it is not accepting more
142 * commands
143 *
144 * NOTE: The fields of this structure are placed to minimize cache misses
145 */
146
147// amount of space required to store the bios and firmware version strings
148#define VERSION_SIZE 16
149
150typedef struct {
151 struct tasklet_struct dpc_h;
152 struct pci_dev *pdev;
153 struct Scsi_Host *host;
154 spinlock_t *host_lock;
155 spinlock_t lock;
156 uint8_t quiescent;
157 int outstanding_cmds;
158 scb_t *kscb_list;
159 struct list_head kscb_pool;
160 spinlock_t kscb_pool_lock;
161 struct list_head pend_list;
162 spinlock_t pend_list_lock;
163 struct list_head completed_list;
164 spinlock_t completed_list_lock;
165 uint16_t sglen;
166 int device_ids[LSI_MAX_CHANNELS]
167 [LSI_MAX_LOGICAL_DRIVES_64LD];
168 caddr_t raid_device;
169 uint8_t max_channel;
170 uint16_t max_target;
171 uint8_t max_lun;
172
173 uint32_t unique_id;
174 uint8_t irq;
175 uint8_t ito;
176 caddr_t ibuf;
177 dma_addr_t ibuf_dma_h;
178 scb_t *uscb_list;
179 struct list_head uscb_pool;
180 spinlock_t uscb_pool_lock;
181 int max_cmds;
182 uint8_t fw_version[VERSION_SIZE];
183 uint8_t bios_version[VERSION_SIZE];
184 uint8_t max_cdb_sz;
185 uint8_t ha;
186 uint16_t init_id;
187 uint16_t max_sectors;
188 uint16_t cmd_per_lun;
189 atomic_t being_detached;
190} adapter_t;
191
192#define SCSI_FREE_LIST_LOCK(adapter) (&adapter->kscb_pool_lock)
193#define USER_FREE_LIST_LOCK(adapter) (&adapter->uscb_pool_lock)
194#define PENDING_LIST_LOCK(adapter) (&adapter->pend_list_lock)
195#define COMPLETED_LIST_LOCK(adapter) (&adapter->completed_list_lock)
196
197
198// conversion from scsi command
199#define SCP2HOST(scp) (scp)->device->host // to host
200#define SCP2HOSTDATA(scp) SCP2HOST(scp)->hostdata // to soft state
201#define SCP2CHANNEL(scp) (scp)->device->channel // to channel
202#define SCP2TARGET(scp) (scp)->device->id // to target
203#define SCP2LUN(scp) (scp)->device->lun // to LUN
204
205// generic macro to convert scsi command and host to controller's soft state
206#define SCSIHOST2ADAP(host) (((caddr_t *)(host->hostdata))[0])
207#define SCP2ADAPTER(scp) (adapter_t *)SCSIHOST2ADAP(SCP2HOST(scp))
208
209
210/**
211 * MRAID_GET_DEVICE_MAP - device ids
212 * @param adp - Adapter's soft state
213 * @param scp - mid-layer scsi command pointer
214 * @param p_chan - physical channel on the controller
215 * @param target - target id of the device or logical drive number
216 * @param islogical - set if the command is for the logical drive
217 *
218 * Macro to retrieve information about device class, logical or physical and
219 * the corresponding physical channel and target or logical drive number
220 **/
221#define MRAID_IS_LOGICAL(adp, scp) \
222 (SCP2CHANNEL(scp) == (adp)->max_channel) ? 1 : 0
223
224#define MRAID_IS_LOGICAL_SDEV(adp, sdev) \
225 (sdev->channel == (adp)->max_channel) ? 1 : 0
226
227#define MRAID_GET_DEVICE_MAP(adp, scp, p_chan, target, islogical) \
228 /* \
229 * Is the request coming for the virtual channel \
230 */ \
231 islogical = MRAID_IS_LOGICAL(adp, scp); \
232 \
233 /* \
234 * Get an index into our table of drive ids mapping \
235 */ \
236 if (islogical) { \
237 p_chan = 0xFF; \
238 target = \
239 (adp)->device_ids[(adp)->max_channel][SCP2TARGET(scp)]; \
240 } \
241 else { \
242 p_chan = ((adp)->device_ids[SCP2CHANNEL(scp)] \
243 [SCP2TARGET(scp)] >> 8) & 0xFF; \
244 target = ((adp)->device_ids[SCP2CHANNEL(scp)] \
245 [SCP2TARGET(scp)] & 0xFF); \
246 }
247
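As the kernel-doc above notes, these mapping macros are intended for use from the LLD's queue entry point. A hedged usage sketch (the function name and surrounding logic are illustrative, not the driver's actual code):

/* Hypothetical fragment: translate the mid-layer address of a command
 * into the controller's channel/target numbers. */
static void
mraid_sketch_map_command(adapter_t *adapter, struct scsi_cmnd *scp)
{
	int	channel;
	int	target;
	int	islogical;

	MRAID_GET_DEVICE_MAP(adapter, scp, channel, target, islogical);

	if (islogical) {
		/* 'target' is the logical drive number, channel is 0xFF */
	} else {
		/* 'channel' and 'target' address the physical device */
	}
}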
248/*
249 * ### Helper routines ###
250 */
251#define LSI_DBGLVL mraid_debug_level // each LLD must define a global
252 // mraid_debug_level
253
254#ifdef DEBUG
255#if defined (_ASSERT_PANIC)
256#define ASSERT_ACTION panic
257#else
258#define ASSERT_ACTION printk
259#endif
260
261#define ASSERT(expression) \
262 if (!(expression)) { \
263 ASSERT_ACTION("assertion failed:(%s), file: %s, line: %d:%s\n", \
264 #expression, __FILE__, __LINE__, __FUNCTION__); \
265 }
266#else
267#define ASSERT(expression)
268#endif
269
270/*
271 * struct mraid_pci_blk - structure holds DMA memory block info
272 * @param vaddr : virtual address to a memory block
273 * @param dma_addr : DMA handle to a memory block
274 *
275 * This structure is filled in for the caller. It is the responsibility of the
276 * caller to allocate this array big enough to store addresses for all
277 * requested elements
278 */
279struct mraid_pci_blk {
280 caddr_t vaddr;
281 dma_addr_t dma_addr;
282};
283
284#endif // _MEGA_COMMON_H_
285
286// vim: set ts=8 sw=8 tw=78:
diff --git a/drivers/scsi/megaraid/megaraid_ioctl.h b/drivers/scsi/megaraid/megaraid_ioctl.h
new file mode 100644
index 000000000000..bdaee144a1c3
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_ioctl.h
@@ -0,0 +1,296 @@
1/*
2 *
3 * Linux MegaRAID device driver
4 *
5 * Copyright (c) 2003-2004 LSI Logic Corporation.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * FILE : megaraid_ioctl.h
13 *
14 * Definitions to interface with user level applications
15 */
16
17#ifndef _MEGARAID_IOCTL_H_
18#define _MEGARAID_IOCTL_H_
19
20#include <linux/types.h>
21#include <asm/semaphore.h>
22
23#include "mbox_defs.h"
24
25/**
26 * con_log() - console log routine
27 * @param level : indicates the severity of the message.
28 * @param fmt : format string
29 *
30 * con_log displays the error messages on the console based on the current
31 * debug level. Also it attaches the appropriate kernel severity level with
32 * the message.
33 *
34 *
35 * console message debug levels
36 */
37#define CL_ANN 0 /* print unconditionally, announcements */
38#define CL_DLEVEL1 1 /* debug level 1, informative */
39#define CL_DLEVEL2 2 /* debug level 2, verbose */
40#define CL_DLEVEL3 3 /* debug level 3, very verbose */
41
42#define con_log(level, fmt) if (LSI_DBGLVL >= level) printk fmt;
43
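Because the macro expands to a bare "printk fmt", the second argument must be a fully parenthesized printk argument list. A short hedged usage sketch (function name and message text are illustrative):

/* announcement printed unconditionally; the verbose message only shows
 * up when debug_level is raised to CL_DLEVEL2 or higher */
static void
lld_log_example(void)
{
	con_log(CL_ANN, (KERN_NOTICE "megaraid: sample announcement\n"));
	con_log(CL_DLEVEL2, (KERN_INFO "megaraid: verbose diagnostic\n"));
}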
44/*
45 * Definitions & Declarations needed to use common management module
46 */
47
48#define MEGAIOC_MAGIC 'm'
49#define MEGAIOCCMD _IOWR(MEGAIOC_MAGIC, 0, mimd_t)
50
51#define MEGAIOC_QNADAP 'm' /* Query # of adapters */
52#define MEGAIOC_QDRVRVER 'e' /* Query driver version */
53#define MEGAIOC_QADAPINFO 'g' /* Query adapter information */
54
55#define USCSICMD 0x80
56#define UIOC_RD 0x00001
57#define UIOC_WR 0x00002
58
59#define MBOX_CMD 0x00000
60#define GET_DRIVER_VER 0x10000
61#define GET_N_ADAP 0x20000
62#define GET_ADAP_INFO 0x30000
63#define GET_CAP 0x40000
64#define GET_STATS 0x50000
65#define GET_IOCTL_VERSION 0x01
66
67#define EXT_IOCTL_SIGN_SZ 16
68#define EXT_IOCTL_SIGN "$$_EXTD_IOCTL_$$"
69
70#define MBOX_LEGACY 0x00 /* ioctl has legacy mbox*/
71#define MBOX_HPE 0x01 /* ioctl has hpe mbox */
72
73#define APPTYPE_MIMD 0x00 /* old existing apps */
74#define APPTYPE_UIOC 0x01 /* new apps using uioc */
75
76#define IOCTL_ISSUE 0x00000001 /* Issue ioctl */
77#define IOCTL_ABORT 0x00000002 /* Abort previous ioctl */
78
79#define DRVRTYPE_MBOX 0x00000001 /* regular mbox driver */
80#define DRVRTYPE_HPE 0x00000002 /* new hpe driver */
81
82#define MKADAP(adapno) (MEGAIOC_MAGIC << 8 | (adapno) )
83#define GETADAP(mkadap) ((mkadap) ^ MEGAIOC_MAGIC << 8)
84
85#define MAX_DMA_POOLS 5 /* 4k, 8k, 16k, 32k, 64k*/
86
87
88/**
89 * struct uioc_t - the common ioctl packet structure
90 *
91 * @signature : Must be "$$_EXTD_IOCTL_$$"
92 * @mb_type : Type of the mail box (MBOX_LEGACY or MBOX_HPE)
93 * @app_type : Type of the issuing application (existing or new)
94 * @opcode : Opcode of the command
95 * @adapno : Adapter number
96 * @cmdbuf : Pointer to buffer - can point to mbox or plain data buffer
97 * @xferlen : xferlen for DCMD and non mailbox commands
98 * @data_dir : Direction of the data transfer
99 * @status : Status from the driver
100 * @reserved : reserved bytes for future expansion
101 *
102 * @user_data : user data transfer address is saved in this
103 * @user_data_len: length of the data buffer sent by user app
104 * @user_pthru : user passthru address is saved in this (null if DCMD)
105 * @pthru32 : kernel address passthru (allocated per kioc)
106 * @pthru32_h : physical address of @pthru32
107 * @list : for kioc free pool list maintenance
108 * @done : call back routine for llds to call when kioc is completed
109 * @buf_vaddr : dma pool buffer attached to kioc for data transfer
110 * @buf_paddr : physical address of the dma pool buffer
111 * @pool_index : index of the dma pool that @buf_vaddr is taken from
112 * @free_buf : indicates if buffer needs to be freed after kioc completes
113 *
114 * Note : All LSI drivers understand only this packet. Any other
115 * : format sent by applications would be converted to this.
116 */
117typedef struct uioc {
118
119/* User Apps: */
120
121 uint8_t signature[EXT_IOCTL_SIGN_SZ];
122 uint16_t mb_type;
123 uint16_t app_type;
124 uint32_t opcode;
125 uint32_t adapno;
126 uint64_t cmdbuf;
127 uint32_t xferlen;
128 uint32_t data_dir;
129 int32_t status;
130 uint8_t reserved[128];
131
132/* Driver Data: */
133 void __user * user_data;
134 uint32_t user_data_len;
135 mraid_passthru_t __user *user_pthru;
136
137 mraid_passthru_t *pthru32;
138 dma_addr_t pthru32_h;
139
140 struct list_head list;
141 void (*done)(struct uioc*);
142
143 caddr_t buf_vaddr;
144 dma_addr_t buf_paddr;
145 int8_t pool_index;
146 uint8_t free_buf;
147
148 uint8_t timedout;
149
150} __attribute__ ((aligned(1024),packed)) uioc_t;
151
152
153/**
154 * struct mraid_hba_info - information about the controller
155 *
156 * @param pci_vendor_id : PCI vendor id
157 * @param pci_device_id : PCI device id
158 * @param subsystem_vendor_id : PCI subsystem vendor id
159 * @param subsystem_device_id : PCI subsystem device id
160 * @param baseport : base port of hba memory
161 * @param pci_bus : PCI bus
162 * @param pci_dev_fn : PCI device/function values
163 * @param irq : interrupt vector for the device
164 *
165 * Extended information of 256 bytes about the controller. Align on the single
166 * byte boundary so that 32-bit applications can be run on 64-bit platform
167 * drivers without re-compilation.
168 * NOTE: reduce the number of reserved bytes whenever new fields are added, so
169 * that total size of the structure remains 256 bytes.
170 */
171typedef struct mraid_hba_info {
172
173 uint16_t pci_vendor_id;
174 uint16_t pci_device_id;
175 uint16_t subsys_vendor_id;
176 uint16_t subsys_device_id;
177
178 uint64_t baseport;
179 uint8_t pci_bus;
180 uint8_t pci_dev_fn;
181 uint8_t pci_slot;
182 uint8_t irq;
183
184 uint32_t unique_id;
185 uint32_t host_no;
186
187 uint8_t num_ldrv;
188} __attribute__ ((aligned(256), packed)) mraid_hba_info_t;
189
190
191/**
192 * mcontroller : adapter info structure for old mimd_t apps
193 *
194 * @base : base address
195 * @irq : irq number
196 * @numldrv : number of logical drives
197 * @pcibus : pci bus
198 * @pcidev : pci device
199 * @pcifun : pci function
200 * @pciid : pci id
201 * @pcivendor : vendor id
202 * @pcislot : slot number
203 * @uid : unique id
204 */
205typedef struct mcontroller {
206
207 uint64_t base;
208 uint8_t irq;
209 uint8_t numldrv;
210 uint8_t pcibus;
211 uint16_t pcidev;
212 uint8_t pcifun;
213 uint16_t pciid;
214 uint16_t pcivendor;
215 uint8_t pcislot;
216 uint32_t uid;
217
218} __attribute__ ((packed)) mcontroller_t;
219
220
221/**
222 * mm_dmapool_t : Represents one dma pool with just one buffer
223 *
224 * @vaddr : Virtual address
225 * @paddr : DMA physical address
226 * @buf_size : In KB - 4 = 4k, 8 = 8k etc.
227 * @handle : Handle to the dma pool
228 * @lock : lock to synchronize access to the pool
229 * @in_use : If pool already in use, attach new block
230 */
231typedef struct mm_dmapool {
232 caddr_t vaddr;
233 dma_addr_t paddr;
234 uint32_t buf_size;
235 struct dma_pool *handle;
236 spinlock_t lock;
237 uint8_t in_use;
238} mm_dmapool_t;
239
240
241/**
242 * mraid_mmadp_t: Structure that drivers pass during (un)registration
243 *
244 * @unique_id : Any unique id (usually PCI bus+dev+fn)
245 * @drvr_type : megaraid or hpe (DRVRTYPE_MBOX or DRVRTYPE_HPE)
246 * @drv_data : Driver specific; not touched by the common module
247 * @timeout : timeout for issued kiocs
248 * @max_kioc : Maximum ioctl packets acceptable by the lld
249 * @pdev : pci dev; used for allocating dma'ble memory
250 * @issue_uioc : Driver supplied routine to issue uioc_t commands
251 * : issue_uioc(drvr_data, kioc, ISSUE/ABORT, uioc_done)
252 * @quiescent : flag to indicate if ioctl can be issued to this adp
253 * @list : attach with the global list of adapters
254 * @kioc_list : block of mem for @max_kioc number of kiocs
255 * @kioc_pool : pool of free kiocs
256 * @kioc_pool_lock : protection for free pool
257 * @kioc_semaphore : so as not to exceed @max_kioc parallel ioctls
258 * @mbox_list : block of mem for @max_kioc number of mboxes
259 * @pthru_dma_pool : DMA pool to allocate passthru packets
260 * @dma_pool_list : array of dma pools
261 */
262
263typedef struct mraid_mmadp {
264
265/* Filled by driver */
266
267 uint32_t unique_id;
268 uint32_t drvr_type;
269 unsigned long drvr_data;
270 uint16_t timeout;
271 uint8_t max_kioc;
272
273 struct pci_dev *pdev;
274
275 int(*issue_uioc)(unsigned long, uioc_t *, uint32_t);
276
277/* Maintained by common module */
278 uint32_t quiescent;
279
280 struct list_head list;
281 uioc_t *kioc_list;
282 struct list_head kioc_pool;
283 spinlock_t kioc_pool_lock;
284 struct semaphore kioc_semaphore;
285
286 mbox64_t *mbox_list;
287 struct dma_pool *pthru_dma_pool;
288 mm_dmapool_t dma_pool_list[MAX_DMA_POOLS];
289
290} mraid_mmadp_t;
291
292int mraid_mm_register_adp(mraid_mmadp_t *);
293int mraid_mm_unregister_adp(uint32_t);
294uint32_t mraid_mm_adapter_app_handle(uint32_t);
295
296#endif /* _MEGARAID_IOCTL_H_ */
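As a hedged illustration of the registration entry points declared above, the following hypothetical sketch shows how a mailbox LLD might fill mraid_mmadp_t and register with the common management module; the callback name and the timeout/max_kioc values are assumptions, not prescribed by this header:

/* lld_issue_uioc stands for the LLD's uioc handler with the
 * int (*)(unsigned long, uioc_t *, uint32_t) signature expected above. */
static int lld_issue_uioc(unsigned long, uioc_t *, uint32_t);

static int
lld_register_with_cmm(adapter_t *adapter)
{
	mraid_mmadp_t	adp;

	memset(&adp, 0, sizeof(adp));

	adp.unique_id	= adapter->unique_id;
	adp.drvr_type	= DRVRTYPE_MBOX;
	adp.drvr_data	= (unsigned long)adapter;
	adp.pdev	= adapter->pdev;
	adp.issue_uioc	= lld_issue_uioc;
	adp.timeout	= 300;		/* seconds, illustrative value */
	adp.max_kioc	= 32;		/* illustrative value */

	return mraid_mm_register_adp(&adp);
}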
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
new file mode 100644
index 000000000000..138fa4815833
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -0,0 +1,4276 @@
1/*
2 *
3 * Linux MegaRAID device driver
4 *
5 * Copyright (c) 2003-2004 LSI Logic Corporation.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * FILE : megaraid_mbox.c
13 * Version : v2.20.4.5 (Feb 03 2005)
14 *
15 * Authors:
16 * Atul Mukker <Atul.Mukker@lsil.com>
17 * Sreenivas Bagalkote <Sreenivas.Bagalkote@lsil.com>
18 * Manoj Jose <Manoj.Jose@lsil.com>
19 *
20 * List of supported controllers
21 *
22 * OEM Product Name VID DID SSVID SSID
23 * --- ------------ --- --- ---- ----
24 * Dell PERC3/QC 101E 1960 1028 0471
25 * Dell PERC3/DC 101E 1960 1028 0493
26 * Dell PERC3/SC 101E 1960 1028 0475
27 * Dell PERC3/Di 1028 1960 1028 0123
28 * Dell PERC4/SC 1000 1960 1028 0520
29 * Dell PERC4/DC 1000 1960 1028 0518
30 * Dell PERC4/QC 1000 0407 1028 0531
31 * Dell PERC4/Di 1028 000F 1028 014A
32 * Dell PERC 4e/Si 1028 0013 1028 016c
33 * Dell PERC 4e/Di 1028 0013 1028 016d
34 * Dell PERC 4e/Di 1028 0013 1028 016e
35 * Dell PERC 4e/Di 1028 0013 1028 016f
36 * Dell PERC 4e/Di 1028 0013 1028 0170
37 * Dell PERC 4e/DC 1000 0408 1028 0002
38 * Dell PERC 4e/SC 1000 0408 1028 0001
39 *
40 *
41 * LSI MegaRAID SCSI 320-0 1000 1960 1000 A520
42 * LSI MegaRAID SCSI 320-1 1000 1960 1000 0520
43 * LSI MegaRAID SCSI 320-2 1000 1960 1000 0518
44 * LSI MegaRAID SCSI 320-0X 1000 0407 1000 0530
45 * LSI MegaRAID SCSI 320-2X 1000 0407 1000 0532
46 * LSI MegaRAID SCSI 320-4X 1000 0407 1000 0531
47 * LSI MegaRAID SCSI 320-1E 1000 0408 1000 0001
48 * LSI MegaRAID SCSI 320-2E 1000 0408 1000 0002
49 * LSI MegaRAID SATA 150-4 1000 1960 1000 4523
50 * LSI MegaRAID SATA 150-6 1000 1960 1000 0523
51 * LSI MegaRAID SATA 300-4X 1000 0409 1000 3004
52 * LSI MegaRAID SATA 300-8X 1000 0409 1000 3008
53 *
54 * INTEL RAID Controller SRCU42X 1000 0407 8086 0532
55 * INTEL RAID Controller SRCS16 1000 1960 8086 0523
56 * INTEL RAID Controller SRCU42E 1000 0408 8086 0002
57 * INTEL RAID Controller SRCZCRX 1000 0407 8086 0530
58 * INTEL RAID Controller SRCS28X 1000 0409 8086 3008
59 * INTEL RAID Controller SROMBU42E 1000 0408 8086 3431
60 * INTEL RAID Controller SROMBU42E 1000 0408 8086 3499
61 * INTEL RAID Controller SRCU51L 1000 1960 8086 0520
62 *
63 * FSC MegaRAID PCI Express ROMB 1000 0408 1734 1065
64 *
65 * ACER MegaRAID ROMB-2E 1000 0408 1025 004D
66 *
67 * NEC MegaRAID PCI Express ROMB 1000 0408 1033 8287
68 *
69 * For history of changes, see Documentation/ChangeLog.megaraid
70 */
71
72#include "megaraid_mbox.h"
73
74static int megaraid_init(void);
75static void megaraid_exit(void);
76
77static int megaraid_probe_one(struct pci_dev*, const struct pci_device_id *);
78static void megaraid_detach_one(struct pci_dev *);
79static void megaraid_mbox_shutdown(struct device *);
80
81static int megaraid_io_attach(adapter_t *);
82static void megaraid_io_detach(adapter_t *);
83
84static int megaraid_init_mbox(adapter_t *);
85static void megaraid_fini_mbox(adapter_t *);
86
87static int megaraid_alloc_cmd_packets(adapter_t *);
88static void megaraid_free_cmd_packets(adapter_t *);
89
90static int megaraid_mbox_setup_dma_pools(adapter_t *);
91static void megaraid_mbox_teardown_dma_pools(adapter_t *);
92
93static int megaraid_sysfs_alloc_resources(adapter_t *);
94static void megaraid_sysfs_free_resources(adapter_t *);
95
96static int megaraid_abort_handler(struct scsi_cmnd *);
97static int megaraid_reset_handler(struct scsi_cmnd *);
98
99static int mbox_post_sync_cmd(adapter_t *, uint8_t []);
100static int mbox_post_sync_cmd_fast(adapter_t *, uint8_t []);
101static int megaraid_busywait_mbox(mraid_device_t *);
102static int megaraid_mbox_product_info(adapter_t *);
103static int megaraid_mbox_extended_cdb(adapter_t *);
104static int megaraid_mbox_support_ha(adapter_t *, uint16_t *);
105static int megaraid_mbox_support_random_del(adapter_t *);
106static int megaraid_mbox_get_max_sg(adapter_t *);
107static void megaraid_mbox_enum_raid_scsi(adapter_t *);
108static void megaraid_mbox_flush_cache(adapter_t *);
109
110static void megaraid_mbox_display_scb(adapter_t *, scb_t *);
111static void megaraid_mbox_setup_device_map(adapter_t *);
112
113static int megaraid_queue_command(struct scsi_cmnd *,
114 void (*)(struct scsi_cmnd *));
115static scb_t *megaraid_mbox_build_cmd(adapter_t *, struct scsi_cmnd *, int *);
116static void megaraid_mbox_runpendq(adapter_t *, scb_t *);
117static void megaraid_mbox_prepare_pthru(adapter_t *, scb_t *,
118 struct scsi_cmnd *);
119static void megaraid_mbox_prepare_epthru(adapter_t *, scb_t *,
120 struct scsi_cmnd *);
121
122static irqreturn_t megaraid_isr(int, void *, struct pt_regs *);
123
124static void megaraid_mbox_dpc(unsigned long);
125
126static ssize_t megaraid_sysfs_show_app_hndl(struct class_device *, char *);
127static ssize_t megaraid_sysfs_show_ldnum(struct device *, char *);
128
129static int megaraid_cmm_register(adapter_t *);
130static int megaraid_cmm_unregister(adapter_t *);
131static int megaraid_mbox_mm_handler(unsigned long, uioc_t *, uint32_t);
132static int megaraid_mbox_mm_command(adapter_t *, uioc_t *);
133static void megaraid_mbox_mm_done(adapter_t *, scb_t *);
134static int gather_hbainfo(adapter_t *, mraid_hba_info_t *);
135static int wait_till_fw_empty(adapter_t *);
136
137
138
139MODULE_AUTHOR("LSI Logic Corporation");
140MODULE_DESCRIPTION("LSI Logic MegaRAID Mailbox Driver");
141MODULE_LICENSE("GPL");
142MODULE_VERSION(MEGARAID_VERSION);
143
144/*
145 * ### module parameters for driver ###
146 */
147
148/**
149 * Set to enable the driver to expose unconfigured disks to the kernel
150 */
151static int megaraid_expose_unconf_disks = 0;
152module_param_named(unconf_disks, megaraid_expose_unconf_disks, int, 0);
153MODULE_PARM_DESC(unconf_disks,
154 "Set to expose unconfigured disks to kernel (default=0)");
155
156/**
157 * driver wait time if the adapter's mailbox is busy
158 */
159static unsigned int max_mbox_busy_wait = MBOX_BUSY_WAIT;
160module_param_named(busy_wait, max_mbox_busy_wait, int, 0);
161MODULE_PARM_DESC(busy_wait,
162 "Max wait for mailbox in microseconds if busy (default=10)");
163
164/**
165 * number of sectors per IO command
166 */
167static unsigned int megaraid_max_sectors = MBOX_MAX_SECTORS;
168module_param_named(max_sectors, megaraid_max_sectors, int, 0);
169MODULE_PARM_DESC(max_sectors,
170 "Maximum number of sectors per IO command (default=128)");
171
172/**
173 * number of commands per logical unit
174 */
175static unsigned int megaraid_cmd_per_lun = MBOX_DEF_CMD_PER_LUN;
176module_param_named(cmd_per_lun, megaraid_cmd_per_lun, int, 0);
177MODULE_PARM_DESC(cmd_per_lun,
178 "Maximum number of commands per logical unit (default=64)");
179
180
181/**
182 * Fast driver load option, skip scanning for physical devices during load.
183 * This would result in non-disk devices being skipped during driver load
184 * time. They can be added later, though, using /proc/scsi/scsi
185 */
186static unsigned int megaraid_fast_load = 0;
187module_param_named(fast_load, megaraid_fast_load, int, 0);
188MODULE_PARM_DESC(fast_load,
189 "Faster loading of the driver, skips physical devices! (default=0)");
190
191
192/**
193 * mraid_debug_level - threshold for amount of information to be displayed by
194 * the driver. This level can be changed through module parameters, ioctl or
195 * sysfs/proc interface. By default, print the announcement messages only.
196 */
197int mraid_debug_level = CL_ANN;
198module_param_named(debug_level, mraid_debug_level, int, 0);
199MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)");
200
201/*
202 * ### global data ###
203 */
204static uint8_t megaraid_mbox_version[8] =
205 { 0x02, 0x20, 0x04, 0x05, 2, 3, 20, 5 };
206
207
208/*
209 * PCI table for all supported controllers.
210 */
211static struct pci_device_id pci_id_table_g[] = {
212 {
213 PCI_VENDOR_ID_DELL,
214 PCI_DEVICE_ID_PERC4_DI_DISCOVERY,
215 PCI_VENDOR_ID_DELL,
216 PCI_SUBSYS_ID_PERC4_DI_DISCOVERY,
217 },
218 {
219 PCI_VENDOR_ID_LSI_LOGIC,
220 PCI_DEVICE_ID_PERC4_SC,
221 PCI_VENDOR_ID_DELL,
222 PCI_SUBSYS_ID_PERC4_SC,
223 },
224 {
225 PCI_VENDOR_ID_LSI_LOGIC,
226 PCI_DEVICE_ID_PERC4_DC,
227 PCI_VENDOR_ID_DELL,
228 PCI_SUBSYS_ID_PERC4_DC,
229 },
230 {
231 PCI_VENDOR_ID_LSI_LOGIC,
232 PCI_DEVICE_ID_PERC4_QC,
233 PCI_VENDOR_ID_DELL,
234 PCI_SUBSYS_ID_PERC4_QC,
235 },
236 {
237 PCI_VENDOR_ID_DELL,
238 PCI_DEVICE_ID_PERC4_DI_EVERGLADES,
239 PCI_VENDOR_ID_DELL,
240 PCI_SUBSYS_ID_PERC4_DI_EVERGLADES,
241 },
242 {
243 PCI_VENDOR_ID_DELL,
244 PCI_DEVICE_ID_PERC4E_SI_BIGBEND,
245 PCI_VENDOR_ID_DELL,
246 PCI_SUBSYS_ID_PERC4E_SI_BIGBEND,
247 },
248 {
249 PCI_VENDOR_ID_DELL,
250 PCI_DEVICE_ID_PERC4E_DI_KOBUK,
251 PCI_VENDOR_ID_DELL,
252 PCI_SUBSYS_ID_PERC4E_DI_KOBUK,
253 },
254 {
255 PCI_VENDOR_ID_DELL,
256 PCI_DEVICE_ID_PERC4E_DI_CORVETTE,
257 PCI_VENDOR_ID_DELL,
258 PCI_SUBSYS_ID_PERC4E_DI_CORVETTE,
259 },
260 {
261 PCI_VENDOR_ID_DELL,
262 PCI_DEVICE_ID_PERC4E_DI_EXPEDITION,
263 PCI_VENDOR_ID_DELL,
264 PCI_SUBSYS_ID_PERC4E_DI_EXPEDITION,
265 },
266 {
267 PCI_VENDOR_ID_DELL,
268 PCI_DEVICE_ID_PERC4E_DI_GUADALUPE,
269 PCI_VENDOR_ID_DELL,
270 PCI_SUBSYS_ID_PERC4E_DI_GUADALUPE,
271 },
272 {
273 PCI_VENDOR_ID_LSI_LOGIC,
274 PCI_DEVICE_ID_PERC4E_DC_320_2E,
275 PCI_VENDOR_ID_DELL,
276 PCI_SUBSYS_ID_PERC4E_DC_320_2E,
277 },
278 {
279 PCI_VENDOR_ID_LSI_LOGIC,
280 PCI_DEVICE_ID_PERC4E_SC_320_1E,
281 PCI_VENDOR_ID_DELL,
282 PCI_SUBSYS_ID_PERC4E_SC_320_1E,
283 },
284 {
285 PCI_VENDOR_ID_AMI,
286 PCI_DEVICE_ID_AMI_MEGARAID3,
287 PCI_VENDOR_ID_DELL,
288 PCI_SUBSYS_ID_PERC3_QC,
289 },
290 {
291 PCI_VENDOR_ID_AMI,
292 PCI_DEVICE_ID_AMI_MEGARAID3,
293 PCI_VENDOR_ID_DELL,
294 PCI_SUBSYS_ID_PERC3_DC,
295 },
296 {
297 PCI_VENDOR_ID_AMI,
298 PCI_DEVICE_ID_AMI_MEGARAID3,
299 PCI_VENDOR_ID_DELL,
300 PCI_SUBSYS_ID_PERC3_SC,
301 },
302 {
303 PCI_VENDOR_ID_AMI,
304 PCI_DEVICE_ID_AMI_MEGARAID3,
305 PCI_VENDOR_ID_AMI,
306 PCI_SUBSYS_ID_PERC3_SC,
307 },
308 {
309 PCI_VENDOR_ID_AMI,
310 PCI_DEVICE_ID_AMI_MEGARAID3,
311 PCI_VENDOR_ID_AMI,
312 PCI_SUBSYS_ID_PERC3_DC,
313 },
314 {
315 PCI_VENDOR_ID_LSI_LOGIC,
316 PCI_DEVICE_ID_MEGARAID_SCSI_320_0,
317 PCI_VENDOR_ID_LSI_LOGIC,
318 PCI_SUBSYS_ID_MEGARAID_SCSI_320_0,
319 },
320 {
321 PCI_VENDOR_ID_LSI_LOGIC,
322 PCI_DEVICE_ID_MEGARAID_SCSI_320_1,
323 PCI_VENDOR_ID_LSI_LOGIC,
324 PCI_SUBSYS_ID_MEGARAID_SCSI_320_1,
325 },
326 {
327 PCI_VENDOR_ID_LSI_LOGIC,
328 PCI_DEVICE_ID_MEGARAID_SCSI_320_2,
329 PCI_VENDOR_ID_LSI_LOGIC,
330 PCI_SUBSYS_ID_MEGARAID_SCSI_320_2,
331 },
332 {
333 PCI_VENDOR_ID_LSI_LOGIC,
334 PCI_DEVICE_ID_MEGARAID_SCSI_320_0x,
335 PCI_VENDOR_ID_LSI_LOGIC,
336 PCI_SUBSYS_ID_MEGARAID_SCSI_320_0x,
337 },
338 {
339 PCI_VENDOR_ID_LSI_LOGIC,
340 PCI_DEVICE_ID_MEGARAID_SCSI_320_2x,
341 PCI_VENDOR_ID_LSI_LOGIC,
342 PCI_SUBSYS_ID_MEGARAID_SCSI_320_2x,
343 },
344 {
345 PCI_VENDOR_ID_LSI_LOGIC,
346 PCI_DEVICE_ID_MEGARAID_SCSI_320_4x,
347 PCI_VENDOR_ID_LSI_LOGIC,
348 PCI_SUBSYS_ID_MEGARAID_SCSI_320_4x,
349 },
350 {
351 PCI_VENDOR_ID_LSI_LOGIC,
352 PCI_DEVICE_ID_MEGARAID_SCSI_320_1E,
353 PCI_VENDOR_ID_LSI_LOGIC,
354 PCI_SUBSYS_ID_MEGARAID_SCSI_320_1E,
355 },
356 {
357 PCI_VENDOR_ID_LSI_LOGIC,
358 PCI_DEVICE_ID_MEGARAID_SCSI_320_2E,
359 PCI_VENDOR_ID_LSI_LOGIC,
360 PCI_SUBSYS_ID_MEGARAID_SCSI_320_2E,
361 },
362 {
363 PCI_VENDOR_ID_LSI_LOGIC,
364 PCI_DEVICE_ID_MEGARAID_I4_133_RAID,
365 PCI_VENDOR_ID_LSI_LOGIC,
366 PCI_SUBSYS_ID_MEGARAID_I4_133_RAID,
367 },
368 {
369 PCI_VENDOR_ID_LSI_LOGIC,
370 PCI_DEVICE_ID_MEGARAID_SATA_150_4,
371 PCI_VENDOR_ID_LSI_LOGIC,
372 PCI_SUBSYS_ID_MEGARAID_SATA_150_4,
373 },
374 {
375 PCI_VENDOR_ID_LSI_LOGIC,
376 PCI_DEVICE_ID_MEGARAID_SATA_150_6,
377 PCI_VENDOR_ID_LSI_LOGIC,
378 PCI_SUBSYS_ID_MEGARAID_SATA_150_6,
379 },
380 {
381 PCI_VENDOR_ID_LSI_LOGIC,
382 PCI_DEVICE_ID_MEGARAID_SATA_300_4x,
383 PCI_VENDOR_ID_LSI_LOGIC,
384 PCI_SUBSYS_ID_MEGARAID_SATA_300_4x,
385 },
386 {
387 PCI_VENDOR_ID_LSI_LOGIC,
388 PCI_DEVICE_ID_MEGARAID_SATA_300_8x,
389 PCI_VENDOR_ID_LSI_LOGIC,
390 PCI_SUBSYS_ID_MEGARAID_SATA_300_8x,
391 },
392 {
393 PCI_VENDOR_ID_LSI_LOGIC,
394 PCI_DEVICE_ID_INTEL_RAID_SRCU42X,
395 PCI_VENDOR_ID_INTEL,
396 PCI_SUBSYS_ID_INTEL_RAID_SRCU42X,
397 },
398 {
399 PCI_VENDOR_ID_LSI_LOGIC,
400 PCI_DEVICE_ID_INTEL_RAID_SRCS16,
401 PCI_VENDOR_ID_INTEL,
402 PCI_SUBSYS_ID_INTEL_RAID_SRCS16,
403 },
404 {
405 PCI_VENDOR_ID_LSI_LOGIC,
406 PCI_DEVICE_ID_INTEL_RAID_SRCU42E,
407 PCI_VENDOR_ID_INTEL,
408 PCI_SUBSYS_ID_INTEL_RAID_SRCU42E,
409 },
410 {
411 PCI_VENDOR_ID_LSI_LOGIC,
412 PCI_DEVICE_ID_INTEL_RAID_SRCZCRX,
413 PCI_VENDOR_ID_INTEL,
414 PCI_SUBSYS_ID_INTEL_RAID_SRCZCRX,
415 },
416 {
417 PCI_VENDOR_ID_LSI_LOGIC,
418 PCI_DEVICE_ID_INTEL_RAID_SRCS28X,
419 PCI_VENDOR_ID_INTEL,
420 PCI_SUBSYS_ID_INTEL_RAID_SRCS28X,
421 },
422 {
423 PCI_VENDOR_ID_LSI_LOGIC,
424 PCI_DEVICE_ID_INTEL_RAID_SROMBU42E_ALIEF,
425 PCI_VENDOR_ID_INTEL,
426 PCI_SUBSYS_ID_INTEL_RAID_SROMBU42E_ALIEF,
427 },
428 {
429 PCI_VENDOR_ID_LSI_LOGIC,
430 PCI_DEVICE_ID_INTEL_RAID_SROMBU42E_HARWICH,
431 PCI_VENDOR_ID_INTEL,
432 PCI_SUBSYS_ID_INTEL_RAID_SROMBU42E_HARWICH,
433 },
434 {
435 PCI_VENDOR_ID_LSI_LOGIC,
436 PCI_DEVICE_ID_INTEL_RAID_SRCU41L_LAKE_SHETEK,
437 PCI_VENDOR_ID_INTEL,
438 PCI_SUBSYS_ID_INTEL_RAID_SRCU41L_LAKE_SHETEK,
439 },
440 {
441 PCI_VENDOR_ID_LSI_LOGIC,
442 PCI_DEVICE_ID_FSC_MEGARAID_PCI_EXPRESS_ROMB,
443 PCI_SUBSYS_ID_FSC,
444 PCI_SUBSYS_ID_FSC_MEGARAID_PCI_EXPRESS_ROMB,
445 },
446 {
447 PCI_VENDOR_ID_LSI_LOGIC,
448 PCI_DEVICE_ID_MEGARAID_ACER_ROMB_2E,
449 PCI_VENDOR_ID_AI,
450 PCI_SUBSYS_ID_MEGARAID_ACER_ROMB_2E,
451 },
452 {
453 PCI_VENDOR_ID_LSI_LOGIC,
454 PCI_DEVICE_ID_MEGARAID_NEC_ROMB_2E,
455 PCI_VENDOR_ID_NEC,
456 PCI_SUBSYS_ID_MEGARAID_NEC_ROMB_2E,
457 },
458 {0} /* Terminating entry */
459};
460MODULE_DEVICE_TABLE(pci, pci_id_table_g);
461
462
463static struct pci_driver megaraid_pci_driver_g = {
464 .name = "megaraid",
465 .id_table = pci_id_table_g,
466 .probe = megaraid_probe_one,
467 .remove = __devexit_p(megaraid_detach_one),
468 .driver = {
469 .shutdown = megaraid_mbox_shutdown,
470 }
471};
472
473
474
475// definitions for the device attributes for exporting logical drive number
476// for a scsi address (Host, Channel, Id, Lun)
477
478CLASS_DEVICE_ATTR(megaraid_mbox_app_hndl, S_IRUSR, megaraid_sysfs_show_app_hndl,
479 NULL);
480
481// Host template initializer for megaraid mbox sysfs device attributes
482static struct class_device_attribute *megaraid_shost_attrs[] = {
483 &class_device_attr_megaraid_mbox_app_hndl,
484 NULL,
485};
486
487
488DEVICE_ATTR(megaraid_mbox_ld, S_IRUSR, megaraid_sysfs_show_ldnum, NULL);
489
490// Host template initializer for megaraid mbox sysfs device attributes
491static struct device_attribute *megaraid_sdev_attrs[] = {
492 &dev_attr_megaraid_mbox_ld,
493 NULL,
494};
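/*
 * Where these land in sysfs (approximate; exact layout depends on the SCSI
 * sysfs code): the shost attribute appears per host, e.g.
 * /sys/class/scsi_host/host<N>/megaraid_mbox_app_hndl, and the sdev attribute
 * appears in each SCSI device's directory as megaraid_mbox_ld.
 */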
495
496
497/*
498 * Scsi host template for megaraid unified driver
499 */
500static struct scsi_host_template megaraid_template_g = {
501 .module = THIS_MODULE,
502 .name = "LSI Logic MegaRAID driver",
503 .proc_name = "megaraid",
504 .queuecommand = megaraid_queue_command,
505 .eh_abort_handler = megaraid_abort_handler,
506 .eh_device_reset_handler = megaraid_reset_handler,
507 .eh_bus_reset_handler = megaraid_reset_handler,
508 .eh_host_reset_handler = megaraid_reset_handler,
509 .use_clustering = ENABLE_CLUSTERING,
510 .sdev_attrs = megaraid_sdev_attrs,
511 .shost_attrs = megaraid_shost_attrs,
512};
513
514
515/**
516 * megaraid_init - module load hook
517 *
518 * We register ourselves as a hotplug-enabled module and let the PCI subsystem
519 * discover our adapters
520 **/
521static int __init
522megaraid_init(void)
523{
524 int rval;
525
526 // Announce the driver version
527 con_log(CL_ANN, (KERN_INFO "megaraid: %s %s\n", MEGARAID_VERSION,
528 MEGARAID_EXT_VERSION));
529
530 // check validity of module parameters
531 if (megaraid_cmd_per_lun > MBOX_MAX_SCSI_CMDS) {
532
533 con_log(CL_ANN, (KERN_WARNING
534 "megaraid mailbox: max commands per lun reset to %d\n",
535 MBOX_MAX_SCSI_CMDS));
536
537 megaraid_cmd_per_lun = MBOX_MAX_SCSI_CMDS;
538 }
539
540
541 // register as a PCI hot-plug driver module
542 if ((rval = pci_module_init(&megaraid_pci_driver_g))) {
543 con_log(CL_ANN, (KERN_WARNING
544 "megaraid: could not register hotplug support.\n"));
545 }
546
547 return rval;
548}
549
550
551/**
552 * megaraid_exit - driver unload entry point
553 *
554 * We simply unwrap the megaraid_init routine here
555 */
556static void __exit
557megaraid_exit(void)
558{
559 con_log(CL_DLEVEL1, (KERN_NOTICE "megaraid: unloading framework\n"));
560
561 // unregister as PCI hotplug driver
562 pci_unregister_driver(&megaraid_pci_driver_g);
563
564 return;
565}
566
567
568/**
569 * megaraid_probe_one - PCI hotplug entry point
570 * @param pdev : handle to this controller's PCI configuration space
571 * @param id : pci device id of the class of controllers
572 *
573 * This routine should be called whenever a new adapter is detected by the
574 * PCI hotplug subsystem.
575 **/
576static int __devinit
577megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
578{
579 adapter_t *adapter;
580
581
582 // detected a new controller
583 con_log(CL_ANN, (KERN_INFO
584 "megaraid: probe new device %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
585 pdev->vendor, pdev->device, pdev->subsystem_vendor,
586 pdev->subsystem_device));
587
588 con_log(CL_ANN, ("bus %d:slot %d:func %d\n", pdev->bus->number,
589 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)));
590
591 if (pci_enable_device(pdev)) {
592 con_log(CL_ANN, (KERN_WARNING
593 "megaraid: pci_enable_device failed\n"));
594
595 return -ENODEV;
596 }
597
598 // Enable bus-mastering on this controller
599 pci_set_master(pdev);
600
601 // Allocate the per driver initialization structure
602 adapter = kmalloc(sizeof(adapter_t), GFP_KERNEL);
603
604 if (adapter == NULL) {
605 con_log(CL_ANN, (KERN_WARNING
606 "megaraid: out of memory, %s %d.\n", __FUNCTION__, __LINE__));
607
608 goto out_probe_one;
609 }
610 memset(adapter, 0, sizeof(adapter_t));
611
612
613 // set up PCI related soft state and other pre-known parameters
614 adapter->unique_id = pdev->bus->number << 8 | pdev->devfn;
615 adapter->irq = pdev->irq;
616 adapter->pdev = pdev;
617
618 atomic_set(&adapter->being_detached, 0);
619
620 // Setup the default DMA mask. This would be changed later on
621 // depending on hardware capabilities
622 if (pci_set_dma_mask(adapter->pdev, 0xFFFFFFFF) != 0) {
623
624 con_log(CL_ANN, (KERN_WARNING
625 "megaraid: pci_set_dma_mask failed:%d\n", __LINE__));
626
627 goto out_free_adapter;
628 }
629
630
631 // Initialize the synchronization lock for kernel and LLD
632 spin_lock_init(&adapter->lock);
633 adapter->host_lock = &adapter->lock;
634
635
636 // Initialize the command queues: the list of free SCBs and the list
637 // of pending SCBs.
638 INIT_LIST_HEAD(&adapter->kscb_pool);
639 spin_lock_init(SCSI_FREE_LIST_LOCK(adapter));
640
641 INIT_LIST_HEAD(&adapter->pend_list);
642 spin_lock_init(PENDING_LIST_LOCK(adapter));
643
644 INIT_LIST_HEAD(&adapter->completed_list);
645 spin_lock_init(COMPLETED_LIST_LOCK(adapter));
646
647
648 // Start the mailbox based controller
649 if (megaraid_init_mbox(adapter) != 0) {
650 con_log(CL_ANN, (KERN_WARNING
651			"megaraid: mailbox adapter did not initialize\n"));
652
653 goto out_free_adapter;
654 }
655
656 // Register with LSI Common Management Module
657 if (megaraid_cmm_register(adapter) != 0) {
658
659 con_log(CL_ANN, (KERN_WARNING
660 "megaraid: could not register with management module\n"));
661
662 goto out_fini_mbox;
663 }
664
665 // setup adapter handle in PCI soft state
666 pci_set_drvdata(pdev, adapter);
667
668 // attach with scsi mid-layer
669 if (megaraid_io_attach(adapter) != 0) {
670
671 con_log(CL_ANN, (KERN_WARNING "megaraid: io attach failed\n"));
672
673 goto out_cmm_unreg;
674 }
675
676 return 0;
677
678out_cmm_unreg:
679 pci_set_drvdata(pdev, NULL);
680 megaraid_cmm_unregister(adapter);
681out_fini_mbox:
682 megaraid_fini_mbox(adapter);
683out_free_adapter:
684 kfree(adapter);
685out_probe_one:
686 pci_disable_device(pdev);
687
688 return -ENODEV;
689}
690
691
692/**
693 * megaraid_detach_one - release the framework resources and call LLD release
694 * routine
695 * @param pdev : handle for our PCI configuration space
696 *
697 * This routine is called during driver unload. We free all the allocated
698 * resources and call the corresponding LLD so that it can also release all
699 * its resources.
700 *
701 * This routine is also called from the PCI hotplug system
702 **/
703static void
704megaraid_detach_one(struct pci_dev *pdev)
705{
706 adapter_t *adapter;
707 struct Scsi_Host *host;
708
709
710 // Start a rollback on this adapter
711 adapter = pci_get_drvdata(pdev);
712
713 if (!adapter) {
714 con_log(CL_ANN, (KERN_CRIT
715 "megaraid: Invalid detach on %#4.04x:%#4.04x:%#4.04x:%#4.04x\n",
716 pdev->vendor, pdev->device, pdev->subsystem_vendor,
717 pdev->subsystem_device));
718
719 return;
720 }
721 else {
722 con_log(CL_ANN, (KERN_NOTICE
723 "megaraid: detaching device %#4.04x:%#4.04x:%#4.04x:%#4.04x\n",
724 pdev->vendor, pdev->device, pdev->subsystem_vendor,
725 pdev->subsystem_device));
726 }
727
728
729 host = adapter->host;
730
731 // do not allow any more requests from the management module for this
732 // adapter.
733 // FIXME: How do we account for the request which might still be
734 // pending with us?
735 atomic_set(&adapter->being_detached, 1);
736
737 // detach from the IO sub-system
738 megaraid_io_detach(adapter);
739
740 // reset the device state in the PCI structure. We check this
741 // condition when we enter here. If the device state is NULL,
742 // that would mean the device has already been removed
743 pci_set_drvdata(pdev, NULL);
744
745 // Unregister from common management module
746 //
747	// FIXME: this must return success or failure depending on whether
748	// there is still a command pending with the LLD.
749 megaraid_cmm_unregister(adapter);
750
751 // finalize the mailbox based controller and release all resources
752 megaraid_fini_mbox(adapter);
753
754 kfree(adapter);
755
756 scsi_host_put(host);
757
758 pci_disable_device(pdev);
759
760 return;
761}
762
763
764/**
765 * megaraid_mbox_shutdown - PCI shutdown for megaraid HBA
766 * @param device : generic driver model device
767 *
768 * Shutdown notification; flush the adapter's cache
769 */
770static void
771megaraid_mbox_shutdown(struct device *device)
772{
773 adapter_t *adapter = pci_get_drvdata(to_pci_dev(device));
774 static int counter;
775
776 if (!adapter) {
777 con_log(CL_ANN, (KERN_WARNING
778 "megaraid: null device in shutdown\n"));
779 return;
780 }
781
782 // flush caches now
783 con_log(CL_ANN, (KERN_INFO "megaraid: flushing adapter %d...",
784 counter++));
785
786 megaraid_mbox_flush_cache(adapter);
787
788 con_log(CL_ANN, ("done\n"));
789}
790
791
792/**
793 * megaraid_io_attach - attach a device with the IO subsystem
794 * @param adapter : controller's soft state
795 *
796 * Attach this device with the IO subsystem
797 **/
798static int
799megaraid_io_attach(adapter_t *adapter)
800{
801 struct Scsi_Host *host;
802
803 // Initialize SCSI Host structure
804 host = scsi_host_alloc(&megaraid_template_g, 8);
805 if (!host) {
806 con_log(CL_ANN, (KERN_WARNING
807 "megaraid mbox: scsi_register failed\n"));
808
809 return -1;
810 }
811
812 SCSIHOST2ADAP(host) = (caddr_t)adapter;
813 adapter->host = host;
814
815 // export the parameters required by the mid-layer
816 scsi_assign_lock(host, adapter->host_lock);
817 scsi_set_device(host, &adapter->pdev->dev);
818
819 host->irq = adapter->irq;
820 host->unique_id = adapter->unique_id;
821 host->can_queue = adapter->max_cmds;
822 host->this_id = adapter->init_id;
823 host->sg_tablesize = adapter->sglen;
824 host->max_sectors = adapter->max_sectors;
825 host->cmd_per_lun = adapter->cmd_per_lun;
826 host->max_channel = adapter->max_channel;
827 host->max_id = adapter->max_target;
828 host->max_lun = adapter->max_lun;
829
830
831 // notify mid-layer about the new controller
832 if (scsi_add_host(host, &adapter->pdev->dev)) {
833
834 con_log(CL_ANN, (KERN_WARNING
835 "megaraid mbox: scsi_add_host failed\n"));
836
837 scsi_host_put(host);
838
839 return -1;
840 }
841
842 scsi_scan_host(host);
843
844 return 0;
845}
846
847
848/**
849 * megaraid_io_detach - detach a device from the IO subsystem
850 * @param adapter : controller's soft state
851 *
852 * Detach this device from the IO subsystem
853 **/
854static void
855megaraid_io_detach(adapter_t *adapter)
856{
857 struct Scsi_Host *host;
858
859 con_log(CL_DLEVEL1, (KERN_INFO "megaraid: io detach\n"));
860
861 host = adapter->host;
862
863 scsi_remove_host(host);
864
865 return;
866}
867
868
869/*
870 * START: Mailbox Low Level Driver
871 *
872 * This is section specific to the single mailbox based controllers
873 */
874
875/**
876 * megaraid_init_mbox - initialize controller
877 * @param adapter - our soft state
878 *
879 * . Allocate 16-byte aligned mailbox memory for firmware handshake
880 * . Allocate controller's memory resources
881 * . Find out all initialization data
882 * . Allocate memory required for all the commands
883 * . Use internal library of FW routines, build up complete soft state
884 */
885static int __init
886megaraid_init_mbox(adapter_t *adapter)
887{
888 struct pci_dev *pdev;
889 mraid_device_t *raid_dev;
890 int i;
891
892
893 adapter->ito = MBOX_TIMEOUT;
894 pdev = adapter->pdev;
895
896 /*
897 * Allocate and initialize the init data structure for mailbox
898 * controllers
899 */
900 raid_dev = kmalloc(sizeof(mraid_device_t), GFP_KERNEL);
901 if (raid_dev == NULL) return -1;
902
903 memset(raid_dev, 0, sizeof(mraid_device_t));
904
905 /*
906 * Attach the adapter soft state to raid device soft state
907 */
908 adapter->raid_device = (caddr_t)raid_dev;
909 raid_dev->fast_load = megaraid_fast_load;
910
911
912 // our baseport
913 raid_dev->baseport = pci_resource_start(pdev, 0);
914
915 if (pci_request_regions(pdev, "MegaRAID: LSI Logic Corporation") != 0) {
916
917 con_log(CL_ANN, (KERN_WARNING
918 "megaraid: mem region busy\n"));
919
920 goto out_free_raid_dev;
921 }
922
923 raid_dev->baseaddr = ioremap_nocache(raid_dev->baseport, 128);
924
925 if (!raid_dev->baseaddr) {
926
927 con_log(CL_ANN, (KERN_WARNING
928 "megaraid: could not map hba memory\n") );
929
930 goto out_release_regions;
931 }
932
933 //
934 // Setup the rest of the soft state using the library of FW routines
935 //
936
937 // request IRQ and register the interrupt service routine
938 if (request_irq(adapter->irq, megaraid_isr, SA_SHIRQ, "megaraid",
939 adapter)) {
940
941 con_log(CL_ANN, (KERN_WARNING
942 "megaraid: Couldn't register IRQ %d!\n", adapter->irq));
943
944 goto out_iounmap;
945 }
946
947
948 // initialize the mutual exclusion lock for the mailbox
949 spin_lock_init(&raid_dev->mailbox_lock);
950
951 // allocate memory required for commands
952 if (megaraid_alloc_cmd_packets(adapter) != 0) {
953 goto out_free_irq;
954 }
955
956 // Product info
957 if (megaraid_mbox_product_info(adapter) != 0) {
958 goto out_alloc_cmds;
959 }
960
961 // Do we support extended CDBs
962 adapter->max_cdb_sz = 10;
963 if (megaraid_mbox_extended_cdb(adapter) == 0) {
964 adapter->max_cdb_sz = 16;
965 }
966
967 /*
968	 * Do we support a cluster environment and, if so, what is the
969	 * initiator id?
970 * NOTE: In a non-cluster aware firmware environment, the LLD should
971 * return 7 as initiator id.
972 */
973 adapter->ha = 0;
974 adapter->init_id = -1;
975 if (megaraid_mbox_support_ha(adapter, &adapter->init_id) == 0) {
976 adapter->ha = 1;
977 }
978
979 /*
980 * Prepare the device ids array to have the mapping between the kernel
981 * device address and megaraid device address.
982 * We export the physical devices on their actual addresses. The
983 * logical drives are exported on a virtual SCSI channel
984 */
985 megaraid_mbox_setup_device_map(adapter);
986
987 // If the firmware supports random deletion, update the device id map
988 if (megaraid_mbox_support_random_del(adapter)) {
989
990		// Change the logical drive numbers in the device_ids array. One
991		// slot in device_ids is reserved for the target id; that's why
992		// the "<=" below
993 for (i = 0; i <= MAX_LOGICAL_DRIVES_40LD; i++) {
994 adapter->device_ids[adapter->max_channel][i] += 0x80;
995 }
996 adapter->device_ids[adapter->max_channel][adapter->init_id] =
997 0xFF;
998
999 raid_dev->random_del_supported = 1;
1000 }
1001
1002 /*
1003 * find out the maximum number of scatter-gather elements supported by
1004 * this firmware
1005 */
1006 adapter->sglen = megaraid_mbox_get_max_sg(adapter);
1007
1008 // enumerate RAID and SCSI channels so that all devices on SCSI
1009 // channels can later be exported, including disk devices
1010 megaraid_mbox_enum_raid_scsi(adapter);
1011
1012 /*
1013 * Other parameters required by upper layer
1014 *
1015 * maximum number of sectors per IO command
1016 */
1017 adapter->max_sectors = megaraid_max_sectors;
1018
1019 /*
1020 * number of queued commands per LUN.
1021 */
1022 adapter->cmd_per_lun = megaraid_cmd_per_lun;
1023
1024 /*
1025 * Allocate resources required to issue FW calls, when sysfs is
1026 * accessed
1027 */
1028 if (megaraid_sysfs_alloc_resources(adapter) != 0) {
1029 goto out_alloc_cmds;
1030 }
1031
1032	// Set the DMA mask to 64-bit. All supported controllers are capable of
1033 // DMA in this range
1034 if (pci_set_dma_mask(adapter->pdev, 0xFFFFFFFFFFFFFFFFULL) != 0) {
1035
1036 con_log(CL_ANN, (KERN_WARNING
1037 "megaraid: could not set DMA mask for 64-bit.\n"));
1038
1039 goto out_free_sysfs_res;
1040 }
1041
1042 // setup tasklet for DPC
1043 tasklet_init(&adapter->dpc_h, megaraid_mbox_dpc,
1044 (unsigned long)adapter);
1045
1046 con_log(CL_DLEVEL1, (KERN_INFO
1047 "megaraid mbox hba successfully initialized\n"));
1048
1049 return 0;
1050
1051out_free_sysfs_res:
1052 megaraid_sysfs_free_resources(adapter);
1053out_alloc_cmds:
1054 megaraid_free_cmd_packets(adapter);
1055out_free_irq:
1056 free_irq(adapter->irq, adapter);
1057out_iounmap:
1058 iounmap(raid_dev->baseaddr);
1059out_release_regions:
1060 pci_release_regions(pdev);
1061out_free_raid_dev:
1062 kfree(raid_dev);
1063
1064 return -1;
1065}
1066
1067
1068/**
1069 * megaraid_fini_mbox - undo controller initialization
1070 * @param adapter : our soft state
1071 */
1072static void
1073megaraid_fini_mbox(adapter_t *adapter)
1074{
1075 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
1076
1077 // flush all caches
1078 megaraid_mbox_flush_cache(adapter);
1079
1080 tasklet_kill(&adapter->dpc_h);
1081
1082 megaraid_sysfs_free_resources(adapter);
1083
1084 megaraid_free_cmd_packets(adapter);
1085
1086 free_irq(adapter->irq, adapter);
1087
1088 iounmap(raid_dev->baseaddr);
1089
1090 pci_release_regions(adapter->pdev);
1091
1092 kfree(raid_dev);
1093
1094 return;
1095}
1096
1097
1098/**
1099 * megaraid_alloc_cmd_packets - allocate shared mailbox
1100 * @param adapter : soft state of the raid controller
1101 *
1102 * Allocate and align the shared mailbox. This mailbox is used to issue
1103 * all the commands. For IO based controllers, the mailbox is also registered
1104 * with the FW. Allocate memory for all commands as well.
1105 * This is our big allocator
1106 */
1107static int
1108megaraid_alloc_cmd_packets(adapter_t *adapter)
1109{
1110 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
1111 struct pci_dev *pdev;
1112 unsigned long align;
1113 scb_t *scb;
1114 mbox_ccb_t *ccb;
1115 struct mraid_pci_blk *epthru_pci_blk;
1116 struct mraid_pci_blk *sg_pci_blk;
1117 struct mraid_pci_blk *mbox_pci_blk;
1118 int i;
1119
1120 pdev = adapter->pdev;
1121
1122 /*
1123 * Setup the mailbox
1124 * Allocate the common 16-byte aligned memory for the handshake
1125 * mailbox.
1126 */
1127 raid_dev->una_mbox64 = pci_alloc_consistent(adapter->pdev,
1128 sizeof(mbox64_t), &raid_dev->una_mbox64_dma);
1129
1130 if (!raid_dev->una_mbox64) {
1131 con_log(CL_ANN, (KERN_WARNING
1132 "megaraid: out of memory, %s %d\n", __FUNCTION__,
1133 __LINE__));
1134 return -1;
1135 }
1136 memset(raid_dev->una_mbox64, 0, sizeof(mbox64_t));
1137
1138 /*
1139 * Align the mailbox at 16-byte boundary
1140 */
1141 raid_dev->mbox = &raid_dev->una_mbox64->mbox32;
1142
1143 raid_dev->mbox = (mbox_t *)((((unsigned long)raid_dev->mbox) + 15) &
1144 (~0UL ^ 0xFUL));
1145
1146 raid_dev->mbox64 = (mbox64_t *)(((unsigned long)raid_dev->mbox) - 8);
1147
1148 align = ((void *)raid_dev->mbox -
1149 ((void *)&raid_dev->una_mbox64->mbox32));
1150
1151 raid_dev->mbox_dma = (unsigned long)raid_dev->una_mbox64_dma + 8 +
1152 align;
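	/*
	 * Worked example of the arithmetic above (illustrative address only):
	 * the mbox64 pointer computed as (mbox - 8) implies the first 8 bytes
	 * of mbox64_t hold the 64-bit address extension, with the 32-bit
	 * mailbox at offset 8. If una_mbox64 were allocated at ...0x0c4, its
	 * mbox32 member would start at ...0x0cc; rounding up to the next
	 * 16-byte boundary puts mbox at ...0x0d0, so align = 4 and the DMA
	 * address of the aligned mailbox is una_mbox64_dma + 8 + 4.
	 */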
1153
1154 // Allocate memory for commands issued internally
1155 adapter->ibuf = pci_alloc_consistent(pdev, MBOX_IBUF_SIZE,
1156 &adapter->ibuf_dma_h);
1157 if (!adapter->ibuf) {
1158
1159 con_log(CL_ANN, (KERN_WARNING
1160 "megaraid: out of memory, %s %d\n", __FUNCTION__,
1161 __LINE__));
1162
1163 goto out_free_common_mbox;
1164 }
1165 memset(adapter->ibuf, 0, MBOX_IBUF_SIZE);
1166
1167 // Allocate memory for our SCSI Command Blocks and their associated
1168 // memory
1169
1170 /*
1171 * Allocate memory for the base list of scb. Later allocate memory for
1172 * CCBs and embedded components of each CCB and point the pointers in
1173 * scb to the allocated components
1174 * NOTE: The code to allocate SCBs will be duplicated in all the LLDs
1175 * since the calling routine does not yet know the number of available
1176 * commands.
1177 */
1178 adapter->kscb_list = kmalloc(sizeof(scb_t) * MBOX_MAX_SCSI_CMDS,
1179 GFP_KERNEL);
1180
1181 if (adapter->kscb_list == NULL) {
1182 con_log(CL_ANN, (KERN_WARNING
1183 "megaraid: out of memory, %s %d\n", __FUNCTION__,
1184 __LINE__));
1185 goto out_free_ibuf;
1186 }
1187 memset(adapter->kscb_list, 0, sizeof(scb_t) * MBOX_MAX_SCSI_CMDS);
1188
1189 // memory allocation for our command packets
1190 if (megaraid_mbox_setup_dma_pools(adapter) != 0) {
1191 con_log(CL_ANN, (KERN_WARNING
1192 "megaraid: out of memory, %s %d\n", __FUNCTION__,
1193 __LINE__));
1194 goto out_free_scb_list;
1195 }
1196
1197 // Adjust the scb pointers and link in the free pool
1198 epthru_pci_blk = raid_dev->epthru_pool;
1199 sg_pci_blk = raid_dev->sg_pool;
1200 mbox_pci_blk = raid_dev->mbox_pool;
1201
1202 for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
1203 scb = adapter->kscb_list + i;
1204 ccb = raid_dev->ccb_list + i;
1205
1206 ccb->mbox = (mbox_t *)(mbox_pci_blk[i].vaddr + 16);
1207 ccb->raw_mbox = (uint8_t *)ccb->mbox;
1208 ccb->mbox64 = (mbox64_t *)(mbox_pci_blk[i].vaddr + 8);
1209 ccb->mbox_dma_h = (unsigned long)mbox_pci_blk[i].dma_addr + 16;
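		/*
		 * Layout note: each pool element was created in
		 * megaraid_mbox_setup_dma_pools() with size sizeof(mbox64_t)+16
		 * and 16-byte alignment, so placing the 32-bit mailbox 16 bytes
		 * in keeps it 16-byte aligned while leaving room for the 8-byte
		 * 64-bit address extension (mbox64) immediately ahead of it.
		 */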
1210
1211 // make sure the mailbox is aligned properly
1212 if (ccb->mbox_dma_h & 0x0F) {
1213 con_log(CL_ANN, (KERN_CRIT
1214 "megaraid mbox: not aligned on 16-bytes\n"));
1215
1216 goto out_teardown_dma_pools;
1217 }
1218
1219 ccb->epthru = (mraid_epassthru_t *)
1220 epthru_pci_blk[i].vaddr;
1221 ccb->epthru_dma_h = epthru_pci_blk[i].dma_addr;
1222 ccb->pthru = (mraid_passthru_t *)ccb->epthru;
1223 ccb->pthru_dma_h = ccb->epthru_dma_h;
1224
1225
1226 ccb->sgl64 = (mbox_sgl64 *)sg_pci_blk[i].vaddr;
1227 ccb->sgl_dma_h = sg_pci_blk[i].dma_addr;
1228 ccb->sgl32 = (mbox_sgl32 *)ccb->sgl64;
1229
1230 scb->ccb = (caddr_t)ccb;
1231 scb->gp = 0;
1232
1233 scb->sno = i; // command index
1234
1235 scb->scp = NULL;
1236 scb->state = SCB_FREE;
1237 scb->dma_direction = PCI_DMA_NONE;
1238 scb->dma_type = MRAID_DMA_NONE;
1239 scb->dev_channel = -1;
1240 scb->dev_target = -1;
1241
1242 // put scb in the free pool
1243 list_add_tail(&scb->list, &adapter->kscb_pool);
1244 }
1245
1246 return 0;
1247
1248out_teardown_dma_pools:
1249 megaraid_mbox_teardown_dma_pools(adapter);
1250out_free_scb_list:
1251 kfree(adapter->kscb_list);
1252out_free_ibuf:
1253 pci_free_consistent(pdev, MBOX_IBUF_SIZE, (void *)adapter->ibuf,
1254 adapter->ibuf_dma_h);
1255out_free_common_mbox:
1256 pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
1257 (caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
1258
1259 return -1;
1260}
1261
1262
1263/**
1264 * megaraid_free_cmd_packets - free memory
1265 * @param adapter : soft state of the raid controller
1266 *
1267 * Release memory resources allocated for commands
1268 */
1269static void
1270megaraid_free_cmd_packets(adapter_t *adapter)
1271{
1272 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
1273
1274 megaraid_mbox_teardown_dma_pools(adapter);
1275
1276 kfree(adapter->kscb_list);
1277
1278 pci_free_consistent(adapter->pdev, MBOX_IBUF_SIZE,
1279 (void *)adapter->ibuf, adapter->ibuf_dma_h);
1280
1281 pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
1282 (caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
1283 return;
1284}
1285
1286
1287/**
1288 * megaraid_mbox_setup_dma_pools - setup dma pool for command packets
1289 * @param adapter : HBA soft state
1290 *
1291 * setup the dma pools for mailbox, passthru and extended passthru structures,
1292 * and scatter-gather lists
1293 */
1294static int
1295megaraid_mbox_setup_dma_pools(adapter_t *adapter)
1296{
1297 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
1298 struct mraid_pci_blk *epthru_pci_blk;
1299 struct mraid_pci_blk *sg_pci_blk;
1300 struct mraid_pci_blk *mbox_pci_blk;
1301 int i;
1302
1303
1304
1305 // Allocate memory for 16-bytes aligned mailboxes
1306 raid_dev->mbox_pool_handle = pci_pool_create("megaraid mbox pool",
1307 adapter->pdev,
1308 sizeof(mbox64_t) + 16,
1309 16, 0);
1310
1311 if (raid_dev->mbox_pool_handle == NULL) {
1312 goto fail_setup_dma_pool;
1313 }
1314
1315 mbox_pci_blk = raid_dev->mbox_pool;
1316 for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
1317 mbox_pci_blk[i].vaddr = pci_pool_alloc(
1318 raid_dev->mbox_pool_handle,
1319 GFP_KERNEL,
1320 &mbox_pci_blk[i].dma_addr);
1321 if (!mbox_pci_blk[i].vaddr) {
1322 goto fail_setup_dma_pool;
1323 }
1324 }
1325
1326 /*
1327	 * Allocate memory for each embedded passthru structure pointer.
1328	 * Request a 128-byte aligned structure for each passthru command
1329	 * structure.
1330	 * Since passthru and extended passthru commands are exclusive, they
1331	 * share a common memory pool. Passthru structures piggyback on the memory
1332	 * allocated to extended passthru, since passthru is the smaller of the two.
1333 */
1334 raid_dev->epthru_pool_handle = pci_pool_create("megaraid mbox pthru",
1335 adapter->pdev, sizeof(mraid_epassthru_t), 128, 0);
1336
1337 if (raid_dev->epthru_pool_handle == NULL) {
1338 goto fail_setup_dma_pool;
1339 }
1340
1341 epthru_pci_blk = raid_dev->epthru_pool;
1342 for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
1343 epthru_pci_blk[i].vaddr = pci_pool_alloc(
1344 raid_dev->epthru_pool_handle,
1345 GFP_KERNEL,
1346 &epthru_pci_blk[i].dma_addr);
1347 if (!epthru_pci_blk[i].vaddr) {
1348 goto fail_setup_dma_pool;
1349 }
1350 }
1351
1352
1353 // Allocate memory for each scatter-gather list. Request for 512 bytes
1354 // alignment for each sg list
1355 raid_dev->sg_pool_handle = pci_pool_create("megaraid mbox sg",
1356 adapter->pdev,
1357 sizeof(mbox_sgl64) * MBOX_MAX_SG_SIZE,
1358 512, 0);
1359
1360 if (raid_dev->sg_pool_handle == NULL) {
1361 goto fail_setup_dma_pool;
1362 }
1363
1364 sg_pci_blk = raid_dev->sg_pool;
1365 for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
1366 sg_pci_blk[i].vaddr = pci_pool_alloc(
1367 raid_dev->sg_pool_handle,
1368 GFP_KERNEL,
1369 &sg_pci_blk[i].dma_addr);
1370 if (!sg_pci_blk[i].vaddr) {
1371 goto fail_setup_dma_pool;
1372 }
1373 }
1374
1375 return 0;
1376
1377fail_setup_dma_pool:
1378 megaraid_mbox_teardown_dma_pools(adapter);
1379 return -1;
1380}
1381
1382
1383/**
1384 * megaraid_mbox_teardown_dma_pools - teardown dma pools for command packets
1385 * @param adapter : HBA soft state
1386 *
1387 * teardown the dma pool for mailbox, passthru and extended passthru
1388 * structures, and scatter-gather lists
1389 */
1390static void
1391megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
1392{
1393 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
1394 struct mraid_pci_blk *epthru_pci_blk;
1395 struct mraid_pci_blk *sg_pci_blk;
1396 struct mraid_pci_blk *mbox_pci_blk;
1397 int i;
1398
1399
1400 sg_pci_blk = raid_dev->sg_pool;
1401 for (i = 0; i < MBOX_MAX_SCSI_CMDS && sg_pci_blk[i].vaddr; i++) {
1402 pci_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr,
1403 sg_pci_blk[i].dma_addr);
1404 }
1405 if (raid_dev->sg_pool_handle)
1406 pci_pool_destroy(raid_dev->sg_pool_handle);
1407
1408
1409 epthru_pci_blk = raid_dev->epthru_pool;
1410 for (i = 0; i < MBOX_MAX_SCSI_CMDS && epthru_pci_blk[i].vaddr; i++) {
1411 pci_pool_free(raid_dev->epthru_pool_handle,
1412 epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr);
1413 }
1414 if (raid_dev->epthru_pool_handle)
1415 pci_pool_destroy(raid_dev->epthru_pool_handle);
1416
1417
1418 mbox_pci_blk = raid_dev->mbox_pool;
1419 for (i = 0; i < MBOX_MAX_SCSI_CMDS && mbox_pci_blk[i].vaddr; i++) {
1420 pci_pool_free(raid_dev->mbox_pool_handle,
1421 mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr);
1422 }
1423 if (raid_dev->mbox_pool_handle)
1424 pci_pool_destroy(raid_dev->mbox_pool_handle);
1425
1426 return;
1427}
1428
1429
1430/**
1431 * megaraid_alloc_scb - detach and return a scb from the free list
1432 * @adapter : controller's soft state
1433 *
1434 * return the scb from the head of the free list. NULL if there are none
1435 * available
1436 **/
1437static inline scb_t *
1438megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp)
1439{
1440 struct list_head *head = &adapter->kscb_pool;
1441 scb_t *scb = NULL;
1442 unsigned long flags;
1443
1444 // detach scb from free pool
1445 spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);
1446
1447 if (list_empty(head)) {
1448 spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
1449 return NULL;
1450 }
1451
1452 scb = list_entry(head->next, scb_t, list);
1453 list_del_init(&scb->list);
1454
1455 spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
1456
1457 scb->state = SCB_ACTIVE;
1458 scb->scp = scp;
1459 scb->dma_type = MRAID_DMA_NONE;
1460
1461 return scb;
1462}
1463
1464
1465/**
1466 * megaraid_dealloc_scb - return the scb to the free pool
1467 * @adapter : controller's soft state
1468 * @scb : scb to be freed
1469 *
1470 * return the scb back to the free list of scbs. The caller must 'flush' the
1471 * SCB before calling us. E.g., performing pci_unmap and/or pci_sync etc.
1472 * NOTE NOTE: Make sure the scb is not on any list before calling this
1473 * routine.
1474 **/
1475static inline void
1476megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb)
1477{
1478 unsigned long flags;
1479
1480 // put scb in the free pool
1481 scb->state = SCB_FREE;
1482 scb->scp = NULL;
1483 spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);
1484
1485 list_add(&scb->list, &adapter->kscb_pool);
1486
1487 spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
1488
1489 return;
1490}
1491
1492
1493/**
1494 * megaraid_mbox_mksgl - make the scatter-gather list
1495 * @adapter - controller's soft state
1496 * @scb - scsi control block
1497 *
1498 * prepare the scatter-gather list
1499 */
1500static inline int
1501megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
1502{
1503 struct scatterlist *sgl;
1504 mbox_ccb_t *ccb;
1505 struct page *page;
1506 unsigned long offset;
1507 struct scsi_cmnd *scp;
1508 int sgcnt;
1509 int i;
1510
1511
1512 scp = scb->scp;
1513 ccb = (mbox_ccb_t *)scb->ccb;
1514
1515 // no mapping required if no data to be transferred
1516 if (!scp->request_buffer || !scp->request_bufflen)
1517 return 0;
1518
1519 if (!scp->use_sg) { /* scatter-gather list not used */
1520
1521 page = virt_to_page(scp->request_buffer);
1522
1523 offset = ((unsigned long)scp->request_buffer & ~PAGE_MASK);
1524
1525 ccb->buf_dma_h = pci_map_page(adapter->pdev, page, offset,
1526 scp->request_bufflen,
1527 scb->dma_direction);
1528 scb->dma_type = MRAID_DMA_WBUF;
1529
1530 /*
1531 * We need to handle special 64-bit commands that need a
1532 * minimum of 1 SG
1533 */
1534 sgcnt = 1;
1535 ccb->sgl64[0].address = ccb->buf_dma_h;
1536 ccb->sgl64[0].length = scp->request_bufflen;
1537
1538 return sgcnt;
1539 }
1540
1541 sgl = (struct scatterlist *)scp->request_buffer;
1542
1543 // The number of sg elements returned must not exceed our limit
1544 sgcnt = pci_map_sg(adapter->pdev, sgl, scp->use_sg,
1545 scb->dma_direction);
1546
1547 if (sgcnt > adapter->sglen) {
1548 con_log(CL_ANN, (KERN_CRIT
1549 "megaraid critical: too many sg elements:%d\n",
1550 sgcnt));
1551 BUG();
1552 }
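	/*
	 * This should not normally trigger: megaraid_io_attach() exports
	 * adapter->sglen as host->sg_tablesize, so the mid-layer is not
	 * expected to hand us more segments than the firmware supports.
	 */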
1553
1554 scb->dma_type = MRAID_DMA_WSG;
1555
1556 for (i = 0; i < sgcnt; i++, sgl++) {
1557 ccb->sgl64[i].address = sg_dma_address(sgl);
1558 ccb->sgl64[i].length = sg_dma_len(sgl);
1559 }
1560
1561 // Return count of SG nodes
1562 return sgcnt;
1563}
1564
1565
1566/**
1567 * mbox_post_cmd - issue a mailbox command
1568 * @adapter - controller's soft state
1569 * @scb - command to be issued
1570 *
1571 * post the command to the controller if the mailbox is available.
1572 */
1573static inline int
1574mbox_post_cmd(adapter_t *adapter, scb_t *scb)
1575{
1576 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
1577 mbox64_t *mbox64;
1578 mbox_t *mbox;
1579 mbox_ccb_t *ccb;
1580 unsigned long flags;
1581 unsigned int i = 0;
1582
1583
1584 ccb = (mbox_ccb_t *)scb->ccb;
1585 mbox = raid_dev->mbox;
1586 mbox64 = raid_dev->mbox64;
1587
1588 /*
1589 * Check for busy mailbox. If it is, return failure - the caller
1590 * should retry later.
1591 */
1592 spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags);
1593
1594 if (unlikely(mbox->busy)) {
1595 do {
1596 udelay(1);
1597 i++;
1598 rmb();
1599 } while(mbox->busy && (i < max_mbox_busy_wait));
1600
1601 if (mbox->busy) {
1602
1603 spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
1604
1605 return -1;
1606 }
1607 }
1608
1609
1610 // Copy this command's mailbox data into "adapter's" mailbox
1611 memcpy((caddr_t)mbox64, (caddr_t)ccb->mbox64, 22);
1612 mbox->cmdid = scb->sno;
1613
1614 adapter->outstanding_cmds++;
1615
1616 if (scb->dma_direction == PCI_DMA_TODEVICE) {
1617 if (!scb->scp->use_sg) { // sg list not used
1618 pci_dma_sync_single_for_device(adapter->pdev,
1619 ccb->buf_dma_h,
1620 scb->scp->request_bufflen,
1621 PCI_DMA_TODEVICE);
1622 }
1623 else {
1624 pci_dma_sync_sg_for_device(adapter->pdev,
1625 scb->scp->request_buffer,
1626 scb->scp->use_sg, PCI_DMA_TODEVICE);
1627 }
1628 }
1629
1630 mbox->busy = 1; // Set busy
1631 mbox->poll = 0;
1632 mbox->ack = 0;
1633 wmb();
1634
1635 WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
1636
1637 spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
1638
1639 return 0;
1640}
1641
1642
1643/**
1644 * megaraid_queue_command - generic queue entry point for all LLDs
1645 * @scp : pointer to the scsi command to be executed
1646 * @done : callback routine to be called after the cmd has been completed
1647 *
1648 * Queue entry point for mailbox based controllers.
1649 */
1650static int
1651megaraid_queue_command(struct scsi_cmnd *scp, void (* done)(struct scsi_cmnd *))
1652{
1653 adapter_t *adapter;
1654 scb_t *scb;
1655 int if_busy;
1656
1657 adapter = SCP2ADAPTER(scp);
1658 scp->scsi_done = done;
1659 scp->result = 0;
1660
1661 assert_spin_locked(adapter->host_lock);
1662
1663 spin_unlock(adapter->host_lock);
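	/*
	 * The assert above documents that the mid-layer calls us with
	 * host_lock held (see scsi_assign_lock() in megaraid_io_attach()).
	 * Dropping the lock here lets the ISR and DPC make progress while the
	 * command is built; the lock is re-acquired before we return.
	 */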
1664
1665 /*
1666 * Allocate and build a SCB request
1667	 * The if_busy flag will be set if megaraid_mbox_build_cmd() could not
1668	 * allocate an scb; we return a non-zero status in that case.
1669	 * NOTE: scb can be NULL even though certain commands completed
1670	 * successfully, e.g., MODE_SENSE and TEST_UNIT_READY; we return 0 in
1671	 * that case and do the callback right away.
1672 */
1673 if_busy = 0;
1674 scb = megaraid_mbox_build_cmd(adapter, scp, &if_busy);
1675
1676 if (scb) {
1677 megaraid_mbox_runpendq(adapter, scb);
1678 }
1679
1680 spin_lock(adapter->host_lock);
1681
1682 if (!scb) { // command already completed
1683 done(scp);
1684 return 0;
1685 }
1686
1687 return if_busy;
1688}
1689
1690
1691/**
1692 * megaraid_mbox_build_cmd - transform the mid-layer scsi command to megaraid
1693 * firmware lingua
1694 * @adapter - controller's soft state
1695 * @scp - mid-layer scsi command pointer
1696 * @busy - set if request could not be completed because of lack of
1697 * resources
1698 *
1699 * convert the command issued by the mid-layer to the format understood by the
1700 * megaraid firmware. We also complete certain commands without sending them to the firmware
1701 */
1702static scb_t *
1703megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
1704{
1705 mraid_device_t *rdev = ADAP2RAIDDEV(adapter);
1706 int channel;
1707 int target;
1708 int islogical;
1709 mbox_ccb_t *ccb;
1710 mraid_passthru_t *pthru;
1711 mbox64_t *mbox64;
1712 mbox_t *mbox;
1713 scb_t *scb;
1714 char skip[] = "skipping";
1715 char scan[] = "scanning";
1716 char *ss;
1717
1718
1719 /*
1720 * Get the appropriate device map for the device this command is
1721 * intended for
1722 */
1723 MRAID_GET_DEVICE_MAP(adapter, scp, channel, target, islogical);
1724
1725 /*
1726 * Logical drive commands
1727 */
1728 if (islogical) {
1729 switch (scp->cmnd[0]) {
1730 case TEST_UNIT_READY:
1731 /*
1732			 * Do we support clustering and is the support enabled?
1733			 * If not, always return success.
1734 */
1735 if (!adapter->ha) {
1736 scp->result = (DID_OK << 16);
1737 return NULL;
1738 }
1739
1740 if (!(scb = megaraid_alloc_scb(adapter, scp))) {
1741 scp->result = (DID_ERROR << 16);
1742 *busy = 1;
1743 return NULL;
1744 }
1745
1746 scb->dma_direction = scp->sc_data_direction;
1747 scb->dev_channel = 0xFF;
1748 scb->dev_target = target;
1749 ccb = (mbox_ccb_t *)scb->ccb;
1750
1751 /*
1752 * The command id will be provided by the command
1753 * issuance routine
1754 */
1755 ccb->raw_mbox[0] = CLUSTER_CMD;
1756 ccb->raw_mbox[2] = RESERVATION_STATUS;
1757 ccb->raw_mbox[3] = target;
1758
1759 return scb;
1760
1761 case MODE_SENSE:
1762 if (scp->use_sg) {
1763 struct scatterlist *sgl;
1764 caddr_t vaddr;
1765
1766 sgl = (struct scatterlist *)scp->request_buffer;
1767 if (sgl->page) {
1768 vaddr = (caddr_t)
1769 (page_address((&sgl[0])->page)
1770 + (&sgl[0])->offset);
1771
1772 memset(vaddr, 0, scp->cmnd[4]);
1773 }
1774 else {
1775 con_log(CL_ANN, (KERN_WARNING
1776 "megaraid mailbox: invalid sg:%d\n",
1777 __LINE__));
1778 }
1779 }
1780 else {
1781 memset(scp->request_buffer, 0, scp->cmnd[4]);
1782 }
1783 scp->result = (DID_OK << 16);
1784 return NULL;
1785
1786 case INQUIRY:
1787 /*
1788 * Display the channel scan for logical drives
1789 * Do not display scan for a channel if already done.
1790 */
1791 if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) {
1792
1793 con_log(CL_ANN, (KERN_INFO
1794 "scsi[%d]: scanning scsi channel %d",
1795 adapter->host->host_no,
1796 SCP2CHANNEL(scp)));
1797
1798 con_log(CL_ANN, (
1799 " [virtual] for logical drives\n"));
1800
1801 rdev->last_disp |= (1L << SCP2CHANNEL(scp));
1802 }
1803
1804 /* Fall through */
1805
1806 case READ_CAPACITY:
1807 /*
1808 * Do not allow LUN > 0 for logical drives and
1809 * requests for more than 40 logical drives
1810 */
1811 if (SCP2LUN(scp)) {
1812 scp->result = (DID_BAD_TARGET << 16);
1813 return NULL;
1814 }
1815 if ((target % 0x80) >= MAX_LOGICAL_DRIVES_40LD) {
1816 scp->result = (DID_BAD_TARGET << 16);
1817 return NULL;
1818 }
1819
1820
1821 /* Allocate a SCB and initialize passthru */
1822 if (!(scb = megaraid_alloc_scb(adapter, scp))) {
1823 scp->result = (DID_ERROR << 16);
1824 *busy = 1;
1825 return NULL;
1826 }
1827
1828 ccb = (mbox_ccb_t *)scb->ccb;
1829 scb->dev_channel = 0xFF;
1830 scb->dev_target = target;
1831 pthru = ccb->pthru;
1832 mbox = ccb->mbox;
1833 mbox64 = ccb->mbox64;
1834
1835 pthru->timeout = 0;
1836 pthru->ars = 1;
1837 pthru->reqsenselen = 14;
1838 pthru->islogical = 1;
1839 pthru->logdrv = target;
1840 pthru->cdblen = scp->cmd_len;
1841 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1842
1843 mbox->cmd = MBOXCMD_PASSTHRU64;
1844 scb->dma_direction = scp->sc_data_direction;
1845
1846 pthru->dataxferlen = scp->request_bufflen;
1847 pthru->dataxferaddr = ccb->sgl_dma_h;
1848 pthru->numsge = megaraid_mbox_mksgl(adapter,
1849 scb);
1850
1851 mbox->xferaddr = 0xFFFFFFFF;
1852 mbox64->xferaddr_lo = (uint32_t )ccb->pthru_dma_h;
1853 mbox64->xferaddr_hi = 0;
1854
1855 return scb;
1856
1857 case READ_6:
1858 case WRITE_6:
1859 case READ_10:
1860 case WRITE_10:
1861 case READ_12:
1862 case WRITE_12:
1863
1864 /*
1865 * Allocate a SCB and initialize mailbox
1866 */
1867 if (!(scb = megaraid_alloc_scb(adapter, scp))) {
1868 scp->result = (DID_ERROR << 16);
1869 *busy = 1;
1870 return NULL;
1871 }
1872 ccb = (mbox_ccb_t *)scb->ccb;
1873 scb->dev_channel = 0xFF;
1874 scb->dev_target = target;
1875 mbox = ccb->mbox;
1876 mbox64 = ccb->mbox64;
1877 mbox->logdrv = target;
1878
1879 /*
1880 * A little HACK: 2nd bit is zero for all scsi read
1881 * commands and is set for all scsi write commands
1882 */
1883 mbox->cmd = (scp->cmnd[0] & 0x02) ? MBOXCMD_LWRITE64:
1884 MBOXCMD_LREAD64 ;
1885
1886 /*
1887 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1888 */
1889 if (scp->cmd_len == 6) {
1890 mbox->numsectors = (uint32_t)scp->cmnd[4];
1891 mbox->lba =
1892 ((uint32_t)scp->cmnd[1] << 16) |
1893 ((uint32_t)scp->cmnd[2] << 8) |
1894 (uint32_t)scp->cmnd[3];
1895
1896 mbox->lba &= 0x1FFFFF;
1897 }
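			/*
			 * (READ/WRITE(6) carries only a 21-bit LBA, hence the
			 * 0x1FFFFF mask above. Note that a transfer length of 0
			 * in byte 4 conventionally means 256 sectors in SCSI,
			 * which this path does not treat specially.)
			 */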
1898
1899 /*
1900 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1901 */
1902 else if (scp->cmd_len == 10) {
1903 mbox->numsectors =
1904 (uint32_t)scp->cmnd[8] |
1905 ((uint32_t)scp->cmnd[7] << 8);
1906 mbox->lba =
1907 ((uint32_t)scp->cmnd[2] << 24) |
1908 ((uint32_t)scp->cmnd[3] << 16) |
1909 ((uint32_t)scp->cmnd[4] << 8) |
1910 (uint32_t)scp->cmnd[5];
1911 }
1912
1913 /*
1914 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1915 */
1916 else if (scp->cmd_len == 12) {
1917 mbox->lba =
1918 ((uint32_t)scp->cmnd[2] << 24) |
1919 ((uint32_t)scp->cmnd[3] << 16) |
1920 ((uint32_t)scp->cmnd[4] << 8) |
1921 (uint32_t)scp->cmnd[5];
1922
1923 mbox->numsectors =
1924 ((uint32_t)scp->cmnd[6] << 24) |
1925 ((uint32_t)scp->cmnd[7] << 16) |
1926 ((uint32_t)scp->cmnd[8] << 8) |
1927 (uint32_t)scp->cmnd[9];
1928 }
1929 else {
1930 con_log(CL_ANN, (KERN_WARNING
1931 "megaraid: unsupported CDB length\n"));
1932
1933 megaraid_dealloc_scb(adapter, scb);
1934
1935 scp->result = (DID_ERROR << 16);
1936 return NULL;
1937 }
1938
1939 scb->dma_direction = scp->sc_data_direction;
1940
1941 // Calculate Scatter-Gather info
1942 mbox64->xferaddr_lo = (uint32_t )ccb->sgl_dma_h;
1943 mbox->numsge = megaraid_mbox_mksgl(adapter,
1944 scb);
1945 mbox->xferaddr = 0xFFFFFFFF;
1946 mbox64->xferaddr_hi = 0;
1947
1948 return scb;
1949
1950 case RESERVE:
1951 case RELEASE:
1952 /*
1953			 * Do we support clustering and is the support enabled?
1954 */
1955 if (!adapter->ha) {
1956 scp->result = (DID_BAD_TARGET << 16);
1957 return NULL;
1958 }
1959
1960 /*
1961 * Allocate a SCB and initialize mailbox
1962 */
1963 if (!(scb = megaraid_alloc_scb(adapter, scp))) {
1964 scp->result = (DID_ERROR << 16);
1965 *busy = 1;
1966 return NULL;
1967 }
1968
1969 ccb = (mbox_ccb_t *)scb->ccb;
1970 scb->dev_channel = 0xFF;
1971 scb->dev_target = target;
1972 ccb->raw_mbox[0] = CLUSTER_CMD;
1973 ccb->raw_mbox[2] = (scp->cmnd[0] == RESERVE) ?
1974 RESERVE_LD : RELEASE_LD;
1975
1976 ccb->raw_mbox[3] = target;
1977 scb->dma_direction = scp->sc_data_direction;
1978
1979 return scb;
1980
1981 default:
1982 scp->result = (DID_BAD_TARGET << 16);
1983 return NULL;
1984 }
1985 }
1986 else { // Passthru device commands
1987
1988 // Do not allow access to target id > 15 or LUN > 7
1989 if (target > 15 || SCP2LUN(scp) > 7) {
1990 scp->result = (DID_BAD_TARGET << 16);
1991 return NULL;
1992 }
1993
1994 // if fast load option was set and scan for last device is
1995 // over, reset the fast_load flag so that during a possible
1996 // next scan, devices can be made available
1997 if (rdev->fast_load && (target == 15) &&
1998 (SCP2CHANNEL(scp) == adapter->max_channel -1)) {
1999
2000 con_log(CL_ANN, (KERN_INFO
2001 "megaraid[%d]: physical device scan re-enabled\n",
2002 adapter->host->host_no));
2003 rdev->fast_load = 0;
2004 }
2005
2006 /*
2007 * Display the channel scan for physical devices
2008 */
2009 if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) {
2010
2011 ss = rdev->fast_load ? skip : scan;
2012
2013 con_log(CL_ANN, (KERN_INFO
2014 "scsi[%d]: %s scsi channel %d [Phy %d]",
2015 adapter->host->host_no, ss, SCP2CHANNEL(scp),
2016 channel));
2017
2018 con_log(CL_ANN, (
2019 " for non-raid devices\n"));
2020
2021 rdev->last_disp |= (1L << SCP2CHANNEL(scp));
2022 }
2023
2024 // disable channel sweep if fast load option given
2025 if (rdev->fast_load) {
2026 scp->result = (DID_BAD_TARGET << 16);
2027 return NULL;
2028 }
2029
2030 // Allocate a SCB and initialize passthru
2031 if (!(scb = megaraid_alloc_scb(adapter, scp))) {
2032 scp->result = (DID_ERROR << 16);
2033 *busy = 1;
2034 return NULL;
2035 }
2036
2037 ccb = (mbox_ccb_t *)scb->ccb;
2038 scb->dev_channel = channel;
2039 scb->dev_target = target;
2040 scb->dma_direction = scp->sc_data_direction;
2041 mbox = ccb->mbox;
2042 mbox64 = ccb->mbox64;
2043
2044 // Does this firmware support extended CDBs
2045 if (adapter->max_cdb_sz == 16) {
2046 mbox->cmd = MBOXCMD_EXTPTHRU;
2047
2048 megaraid_mbox_prepare_epthru(adapter, scb, scp);
2049
2050 mbox64->xferaddr_lo = (uint32_t)ccb->epthru_dma_h;
2051 mbox64->xferaddr_hi = 0;
2052 mbox->xferaddr = 0xFFFFFFFF;
2053 }
2054 else {
2055 mbox->cmd = MBOXCMD_PASSTHRU64;
2056
2057 megaraid_mbox_prepare_pthru(adapter, scb, scp);
2058
2059 mbox64->xferaddr_lo = (uint32_t)ccb->pthru_dma_h;
2060 mbox64->xferaddr_hi = 0;
2061 mbox->xferaddr = 0xFFFFFFFF;
2062 }
2063 return scb;
2064 }
2065
2066 // NOT REACHED
2067}
2068
2069
2070/**
2071 * megaraid_mbox_runpendq - execute commands queued in the pending queue
2072 * @adapter : controller's soft state
2073 * @scb : SCB to be queued in the pending list
2074 *
2075 * scan the pending list for commands which are not yet issued and try to
2076 * post to the controller. The SCB can be a null pointer, which would indicate
2077 * no SCB to be queued; just try to execute the ones in the pending list.
2078 *
2079 * NOTE: We do not actually traverse the pending list. The SCBs are plucked
2080 * out from the head of the pending list. If it is successfully issued, the
2081 * next SCB is at the head now.
2082 */
2083static void
2084megaraid_mbox_runpendq(adapter_t *adapter, scb_t *scb_q)
2085{
2086 scb_t *scb;
2087 unsigned long flags;
2088
2089 spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
2090
2091 if (scb_q) {
2092 scb_q->state = SCB_PENDQ;
2093 list_add_tail(&scb_q->list, &adapter->pend_list);
2094 }
2095
2096	// if the adapter is not in quiescent mode, post the commands to FW
2097 if (adapter->quiescent) {
2098 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
2099 return;
2100 }
2101
2102 while (!list_empty(&adapter->pend_list)) {
2103
2104 assert_spin_locked(PENDING_LIST_LOCK(adapter));
2105
2106 scb = list_entry(adapter->pend_list.next, scb_t, list);
2107
2108 // remove the scb from the pending list and try to
2109 // issue. If we are unable to issue it, put back in
2110 // the pending list and return
2111
2112 list_del_init(&scb->list);
2113
2114 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
2115
2116 // if mailbox was busy, return SCB back to pending
2117 // list. Make sure to add at the head, since that's
2118 // where it would have been removed from
2119
2120 scb->state = SCB_ISSUED;
2121
2122 if (mbox_post_cmd(adapter, scb) != 0) {
2123
2124 spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
2125
2126 scb->state = SCB_PENDQ;
2127
2128 list_add(&scb->list, &adapter->pend_list);
2129
2130 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter),
2131 flags);
2132
2133 return;
2134 }
2135
2136 spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
2137 }
2138
2139 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
2140
2141
2142 return;
2143}
2144
2145
2146/**
2147 * megaraid_mbox_prepare_pthru - prepare a command for physical devices
2148 * @adapter - pointer to controller's soft state
2149 * @scb - scsi control block
2150 * @scp - scsi command from the mid-layer
2151 *
2152 * prepare a command for the scsi physical devices
2153 */
2154static void
2155megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb,
2156 struct scsi_cmnd *scp)
2157{
2158 mbox_ccb_t *ccb;
2159 mraid_passthru_t *pthru;
2160 uint8_t channel;
2161 uint8_t target;
2162
2163 ccb = (mbox_ccb_t *)scb->ccb;
2164 pthru = ccb->pthru;
2165 channel = scb->dev_channel;
2166 target = scb->dev_target;
2167
2168 // 0=6sec, 1=60sec, 2=10min, 3=3hrs, 4=NO timeout
2169 pthru->timeout = 4;
2170 pthru->ars = 1;
2171 pthru->islogical = 0;
2172 pthru->channel = 0;
2173 pthru->target = (channel << 4) | target;
2174 pthru->logdrv = SCP2LUN(scp);
2175 pthru->reqsenselen = 14;
2176 pthru->cdblen = scp->cmd_len;
2177
2178 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
2179
2180 if (scp->request_bufflen) {
2181 pthru->dataxferlen = scp->request_bufflen;
2182 pthru->dataxferaddr = ccb->sgl_dma_h;
2183 pthru->numsge = megaraid_mbox_mksgl(adapter, scb);
2184 }
2185 else {
2186 pthru->dataxferaddr = 0;
2187 pthru->dataxferlen = 0;
2188 pthru->numsge = 0;
2189 }
2190 return;
2191}
2192
2193
2194/**
2195 * megaraid_mbox_prepare_epthru - prepare a command for physical devices
2196 * @adapter - pointer to controller's soft state
2197 * @scb - scsi control block
2198 * @scp - scsi command from the mid-layer
2199 *
2200 * prepare a command for the scsi physical devices. This routine prepares
2201 * commands for devices which can take extended CDBs (>10 bytes)
2202 */
2203static void
2204megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb,
2205 struct scsi_cmnd *scp)
2206{
2207 mbox_ccb_t *ccb;
2208 mraid_epassthru_t *epthru;
2209 uint8_t channel;
2210 uint8_t target;
2211
2212 ccb = (mbox_ccb_t *)scb->ccb;
2213 epthru = ccb->epthru;
2214 channel = scb->dev_channel;
2215 target = scb->dev_target;
2216
2217 // 0=6sec, 1=60sec, 2=10min, 3=3hrs, 4=NO timeout
2218 epthru->timeout = 4;
2219 epthru->ars = 1;
2220 epthru->islogical = 0;
2221 epthru->channel = 0;
2222 epthru->target = (channel << 4) | target;
2223 epthru->logdrv = SCP2LUN(scp);
2224 epthru->reqsenselen = 14;
2225 epthru->cdblen = scp->cmd_len;
2226
2227 memcpy(epthru->cdb, scp->cmnd, scp->cmd_len);
2228
2229 if (scp->request_bufflen) {
2230 epthru->dataxferlen = scp->request_bufflen;
2231 epthru->dataxferaddr = ccb->sgl_dma_h;
2232 epthru->numsge = megaraid_mbox_mksgl(adapter, scb);
2233 }
2234 else {
2235 epthru->dataxferaddr = 0;
2236 epthru->dataxferlen = 0;
2237 epthru->numsge = 0;
2238 }
2239 return;
2240}
2241
2242
2243/**
2244 * megaraid_ack_sequence - interrupt ack sequence for memory mapped HBAs
2245 * @adapter - controller's soft state
2246 *
2247 * Interrupt acknowledgement sequence for memory-mapped HBAs. Find out the
2248 * completed commands and put them on the completed list for later processing.
2249 *
2250 * Returns: 1 if the interrupt is valid, 0 otherwise
2251 */
2252static inline int
2253megaraid_ack_sequence(adapter_t *adapter)
2254{
2255 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
2256 mbox_t *mbox;
2257 scb_t *scb;
2258 uint8_t nstatus;
2259 uint8_t completed[MBOX_MAX_FIRMWARE_STATUS];
2260 struct list_head clist;
2261 int handled;
2262 uint32_t dword;
2263 unsigned long flags;
2264 int i, j;
2265
2266
2267 mbox = raid_dev->mbox;
2268
2269 // move the SCBs from the firmware completed array to our local list
2270 INIT_LIST_HEAD(&clist);
2271
2272 // loop till F/W has more commands for us to complete
2273 handled = 0;
2274 spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags);
2275 do {
2276 /*
2277 * Check if a valid interrupt is pending. If found, force the
2278 * interrupt line low.
2279 */
2280 dword = RDOUTDOOR(raid_dev);
2281 if (dword != 0x10001234) break;
2282
2283 handled = 1;
2284
2285 WROUTDOOR(raid_dev, 0x10001234);
2286
2287 nstatus = 0;
2288 // wait for valid numstatus to post
2289 for (i = 0; i < 0xFFFFF; i++) {
2290 if (mbox->numstatus != 0xFF) {
2291 nstatus = mbox->numstatus;
2292 break;
2293 }
2294 rmb();
2295 }
2296 mbox->numstatus = 0xFF;
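		/*
		 * 0xFF acts as the "not yet posted by firmware" sentinel;
		 * resetting numstatus here (and each completed[] slot below)
		 * lets the polling loops catch the next batch the firmware
		 * posts.
		 */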
2297
2298 adapter->outstanding_cmds -= nstatus;
2299
2300 for (i = 0; i < nstatus; i++) {
2301
2302 // wait for valid command index to post
2303 for (j = 0; j < 0xFFFFF; j++) {
2304 if (mbox->completed[i] != 0xFF) break;
2305 rmb();
2306 }
2307 completed[i] = mbox->completed[i];
2308 mbox->completed[i] = 0xFF;
2309
2310 if (completed[i] == 0xFF) {
2311 con_log(CL_ANN, (KERN_CRIT
2312 "megaraid: command posting timed out\n"));
2313
2314 BUG();
2315 continue;
2316 }
2317
2318 // Get SCB associated with this command id
2319 if (completed[i] >= MBOX_MAX_SCSI_CMDS) {
2320 // a cmm command
2321 scb = adapter->uscb_list + (completed[i] -
2322 MBOX_MAX_SCSI_CMDS);
2323 }
2324 else {
2325 // an os command
2326 scb = adapter->kscb_list + completed[i];
2327 }
2328
2329 scb->status = mbox->status;
2330 list_add_tail(&scb->list, &clist);
2331 }
2332
2333 // Acknowledge interrupt
2334 WRINDOOR(raid_dev, 0x02);
2335
2336 } while(1);
2337
2338 spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
2339
2340
2341 // put the completed commands in the completed list. DPC would
2342 // complete these commands later
2343 spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
2344
2345 list_splice(&clist, &adapter->completed_list);
2346
2347 spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);
2348
2349
2350 // schedule the DPC if there is some work for it
2351 if (handled)
2352 tasklet_schedule(&adapter->dpc_h);
2353
2354 return handled;
2355}
2356
2357
2358/**
2359 * megaraid_isr - isr for memory based mailbox based controllers
2360 * @irq - irq
2361 * @devp - pointer to our soft state
2362 * @regs - unused
2363 *
2364 * Interrupt service routine for memory-mapped mailbox controllers.
2365 */
2366static irqreturn_t
2367megaraid_isr(int irq, void *devp, struct pt_regs *regs)
2368{
2369 adapter_t *adapter = devp;
2370 int handled;
2371
2372 handled = megaraid_ack_sequence(adapter);
2373
2374 /* Loop through any pending requests */
2375 if (!adapter->quiescent) {
2376 megaraid_mbox_runpendq(adapter, NULL);
2377 }
2378
2379 return IRQ_RETVAL(handled);
2380}
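/*
 * Completion path in brief: the ISR above calls megaraid_ack_sequence(),
 * which moves completed SCBs onto adapter->completed_list and schedules the
 * DPC tasklet. The tasklet, megaraid_mbox_dpc() below, then translates the
 * firmware status for each SCB and returns the command to the mid-layer via
 * scsi_done().
 */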
2381
2382
2383/**
2384 * megaraid_mbox_sync_scb - sync kernel buffers
2385 * @adapter : controller's soft state
2386 * @scb : pointer to the resource packet
2387 *
2388 * DMA sync if required.
2389 */
2390static inline void
2391megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb)
2392{
2393 mbox_ccb_t *ccb;
2394
2395 ccb = (mbox_ccb_t *)scb->ccb;
2396
2397 switch (scb->dma_type) {
2398
2399 case MRAID_DMA_WBUF:
2400 if (scb->dma_direction == PCI_DMA_FROMDEVICE) {
2401 pci_dma_sync_single_for_cpu(adapter->pdev,
2402 ccb->buf_dma_h,
2403 scb->scp->request_bufflen,
2404 PCI_DMA_FROMDEVICE);
2405 }
2406
2407 pci_unmap_page(adapter->pdev, ccb->buf_dma_h,
2408 scb->scp->request_bufflen, scb->dma_direction);
2409
2410 break;
2411
2412 case MRAID_DMA_WSG:
2413 if (scb->dma_direction == PCI_DMA_FROMDEVICE) {
2414 pci_dma_sync_sg_for_cpu(adapter->pdev,
2415 scb->scp->request_buffer,
2416 scb->scp->use_sg, PCI_DMA_FROMDEVICE);
2417 }
2418
2419 pci_unmap_sg(adapter->pdev, scb->scp->request_buffer,
2420 scb->scp->use_sg, scb->dma_direction);
2421
2422 break;
2423
2424 default:
2425 break;
2426 }
2427
2428 return;
2429}
2430
2431
2432/**
2433 * megaraid_mbox_dpc - the tasklet to complete the commands from completed list
2434 * @devp : pointer to HBA soft state
2435 *
2436 * Pick up the commands from the completed list and send back to the owners.
2437 * This is a reentrant function and does not assume any locks are held while
2438 * it is being called.
2439 */
2440static void
2441megaraid_mbox_dpc(unsigned long devp)
2442{
2443 adapter_t *adapter = (adapter_t *)devp;
2444 mraid_device_t *raid_dev;
2445 struct list_head clist;
2446 struct scatterlist *sgl;
2447 scb_t *scb;
2448 scb_t *tmp;
2449 struct scsi_cmnd *scp;
2450 mraid_passthru_t *pthru;
2451 mraid_epassthru_t *epthru;
2452 mbox_ccb_t *ccb;
2453 int islogical;
2454 int pdev_index;
2455 int pdev_state;
2456 mbox_t *mbox;
2457 unsigned long flags;
2458 uint8_t c;
2459 int status;
2460
2461
2462 if (!adapter) return;
2463
2464 raid_dev = ADAP2RAIDDEV(adapter);
2465
2466 // move the SCBs from the completed list to our local list
2467 INIT_LIST_HEAD(&clist);
2468
2469 spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
2470
2471 list_splice_init(&adapter->completed_list, &clist);
2472
2473 spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);
2474
2475
2476 list_for_each_entry_safe(scb, tmp, &clist, list) {
2477
2478 status = scb->status;
2479 scp = scb->scp;
2480 ccb = (mbox_ccb_t *)scb->ccb;
2481 pthru = ccb->pthru;
2482 epthru = ccb->epthru;
2483 mbox = ccb->mbox;
2484
2485 // Make sure f/w has completed a valid command
2486 if (scb->state != SCB_ISSUED) {
2487 con_log(CL_ANN, (KERN_CRIT
2488 "megaraid critical err: invalid command %d:%d:%p\n",
2489 scb->sno, scb->state, scp));
2490 BUG();
2491 continue; // Must never happen!
2492 }
2493
2494 // check for the management command and complete it right away
2495 if (scb->sno >= MBOX_MAX_SCSI_CMDS) {
2496 scb->state = SCB_FREE;
2497 scb->status = status;
2498
2499 // remove from local clist
2500 list_del_init(&scb->list);
2501
2502 megaraid_mbox_mm_done(adapter, scb);
2503
2504 continue;
2505 }
2506
2507 // Was an abort issued for this command earlier
2508 if (scb->state & SCB_ABORT) {
2509 con_log(CL_ANN, (KERN_NOTICE
2510 "megaraid: aborted cmd %lx[%x] completed\n",
2511 scp->serial_number, scb->sno));
2512 }
2513
2514 /*
2515		 * If the inquiry came from a disk drive which is not part of
2516 * any RAID array, expose it to the kernel. For this to be
2517 * enabled, user must set the "megaraid_expose_unconf_disks"
2518 * flag to 1 by specifying it on module parameter list.
2519 * This would enable data migration off drives from other
2520 * configurations.
2521 */
2522 islogical = MRAID_IS_LOGICAL(adapter, scp);
2523 if (scp->cmnd[0] == INQUIRY && status == 0 && islogical == 0
2524 && IS_RAID_CH(raid_dev, scb->dev_channel)) {
2525
2526 if (scp->use_sg) {
2527 sgl = (struct scatterlist *)
2528 scp->request_buffer;
2529
2530 if (sgl->page) {
2531 c = *(unsigned char *)
2532 (page_address((&sgl[0])->page) +
2533 (&sgl[0])->offset);
2534 }
2535 else {
2536 con_log(CL_ANN, (KERN_WARNING
2537 "megaraid mailbox: invalid sg:%d\n",
2538 __LINE__));
2539 c = 0;
2540 }
2541 }
2542 else {
2543 c = *(uint8_t *)scp->request_buffer;
2544 }
2545
2546 if ((c & 0x1F ) == TYPE_DISK) {
2547 pdev_index = (scb->dev_channel * 16) +
2548 scb->dev_target;
2549 pdev_state =
2550 raid_dev->pdrv_state[pdev_index] & 0x0F;
2551
2552 if (pdev_state == PDRV_ONLINE ||
2553 pdev_state == PDRV_FAILED ||
2554 pdev_state == PDRV_RBLD ||
2555 pdev_state == PDRV_HOTSPARE ||
2556 megaraid_expose_unconf_disks == 0) {
2557
2558 status = 0xF0;
2559 }
2560 }
2561 }
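		/*
		 * The 0xF0 value possibly set above is a sentinel: it matches
		 * none of the cases in the switch below, so the command falls
		 * through to the default branch and completes with
		 * DID_BAD_TARGET, hiding the drive from the kernel.
		 */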
2562
2563 // Convert MegaRAID status to Linux error code
2564 switch (status) {
2565
2566 case 0x00:
2567
2568 scp->result = (DID_OK << 16);
2569 break;
2570
2571 case 0x02:
2572
2573 /* set sense_buffer and result fields */
2574 if (mbox->cmd == MBOXCMD_PASSTHRU ||
2575 mbox->cmd == MBOXCMD_PASSTHRU64) {
2576
2577 memcpy(scp->sense_buffer, pthru->reqsensearea,
2578 14);
2579
2580 scp->result = DRIVER_SENSE << 24 |
2581 DID_OK << 16 | CHECK_CONDITION << 1;
2582 }
2583 else {
2584 if (mbox->cmd == MBOXCMD_EXTPTHRU) {
2585
2586 memcpy(scp->sense_buffer,
2587 epthru->reqsensearea, 14);
2588
2589 scp->result = DRIVER_SENSE << 24 |
2590 DID_OK << 16 |
2591 CHECK_CONDITION << 1;
2592 } else {
2593 scp->sense_buffer[0] = 0x70;
2594 scp->sense_buffer[2] = ABORTED_COMMAND;
2595 scp->result = CHECK_CONDITION << 1;
2596 }
2597 }
2598 break;
2599
2600 case 0x08:
2601
2602 scp->result = DID_BUS_BUSY << 16 | status;
2603 break;
2604
2605 default:
2606
2607 /*
2608 * If TEST_UNIT_READY fails, we know RESERVATION_STATUS
2609 * failed
2610 */
2611 if (scp->cmnd[0] == TEST_UNIT_READY) {
2612 scp->result = DID_ERROR << 16 |
2613 RESERVATION_CONFLICT << 1;
2614 }
2615 else
2616 /*
2617 * Error code returned is 1 if Reserve or Release
2618 * failed or the input parameter is invalid
2619 */
2620 if (status == 1 && (scp->cmnd[0] == RESERVE ||
2621 scp->cmnd[0] == RELEASE)) {
2622
2623 scp->result = DID_ERROR << 16 |
2624 RESERVATION_CONFLICT << 1;
2625 }
2626 else {
2627 scp->result = DID_BAD_TARGET << 16 | status;
2628 }
2629 }
2630
2631 // print a debug message for all failed commands
2632 if (status) {
2633 megaraid_mbox_display_scb(adapter, scb);
2634 }
2635
2636 // Free our internal resources and call the mid-layer callback
2637 // routine
2638 megaraid_mbox_sync_scb(adapter, scb);
2639
2640 // remove from local clist
2641 list_del_init(&scb->list);
2642
2643 // put back in free list
2644 megaraid_dealloc_scb(adapter, scb);
2645
2646 // send the scsi packet back to kernel
2647 spin_lock(adapter->host_lock);
2648 scp->scsi_done(scp);
2649 spin_unlock(adapter->host_lock);
2650 }
2651
2652 return;
2653}
2654
2655
2656/**
2657 * megaraid_abort_handler - abort the scsi command
2658 * @scp : command to be aborted
2659 *
2660 * Abort a previous SCSI request. Only commands on the pending list can be
2661 * aborted. All the commands issued to the F/W must complete.
2662 **/
2663static int
2664megaraid_abort_handler(struct scsi_cmnd *scp)
2665{
2666 adapter_t *adapter;
2667 mraid_device_t *raid_dev;
2668 scb_t *scb;
2669 scb_t *tmp;
2670 int found;
2671 unsigned long flags;
2672 int i;
2673
2674
2675 adapter = SCP2ADAPTER(scp);
2676 raid_dev = ADAP2RAIDDEV(adapter);
2677
2678 assert_spin_locked(adapter->host_lock);
2679
2680 con_log(CL_ANN, (KERN_WARNING
2681 "megaraid: aborting-%ld cmd=%x <c=%d t=%d l=%d>\n",
2682 scp->serial_number, scp->cmnd[0], SCP2CHANNEL(scp),
2683 SCP2TARGET(scp), SCP2LUN(scp)));
2684
2685 // If FW has stopped responding, simply return failure
2686 if (raid_dev->hw_error) {
2687 con_log(CL_ANN, (KERN_NOTICE
2688 "megaraid: hw error, not aborting\n"));
2689 return FAILED;
2690 }
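	/*
	 * The command can be in one of three places: on the completed list
	 * (complete it with DID_ABORT before the DPC gets to it), on the
	 * pending list (never issued to the firmware, safe to abort), or
	 * owned by the firmware (cannot be aborted here; return FAILED and
	 * let the reset handler deal with it). The searches below cover
	 * these cases in that order.
	 */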
2691
2692	// There might be a race here, where the command was completed by the
2693 // firmware and now it is on the completed list. Before we could
2694 // complete the command to the kernel in dpc, the abort came.
2695 // Find out if this is the case to avoid the race.
2696 scb = NULL;
2697 spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
2698 list_for_each_entry_safe(scb, tmp, &adapter->completed_list, list) {
2699
2700 if (scb->scp == scp) { // Found command
2701
2702 list_del_init(&scb->list); // from completed list
2703
2704 con_log(CL_ANN, (KERN_WARNING
2705 "megaraid: %ld:%d[%d:%d], abort from completed list\n",
2706 scp->serial_number, scb->sno,
2707 scb->dev_channel, scb->dev_target));
2708
2709 scp->result = (DID_ABORT << 16);
2710 scp->scsi_done(scp);
2711
2712 megaraid_dealloc_scb(adapter, scb);
2713
2714 spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter),
2715 flags);
2716
2717 return SUCCESS;
2718 }
2719 }
2720 spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);
2721
2722
2723 // Find out if this command is still on the pending list. If it is and
2724 // was never issued, abort and return success. If the command is owned
2725	// by the firmware, we must wait for the FW to complete it.
2726 spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
2727 list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
2728
2729 if (scb->scp == scp) { // Found command
2730
2731 list_del_init(&scb->list); // from pending list
2732
2733 ASSERT(!(scb->state & SCB_ISSUED));
2734
2735 con_log(CL_ANN, (KERN_WARNING
2736 "megaraid abort: %ld[%d:%d], driver owner\n",
2737 scp->serial_number, scb->dev_channel,
2738 scb->dev_target));
2739
2740 scp->result = (DID_ABORT << 16);
2741 scp->scsi_done(scp);
2742
2743 megaraid_dealloc_scb(adapter, scb);
2744
2745 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter),
2746 flags);
2747
2748 return SUCCESS;
2749 }
2750 }
2751 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
2752
2753
2754	// Check whether we even own this command; if we do, but it was not
2755	// found on the lists above, it is owned by the firmware. The only way
2756	// to locate such an SCB is to walk the entire kernel SCB array, since
2757	// the driver does not keep firmware-owned SCBs on any list.
2758 found = 0;
2759 for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
2760 scb = adapter->kscb_list + i;
2761
2762 if (scb->scp == scp) {
2763
2764 found = 1;
2765
2766 if (!(scb->state & SCB_ISSUED)) {
2767 con_log(CL_ANN, (KERN_WARNING
2768				"megaraid abort: %ld:%d[%d:%d], invalid state\n",
2769 scp->serial_number, scb->sno, scb->dev_channel,
2770 scb->dev_target));
2771 BUG();
2772 }
2773 else {
2774 con_log(CL_ANN, (KERN_WARNING
2775 "megaraid abort: %ld:%d[%d:%d], fw owner\n",
2776 scp->serial_number, scb->sno, scb->dev_channel,
2777 scb->dev_target));
2778 }
2779 }
2780 }
2781
2782 if (!found) {
2783 con_log(CL_ANN, (KERN_WARNING
2784			"megaraid abort: scsi cmd:%ld, do not own\n",
2785 scp->serial_number));
2786
2787 // FIXME: Should there be a callback for this command?
2788 return SUCCESS;
2789 }
2790
2791 // We cannot actually abort a command owned by firmware, return
2792 // failure and wait for reset. In host reset handler, we will find out
2793 // if the HBA is still live
2794 return FAILED;
2795}
2796
2797
2798/**
2799 * megaraid_reset_handler - device reset handler for mailbox based driver
2800 * @scp : reference command
2801 *
2802 * Reset handler for the mailbox based controller. First try to find out if
2803 * the FW is still live, in which case the outstanding commands counter must go
2804 * down to 0. If that happens, also issue the reservation reset command to
2805 * relinquish (possible) reservations on the logical drives connected to this
2806 * host
2807 **/
2808static int
2809megaraid_reset_handler(struct scsi_cmnd *scp)
2810{
2811 adapter_t *adapter;
2812 scb_t *scb;
2813 scb_t *tmp;
2814 mraid_device_t *raid_dev;
2815 unsigned long flags;
2816 uint8_t raw_mbox[sizeof(mbox_t)];
2817 int rval;
2818 int recovery_window;
2819 int recovering;
2820 int i;
2821
2822 adapter = SCP2ADAPTER(scp);
2823 raid_dev = ADAP2RAIDDEV(adapter);
2824
2825 assert_spin_locked(adapter->host_lock);
2826
2827	con_log(CL_ANN, (KERN_WARNING "megaraid: resetting the host...\n"));
2828
2829 // return failure if adapter is not responding
2830 if (raid_dev->hw_error) {
2831 con_log(CL_ANN, (KERN_NOTICE
2832 "megaraid: hw error, cannot reset\n"));
2833 return FAILED;
2834 }
2835
2836
2837 // Under exceptional conditions, FW can take up to 3 minutes to
2838 // complete command processing. Wait for additional 2 minutes for the
2839 // pending commands counter to go down to 0. If it doesn't, let the
2840 // controller be marked offline
2841 // Also, reset all the commands currently owned by the driver
2842 spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
2843 list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
2844
2845 list_del_init(&scb->list); // from pending list
2846
2847 con_log(CL_ANN, (KERN_WARNING
2848 "megaraid: %ld:%d[%d:%d], reset from pending list\n",
2849 scp->serial_number, scb->sno,
2850 scb->dev_channel, scb->dev_target));
2851
2852 scp->result = (DID_RESET << 16);
2853 scp->scsi_done(scp);
2854
2855 megaraid_dealloc_scb(adapter, scb);
2856 }
2857 spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
2858
2859 if (adapter->outstanding_cmds) {
2860 con_log(CL_ANN, (KERN_NOTICE
2861 "megaraid: %d outstanding commands. Max wait %d sec\n",
2862 adapter->outstanding_cmds, MBOX_RESET_WAIT));
2863 }
2864
2865 spin_unlock(adapter->host_lock);
2866
2867 recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
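	/*
	 * With the values defined in megaraid_mbox.h (180s + 120s) this
	 * allows up to 300 one-second iterations, although the loop bails
	 * out at MBOX_RESET_WAIT if the outstanding command count has not
	 * dropped at all by then.
	 */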
2868
2869 recovering = adapter->outstanding_cmds;
2870
2871 for (i = 0; i < recovery_window && adapter->outstanding_cmds; i++) {
2872
2873 megaraid_ack_sequence(adapter);
2874
2875 // print a message once every 5 seconds only
2876 if (!(i % 5)) {
2877 con_log(CL_ANN, (
2878 "megaraid mbox: Wait for %d commands to complete:%d\n",
2879 adapter->outstanding_cmds,
2880 MBOX_RESET_WAIT - i));
2881 }
2882
2883		// bail out if no recovery happened in reset time
2884 if ((i == MBOX_RESET_WAIT) &&
2885 (recovering == adapter->outstanding_cmds)) {
2886 break;
2887 }
2888
2889 msleep(1000);
2890 }
2891
2892 spin_lock(adapter->host_lock);
2893
2894 // If still outstanding commands, bail out
2895 if (adapter->outstanding_cmds) {
2896 con_log(CL_ANN, (KERN_WARNING
2897 "megaraid mbox: critical hardware error!\n"));
2898
2899 raid_dev->hw_error = 1;
2900
2901 return FAILED;
2902 }
2903 else {
2904 con_log(CL_ANN, (KERN_NOTICE
2905			"megaraid mbox: reset sequence completed successfully\n"));
2906 }
2907
2908
2909 // If the controller supports clustering, reset reservations
2910 if (!adapter->ha) return SUCCESS;
2911
2912 // clear reservations if any
2913 raw_mbox[0] = CLUSTER_CMD;
2914 raw_mbox[2] = RESET_RESERVATIONS;
2915
2916 rval = SUCCESS;
2917 if (mbox_post_sync_cmd_fast(adapter, raw_mbox) == 0) {
2918 con_log(CL_ANN,
2919 (KERN_INFO "megaraid: reservation reset\n"));
2920 }
2921 else {
2922 rval = FAILED;
2923 con_log(CL_ANN, (KERN_WARNING
2924 "megaraid: reservation reset failed\n"));
2925 }
2926
2927 return rval;
2928}
2929
2930
2931/*
2932 * START: internal commands library
2933 *
2934 * This section of the driver has the common routine used by the driver and
2935 * also has all the FW routines
2936 */
2937
2938/**
2939 * mbox_post_sync_cmd() - blocking command to the mailbox based controllers
2940 * @adapter - controller's soft state
2941 * @raw_mbox - the mailbox
2942 *
2943 * Issue a scb in synchronous and non-interrupt mode for mailbox based
2944 * controllers
2945 */
2946static int
2947mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[])
2948{
2949 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
2950 mbox64_t *mbox64;
2951 mbox_t *mbox;
2952 uint8_t status;
2953 int i;
2954
2955
2956 mbox64 = raid_dev->mbox64;
2957 mbox = raid_dev->mbox;
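	/*
	 * Polled handshake, roughly: wait for the mailbox to go idle, copy
	 * the caller's 16-byte mailbox in and mark it busy, write
	 * mbox_dma | 0x1 to the inbound doorbell to post the command, poll
	 * numstatus (allowing extra time if the firmware is still booting)
	 * and the poll byte (0x77), write mbox_dma | 0x2 to acknowledge,
	 * wait for the firmware to clear that bit and return mbox->status.
	 */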
2958
2959 /*
2960 * Wait until mailbox is free
2961 */
2962 if (megaraid_busywait_mbox(raid_dev) != 0)
2963 goto blocked_mailbox;
2964
2965 /*
2966 * Copy mailbox data into host structure
2967 */
2968 memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16);
2969 mbox->cmdid = 0xFE;
2970 mbox->busy = 1;
2971 mbox->poll = 0;
2972 mbox->ack = 0;
2973 mbox->numstatus = 0xFF;
2974 mbox->status = 0xFF;
2975
2976 wmb();
2977 WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
2978
2979 // wait for maximum 1 second for status to post. If the status is not
2980 // available within 1 second, assume FW is initializing and wait
2981 // for an extended amount of time
2982 if (mbox->numstatus == 0xFF) { // status not yet available
2983		udelay(25);
2984
2985 for (i = 0; mbox->numstatus == 0xFF && i < 1000; i++) {
2986 rmb();
2987 msleep(1);
2988 }
2989
2990
2991 if (i == 1000) {
2992 con_log(CL_ANN, (KERN_NOTICE
2993 "megaraid mailbox: wait for FW to boot "));
2994
2995 for (i = 0; (mbox->numstatus == 0xFF) &&
2996 (i < MBOX_RESET_WAIT); i++) {
2997 rmb();
2998 con_log(CL_ANN, ("\b\b\b\b\b[%03d]",
2999 MBOX_RESET_WAIT - i));
3000 msleep(1000);
3001 }
3002
3003 if (i == MBOX_RESET_WAIT) {
3004
3005 con_log(CL_ANN, (
3006 "\nmegaraid mailbox: status not available\n"));
3007
3008 return -1;
3009 }
3010 con_log(CL_ANN, ("\b\b\b\b\b[ok] \n"));
3011 }
3012 }
3013
3014 // wait for maximum 1 second for poll semaphore
3015 if (mbox->poll != 0x77) {
3016 udelay(25);
3017
3018 for (i = 0; (mbox->poll != 0x77) && (i < 1000); i++) {
3019 rmb();
3020 msleep(1);
3021 }
3022
3023 if (i == 1000) {
3024 con_log(CL_ANN, (KERN_WARNING
3025 "megaraid mailbox: could not get poll semaphore\n"));
3026 return -1;
3027 }
3028 }
3029
3030 WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2);
3031 wmb();
3032
3033 // wait for maximum 1 second for acknowledgement
3034 if (RDINDOOR(raid_dev) & 0x2) {
3035 udelay(25);
3036
3037 for (i = 0; (RDINDOOR(raid_dev) & 0x2) && (i < 1000); i++) {
3038 rmb();
3039 msleep(1);
3040 }
3041
3042 if (i == 1000) {
3043 con_log(CL_ANN, (KERN_WARNING
3044 "megaraid mailbox: could not acknowledge\n"));
3045 return -1;
3046 }
3047 }
3048 mbox->poll = 0;
3049 mbox->ack = 0x77;
3050
3051 status = mbox->status;
3052
3053 // invalidate the completed command id array. After command
3054 // completion, firmware would write the valid id.
3055 mbox->numstatus = 0xFF;
3056 mbox->status = 0xFF;
3057 for (i = 0; i < MBOX_MAX_FIRMWARE_STATUS; i++) {
3058 mbox->completed[i] = 0xFF;
3059 }
3060
3061 return status;
3062
3063blocked_mailbox:
3064
3065 con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n") );
3066 return -1;
3067}
3068
3069
3070/**
3071 * mbox_post_sync_cmd_fast - blocking command to the mailbox based controllers
3072 * @adapter - controller's soft state
3073 * @raw_mbox - the mailbox
3074 *
3075 * Issue a scb in synchronous and non-interrupt mode for mailbox based
3076 * controllers. This is a faster version of the synchronous command and
3077 * therefore can be called in interrupt-context as well
3078 */
3079static int
3080mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[])
3081{
3082 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
3083 mbox_t *mbox;
3084 long i;
3085
3086
3087 mbox = raid_dev->mbox;
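	/*
	 * Unlike mbox_post_sync_cmd() above, this variant never sleeps; it
	 * only spins a bounded number of times on numstatus. That is what
	 * makes it callable with spinlocks held, as the reset handler does
	 * when clearing reservations.
	 */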
3088
3089 // return immediately if the mailbox is busy
3090 if (mbox->busy) return -1;
3091
3092 // Copy mailbox data into host structure
3093 memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 14);
3094 mbox->cmdid = 0xFE;
3095 mbox->busy = 1;
3096 mbox->poll = 0;
3097 mbox->ack = 0;
3098 mbox->numstatus = 0xFF;
3099 mbox->status = 0xFF;
3100
3101 wmb();
3102 WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
3103
3104 for (i = 0; i < 0xFFFFF; i++) {
3105 if (mbox->numstatus != 0xFF) break;
3106 }
3107
3108 if (i == 0xFFFFF) {
3109 // We may need to re-calibrate the counter
3110 con_log(CL_ANN, (KERN_CRIT
3111 "megaraid: fast sync command timed out\n"));
3112 }
3113
3114 WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2);
3115 wmb();
3116
3117 return mbox->status;
3118}
3119
3120
3121/**
3122 * megaraid_busywait_mbox() - Wait until the controller's mailbox is available
3123 * @raid_dev - RAID device (HBA) soft state
3124 *
3125 * wait until the controller's mailbox is available to accept more commands.
3126 * wait for at most 1 second
3127 */
3128static int
3129megaraid_busywait_mbox(mraid_device_t *raid_dev)
3130{
3131 mbox_t *mbox = raid_dev->mbox;
3132 int i = 0;
3133
3134 if (mbox->busy) {
3135 udelay(25);
3136 for (i = 0; mbox->busy && i < 1000; i++)
3137 msleep(1);
3138 }
3139
3140 if (i < 1000) return 0;
3141 else return -1;
3142}
3143
3144
3145/**
3146 * megaraid_mbox_product_info - some static information about the controller
3147 * @adapter - our soft state
3148 *
3149 * issue commands to the controller to grab some parameters required by our
3150 * caller.
3151 */
3152static int
3153megaraid_mbox_product_info(adapter_t *adapter)
3154{
3155 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
3156 mbox_t *mbox;
3157 uint8_t raw_mbox[sizeof(mbox_t)];
3158 mraid_pinfo_t *pinfo;
3159 dma_addr_t pinfo_dma_h;
3160 mraid_inquiry3_t *mraid_inq3;
3161 int i;
3162
3163
3164 memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
3165 mbox = (mbox_t *)raw_mbox;
3166
3167 /*
3168 * Issue an ENQUIRY3 command to find out certain adapter parameters,
3169 * e.g., max channels, max commands etc.
3170 */
3171 pinfo = pci_alloc_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
3172 &pinfo_dma_h);
3173
3174 if (pinfo == NULL) {
3175 con_log(CL_ANN, (KERN_WARNING
3176 "megaraid: out of memory, %s %d\n", __FUNCTION__,
3177 __LINE__));
3178
3179 return -1;
3180 }
3181 memset(pinfo, 0, sizeof(mraid_pinfo_t));
3182
3183 mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
3184 memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
3185
3186 raw_mbox[0] = FC_NEW_CONFIG;
3187 raw_mbox[2] = NC_SUBOP_ENQUIRY3;
3188 raw_mbox[3] = ENQ3_GET_SOLICITED_FULL;
3189
3190 // Issue the command
3191 if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
3192
3193 con_log(CL_ANN, (KERN_WARNING "megaraid: Inquiry3 failed\n"));
3194
3195 pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
3196 pinfo, pinfo_dma_h);
3197
3198 return -1;
3199 }
3200
3201 /*
3202 * Collect information about state of each physical drive
3203 * attached to the controller. We will expose all the disks
3204 * which are not part of RAID
3205 */
3206 mraid_inq3 = (mraid_inquiry3_t *)adapter->ibuf;
3207 for (i = 0; i < MBOX_MAX_PHYSICAL_DRIVES; i++) {
3208 raid_dev->pdrv_state[i] = mraid_inq3->pdrv_state[i];
3209 }
3210
3211 /*
3212 * Get product info for information like number of channels,
3213 * maximum commands supported.
3214 */
3215 memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
3216 mbox->xferaddr = (uint32_t)pinfo_dma_h;
3217
3218 raw_mbox[0] = FC_NEW_CONFIG;
3219 raw_mbox[2] = NC_SUBOP_PRODUCT_INFO;
3220
3221 if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
3222
3223 con_log(CL_ANN, (KERN_WARNING
3224 "megaraid: product info failed\n"));
3225
3226 pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
3227 pinfo, pinfo_dma_h);
3228
3229 return -1;
3230 }
3231
3232 /*
3233 * Setup some parameters for host, as required by our caller
3234 */
3235 adapter->max_channel = pinfo->nchannels;
3236
3237 /*
3238 * we will export all the logical drives on a single channel.
3239	 * Add 1 since inquiries do not come for the initiator ID
3240 */
3241 adapter->max_target = MAX_LOGICAL_DRIVES_40LD + 1;
3242 adapter->max_lun = 8; // up to 8 LUNs for non-disk devices
3243
3244 /*
3245 * These are the maximum outstanding commands for the scsi-layer
3246 */
3247 adapter->max_cmds = MBOX_MAX_SCSI_CMDS;
3248
3249 memset(adapter->fw_version, 0, VERSION_SIZE);
3250 memset(adapter->bios_version, 0, VERSION_SIZE);
3251
3252 memcpy(adapter->fw_version, pinfo->fw_version, 4);
3253 adapter->fw_version[4] = 0;
3254
3255 memcpy(adapter->bios_version, pinfo->bios_version, 4);
3256 adapter->bios_version[4] = 0;
3257
3258 con_log(CL_ANN, (KERN_NOTICE
3259 "megaraid: fw version:[%s] bios version:[%s]\n",
3260 adapter->fw_version, adapter->bios_version));
3261
3262 pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t), pinfo,
3263 pinfo_dma_h);
3264
3265 return 0;
3266}
3267
3268
3269
3270/**
3271 * megaraid_mbox_extended_cdb - check for support for extended CDBs
3272 * @adapter - soft state for the controller
3273 *
3274 * This routine checks whether the controller in question supports extended
3275 * ( > 10 bytes ) CDBs
3276 */
3277static int
3278megaraid_mbox_extended_cdb(adapter_t *adapter)
3279{
3280 mbox_t *mbox;
3281 uint8_t raw_mbox[sizeof(mbox_t)];
3282 int rval;
3283
3284 mbox = (mbox_t *)raw_mbox;
3285
3286 memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
3287 mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
3288
3289 memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
3290
3291 raw_mbox[0] = MAIN_MISC_OPCODE;
3292 raw_mbox[2] = SUPPORT_EXT_CDB;
3293
3294 /*
3295 * Issue the command
3296 */
3297 rval = 0;
3298 if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
3299 rval = -1;
3300 }
3301
3302 return rval;
3303}
3304
3305
3306/**
3307 * megaraid_mbox_support_ha - Do we support clustering
3308 * @adapter - soft state for the controller
3309 * @init_id - ID of the initiator
3310 *
3311 * Determine if the firmware supports clustering and the ID of the initiator.
3312 */
3313static int
3314megaraid_mbox_support_ha(adapter_t *adapter, uint16_t *init_id)
3315{
3316 mbox_t *mbox;
3317 uint8_t raw_mbox[sizeof(mbox_t)];
3318 int rval;
3319
3320
3321 mbox = (mbox_t *)raw_mbox;
3322
3323 memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
3324
3325 mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
3326
3327 memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
3328
3329 raw_mbox[0] = GET_TARGET_ID;
3330
3331 // Issue the command
3332 *init_id = 7;
3333 rval = -1;
3334 if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
3335
3336 *init_id = *(uint8_t *)adapter->ibuf;
3337
3338 con_log(CL_ANN, (KERN_INFO
3339 "megaraid: cluster firmware, initiator ID: %d\n",
3340 *init_id));
3341
3342 rval = 0;
3343 }
3344
3345 return rval;
3346}
3347
3348
3349/**
3350 * megaraid_mbox_support_random_del - Do we support random deletion
3351 * @adapter - soft state for the controller
3352 *
3353 * Determine if the firmware supports random deletion
3354 * Return: 1 if the operation is supported, 0 otherwise
3355 */
3356static int
3357megaraid_mbox_support_random_del(adapter_t *adapter)
3358{
3359 mbox_t *mbox;
3360 uint8_t raw_mbox[sizeof(mbox_t)];
3361 int rval;
3362
3363
3364 mbox = (mbox_t *)raw_mbox;
3365
3366 memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3367
3368 raw_mbox[0] = FC_DEL_LOGDRV;
3369 raw_mbox[2] = OP_SUP_DEL_LOGDRV;
3370
3371 // Issue the command
3372 rval = 0;
3373 if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
3374
3375 con_log(CL_DLEVEL1, ("megaraid: supports random deletion\n"));
3376
3377 rval = 1;
3378 }
3379
3380 return rval;
3381}
3382
3383
3384/**
3385 * megaraid_mbox_get_max_sg - maximum sg elements supported by the firmware
3386 * @adapter - soft state for the controller
3387 *
3388 * Find out the maximum number of scatter-gather elements supported by the
3389 * firmware
3390 */
3391static int
3392megaraid_mbox_get_max_sg(adapter_t *adapter)
3393{
3394 mbox_t *mbox;
3395 uint8_t raw_mbox[sizeof(mbox_t)];
3396 int nsg;
3397
3398
3399 mbox = (mbox_t *)raw_mbox;
3400
3401 memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3402
3403 mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
3404
3405 memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
3406
3407 raw_mbox[0] = MAIN_MISC_OPCODE;
3408 raw_mbox[2] = GET_MAX_SG_SUPPORT;
3409
3410 // Issue the command
3411 if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
3412 nsg = *(uint8_t *)adapter->ibuf;
3413 }
3414 else {
3415 nsg = MBOX_DEFAULT_SG_SIZE;
3416 }
3417
3418 if (nsg > MBOX_MAX_SG_SIZE) nsg = MBOX_MAX_SG_SIZE;
3419
3420 return nsg;
3421}
3422
3423
3424/**
3425 * megaraid_mbox_enum_raid_scsi - enumerate the RAID and SCSI channels
3426 * @adapter - soft state for the controller
3427 *
3428 * Enumerate the RAID and SCSI channels for ROMB platforms so that channels
3429 * can be exported as regular SCSI channels
3430 */
3431static void
3432megaraid_mbox_enum_raid_scsi(adapter_t *adapter)
3433{
3434 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
3435 mbox_t *mbox;
3436 uint8_t raw_mbox[sizeof(mbox_t)];
3437
3438
3439 mbox = (mbox_t *)raw_mbox;
3440
3441 memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3442
3443 mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
3444
3445 memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
3446
3447 raw_mbox[0] = CHNL_CLASS;
3448 raw_mbox[2] = GET_CHNL_CLASS;
3449
3450 // Issue the command. If the command fails, all channels are RAID
3451 // channels
3452 raid_dev->channel_class = 0xFF;
3453 if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
3454 raid_dev->channel_class = *(uint8_t *)adapter->ibuf;
3455 }
3456
3457 return;
3458}
3459
3460
3461/**
3462 * megaraid_mbox_flush_cache - flush adapter and disks cache
3463 * @param adapter : soft state for the controller
3464 *
3465 * Flush adapter cache followed by disks cache
3466 */
3467static void
3468megaraid_mbox_flush_cache(adapter_t *adapter)
3469{
3470 mbox_t *mbox;
3471 uint8_t raw_mbox[sizeof(mbox_t)];
3472
3473
3474 mbox = (mbox_t *)raw_mbox;
3475
3476 memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3477
3478 raw_mbox[0] = FLUSH_ADAPTER;
3479
3480 if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
3481 con_log(CL_ANN, ("megaraid: flush adapter failed\n"));
3482 }
3483
3484 raw_mbox[0] = FLUSH_SYSTEM;
3485
3486 if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
3487 con_log(CL_ANN, ("megaraid: flush disks cache failed\n"));
3488 }
3489
3490 return;
3491}
3492
3493
3494/**
3495 * megaraid_mbox_display_scb - display SCB information, mostly debug purposes
3496 * @param adapter : controllers' soft state
3497 * @param scb : SCB to be displayed
3498 * @param level : debug level for console print
3499 *
3500 * Display information about the given SCB iff the current debug level is
3501 * verbose
3502 */
3503static void
3504megaraid_mbox_display_scb(adapter_t *adapter, scb_t *scb)
3505{
3506 mbox_ccb_t *ccb;
3507 struct scsi_cmnd *scp;
3508 mbox_t *mbox;
3509 int level;
3510 int i;
3511
3512
3513 ccb = (mbox_ccb_t *)scb->ccb;
3514 scp = scb->scp;
3515 mbox = ccb->mbox;
3516
3517 level = CL_DLEVEL3;
3518
3519 con_log(level, (KERN_NOTICE
3520 "megaraid mailbox: status:%#x cmd:%#x id:%#x ", scb->status,
3521 mbox->cmd, scb->sno));
3522
3523 con_log(level, ("sec:%#x lba:%#x addr:%#x ld:%d sg:%d\n",
3524 mbox->numsectors, mbox->lba, mbox->xferaddr, mbox->logdrv,
3525 mbox->numsge));
3526
3527 if (!scp) return;
3528
3529 con_log(level, (KERN_NOTICE "scsi cmnd: "));
3530
3531 for (i = 0; i < scp->cmd_len; i++) {
3532 con_log(level, ("%#2.02x ", scp->cmnd[i]));
3533 }
3534
3535 con_log(level, ("\n"));
3536
3537 return;
3538}
3539
3540
3541/**
3542 * megaraid_mbox_setup_device_map - manage device ids
3543 * @adapter : Driver's soft state
3544 *
3545 * Manage the device ids to have an appropriate mapping between the kernel
3546 * scsi addresses and megaraid scsi and logical drive addresses. We export
3547 * scsi devices on their actual addresses, whereas the logical drives are
3548 * exported on a virtual scsi channel.
3549 **/
3550static void
3551megaraid_mbox_setup_device_map(adapter_t *adapter)
3552{
3553 uint8_t c;
3554 uint8_t t;
3555
3556 /*
3557 * First fill the values on the logical drive channel
3558 */
3559 for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++)
3560 adapter->device_ids[adapter->max_channel][t] =
3561 (t < adapter->init_id) ? t : t - 1;
3562
3563 adapter->device_ids[adapter->max_channel][adapter->init_id] = 0xFF;
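	/*
	 * Worked example, assuming init_id is 7: virtual channel targets 0-6
	 * map to logical drives 0-6, target 7 (the initiator) maps to
	 * nothing (0xFF), and targets 8 and up map to logical drives 7 and
	 * up.
	 */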
3564
3565 /*
3566 * Fill the values on the physical devices channels
3567 */
3568 for (c = 0; c < adapter->max_channel; c++)
3569 for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++)
3570 adapter->device_ids[c][t] = (c << 8) | t;
3571}
3572
3573
3574/*
3575 * END: internal commands library
3576 */
3577
3578/*
3579 * START: Interface for the common management module
3580 *
3581 * This is the module which interfaces with the common management module to
3582 * provide support for ioctl and sysfs
3583 */
3584
3585/**
3586 * megaraid_cmm_register - register with the management module
3587 * @param adapter : HBA soft state
3588 *
3589 * Register with the management module, which allows applications to issue
3590 * ioctl calls to the drivers. This interface is used by the management module
3591 * to setup sysfs support as well.
3592 */
3593static int
3594megaraid_cmm_register(adapter_t *adapter)
3595{
3596 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
3597 mraid_mmadp_t adp;
3598 scb_t *scb;
3599 mbox_ccb_t *ccb;
3600 int rval;
3601 int i;
3602
3603 // Allocate memory for the base list of scb for management module.
3604 adapter->uscb_list = kmalloc(sizeof(scb_t) * MBOX_MAX_USER_CMDS,
3605 GFP_KERNEL);
3606
3607 if (adapter->uscb_list == NULL) {
3608 con_log(CL_ANN, (KERN_WARNING
3609 "megaraid: out of memory, %s %d\n", __FUNCTION__,
3610 __LINE__));
3611 return -1;
3612 }
3613 memset(adapter->uscb_list, 0, sizeof(scb_t) * MBOX_MAX_USER_CMDS);
3614
3615
3616 // Initialize the synchronization parameters for resources for
3617 // commands for management module
3618 INIT_LIST_HEAD(&adapter->uscb_pool);
3619
3620 spin_lock_init(USER_FREE_LIST_LOCK(adapter));
3621
3622
3623
3624	// Link all the packets. Note: for commands coming from the common
3625	// management module, the mailbox physical addresses are already set
3626	// up by it. We just need a placeholder for them in our local command
3627	// control blocks.
3628 for (i = 0; i < MBOX_MAX_USER_CMDS; i++) {
3629
3630 scb = adapter->uscb_list + i;
3631 ccb = raid_dev->uccb_list + i;
3632
3633 scb->ccb = (caddr_t)ccb;
3634 ccb->mbox64 = raid_dev->umbox64 + i;
3635 ccb->mbox = &ccb->mbox64->mbox32;
3636 ccb->raw_mbox = (uint8_t *)ccb->mbox;
3637
3638 scb->gp = 0;
3639
3640 // COMMAND ID 0 - (MBOX_MAX_SCSI_CMDS-1) ARE RESERVED FOR
3641 // COMMANDS COMING FROM IO SUBSYSTEM (MID-LAYER)
3642 scb->sno = i + MBOX_MAX_SCSI_CMDS;
3643
3644 scb->scp = NULL;
3645 scb->state = SCB_FREE;
3646 scb->dma_direction = PCI_DMA_NONE;
3647 scb->dma_type = MRAID_DMA_NONE;
3648 scb->dev_channel = -1;
3649 scb->dev_target = -1;
3650
3651 // put scb in the free pool
3652 list_add_tail(&scb->list, &adapter->uscb_pool);
3653 }
3654
3655 adp.unique_id = adapter->unique_id;
3656 adp.drvr_type = DRVRTYPE_MBOX;
3657 adp.drvr_data = (unsigned long)adapter;
3658 adp.pdev = adapter->pdev;
3659 adp.issue_uioc = megaraid_mbox_mm_handler;
3660 adp.timeout = 300;
3661 adp.max_kioc = MBOX_MAX_USER_CMDS;
3662
3663 if ((rval = mraid_mm_register_adp(&adp)) != 0) {
3664
3665 con_log(CL_ANN, (KERN_WARNING
3666 "megaraid mbox: did not register with CMM\n"));
3667
3668 kfree(adapter->uscb_list);
3669 }
3670
3671 return rval;
3672}
3673
3674
3675/**
3676 * megaraid_cmm_unregister - un-register with the management module
3677 * @param adapter : HBA soft state
3678 *
3679 * Un-register with the management module.
3680 * FIXME: mgmt module must return failure for unregister if it has pending
3681 * commands in LLD
3682 */
3683static int
3684megaraid_cmm_unregister(adapter_t *adapter)
3685{
3686 kfree(adapter->uscb_list);
3687 mraid_mm_unregister_adp(adapter->unique_id);
3688 return 0;
3689}
3690
3691
3692/**
3693 * megaraid_mbox_mm_handler - interface for CMM to issue commands to LLD
3694 * @param drvr_data : LLD specific data
3695 * @param kioc : CMM interface packet
3696 * @param action : command action
3697 *
3698 * This routine is invoked whenever the Common Management Module (CMM) has a
3699 * command for us. The 'action' parameter specifies if this is a new command
3700 * or otherwise.
3701 */
3702static int
3703megaraid_mbox_mm_handler(unsigned long drvr_data, uioc_t *kioc, uint32_t action)
3704{
3705 adapter_t *adapter;
3706
3707 if (action != IOCTL_ISSUE) {
3708 con_log(CL_ANN, (KERN_WARNING
3709 "megaraid: unsupported management action:%#2x\n",
3710 action));
3711 return (-ENOTSUPP);
3712 }
3713
3714 adapter = (adapter_t *)drvr_data;
3715
3716 // make sure this adapter is not being detached right now.
3717 if (atomic_read(&adapter->being_detached)) {
3718 con_log(CL_ANN, (KERN_WARNING
3719 "megaraid: reject management request, detaching\n"));
3720 return (-ENODEV);
3721 }
3722
3723 switch (kioc->opcode) {
3724
3725 case GET_ADAP_INFO:
3726
3727 kioc->status = gather_hbainfo(adapter, (mraid_hba_info_t *)
3728 (unsigned long)kioc->buf_vaddr);
3729
3730 kioc->done(kioc);
3731
3732 return kioc->status;
3733
3734 case MBOX_CMD:
3735
3736 return megaraid_mbox_mm_command(adapter, kioc);
3737
3738 default:
3739 kioc->status = (-EINVAL);
3740 kioc->done(kioc);
3741 return (-EINVAL);
3742 }
3743
3744 return 0; // not reached
3745}
3746
3747/**
3748 * megaraid_mbox_mm_command - issues commands routed through CMM
3749 * @param adapter : HBA soft state
3750 * @param kioc : management command packet
3751 *
3752 * Issues commands, which are routed through the management module.
3753 */
3754static int
3755megaraid_mbox_mm_command(adapter_t *adapter, uioc_t *kioc)
3756{
3757 struct list_head *head = &adapter->uscb_pool;
3758 mbox64_t *mbox64;
3759 uint8_t *raw_mbox;
3760 scb_t *scb;
3761 mbox_ccb_t *ccb;
3762 unsigned long flags;
3763
3764 // detach one scb from free pool
3765 spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags);
3766
3767 if (list_empty(head)) { // should never happen because of CMM
3768
3769 con_log(CL_ANN, (KERN_WARNING
3770 "megaraid mbox: bug in cmm handler, lost resources\n"));
3771
3772 spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);
3773
3774 return (-EINVAL);
3775 }
3776
3777 scb = list_entry(head->next, scb_t, list);
3778 list_del_init(&scb->list);
3779
3780 spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);
3781
3782 scb->state = SCB_ACTIVE;
3783 scb->dma_type = MRAID_DMA_NONE;
3784 scb->dma_direction = PCI_DMA_NONE;
3785
3786 ccb = (mbox_ccb_t *)scb->ccb;
3787 mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
3788 raw_mbox = (uint8_t *)&mbox64->mbox32;
3789
3790 memcpy(ccb->mbox64, mbox64, sizeof(mbox64_t));
3791
3792 scb->gp = (unsigned long)kioc;
3793
3794 /*
3795 * If it is a logdrv random delete operation, we have to wait till
3796 * there are no outstanding cmds at the fw and then issue it directly
3797 */
3798 if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) {
3799
3800 if (wait_till_fw_empty(adapter)) {
3801 con_log(CL_ANN, (KERN_NOTICE
3802 "megaraid mbox: LD delete, timed out\n"));
3803
3804 kioc->status = -ETIME;
3805
3806 scb->status = -1;
3807
3808 megaraid_mbox_mm_done(adapter, scb);
3809
3810 return (-ETIME);
3811 }
3812
3813 INIT_LIST_HEAD(&scb->list);
3814
3815 scb->state = SCB_ISSUED;
3816 if (mbox_post_cmd(adapter, scb) != 0) {
3817
3818 con_log(CL_ANN, (KERN_NOTICE
3819 "megaraid mbox: LD delete, mailbox busy\n"));
3820
3821 kioc->status = -EBUSY;
3822
3823 scb->status = -1;
3824
3825 megaraid_mbox_mm_done(adapter, scb);
3826
3827 return (-EBUSY);
3828 }
3829
3830 return 0;
3831 }
3832
3833 // put the command on the pending list and execute
3834 megaraid_mbox_runpendq(adapter, scb);
3835
3836 return 0;
3837}
3838
3839
3840static int
3841wait_till_fw_empty(adapter_t *adapter)
3842{
3843 unsigned long flags = 0;
3844 int i;
3845
3846
3847 /*
3848 * Set the quiescent flag to stop issuing cmds to FW.
3849 */
3850 spin_lock_irqsave(adapter->host_lock, flags);
3851 adapter->quiescent++;
3852 spin_unlock_irqrestore(adapter->host_lock, flags);
3853
3854 /*
3855 * Wait till there are no more cmds outstanding at FW. Try for at most
3856 * 60 seconds
3857 */
3858 for (i = 0; i < 60 && adapter->outstanding_cmds; i++) {
3859 con_log(CL_DLEVEL1, (KERN_INFO
3860 "megaraid: FW has %d pending commands\n",
3861 adapter->outstanding_cmds));
3862
3863 msleep(1000);
3864 }
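	/*
	 * A non-zero return means commands are still outstanding after about
	 * a minute; the caller treats that as a timeout. Note that the
	 * quiescent flag raised above is dropped later, in
	 * megaraid_mbox_mm_done(), once the delete-logical-drive command
	 * itself completes.
	 */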
3865
3866 return adapter->outstanding_cmds;
3867}
3868
3869
3870/**
3871 * megaraid_mbox_mm_done - callback for CMM commands
3872 * @adapter : HBA soft state
3873 * @scb : completed command
3874 *
3875 * Callback routine for internal commands originated from the management
3876 * module.
3877 */
3878static void
3879megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb)
3880{
3881 uioc_t *kioc;
3882 mbox64_t *mbox64;
3883 uint8_t *raw_mbox;
3884 unsigned long flags;
3885
3886 kioc = (uioc_t *)scb->gp;
3887 kioc->status = 0;
3888 mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
3889 mbox64->mbox32.status = scb->status;
3890 raw_mbox = (uint8_t *)&mbox64->mbox32;
3891
3892
3893 // put scb in the free pool
3894 scb->state = SCB_FREE;
3895 scb->scp = NULL;
3896
3897 spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags);
3898
3899 list_add(&scb->list, &adapter->uscb_pool);
3900
3901 spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);
3902
3903	// if this was a delete logical drive operation, the FW was quiesced
3904	// earlier; resume issuing pending commands to the controller
3905 if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) {
3906
3907 adapter->quiescent--;
3908
3909 megaraid_mbox_runpendq(adapter, NULL);
3910 }
3911
3912 kioc->done(kioc);
3913
3914 return;
3915}
3916
3917
3918/**
3919 * gather_hbainfo - HBA characteristics for the applications
3920 * @param adapter : HBA soft state
3921 * @param hinfo : pointer to the caller's host info structure
3922 */
3923static int
3924gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo)
3925{
3926 uint8_t dmajor;
3927
3928 dmajor = megaraid_mbox_version[0];
3929
3930 hinfo->pci_vendor_id = adapter->pdev->vendor;
3931 hinfo->pci_device_id = adapter->pdev->device;
3932 hinfo->subsys_vendor_id = adapter->pdev->subsystem_vendor;
3933 hinfo->subsys_device_id = adapter->pdev->subsystem_device;
3934
3935 hinfo->pci_bus = adapter->pdev->bus->number;
3936 hinfo->pci_dev_fn = adapter->pdev->devfn;
3937 hinfo->pci_slot = PCI_SLOT(adapter->pdev->devfn);
3938 hinfo->irq = adapter->host->irq;
3939 hinfo->baseport = ADAP2RAIDDEV(adapter)->baseport;
3940
3941 hinfo->unique_id = (hinfo->pci_bus << 8) | adapter->pdev->devfn;
3942 hinfo->host_no = adapter->host->host_no;
3943
3944 return 0;
3945}
3946
3947/*
3948 * END: Interface for the common management module
3949 */
3950
3951
3952
3953/**
3954 * megaraid_sysfs_alloc_resources - allocate sysfs related resources
3955 *
3956 * Allocate packets required to issue FW calls whenever the sysfs attributes
3957 * are read. These attributes would require up-to-date information from the
3958 * FW. Also set up resources for mutual exclusion to share these resources and
3959 * the wait queue.
3960 *
3961 * @param adapter : controller's soft state
3962 *
3963 * @return 0 on success
3964 * @return -ERROR_CODE on failure
3965 */
3966static int
3967megaraid_sysfs_alloc_resources(adapter_t *adapter)
3968{
3969 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
3970 int rval = 0;
3971
3972 raid_dev->sysfs_uioc = kmalloc(sizeof(uioc_t), GFP_KERNEL);
3973
3974 raid_dev->sysfs_mbox64 = kmalloc(sizeof(mbox64_t), GFP_KERNEL);
3975
3976 raid_dev->sysfs_buffer = pci_alloc_consistent(adapter->pdev,
3977 PAGE_SIZE, &raid_dev->sysfs_buffer_dma);
3978
3979 if (!raid_dev->sysfs_uioc || !raid_dev->sysfs_mbox64 ||
3980 !raid_dev->sysfs_buffer) {
3981
3982 con_log(CL_ANN, (KERN_WARNING
3983 "megaraid: out of memory, %s %d\n", __FUNCTION__,
3984 __LINE__));
3985
3986 rval = -ENOMEM;
3987
3988 megaraid_sysfs_free_resources(adapter);
3989 }
3990
3991 sema_init(&raid_dev->sysfs_sem, 1);
3992
3993 init_waitqueue_head(&raid_dev->sysfs_wait_q);
3994
3995 return rval;
3996}
3997
3998
3999/**
4000 * megaraid_sysfs_free_resources - free sysfs related resources
4001 *
4002 * Free packets allocated for sysfs FW commands
4003 *
4004 * @param adapter : controller's soft state
4005 */
4006static void
4007megaraid_sysfs_free_resources(adapter_t *adapter)
4008{
4009 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
4010
4011 if (raid_dev->sysfs_uioc) kfree(raid_dev->sysfs_uioc);
4012
4013 if (raid_dev->sysfs_mbox64) kfree(raid_dev->sysfs_mbox64);
4014
4015 if (raid_dev->sysfs_buffer) {
4016 pci_free_consistent(adapter->pdev, PAGE_SIZE,
4017 raid_dev->sysfs_buffer, raid_dev->sysfs_buffer_dma);
4018 }
4019}
4020
4021
4022/**
4023 * megaraid_sysfs_get_ldmap_done - callback for get ldmap
4024 *
4025 * Callback routine called in the ISR/tasklet context for get ldmap call
4026 *
4027 * @param uioc : completed packet
4028 */
4029static void
4030megaraid_sysfs_get_ldmap_done(uioc_t *uioc)
4031{
4032 adapter_t *adapter = (adapter_t *)uioc->buf_vaddr;
4033 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
4034
4035 uioc->status = 0;
4036
4037 wake_up(&raid_dev->sysfs_wait_q);
4038}
4039
4040
4041/**
4042 * megaraid_sysfs_get_ldmap_timeout - timeout handling for get ldmap
4043 *
4044 * Timeout routine to recover and return to the application, in case the
4045 * adapter has stopped responding. A timeout of 60 seconds for this command
4046 * seems like a good value.
4047 *
4048 * @param uioc : timed out packet
4049 */
4050static void
4051megaraid_sysfs_get_ldmap_timeout(unsigned long data)
4052{
4053 uioc_t *uioc = (uioc_t *)data;
4054 adapter_t *adapter = (adapter_t *)uioc->buf_vaddr;
4055 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
4056
4057 uioc->status = -ETIME;
4058
4059 wake_up(&raid_dev->sysfs_wait_q);
4060}
4061
4062
4063/**
4064 * megaraid_sysfs_get_ldmap - get update logical drive map
4065 *
4066 * This routine is called whenever the user reads the logical drive
4067 * attributes, to fetch the current logical drive mapping table from the
4068 * firmware. We use the management APIs to issue commands to the controller.
4069 *
4070 * NOTE: The command issuance functionality is not generalized and is
4071 * implemented only in the context of the "get ld map" command. If required,
4072 * the command issuance logic can be trivially pulled out and implemented as
4073 * a standalone library. For now, this should suffice since there is no other
4074 * user of this interface.
4075 *
4076 * @param adapter : controller's soft state
4077 *
4078 * @return 0 on success
4079 * @return -1 on failure
4080 */
4081static int
4082megaraid_sysfs_get_ldmap(adapter_t *adapter)
4083{
4084 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
4085 uioc_t *uioc;
4086 mbox64_t *mbox64;
4087 mbox_t *mbox;
4088 char *raw_mbox;
4089 struct timer_list sysfs_timer;
4090 struct timer_list *timerp;
4091 caddr_t ldmap;
4092 int rval = 0;
4093
4094 /*
4095 * Allow only one read at a time to go through the sysfs attributes
4096 */
4097 down(&raid_dev->sysfs_sem);
4098
4099 uioc = raid_dev->sysfs_uioc;
4100 mbox64 = raid_dev->sysfs_mbox64;
4101 ldmap = raid_dev->sysfs_buffer;
4102
4103 memset(uioc, 0, sizeof(uioc_t));
4104 memset(mbox64, 0, sizeof(mbox64_t));
4105 memset(ldmap, 0, sizeof(raid_dev->curr_ldmap));
4106
4107 mbox = &mbox64->mbox32;
4108 raw_mbox = (char *)mbox;
4109 uioc->cmdbuf = (uint64_t)(unsigned long)mbox64;
4110 uioc->buf_vaddr = (caddr_t)adapter;
4111 uioc->status = -ENODATA;
4112 uioc->done = megaraid_sysfs_get_ldmap_done;
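	/*
	 * The -ENODATA status set above doubles as a "still pending" marker:
	 * wait_event() below sleeps until either the done callback (status
	 * 0) or the 60-second timer handler (status -ETIME) overwrites it
	 * and wakes sysfs_wait_q.
	 */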
4113
4114 /*
4115 * Prepare the mailbox packet to get the current logical drive mapping
4116 * table
4117 */
4118 mbox->xferaddr = (uint32_t)raid_dev->sysfs_buffer_dma;
4119
4120 raw_mbox[0] = FC_DEL_LOGDRV;
4121 raw_mbox[2] = OP_GET_LDID_MAP;
4122
4123 /*
4124 * Setup a timer to recover from a non-responding controller
4125 */
4126 timerp = &sysfs_timer;
4127 init_timer(timerp);
4128
4129 timerp->function = megaraid_sysfs_get_ldmap_timeout;
4130 timerp->data = (unsigned long)uioc;
4131 timerp->expires = jiffies + 60 * HZ;
4132
4133 add_timer(timerp);
4134
4135 /*
4136 * Send the command to the firmware
4137 */
4138 rval = megaraid_mbox_mm_command(adapter, uioc);
4139
4140 if (rval == 0) { // command successfully issued
4141 wait_event(raid_dev->sysfs_wait_q, (uioc->status != -ENODATA));
4142
4143 /*
4144 * Check if the command timed out
4145 */
4146 if (uioc->status == -ETIME) {
4147 con_log(CL_ANN, (KERN_NOTICE
4148 "megaraid: sysfs get ld map timed out\n"));
4149
4150 rval = -ETIME;
4151 }
4152 else {
4153 rval = mbox->status;
4154 }
4155
4156 if (rval == 0) {
4157 memcpy(raid_dev->curr_ldmap, ldmap,
4158 sizeof(raid_dev->curr_ldmap));
4159 }
4160 else {
4161 con_log(CL_ANN, (KERN_NOTICE
4162 "megaraid: get ld map failed with %x\n", rval));
4163 }
4164 }
4165 else {
4166 con_log(CL_ANN, (KERN_NOTICE
4167 "megaraid: could not issue ldmap command:%x\n", rval));
4168 }
4169
4170
4171 del_timer_sync(timerp);
4172
4173 up(&raid_dev->sysfs_sem);
4174
4175 return rval;
4176}
4177
4178
4179/**
4180 * megaraid_sysfs_show_app_hndl - display application handle for this adapter
4181 *
4182 * Display the handle used by the applications while executing management
4183 * tasks on the adapter. We invoke a management module API to get the adapter
4184 * handle, since we do not interface with applications directly.
4185 *
4186 * @param cdev : class device object representation for the host
4187 * @param buf : buffer to send data to
4188 */
4189static ssize_t
4190megaraid_sysfs_show_app_hndl(struct class_device *cdev, char *buf)
4191{
4192 struct Scsi_Host *shost = class_to_shost(cdev);
4193 adapter_t *adapter = (adapter_t *)SCSIHOST2ADAP(shost);
4194 uint32_t app_hndl;
4195
4196 app_hndl = mraid_mm_adapter_app_handle(adapter->unique_id);
4197
4198 return snprintf(buf, 8, "%u\n", app_hndl);
4199}
4200
4201
4202/**
4203 * megaraid_sysfs_show_ldnum - display the logical drive number for this device
4204 *
4205 * Display the logical drive number for the device in question, if it is a valid
4206 * logical drive. For physical devices, "-1" is returned
4207 * The logical drive number is displayed in the following format
4208 *
4209 * <SCSI ID> <LD NUM> <LD STICKY ID> <APP ADAPTER HANDLE>
4210 * <int> <int> <int> <int>
4211 *
4212 * @param dev : device object representation for the scsi device
4213 * @param buf : buffer to send data to
4214 */
4215static ssize_t
4216megaraid_sysfs_show_ldnum(struct device *dev, char *buf)
4217{
4218 struct scsi_device *sdev = to_scsi_device(dev);
4219 adapter_t *adapter = (adapter_t *)SCSIHOST2ADAP(sdev->host);
4220 mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
4221 int scsi_id = -1;
4222 int logical_drv = -1;
4223 int ldid_map = -1;
4224 uint32_t app_hndl = 0;
4225 int mapped_sdev_id;
4226 int rval;
4227 int i;
4228
4229 if (raid_dev->random_del_supported &&
4230 MRAID_IS_LOGICAL_SDEV(adapter, sdev)) {
4231
4232 rval = megaraid_sysfs_get_ldmap(adapter);
4233 if (rval == 0) {
4234
4235 for (i = 0; i < MAX_LOGICAL_DRIVES_40LD; i++) {
4236
4237 mapped_sdev_id = sdev->id;
4238
4239 if (sdev->id > adapter->init_id) {
4240 mapped_sdev_id -= 1;
4241 }
4242
4243 if (raid_dev->curr_ldmap[i] == mapped_sdev_id) {
4244
4245 scsi_id = sdev->id;
4246
4247 logical_drv = i;
4248
4249 ldid_map = raid_dev->curr_ldmap[i];
4250
4251 app_hndl = mraid_mm_adapter_app_handle(
4252 adapter->unique_id);
4253
4254 break;
4255 }
4256 }
4257 }
4258 else {
4259 con_log(CL_ANN, (KERN_NOTICE
4260 "megaraid: sysfs get ld map failed: %x\n",
4261 rval));
4262 }
4263 }
4264
4265 return snprintf(buf, 36, "%d %d %d %d\n", scsi_id, logical_drv,
4266 ldid_map, app_hndl);
4267}
4268
4269
4270/*
4271 * END: Mailbox Low Level Driver
4272 */
4273module_init(megaraid_init);
4274module_exit(megaraid_exit);
4275
4276/* vim: set ts=8 sw=8 tw=78 ai si: */
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
new file mode 100644
index 000000000000..07510009d110
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -0,0 +1,288 @@
1/*
2 *
3 * Linux MegaRAID device driver
4 *
5 * Copyright (c) 2003-2004 LSI Logic Corporation.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * FILE : megaraid_mbox.h
13 */
14
15#ifndef _MEGARAID_H_
16#define _MEGARAID_H_
17
18
19#include "mega_common.h"
20#include "mbox_defs.h"
21#include "megaraid_ioctl.h"
22
23
24#define MEGARAID_VERSION "2.20.4.5"
25#define MEGARAID_EXT_VERSION "(Release Date: Thu Feb 03 12:27:22 EST 2005)"
26
27
28/*
29 * Define some PCI values here until they are put in the kernel
30 */
31#define PCI_DEVICE_ID_PERC4_DI_DISCOVERY 0x000E
32#define PCI_SUBSYS_ID_PERC4_DI_DISCOVERY 0x0123
33
34#define PCI_DEVICE_ID_PERC4_SC 0x1960
35#define PCI_SUBSYS_ID_PERC4_SC 0x0520
36
37#define PCI_DEVICE_ID_PERC4_DC 0x1960
38#define PCI_SUBSYS_ID_PERC4_DC 0x0518
39
40#define PCI_DEVICE_ID_PERC4_QC 0x0407
41#define PCI_SUBSYS_ID_PERC4_QC 0x0531
42
43#define PCI_DEVICE_ID_PERC4_DI_EVERGLADES 0x000F
44#define PCI_SUBSYS_ID_PERC4_DI_EVERGLADES 0x014A
45
46#define PCI_DEVICE_ID_PERC4E_SI_BIGBEND 0x0013
47#define PCI_SUBSYS_ID_PERC4E_SI_BIGBEND 0x016c
48
49#define PCI_DEVICE_ID_PERC4E_DI_KOBUK 0x0013
50#define PCI_SUBSYS_ID_PERC4E_DI_KOBUK 0x016d
51
52#define PCI_DEVICE_ID_PERC4E_DI_CORVETTE 0x0013
53#define PCI_SUBSYS_ID_PERC4E_DI_CORVETTE 0x016e
54
55#define PCI_DEVICE_ID_PERC4E_DI_EXPEDITION 0x0013
56#define PCI_SUBSYS_ID_PERC4E_DI_EXPEDITION 0x016f
57
58#define PCI_DEVICE_ID_PERC4E_DI_GUADALUPE 0x0013
59#define PCI_SUBSYS_ID_PERC4E_DI_GUADALUPE 0x0170
60
61#define PCI_DEVICE_ID_PERC4E_DC_320_2E 0x0408
62#define PCI_SUBSYS_ID_PERC4E_DC_320_2E 0x0002
63
64#define PCI_DEVICE_ID_PERC4E_SC_320_1E 0x0408
65#define PCI_SUBSYS_ID_PERC4E_SC_320_1E 0x0001
66
67#define PCI_DEVICE_ID_MEGARAID_SCSI_320_0 0x1960
68#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_0 0xA520
69
70#define PCI_DEVICE_ID_MEGARAID_SCSI_320_1 0x1960
71#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_1 0x0520
72
73#define PCI_DEVICE_ID_MEGARAID_SCSI_320_2 0x1960
74#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_2 0x0518
75
76#define PCI_DEVICE_ID_MEGARAID_SCSI_320_0x 0x0407
77#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_0x 0x0530
78
79#define PCI_DEVICE_ID_MEGARAID_SCSI_320_2x 0x0407
80#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_2x 0x0532
81
82#define PCI_DEVICE_ID_MEGARAID_SCSI_320_4x 0x0407
83#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_4x 0x0531
84
85#define PCI_DEVICE_ID_MEGARAID_SCSI_320_1E 0x0408
86#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_1E 0x0001
87
88#define PCI_DEVICE_ID_MEGARAID_SCSI_320_2E 0x0408
89#define PCI_SUBSYS_ID_MEGARAID_SCSI_320_2E 0x0002
90
91#define PCI_DEVICE_ID_MEGARAID_I4_133_RAID 0x1960
92#define PCI_SUBSYS_ID_MEGARAID_I4_133_RAID 0x0522
93
94#define PCI_DEVICE_ID_MEGARAID_SATA_150_4 0x1960
95#define PCI_SUBSYS_ID_MEGARAID_SATA_150_4 0x4523
96
97#define PCI_DEVICE_ID_MEGARAID_SATA_150_6 0x1960
98#define PCI_SUBSYS_ID_MEGARAID_SATA_150_6 0x0523
99
100#define PCI_DEVICE_ID_MEGARAID_SATA_300_4x 0x0409
101#define PCI_SUBSYS_ID_MEGARAID_SATA_300_4x 0x3004
102
103#define PCI_DEVICE_ID_MEGARAID_SATA_300_8x 0x0409
104#define PCI_SUBSYS_ID_MEGARAID_SATA_300_8x 0x3008
105
106#define PCI_DEVICE_ID_INTEL_RAID_SRCU42X 0x0407
107#define PCI_SUBSYS_ID_INTEL_RAID_SRCU42X 0x0532
108
109#define PCI_DEVICE_ID_INTEL_RAID_SRCS16 0x1960
110#define PCI_SUBSYS_ID_INTEL_RAID_SRCS16 0x0523
111
112#define PCI_DEVICE_ID_INTEL_RAID_SRCU42E 0x0408
113#define PCI_SUBSYS_ID_INTEL_RAID_SRCU42E 0x0002
114
115#define PCI_DEVICE_ID_INTEL_RAID_SRCZCRX 0x0407
116#define PCI_SUBSYS_ID_INTEL_RAID_SRCZCRX 0x0530
117
118#define PCI_DEVICE_ID_INTEL_RAID_SRCS28X 0x0409
119#define PCI_SUBSYS_ID_INTEL_RAID_SRCS28X 0x3008
120
121#define PCI_DEVICE_ID_INTEL_RAID_SROMBU42E_ALIEF 0x0408
122#define PCI_SUBSYS_ID_INTEL_RAID_SROMBU42E_ALIEF 0x3431
123
124#define PCI_DEVICE_ID_INTEL_RAID_SROMBU42E_HARWICH 0x0408
125#define PCI_SUBSYS_ID_INTEL_RAID_SROMBU42E_HARWICH 0x3499
126
127#define PCI_DEVICE_ID_INTEL_RAID_SRCU41L_LAKE_SHETEK 0x1960
128#define PCI_SUBSYS_ID_INTEL_RAID_SRCU41L_LAKE_SHETEK 0x0520
129
130#define PCI_DEVICE_ID_FSC_MEGARAID_PCI_EXPRESS_ROMB 0x0408
131#define PCI_SUBSYS_ID_FSC_MEGARAID_PCI_EXPRESS_ROMB 0x1065
132
133#define PCI_DEVICE_ID_MEGARAID_ACER_ROMB_2E 0x0408
134#define PCI_SUBSYS_ID_MEGARAID_ACER_ROMB_2E 0x004D
135
136#define PCI_SUBSYS_ID_PERC3_QC 0x0471
137#define PCI_SUBSYS_ID_PERC3_DC 0x0493
138#define PCI_SUBSYS_ID_PERC3_SC 0x0475
139
140#define PCI_DEVICE_ID_MEGARAID_NEC_ROMB_2E 0x0408
141#define PCI_SUBSYS_ID_MEGARAID_NEC_ROMB_2E 0x8287
142
143#ifndef PCI_SUBSYS_ID_FSC
144#define PCI_SUBSYS_ID_FSC 0x1734
145#endif
146
147#define MBOX_MAX_SCSI_CMDS 128 // number of cmds reserved for kernel
148#define MBOX_MAX_USER_CMDS 32 // number of cmds for applications
149#define MBOX_DEF_CMD_PER_LUN 64 // default commands per lun
150#define MBOX_DEFAULT_SG_SIZE 26 // default sg size supported by all fw
151#define MBOX_MAX_SG_SIZE 32 // maximum scatter-gather list size
152#define MBOX_MAX_SECTORS 128 // maximum sectors per IO
153#define MBOX_TIMEOUT 30 // timeout value for internal cmds
154#define MBOX_BUSY_WAIT 10 // max usec to wait for busy mailbox
155#define MBOX_RESET_WAIT 180 // wait these many seconds in reset
156#define MBOX_RESET_EXT_WAIT 120 // extended wait reset
157
158/*
159 * maximum transfer that can happen through the firmware commands issued
160 * internally from the driver.
161 */
162#define MBOX_IBUF_SIZE 4096
163
164
165/**
166 * mbox_ccb_t - command control block specific to mailbox based controllers
167 * @raw_mbox : raw mailbox pointer
168 * @mbox : mailbox
169 * @mbox64 : extended mailbox
170 * @mbox_dma_h	: mailbox dma address
171 * @sgl64 : 64-bit scatter-gather list
172 * @sgl32 : 32-bit scatter-gather list
173 * @sgl_dma_h : dma handle for the scatter-gather list
174 * @pthru : passthru structure
175 * @pthru_dma_h : dma handle for the passthru structure
176 * @epthru : extended passthru structure
177 * @epthru_dma_h : dma handle for extended passthru structure
178 * @buf_dma_h : dma handle for buffers w/o sg list
179 *
180 * command control block specific to the mailbox based controllers
181 */
182typedef struct {
183 uint8_t *raw_mbox;
184 mbox_t *mbox;
185 mbox64_t *mbox64;
186 dma_addr_t mbox_dma_h;
187 mbox_sgl64 *sgl64;
188 mbox_sgl32 *sgl32;
189 dma_addr_t sgl_dma_h;
190 mraid_passthru_t *pthru;
191 dma_addr_t pthru_dma_h;
192 mraid_epassthru_t *epthru;
193 dma_addr_t epthru_dma_h;
194 dma_addr_t buf_dma_h;
195} mbox_ccb_t;
196
197
198/**
199 * mraid_device_t - adapter soft state structure for mailbox controllers
200 * @param una_mbox64 : 64-bit mbox - unaligned
201 * @param una_mbox64_dma : mbox dma addr - unaligned
202 * @param mbox : 32-bit mbox - aligned
203 * @param mbox64 : 64-bit mbox - aligned
204 * @param mbox_dma : mbox dma addr - aligned
205 * @param mailbox_lock : exclusion lock for the mailbox
206 * @param baseport : base port of hba memory
207 * @param baseaddr : mapped addr of hba memory
208 * @param mbox_pool : pool of mailboxes
209 * @param mbox_pool_handle : handle for the mailbox pool memory
210 * @param epthru_pool : a pool for extended passthru commands
211 * @param epthru_pool_handle : handle to the pool above
212 * @param sg_pool : pool of scatter-gather lists for this driver
213 * @param sg_pool_handle : handle to the pool above
214 * @param ccb_list : list of our command control blocks
215 * @param uccb_list : list of cmd control blocks for mgmt module
216 * @param umbox64 : array of mailbox for user commands (cmm)
217 * @param pdrv_state : array for state of each physical drive.
218 * @param last_disp : flag used to show device scanning
219 * @param hw_error : set if FW not responding
220 * @param fast_load : If set, skip physical device scanning
221 * @channel_class : channel class, RAID or SCSI
222 * @sysfs_sem : semaphore to serialize access to sysfs res.
223 * @sysfs_uioc : management packet to issue FW calls from sysfs
224 * @sysfs_mbox64 : mailbox packet to issue FW calls from sysfs
225 * @sysfs_buffer : data buffer for FW commands issued from sysfs
226 * @sysfs_buffer_dma : DMA buffer for FW commands issued from sysfs
227 * @sysfs_wait_q : wait queue for sysfs operations
228 * @random_del_supported : set if the random deletion is supported
229 * @curr_ldmap : current LDID map
230 *
231 * Initialization structure for mailbox controllers: memory based and IO based
232 * All the fields in this structure are LLD specific and may be discovered at
233 * init() or start() time.
234 *
235 * NOTE: The fields of this structure are placed to minimize cache misses
236 */
237#define MAX_LD_EXTENDED64 64
238typedef struct {
239 mbox64_t *una_mbox64;
240 dma_addr_t una_mbox64_dma;
241 mbox_t *mbox;
242 mbox64_t *mbox64;
243 dma_addr_t mbox_dma;
244 spinlock_t mailbox_lock;
245 unsigned long baseport;
246 void __iomem * baseaddr;
247 struct mraid_pci_blk mbox_pool[MBOX_MAX_SCSI_CMDS];
248 struct dma_pool *mbox_pool_handle;
249 struct mraid_pci_blk epthru_pool[MBOX_MAX_SCSI_CMDS];
250 struct dma_pool *epthru_pool_handle;
251 struct mraid_pci_blk sg_pool[MBOX_MAX_SCSI_CMDS];
252 struct dma_pool *sg_pool_handle;
253 mbox_ccb_t ccb_list[MBOX_MAX_SCSI_CMDS];
254 mbox_ccb_t uccb_list[MBOX_MAX_USER_CMDS];
255 mbox64_t umbox64[MBOX_MAX_USER_CMDS];
256
257 uint8_t pdrv_state[MBOX_MAX_PHYSICAL_DRIVES];
258 uint32_t last_disp;
259 int hw_error;
260 int fast_load;
261 uint8_t channel_class;
262 struct semaphore sysfs_sem;
263 uioc_t *sysfs_uioc;
264 mbox64_t *sysfs_mbox64;
265 caddr_t sysfs_buffer;
266 dma_addr_t sysfs_buffer_dma;
267 wait_queue_head_t sysfs_wait_q;
268 int random_del_supported;
269 uint16_t curr_ldmap[MAX_LD_EXTENDED64];
270} mraid_device_t;
271
272// route to raid device from adapter
273#define ADAP2RAIDDEV(adp) ((mraid_device_t *)((adp)->raid_device))
274
275#define MAILBOX_LOCK(rdev) (&(rdev)->mailbox_lock)
276
277// Find out if this channel is a RAID or SCSI
278#define IS_RAID_CH(rdev, ch) (((rdev)->channel_class >> (ch)) & 0x01)
279
280
281#define RDINDOOR(rdev) readl((rdev)->baseaddr + 0x20)
282#define RDOUTDOOR(rdev) readl((rdev)->baseaddr + 0x2C)
283#define WRINDOOR(rdev, value) writel(value, (rdev)->baseaddr + 0x20)
284#define WROUTDOOR(rdev, value) writel(value, (rdev)->baseaddr + 0x2C)
285
286#endif // _MEGARAID_H_
287
288// vim: set ts=8 sw=8 tw=78:
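A minimal usage sketch for the accessor and doorbell macros above (not part of the committed header): it assumes the adapter_t soft state from mega_common.h, whose raid_device field backs ADAP2RAIDDEV(), an eight-channel controller, and a caller-supplied command token; the function name is illustrative.

static void example_use_mbox_macros(adapter_t *adapter, uint32_t cmd_token)
{
	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
	int		ch;

	/* channel_class keeps one bit per channel: 1 = RAID, 0 = plain SCSI */
	for (ch = 0; ch < 8; ch++) {
		if (IS_RAID_CH(raid_dev, ch))
			con_log(CL_DLEVEL1, ("channel %d is a RAID channel\n", ch));
	}

	/* post the token through the inbound doorbell register ... */
	WRINDOOR(raid_dev, cmd_token);

	/* ... and pick up a completion later from the outbound doorbell */
	(void) RDOUTDOOR(raid_dev);
}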
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
new file mode 100644
index 000000000000..9f1b550713ec
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -0,0 +1,1255 @@
1/*
2 *
3 * Linux MegaRAID device driver
4 *
5 * Copyright (c) 2003-2004 LSI Logic Corporation.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * FILE : megaraid_mm.c
13 * Version : v2.20.2.5 (Jan 21 2005)
14 *
15 * Common management module
16 */
17
18#include "megaraid_mm.h"
19#include <linux/smp_lock.h>
20
21
22// Entry points for char node driver
23static int mraid_mm_open(struct inode *, struct file *);
24static int mraid_mm_ioctl(struct inode *, struct file *, uint, unsigned long);
25
26
27// routines to convert to and from the old format
28static int mimd_to_kioc(mimd_t __user *, mraid_mmadp_t *, uioc_t *);
29static int kioc_to_mimd(uioc_t *, mimd_t __user *);
30
31
32// Helper functions
33static int handle_drvrcmd(void __user *, uint8_t, int *);
34static int lld_ioctl(mraid_mmadp_t *, uioc_t *);
35static void ioctl_done(uioc_t *);
36static void lld_timedout(unsigned long);
37static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *);
38static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *);
39static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *);
40static void mraid_mm_dealloc_kioc(mraid_mmadp_t *, uioc_t *);
41static int mraid_mm_attach_buf(mraid_mmadp_t *, uioc_t *, int);
42static int mraid_mm_setup_dma_pools(mraid_mmadp_t *);
43static void mraid_mm_free_adp_resources(mraid_mmadp_t *);
44static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *);
45
46#ifdef CONFIG_COMPAT
47static long mraid_mm_compat_ioctl(struct file *, unsigned int, unsigned long);
48#endif
49
50MODULE_AUTHOR("LSI Logic Corporation");
51MODULE_DESCRIPTION("LSI Logic Management Module");
52MODULE_LICENSE("GPL");
53MODULE_VERSION(LSI_COMMON_MOD_VERSION);
54
55static int dbglevel = CL_ANN;
56module_param_named(dlevel, dbglevel, int, 0);
57MODULE_PARM_DESC(dlevel, "Debug level (default=0)");
58
59EXPORT_SYMBOL(mraid_mm_register_adp);
60EXPORT_SYMBOL(mraid_mm_unregister_adp);
61EXPORT_SYMBOL(mraid_mm_adapter_app_handle);
62
63static int majorno;
64static uint32_t drvr_ver = 0x02200201;
65
66static int adapters_count_g;
67static struct list_head adapters_list_g;
68
69static wait_queue_head_t wait_q;
70
71static struct file_operations lsi_fops = {
72 .open = mraid_mm_open,
73 .ioctl = mraid_mm_ioctl,
74#ifdef CONFIG_COMPAT
75 .compat_ioctl = mraid_mm_compat_ioctl,
76#endif
77 .owner = THIS_MODULE,
78};
79
80/**
81 * mraid_mm_open - open routine for char node interface
82 * @inode : unused
83 * @filep : unused
84 *
85 * allow ioctl operations by apps only if they have superuser privilege
86 */
87static int
88mraid_mm_open(struct inode *inode, struct file *filep)
89{
90 /*
91 * Only allow superuser to access private ioctl interface
92 */
93 if (!capable(CAP_SYS_ADMIN)) return (-EACCES);
94
95 return 0;
96}
97
98/**
99 * mraid_mm_ioctl - module entry-point for ioctls
100 * @inode : inode (ignored)
101 * @filep : file operations pointer (ignored)
102 * @cmd : ioctl command
103 * @arg : user ioctl packet
104 */
105static int
106mraid_mm_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
107 unsigned long arg)
108{
109 uioc_t *kioc;
110 char signature[EXT_IOCTL_SIGN_SZ] = {0};
111 int rval;
112 mraid_mmadp_t *adp;
113 uint8_t old_ioctl;
114 int drvrcmd_rval;
115 void __user *argp = (void __user *)arg;
116
117 /*
118	 * Make sure only USCSICMD is issued through this interface;
119	 * MIMD applications may still fire a different command.
120 */
121
122 if ((_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD)) {
123 return (-EINVAL);
124 }
125
126 /*
127 * Look for signature to see if this is the new or old ioctl format.
128 */
129 if (copy_from_user(signature, argp, EXT_IOCTL_SIGN_SZ)) {
130 con_log(CL_ANN, (KERN_WARNING
131 "megaraid cmm: copy from usr addr failed\n"));
132 return (-EFAULT);
133 }
134
135 if (memcmp(signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ) == 0)
136 old_ioctl = 0;
137 else
138 old_ioctl = 1;
139
140 /*
141 * At present, we don't support the new ioctl packet
142 */
143 if (!old_ioctl )
144 return (-EINVAL);
145
146 /*
147 * If it is a driver ioctl (as opposed to fw ioctls), then we can
148 * handle the command locally. rval > 0 means it is not a drvr cmd
149 */
150 rval = handle_drvrcmd(argp, old_ioctl, &drvrcmd_rval);
151
152 if (rval < 0)
153 return rval;
154 else if (rval == 0)
155 return drvrcmd_rval;
156
157 rval = 0;
158 if ((adp = mraid_mm_get_adapter(argp, &rval)) == NULL) {
159 return rval;
160 }
161
162 /*
163 * Check if adapter can accept ioctl. We may have marked it offline
164 * if any previous kioc had timedout on this controller.
165 */
166 if (!adp->quiescent) {
167 con_log(CL_ANN, (KERN_WARNING
168 "megaraid cmm: controller cannot accept cmds due to "
169 "earlier errors\n" ));
170 return -EFAULT;
171 }
172
173 /*
174 * The following call will block till a kioc is available
175 */
176 kioc = mraid_mm_alloc_kioc(adp);
177
178 /*
179 * User sent the old mimd_t ioctl packet. Convert it to uioc_t.
180 */
181 if ((rval = mimd_to_kioc(argp, adp, kioc))) {
182 mraid_mm_dealloc_kioc(adp, kioc);
183 return rval;
184 }
185
186 kioc->done = ioctl_done;
187
188 /*
189 * Issue the IOCTL to the low level driver. After the IOCTL completes
190 * release the kioc if and only if it was _not_ timedout. If it was
191 * timedout, that means that resources are still with low level driver.
192 */
193 if ((rval = lld_ioctl(adp, kioc))) {
194
195 if (!kioc->timedout)
196 mraid_mm_dealloc_kioc(adp, kioc);
197
198 return rval;
199 }
200
201 /*
202 * Convert the kioc back to user space
203 */
204 rval = kioc_to_mimd(kioc, argp);
205
206 /*
207 * Return the kioc to free pool
208 */
209 mraid_mm_dealloc_kioc(adp, kioc);
210
211 return rval;
212}
213
214
215/**
216 * mraid_mm_get_adapter - Returns the adapter corresponding to the mimd packet
217 * @umimd : User space mimd_t ioctl packet
218 * @rval : error code set if the adapter cannot be located (OUT)
219 */
220static mraid_mmadp_t *
221mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
222{
223 mraid_mmadp_t *adapter;
224 mimd_t mimd;
225 uint32_t adapno;
226 int iterator;
227
228
229 if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
230 *rval = -EFAULT;
231 return NULL;
232 }
233
234 adapno = GETADAP(mimd.ui.fcs.adapno);
235
236 if (adapno >= adapters_count_g) {
237 *rval = -ENODEV;
238 return NULL;
239 }
240
241 adapter = NULL;
242 iterator = 0;
243
244 list_for_each_entry(adapter, &adapters_list_g, list) {
245 if (iterator++ == adapno) break;
246 }
247
248 if (!adapter) {
249 *rval = -ENODEV;
250 return NULL;
251 }
252
253 return adapter;
254}
255
256/*
257 * handle_drvrcmd - This routine checks if the opcode is a driver
258 * cmd and if it is, handles it.
259 * @arg : packet sent by the user app
260 * @old_ioctl : mimd if 1; uioc otherwise
261 */
262static int
263handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval)
264{
265 mimd_t __user *umimd;
266 mimd_t kmimd;
267 uint8_t opcode;
268 uint8_t subopcode;
269
270 if (old_ioctl)
271 goto old_packet;
272 else
273 goto new_packet;
274
275new_packet:
276 return (-ENOTSUPP);
277
278old_packet:
279 *rval = 0;
280 umimd = arg;
281
282 if (copy_from_user(&kmimd, umimd, sizeof(mimd_t)))
283 return (-EFAULT);
284
285 opcode = kmimd.ui.fcs.opcode;
286 subopcode = kmimd.ui.fcs.subopcode;
287
288 /*
289 * If the opcode is 0x82 and the subopcode is either GET_DRVRVER or
290	 * GET_NUMADP, then we can handle it. Otherwise we should return 1 to
291 * indicate that we cannot handle this.
292 */
293 if (opcode != 0x82)
294 return 1;
295
296 switch (subopcode) {
297
298 case MEGAIOC_QDRVRVER:
299
300 if (copy_to_user(kmimd.data, &drvr_ver, sizeof(uint32_t)))
301 return (-EFAULT);
302
303 return 0;
304
305 case MEGAIOC_QNADAP:
306
307 *rval = adapters_count_g;
308
309 if (copy_to_user(kmimd.data, &adapters_count_g,
310 sizeof(uint32_t)))
311 return (-EFAULT);
312
313 return 0;
314
315 default:
316 /* cannot handle */
317 return 1;
318 }
319
320 return 0;
321}
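To show how the driver-level sub-commands handled above are reached from an application, a hypothetical user-space sketch follows (not part of this commit). The /dev/megadev0 node name and the ioctl command number are assumptions; the mimd_t layout from megaraid_mm.h and the MEGAIOC_* constants from megaraid_ioctl.h are assumed to be visible to the application (with __user defined away). Any command whose _IOC_TYPE() is MEGAIOC_MAGIC passes the entry check in mraid_mm_ioctl().

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

int main(void)
{
	mimd_t		mimd;
	uint32_t	ver = 0;
	int		fd, nadapters;

	fd = open("/dev/megadev0", O_RDONLY);	/* assumed device node */
	if (fd < 0)
		return 1;

	memset(&mimd, 0, sizeof(mimd));
	mimd.ui.fcs.opcode    = 0x82;			/* driver-private opcode */
	mimd.ui.fcs.subopcode = MEGAIOC_QDRVRVER;	/* query driver version */
	mimd.data             = (char *)&ver;

	if (ioctl(fd, _IOWR(MEGAIOC_MAGIC, 0, mimd_t), &mimd) == 0)
		printf("driver version: %#x\n", ver);

	/* MEGAIOC_QNADAP also returns the adapter count as the ioctl result */
	mimd.ui.fcs.subopcode = MEGAIOC_QNADAP;
	nadapters = ioctl(fd, _IOWR(MEGAIOC_MAGIC, 0, mimd_t), &mimd);
	printf("adapters: %d\n", nadapters);

	close(fd);
	return 0;
}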
322
323
324/**
325 * mimd_to_kioc - Converter from old to new ioctl format
326 *
327 * @umimd : user space old MIMD IOCTL
328 * @kioc : kernel space new format IOCTL
329 *
330 * Routine to convert MIMD interface IOCTL to new interface IOCTL packet. The
331 * new packet is in kernel space so that the driver can perform operations on it
332 * freely.
333 */
334
335static int
336mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
337{
338 mbox64_t *mbox64;
339 mbox_t *mbox;
340 mraid_passthru_t *pthru32;
341 uint32_t adapno;
342 uint8_t opcode;
343 uint8_t subopcode;
344 mimd_t mimd;
345
346 if (copy_from_user(&mimd, umimd, sizeof(mimd_t)))
347 return (-EFAULT);
348
349 /*
350 * Applications are not allowed to send extd pthru
351 */
352 if ((mimd.mbox[0] == MBOXCMD_PASSTHRU64) ||
353 (mimd.mbox[0] == MBOXCMD_EXTPTHRU))
354 return (-EINVAL);
355
356 opcode = mimd.ui.fcs.opcode;
357 subopcode = mimd.ui.fcs.subopcode;
358 adapno = GETADAP(mimd.ui.fcs.adapno);
359
360 if (adapno >= adapters_count_g)
361 return (-ENODEV);
362
363 kioc->adapno = adapno;
364 kioc->mb_type = MBOX_LEGACY;
365 kioc->app_type = APPTYPE_MIMD;
366
367 switch (opcode) {
368
369 case 0x82:
370
371 if (subopcode == MEGAIOC_QADAPINFO) {
372
373 kioc->opcode = GET_ADAP_INFO;
374 kioc->data_dir = UIOC_RD;
375 kioc->xferlen = sizeof(mraid_hba_info_t);
376
377 if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
378 return (-ENOMEM);
379 }
380 else {
381 con_log(CL_ANN, (KERN_WARNING
382 "megaraid cmm: Invalid subop\n"));
383 return (-EINVAL);
384 }
385
386 break;
387
388 case 0x81:
389
390 kioc->opcode = MBOX_CMD;
391 kioc->xferlen = mimd.ui.fcs.length;
392 kioc->user_data_len = kioc->xferlen;
393 kioc->user_data = mimd.ui.fcs.buffer;
394
395 if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
396 return (-ENOMEM);
397
398 if (mimd.outlen) kioc->data_dir = UIOC_RD;
399 if (mimd.inlen) kioc->data_dir |= UIOC_WR;
400
401 break;
402
403 case 0x80:
404
405 kioc->opcode = MBOX_CMD;
406 kioc->xferlen = (mimd.outlen > mimd.inlen) ?
407 mimd.outlen : mimd.inlen;
408 kioc->user_data_len = kioc->xferlen;
409 kioc->user_data = mimd.data;
410
411 if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
412 return (-ENOMEM);
413
414 if (mimd.outlen) kioc->data_dir = UIOC_RD;
415 if (mimd.inlen) kioc->data_dir |= UIOC_WR;
416
417 break;
418
419 default:
420 return (-EINVAL);
421 }
422
423 /*
424 * If driver command, nothing else to do
425 */
426 if (opcode == 0x82)
427 return 0;
428
429 /*
430 * This is a mailbox cmd; copy the mailbox from mimd
431 */
432 mbox64 = (mbox64_t *)((unsigned long)kioc->cmdbuf);
433 mbox = &mbox64->mbox32;
434 memcpy(mbox, mimd.mbox, 14);
435
436 if (mbox->cmd != MBOXCMD_PASSTHRU) { // regular DCMD
437
438 mbox->xferaddr = (uint32_t)kioc->buf_paddr;
439
440 if (kioc->data_dir & UIOC_WR) {
441 if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
442 kioc->xferlen)) {
443 return (-EFAULT);
444 }
445 }
446
447 return 0;
448 }
449
450 /*
451	 * This is a regular 32-bit pthru cmd; mbox points to the pthru struct.
452	 * Just like in the case above, the beginning of the memory block is
453	 * treated as a mailbox. The passthru begins at the next 1K boundary,
454	 * and the data starts 1K after that.
455 */
456 pthru32 = kioc->pthru32;
457 kioc->user_pthru = &umimd->pthru;
458 mbox->xferaddr = (uint32_t)kioc->pthru32_h;
459
460 if (copy_from_user(pthru32, kioc->user_pthru,
461 sizeof(mraid_passthru_t))) {
462 return (-EFAULT);
463 }
464
465 pthru32->dataxferaddr = kioc->buf_paddr;
466 if (kioc->data_dir & UIOC_WR) {
467 if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
468 pthru32->dataxferlen)) {
469 return (-EFAULT);
470 }
471 }
472
473 return 0;
474}
475
476/**
477 * mraid_mm_attach_buf - Attach a free dma buffer for the required size
478 *
479 * @adp : Adapter softstate
480 * @kioc : kioc that the buffer needs to be attached to
481 * @xferlen : required length for buffer
482 *
483 * First we search for the pool with the smallest buffer that is >= @xferlen.
484 * If that pool has no free buffer, we try the next bigger size. If none is
485 * available, we allocate the smallest buffer that is >= @xferlen and attach
486 * it to the pool.
487 */
488static int
489mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)
490{
491 mm_dmapool_t *pool;
492 int right_pool = -1;
493 unsigned long flags;
494 int i;
495
496 kioc->pool_index = -1;
497 kioc->buf_vaddr = NULL;
498 kioc->buf_paddr = 0;
499 kioc->free_buf = 0;
500
501 /*
502 * We need xferlen amount of memory. See if we can get it from our
503	 * dma pools. If we don't get the exact size, we will try a bigger buffer
504 */
505
506 for (i = 0; i < MAX_DMA_POOLS; i++) {
507
508 pool = &adp->dma_pool_list[i];
509
510 if (xferlen > pool->buf_size)
511 continue;
512
513 if (right_pool == -1)
514 right_pool = i;
515
516 spin_lock_irqsave(&pool->lock, flags);
517
518 if (!pool->in_use) {
519
520 pool->in_use = 1;
521 kioc->pool_index = i;
522 kioc->buf_vaddr = pool->vaddr;
523 kioc->buf_paddr = pool->paddr;
524
525 spin_unlock_irqrestore(&pool->lock, flags);
526 return 0;
527 }
528 else {
529 spin_unlock_irqrestore(&pool->lock, flags);
530 continue;
531 }
532 }
533
534 /*
535 * If xferlen doesn't match any of our pools, return error
536 */
537 if (right_pool == -1)
538 return -EINVAL;
539
540 /*
541 * We did not get any buffer from the preallocated pool. Let us try
542	 * to allocate a new buffer. NOTE: This is a blocking call.
543 */
544 pool = &adp->dma_pool_list[right_pool];
545
546 spin_lock_irqsave(&pool->lock, flags);
547
548 kioc->pool_index = right_pool;
549 kioc->free_buf = 1;
550 kioc->buf_vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL,
551 &kioc->buf_paddr);
552 spin_unlock_irqrestore(&pool->lock, flags);
553
554 if (!kioc->buf_vaddr)
555 return -ENOMEM;
556
557 return 0;
558}
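For illustration only, the size-class selection above reduces to the helper below; it assumes the pools created by mraid_mm_setup_dma_pools() later in this file, i.e. MAX_DMA_POOLS pools whose buffer sizes double starting at MRAID_MM_INIT_BUFF_SIZE (4k, 8k, 16k, ...). The function name is illustrative.

static int example_pool_index(int xferlen)
{
	int	i;
	int	size = MRAID_MM_INIT_BUFF_SIZE;

	for (i = 0; i < MAX_DMA_POOLS; i++, size *= 2) {
		if (xferlen <= size)
			return i;	/* e.g. xferlen == 10000 selects the 16k pool */
	}

	return -1;	/* bigger than the largest pool: attach fails with -EINVAL */
}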
559
560/**
561 * mraid_mm_alloc_kioc - Returns a uioc_t from free list
562 * @adp : Adapter softstate for this module
563 *
564 * The kioc_semaphore is initialized with the number of kioc nodes in the
565 * free kioc pool. If the kioc pool is empty, this function blocks till
566 * a kioc becomes free.
567 */
568static uioc_t *
569mraid_mm_alloc_kioc(mraid_mmadp_t *adp)
570{
571 uioc_t *kioc;
572 struct list_head* head;
573 unsigned long flags;
574
575 down(&adp->kioc_semaphore);
576
577 spin_lock_irqsave(&adp->kioc_pool_lock, flags);
578
579 head = &adp->kioc_pool;
580
581 if (list_empty(head)) {
582 up(&adp->kioc_semaphore);
583 spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
584
585 con_log(CL_ANN, ("megaraid cmm: kioc list empty!\n"));
586 return NULL;
587 }
588
589 kioc = list_entry(head->next, uioc_t, list);
590 list_del_init(&kioc->list);
591
592 spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
593
594 memset((caddr_t)(unsigned long)kioc->cmdbuf, 0, sizeof(mbox64_t));
595 memset((caddr_t) kioc->pthru32, 0, sizeof(mraid_passthru_t));
596
597 kioc->buf_vaddr = NULL;
598 kioc->buf_paddr = 0;
599 kioc->pool_index =-1;
600 kioc->free_buf = 0;
601 kioc->user_data = NULL;
602 kioc->user_data_len = 0;
603 kioc->user_pthru = NULL;
604 kioc->timedout = 0;
605
606 return kioc;
607}
608
609/**
610 * mraid_mm_dealloc_kioc - Return kioc to free pool
611 *
612 * @adp : Adapter softstate
613 * @kioc : uioc_t node to be returned to free pool
614 */
615static void
616mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc)
617{
618 mm_dmapool_t *pool;
619 unsigned long flags;
620
621 if (kioc->pool_index != -1) {
622 pool = &adp->dma_pool_list[kioc->pool_index];
623
624 /* This routine may be called in non-isr context also */
625 spin_lock_irqsave(&pool->lock, flags);
626
627 /*
628 * While attaching the dma buffer, if we didn't get the
629 * required buffer from the pool, we would have allocated
630 * it at the run time and set the free_buf flag. We must
631 * free that buffer. Otherwise, just mark that the buffer is
632 * not in use
633 */
634 if (kioc->free_buf == 1)
635 pci_pool_free(pool->handle, kioc->buf_vaddr,
636 kioc->buf_paddr);
637 else
638 pool->in_use = 0;
639
640 spin_unlock_irqrestore(&pool->lock, flags);
641 }
642
643 /* Return the kioc to the free pool */
644 spin_lock_irqsave(&adp->kioc_pool_lock, flags);
645 list_add(&kioc->list, &adp->kioc_pool);
646 spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
647
648 /* increment the free kioc count */
649 up(&adp->kioc_semaphore);
650
651 return;
652}
653
654/**
655 * lld_ioctl - Routine to issue ioctl to low level drvr
656 *
657 * @adp : The adapter handle
658 * @kioc : The ioctl packet with kernel addresses
659 */
660static int
661lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
662{
663 int rval;
664 struct timer_list timer;
665 struct timer_list *tp = NULL;
666
667 kioc->status = -ENODATA;
668 rval = adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE);
669
670 if (rval) return rval;
671
672 /*
673 * Start the timer
674 */
675 if (adp->timeout > 0) {
676 tp = &timer;
677 init_timer(tp);
678
679 tp->function = lld_timedout;
680 tp->data = (unsigned long)kioc;
681 tp->expires = jiffies + adp->timeout * HZ;
682
683 add_timer(tp);
684 }
685
686 /*
687 * Wait till the low level driver completes the ioctl. After this
688 * call, the ioctl either completed successfully or timedout.
689 */
690 wait_event(wait_q, (kioc->status != -ENODATA));
691 if (tp) {
692 del_timer_sync(tp);
693 }
694
695 /*
696 * If the command had timedout, we mark the controller offline
697 * before returning
698 */
699 if (kioc->timedout) {
700 adp->quiescent = 0;
701 }
702
703 return kioc->status;
704}
705
706
707/**
708 * ioctl_done - callback from the low level driver
709 *
710 * @kioc : completed ioctl packet
711 */
712static void
713ioctl_done(uioc_t *kioc)
714{
715 uint32_t adapno;
716 int iterator;
717 mraid_mmadp_t* adapter;
718
719 /*
720	 * When the kioc returns from the driver, make sure it still doesn't
721	 * have ENODATA in status. Otherwise, the driver will hang on wait_event
722 * forever
723 */
724 if (kioc->status == -ENODATA) {
725 con_log(CL_ANN, (KERN_WARNING
726 "megaraid cmm: lld didn't change status!\n"));
727
728 kioc->status = -EINVAL;
729 }
730
731 /*
732 * Check if this kioc was timedout before. If so, nobody is waiting
733 * on this kioc. We don't have to wake up anybody. Instead, we just
734 * have to free the kioc
735 */
736 if (kioc->timedout) {
737 iterator = 0;
738 adapter = NULL;
739 adapno = kioc->adapno;
740
741 con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed "
742 "ioctl that was timedout before\n"));
743
744 list_for_each_entry(adapter, &adapters_list_g, list) {
745 if (iterator++ == adapno) break;
746 }
747
748 kioc->timedout = 0;
749
750 if (adapter) {
751 mraid_mm_dealloc_kioc( adapter, kioc );
752 }
753 }
754 else {
755 wake_up(&wait_q);
756 }
757}
758
759
760/*
761 * lld_timedout : callback from the expired timer
762 *
763 * @ptr : ioctl packet that timed out
764 */
765static void
766lld_timedout(unsigned long ptr)
767{
768 uioc_t *kioc = (uioc_t *)ptr;
769
770 kioc->status = -ETIME;
771 kioc->timedout = 1;
772
773 con_log(CL_ANN, (KERN_WARNING "megaraid cmm: ioctl timed out\n"));
774
775 wake_up(&wait_q);
776}
777
778
779/**
780 * kioc_to_mimd : Converter from new back to old format
781 *
782 * @kioc : Kernel space IOCTL packet (successfully issued)
783 * @mimd : User space MIMD packet
784 */
785static int
786kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd)
787{
788 mimd_t kmimd;
789 uint8_t opcode;
790 uint8_t subopcode;
791
792 mbox64_t *mbox64;
793 mraid_passthru_t __user *upthru32;
794 mraid_passthru_t *kpthru32;
795 mcontroller_t cinfo;
796 mraid_hba_info_t *hinfo;
797
798
799 if (copy_from_user(&kmimd, mimd, sizeof(mimd_t)))
800 return (-EFAULT);
801
802 opcode = kmimd.ui.fcs.opcode;
803 subopcode = kmimd.ui.fcs.subopcode;
804
805 if (opcode == 0x82) {
806 switch (subopcode) {
807
808 case MEGAIOC_QADAPINFO:
809
810 hinfo = (mraid_hba_info_t *)(unsigned long)
811 kioc->buf_vaddr;
812
813 hinfo_to_cinfo(hinfo, &cinfo);
814
815 if (copy_to_user(kmimd.data, &cinfo, sizeof(cinfo)))
816 return (-EFAULT);
817
818 return 0;
819
820 default:
821 return (-EINVAL);
822 }
823
824 return 0;
825 }
826
827 mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
828
829 if (kioc->user_pthru) {
830
831 upthru32 = kioc->user_pthru;
832 kpthru32 = kioc->pthru32;
833
834 if (copy_to_user(&upthru32->scsistatus,
835 &kpthru32->scsistatus,
836 sizeof(uint8_t))) {
837 return (-EFAULT);
838 }
839 }
840
841 if (kioc->user_data) {
842 if (copy_to_user(kioc->user_data, kioc->buf_vaddr,
843 kioc->user_data_len)) {
844 return (-EFAULT);
845 }
846 }
847
848 if (copy_to_user(&mimd->mbox[17],
849 &mbox64->mbox32.status, sizeof(uint8_t))) {
850 return (-EFAULT);
851 }
852
853 return 0;
854}
855
856
857/**
858 * hinfo_to_cinfo - Convert new format hba info into old format
859 *
860 * @hinfo : New format, more comprehensive adapter info
861 * @cinfo : Old format adapter info to support mimd_t apps
862 */
863static void
864hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo)
865{
866 if (!hinfo || !cinfo)
867 return;
868
869 cinfo->base = hinfo->baseport;
870 cinfo->irq = hinfo->irq;
871 cinfo->numldrv = hinfo->num_ldrv;
872 cinfo->pcibus = hinfo->pci_bus;
873 cinfo->pcidev = hinfo->pci_slot;
874 cinfo->pcifun = PCI_FUNC(hinfo->pci_dev_fn);
875 cinfo->pciid = hinfo->pci_device_id;
876 cinfo->pcivendor = hinfo->pci_vendor_id;
877 cinfo->pcislot = hinfo->pci_slot;
878 cinfo->uid = hinfo->unique_id;
879}
880
881
882/*
883 * mraid_mm_register_adp - Registration routine for low level drvrs
884 *
885 * @lld_adp : Adapter object
886 */
887int
888mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
889{
890 mraid_mmadp_t *adapter;
891 mbox64_t *mbox_list;
892 uioc_t *kioc;
893 uint32_t rval;
894 int i;
895
896
897 if (lld_adp->drvr_type != DRVRTYPE_MBOX)
898 return (-EINVAL);
899
900 adapter = kmalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);
901
902 if (!adapter) {
903 rval = -ENOMEM;
904 goto memalloc_error;
905 }
906
907 memset(adapter, 0, sizeof(mraid_mmadp_t));
908
909 adapter->unique_id = lld_adp->unique_id;
910 adapter->drvr_type = lld_adp->drvr_type;
911 adapter->drvr_data = lld_adp->drvr_data;
912 adapter->pdev = lld_adp->pdev;
913 adapter->issue_uioc = lld_adp->issue_uioc;
914 adapter->timeout = lld_adp->timeout;
915 adapter->max_kioc = lld_adp->max_kioc;
916 adapter->quiescent = 1;
917
918 /*
919 * Allocate single blocks of memory for all required kiocs,
920 * mailboxes and passthru structures.
921 */
922 adapter->kioc_list = kmalloc(sizeof(uioc_t) * lld_adp->max_kioc,
923 GFP_KERNEL);
924 adapter->mbox_list = kmalloc(sizeof(mbox64_t) * lld_adp->max_kioc,
925 GFP_KERNEL);
926 adapter->pthru_dma_pool = pci_pool_create("megaraid mm pthru pool",
927 adapter->pdev,
928 sizeof(mraid_passthru_t),
929 16, 0);
930
931 if (!adapter->kioc_list || !adapter->mbox_list ||
932 !adapter->pthru_dma_pool) {
933
934 con_log(CL_ANN, (KERN_WARNING
935 "megaraid cmm: out of memory, %s %d\n", __FUNCTION__,
936 __LINE__));
937
938 rval = (-ENOMEM);
939
940 goto memalloc_error;
941 }
942
943 /*
944	 * Slice kioc_list and make a kioc_pool with the individual kiocs
945 */
946 INIT_LIST_HEAD(&adapter->kioc_pool);
947 spin_lock_init(&adapter->kioc_pool_lock);
948 sema_init(&adapter->kioc_semaphore, lld_adp->max_kioc);
949
950 mbox_list = (mbox64_t *)adapter->mbox_list;
951
952 for (i = 0; i < lld_adp->max_kioc; i++) {
953
954 kioc = adapter->kioc_list + i;
955 kioc->cmdbuf = (uint64_t)(unsigned long)(mbox_list + i);
956 kioc->pthru32 = pci_pool_alloc(adapter->pthru_dma_pool,
957 GFP_KERNEL, &kioc->pthru32_h);
958
959 if (!kioc->pthru32) {
960
961 con_log(CL_ANN, (KERN_WARNING
962 "megaraid cmm: out of memory, %s %d\n",
963 __FUNCTION__, __LINE__));
964
965 rval = (-ENOMEM);
966
967 goto pthru_dma_pool_error;
968 }
969
970 list_add_tail(&kioc->list, &adapter->kioc_pool);
971 }
972
973 // Setup the dma pools for data buffers
974 if ((rval = mraid_mm_setup_dma_pools(adapter)) != 0) {
975 goto dma_pool_error;
976 }
977
978 list_add_tail(&adapter->list, &adapters_list_g);
979
980 adapters_count_g++;
981
982 return 0;
983
984dma_pool_error:
985 /* Do nothing */
986
987pthru_dma_pool_error:
988
989 for (i = 0; i < lld_adp->max_kioc; i++) {
990 kioc = adapter->kioc_list + i;
991 if (kioc->pthru32) {
992 pci_pool_free(adapter->pthru_dma_pool, kioc->pthru32,
993 kioc->pthru32_h);
994 }
995 }
996
997memalloc_error:
998
999 if (adapter->kioc_list)
1000 kfree(adapter->kioc_list);
1001
1002 if (adapter->mbox_list)
1003 kfree(adapter->mbox_list);
1004
1005 if (adapter->pthru_dma_pool)
1006 pci_pool_destroy(adapter->pthru_dma_pool);
1007
1008 if (adapter)
1009 kfree(adapter);
1010
1011 return rval;
1012}
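Seen from the other side, a mailbox low level driver would fill an mraid_mmadp_t and call mraid_mm_register_adp() roughly as sketched below. This is illustrative only: the handler stub, the timeout choice, and the assumption that issue_uioc takes (unsigned long, uioc_t *, uint32_t) with drvr_data as an unsigned long (as declared in megaraid_ioctl.h) are not lifted from megaraid_mbox.c.

static int example_issue_uioc(unsigned long drvr_data, uioc_t *kioc, uint32_t action)
{
	/* a real LLD would translate the kioc into a firmware mailbox command;
	 * this stub just completes it synchronously */
	kioc->status = 0;
	kioc->done(kioc);

	return 0;
}

static int example_register_with_cmm(adapter_t *adapter)
{
	mraid_mmadp_t	adp;

	adp.unique_id	= adapter->unique_id;
	adp.drvr_type	= DRVRTYPE_MBOX;
	adp.drvr_data	= (unsigned long)adapter;
	adp.pdev	= adapter->pdev;
	adp.issue_uioc	= example_issue_uioc;
	adp.timeout	= MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;	/* seconds */
	adp.max_kioc	= MBOX_MAX_USER_CMDS;

	return mraid_mm_register_adp(&adp);
}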
1013
1014
1015/**
1016 * mraid_mm_adapter_app_handle - return the application handle for this adapter
1017 *
1018 * For the given driver data, locate the adapter in our global list and
1019 * return the corresponding handle, which is also used by applications to
1020 * uniquely identify an adapter.
1021 *
1022 * @param unique_id : adapter unique identifier
1023 *
1024 * @return adapter handle if found in the list
1025 * @return 0 if adapter could not be located, should never happen though
1026 */
1027uint32_t
1028mraid_mm_adapter_app_handle(uint32_t unique_id)
1029{
1030 mraid_mmadp_t *adapter;
1031 mraid_mmadp_t *tmp;
1032 int index = 0;
1033
1034 list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {
1035
1036 if (adapter->unique_id == unique_id) {
1037
1038 return MKADAP(index);
1039 }
1040
1041 index++;
1042 }
1043
1044 return 0;
1045}
1046
1047
1048/**
1049 * mraid_mm_setup_dma_pools - Set up dma buffer pools per adapter
1050 *
1051 * @adp : Adapter softstate
1052 *
1053 * We maintain a pool of dma buffers per adapter. Each pool has one
1054 * buffer. E.g., we may have 5 dma pools - one each for 4k, 8k ... 64k buffers.
1055 * We have just one 4k buffer in the 4k pool, one 8k buffer in the 8k pool, etc.
1056 * We don't want to waste too much memory by allocating more buffers per
1057 * pool.
1058 */
1059static int
1060mraid_mm_setup_dma_pools(mraid_mmadp_t *adp)
1061{
1062 mm_dmapool_t *pool;
1063 int bufsize;
1064 int i;
1065
1066 /*
1067 * Create MAX_DMA_POOLS number of pools
1068 */
1069 bufsize = MRAID_MM_INIT_BUFF_SIZE;
1070
1071 for (i = 0; i < MAX_DMA_POOLS; i++){
1072
1073 pool = &adp->dma_pool_list[i];
1074
1075 pool->buf_size = bufsize;
1076 spin_lock_init(&pool->lock);
1077
1078 pool->handle = pci_pool_create("megaraid mm data buffer",
1079 adp->pdev, bufsize, 16, 0);
1080
1081 if (!pool->handle) {
1082 goto dma_pool_setup_error;
1083 }
1084
1085 pool->vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL,
1086 &pool->paddr);
1087
1088 if (!pool->vaddr)
1089 goto dma_pool_setup_error;
1090
1091 bufsize = bufsize * 2;
1092 }
1093
1094 return 0;
1095
1096dma_pool_setup_error:
1097
1098 mraid_mm_teardown_dma_pools(adp);
1099 return (-ENOMEM);
1100}
1101
1102
1103/*
1104 * mraid_mm_unregister_adp - Unregister routine for low level drivers
1105 * Assume no outstanding ioctls to llds.
1106 *
1107 * @unique_id : UID of the adapter
1108 */
1109int
1110mraid_mm_unregister_adp(uint32_t unique_id)
1111{
1112 mraid_mmadp_t *adapter;
1113 mraid_mmadp_t *tmp;
1114
1115 list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {
1116
1117
1118 if (adapter->unique_id == unique_id) {
1119
1120 adapters_count_g--;
1121
1122 list_del_init(&adapter->list);
1123
1124 mraid_mm_free_adp_resources(adapter);
1125
1126 kfree(adapter);
1127
1128 con_log(CL_ANN, (
1129 "megaraid cmm: Unregistered one adapter:%#x\n",
1130 unique_id));
1131
1132 return 0;
1133 }
1134 }
1135
1136 return (-ENODEV);
1137}
1138
1139/**
1140 * mraid_mm_free_adp_resources - Free adapter softstate
1141 *
1142 * @adp : Adapter softstate
1143 */
1144static void
1145mraid_mm_free_adp_resources(mraid_mmadp_t *adp)
1146{
1147 uioc_t *kioc;
1148 int i;
1149
1150 mraid_mm_teardown_dma_pools(adp);
1151
1152 for (i = 0; i < adp->max_kioc; i++) {
1153
1154 kioc = adp->kioc_list + i;
1155
1156 pci_pool_free(adp->pthru_dma_pool, kioc->pthru32,
1157 kioc->pthru32_h);
1158 }
1159
1160 kfree(adp->kioc_list);
1161
1162 kfree(adp->mbox_list);
1163
1164 pci_pool_destroy(adp->pthru_dma_pool);
1165
1166
1167 return;
1168}
1169
1170
1171/**
1172 * mraid_mm_teardown_dma_pools - Free all per adapter dma buffers
1173 *
1174 * @adp : Adapter softstate
1175 */
1176static void
1177mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
1178{
1179 int i;
1180 mm_dmapool_t *pool;
1181
1182 for (i = 0; i < MAX_DMA_POOLS; i++) {
1183
1184 pool = &adp->dma_pool_list[i];
1185
1186 if (pool->handle) {
1187
1188 if (pool->vaddr)
1189 pci_pool_free(pool->handle, pool->vaddr,
1190 pool->paddr);
1191
1192 pci_pool_destroy(pool->handle);
1193 pool->handle = NULL;
1194 }
1195 }
1196
1197 return;
1198}
1199
1200/**
1201 * mraid_mm_init : Module entry point
1202 */
1203static int __init
1204mraid_mm_init(void)
1205{
1206 // Announce the driver version
1207 con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n",
1208 LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION));
1209
1210 majorno = register_chrdev(0, "megadev", &lsi_fops);
1211
1212 if (majorno < 0) {
1213 con_log(CL_ANN, ("megaraid cmm: cannot get major\n"));
1214 return majorno;
1215 }
1216
1217 init_waitqueue_head(&wait_q);
1218
1219 INIT_LIST_HEAD(&adapters_list_g);
1220
1221 return 0;
1222}
1223
1224
1225/**
1226 * mraid_mm_compat_ioctl : 32bit to 64bit ioctl conversion routine
1227 */
1228#ifdef CONFIG_COMPAT
1229static long
1230mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd,
1231 unsigned long arg)
1232{
1233 int err;
1234 lock_kernel();
1235 err = mraid_mm_ioctl(NULL, filep, cmd, arg);
1236 unlock_kernel();
1237 return err;
1238}
1239#endif
1240
1241/**
1242 * mraid_mm_exit : Module exit point
1243 */
1244static void __exit
1245mraid_mm_exit(void)
1246{
1247 con_log(CL_DLEVEL1 , ("exiting common mod\n"));
1248
1249 unregister_chrdev(majorno, "megadev");
1250}
1251
1252module_init(mraid_mm_init);
1253module_exit(mraid_mm_exit);
1254
1255/* vi: set ts=8 sw=8 tw=78: */
diff --git a/drivers/scsi/megaraid/megaraid_mm.h b/drivers/scsi/megaraid/megaraid_mm.h
new file mode 100644
index 000000000000..948a0012ab8c
--- /dev/null
+++ b/drivers/scsi/megaraid/megaraid_mm.h
@@ -0,0 +1,102 @@
1/*
2 *
3 * Linux MegaRAID device driver
4 *
5 * Copyright (c) 2003-2004 LSI Logic Corporation.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * FILE : megaraid_mm.h
13 */
14
15#ifndef MEGARAID_MM_H
16#define MEGARAID_MM_H
17
18#include <linux/spinlock.h>
19#include <linux/fs.h>
20#include <asm/uaccess.h>
21#include <linux/version.h>
22#include <linux/module.h>
23#include <linux/moduleparam.h>
24#include <linux/pci.h>
25#include <linux/list.h>
26#include <linux/ioctl32.h>
27
28#include "mbox_defs.h"
29#include "megaraid_ioctl.h"
30
31
32#define LSI_COMMON_MOD_VERSION "2.20.2.5"
33#define LSI_COMMON_MOD_EXT_VERSION \
34 "(Release Date: Fri Jan 21 00:01:03 EST 2005)"
35
36
37#define LSI_DBGLVL dbglevel
38
39// The smallest dma pool
40#define MRAID_MM_INIT_BUFF_SIZE 4096
41
42/**
43 * mimd_t : Old style ioctl packet structure (deprecated)
44 *
45 * @inlen :
46 * @outlen :
47 * @fca :
48 * @opcode :
49 * @subopcode :
50 * @adapno :
51 * @buffer :
52 * @pad :
53 * @length :
54 * @mbox :
55 * @pthru :
56 * @data :
57 * @pad :
58 *
59 * Note : This structure is DEPRECATED. New applications must use
60 * : uioc_t structure instead. All new hba drivers use the new
61 * : format. If we get this mimd packet, we will convert it into
62 * : new uioc_t format and send it to the hba drivers.
63 */
64
65typedef struct mimd {
66
67 uint32_t inlen;
68 uint32_t outlen;
69
70 union {
71 uint8_t fca[16];
72 struct {
73 uint8_t opcode;
74 uint8_t subopcode;
75 uint16_t adapno;
76#if BITS_PER_LONG == 32
77 uint8_t __user *buffer;
78 uint8_t pad[4];
79#endif
80#if BITS_PER_LONG == 64
81 uint8_t __user *buffer;
82#endif
83 uint32_t length;
84 } __attribute__ ((packed)) fcs;
85 } __attribute__ ((packed)) ui;
86
87 uint8_t mbox[18]; /* 16 bytes + 2 status bytes */
88 mraid_passthru_t pthru;
89
90#if BITS_PER_LONG == 32
91 char __user *data; /* buffer <= 4096 for 0x80 commands */
92 char pad[4];
93#endif
94#if BITS_PER_LONG == 64
95 char __user *data;
96#endif
97
98} __attribute__ ((packed))mimd_t;
99
100#endif // MEGARAID_MM_H
101
102// vi: set ts=8 sw=8 tw=78: